diff --git a/.openpublishing.publish.config.json b/.openpublishing.publish.config.json index 2e5ca40568f05..c8457b7752e59 100644 --- a/.openpublishing.publish.config.json +++ b/.openpublishing.publish.config.json @@ -806,6 +806,12 @@ "branch": "master", "branch_mapping": {} }, + { + "path_to_root": "ms-identity-b2c-javascript-spa", + "url": "https://github.com/Azure-Samples/ms-identity-b2c-javascript-spa", + "branch": "main", + "branch_mapping": {} + }, { "path_to_root": "ms-identity-dotnetcore-b2c-account-management", "url": "https://github.com/Azure-Samples/ms-identity-dotnetcore-b2c-account-management", @@ -885,7 +891,6 @@ ".openpublishing.redirection.json", ".openpublishing.redirection.active-directory.json", ".openpublishing.redirection.azure-blob.json", - ".openpublishing.redirection.azure-sql.json", "articles/data-factory/.openpublishing.redirection.data-factory.json", ".openpublishing.redirection.defender-for-cloud.json", ".openpublishing.redirection.defender-for-iot.json", @@ -893,7 +898,6 @@ ".openpublishing.redirection.iot-hub.json", ".openpublishing.redirection.key-vault.json", ".openpublishing.redirection.security-benchmark.json", - ".openpublishing.redirection.sql-database.json", "articles/synapse-analytics/.openpublishing.redirection.synapse-analytics.json", ".openpublishing.redirection.azure-web-pubsub.json", ".openpublishing.redirection.azure-monitor.json", @@ -930,6 +934,7 @@ "articles/mysql/.openpublishing.redirection.mysql.json", "articles/container-apps/.openpublishing.redirection.container-apps.json", "articles/spring-cloud/.openpublishing.redirection.spring-cloud.json", - "articles/load-testing/.openpublishing.redirection.azure-load-testing.json" + "articles/load-testing/.openpublishing.redirection.azure-load-testing.json", + "articles/azure-video-indexer/.openpublishing.redirection.azure-video-indexer.json" ] } diff --git a/.openpublishing.redirection.azure-sql.json b/.openpublishing.redirection.azure-sql.json deleted file mode 100644 index 
1dd967f074542..0000000000000 --- a/.openpublishing.redirection.azure-sql.json +++ /dev/null @@ -1,209 +0,0 @@ -{ - "redirections": [ - { - "source_path_from_root": "/articles/azure-sql/database/doc-changes-updates-release-notes.md", - "redirect_url": "/azure/azure-sql/database/doc-changes-updates-release-notes-whats-new", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/azure-sql/virtual-machines/windows/doc-changes-updates-release-notes.md", - "redirect_url": "/azure/azure-sql/virtual-machines/windows/doc-changes-updates-release-notes-whats-new", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/azure-sql/virtual-machines/windows/availability-group-manually-configure-prerequisites-tutorial.md", - "redirect_url": "/azure/azure-sql/virtual-machines/windows/availability-group-manually-configure-prerequisites-tutorial-single-subnet", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/azure-sql/virtual-machines/windows/availability-group-manually-configure-tutorial.md", - "redirect_url": "/azure/azure-sql/virtual-machines/windows/availability-group-manually-configure-tutorial-single-subnet", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/azure-sql/database/advanced-data-security.md", - "redirect_url": "/azure/azure-sql/database/azure-defender-for-sql", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/database/connect-query-r.md", - "redirect_url": "/sql/machine-learning/tutorials/quickstart-r-create-script?context=/azure/azure-sql/managed-instance/context/ml-context&view=azuresqldb-mi-current", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/database/service-tiers-prs.md", - "redirect_url": "/azure/azure-sql/database/doc-changes-updates-release-notes", - "redirect_document_id": false - }, - { - "source_path_from_root": 
"/articles/azure-sql/database/sql-data-sync-monitor-sync.md", - "redirect_url": "/azure/azure-sql/database/monitor-tune-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/database/policy-samples.md", - "redirect_url": "/azure/azure-sql/database/policy-reference", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/database/security-controls.md", - "redirect_url": "/azure/azure-sql/database/security-baseline", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/database/machine-learning-services-differences.md", - "redirect_url": "/azure/azure-sql/managed-instance/machine-learning-services-differences", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/azure-sql/database/machine-learning-services-overview.md", - "redirect_url": "/azure/azure-sql/managed-instance/machine-learning-services-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/azure-sql/managed-instance/migrate-to-instance-from-sql-server.md", - "redirect_url": "/azure/azure-sql/migration-guides/managed-instance/sql-server-to-managed-instance-guide", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/azure-sql/managed-instance/scripts/create-powershell-azure-resource-manager-template.md", - "redirect_url": "/azure/azure-sql/managed-instance/create-template-quickstart", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/azure-sql/virtual-machines/linux/index.yml", - "redirect_url": "/azure/azure-sql/virtual-machines/linux/sql-server-on-linux-vm-what-is-iaas-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/virtual-machines/windows/availability-group-az-cli-configure.md", - "redirect_url": "/azure/azure-sql/virtual-machines/windows/availability-group-az-commandline-configure", - "redirect_document_id": false - }, - { - 
"source_path_from_root": "/articles/azure-sql/virtual-machines/windows/availability-group-azure-marketplace-template-configure.md", - "redirect_url": "/azure/azure-sql/virtual-machines/windows/availability-group-quickstart-template-configure", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/virtual-machines/windows/hadr-vnn-azure-load-balancer-configure.md", - "redirect_url": "/azure/azure-sql/virtual-machines/windows/availability-group-vnn-azure-load-balancer-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/azure-sql/virtual-machines/windows/hadr-distributed-network-name-dnn-configure.md", - "redirect_url": "/azure/azure-sql/virtual-machines/windows/failover-cluster-instance-distributed-network-name-dnn-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/azure-sql/virtual-machines/windows/sql-vm-resource-provider-automatic-registration.md", - "redirect_url": "/azure/azure-sql/virtual-machines/windows/sql-agent-extension-automatic-registration-all-vms", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/azure-sql/virtual-machines/windows/sql-vm-resource-provider-register.md", - "redirect_url": "/azure/azure-sql/virtual-machines/windows/sql-agent-extension-manually-register-single-vm", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/azure-sql/virtual-machines/windows/sql-vm-resource-provider-bulk-register.md", - "redirect_url": "/azure/azure-sql/virtual-machines/windows/sql-agent-extension-manually-register-vms-bulk", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/azure-sql/virtual-machines/windows/index.yml", - "redirect_url": "/azure/azure-sql/virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": 
"/articles/azure-sql/virtual-machines/windows/sql-vulnerability-assessment-enable.md", - "redirect_url": "/azure/security-center/defender-for-sql-on-machines-vulnerability-assessment", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/database/machine-learning-services-add-r-packages.md", - "redirect_url": "/sql/machine-learning/package-management/install-additional-r-packages-on-sql-server?context=/azure/azure-sql/managed-instance/context/ml-context&view=azuresqldb-mi-current", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/database/r-script-create-quickstart.md", - "redirect_url": "/sql/machine-learning/tutorials/quickstart-r-create-script?context=/azure/azure-sql/managed-instance/context/ml-context&view=azuresqldb-mi-current", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/database/machine-learning-services-data-issues.md", - "redirect_url": "/sql/machine-learning/tutorials/quickstart-r-data-types-and-objects?context=/azure/azure-sql/managed-instance/context/ml-context&view=azuresqldb-mi-current", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/database/machine-learning-services-functions.md", - "redirect_url": "/sql/machine-learning/tutorials/quickstart-r-functions?context=/azure/azure-sql/managed-instance/context/ml-context&view=azuresqldb-mi-current", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/database/r-train-score-model-create-quickstart.md", - "redirect_url": "/sql/machine-learning/tutorials/quickstart-r-train-score-model?context=/azure/azure-sql/managed-instance/context/ml-context&view=azuresqldb-mi-current", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/database/clustering-model-build-tutorial.md", - "redirect_url": 
"/sql/machine-learning/tutorials/r-clustering-model-build?context=/azure/azure-sql/managed-instance/context/ml-context&view=azuresqldb-mi-current", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/database/clustering-model-deploy-tutorial.md", - "redirect_url": "/sql/machine-learning/tutorials/r-clustering-model-deploy?context=/azure/azure-sql/managed-instance/context/ml-context&view=azuresqldb-mi-current", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/database/clustering-model-prepare-data-tutorial.md", - "redirect_url": "/sql/machine-learning/tutorials/r-clustering-model-introduction?context=/azure/azure-sql/managed-instance/context/ml-context&view=azuresqldb-mi-current", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/database/predictive-model-deploy-tutorial.md", - "redirect_url": "/sql/machine-learning/tutorials/r-predictive-model-deploy?context=/azure/azure-sql/managed-instance/context/ml-context&view=azuresqldb-mi-current", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/database/predictive-model-prepare-data-tutorial.md", - "redirect_url": "/sql/machine-learning/tutorials/r-predictive-model-introduction?context=/azure/azure-sql/managed-instance/context/ml-context&view=azuresqldb-mi-current", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/database/predictive-model-build-compare-tutorial.md", - "redirect_url": "/sql/machine-learning/tutorials/r-predictive-model-train?context=/azure/azure-sql/managed-instance/context/ml-context&view=azuresqldb-mi-current", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/azure-sql/database/auto-failover-group-overview.md", - "redirect_url": "/azure/azure-sql/database/auto-failover-group-sql-db", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/azure-sql/database/auto-failover-group-configure.md", - "redirect_url": "/azure/azure-sql/database/auto-failover-group-configure-sql-db", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/azure-sql/database/job-automation-managed-instances.md", - "redirect_url": "/azure/azure-sql/managed-instance/job-automation-managed-instance", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/azure-sql/managed-instance/link-feature.md", - "redirect_url": "/azure/azure-sql/managed-instance/managed-instance-link-feature-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/azure-sql/managed-instance/link-feature-best-practices.md", - "redirect_url": "/azure/azure-sql/managed-instance/managed-instance-link-best-practices", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/azure-sql/database/service-tiers-general-purpose-business-critical.md", - "redirect_url": "/azure/azure-sql/database/service-tiers-vcore", - "redirect_document_id": false - } - ] -} diff --git a/.openpublishing.redirection.json b/.openpublishing.redirection.json index 2b77c17d90cf5..4b086ec3e971e 100644 --- a/.openpublishing.redirection.json +++ b/.openpublishing.redirection.json @@ -2498,6 +2498,36 @@ "redirect_url": "/azure/advisor/advisor-alerts-portal", "redirect_document_id": false }, + { + "source_path_from_root": "/articles/aks/kubernetes-walkthrough-portal.md", + "redirect_url": "/azure/aks/learn/quick-kubernetes-deploy-portal", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/aks/kubernetes-walkthrough.md", + "redirect_url": "/azure/aks/learn/quick-kubernetes-deploy-cli", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/aks/kubernetes-walkthrough-powershell.md", + "redirect_url": "/azure/aks/learn/quick-kubernetes-deploy-powershell", + "redirect_document_id": false + }, + { + "source_path_from_root": 
"/articles/aks/kubernetes-walkthrough-rm-template.md", + "redirect_url": "/azure/aks/learn/quick-kubernetes-deploy-rm-template", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/aks/windows-container-cli.md", + "redirect_url": "/azure/aks/learn/quick-windows-container-deploy-cli", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/aks/windows-container-powershell.md", + "redirect_url": "/azure/aks/learn/quick-windows-container-deploy-powershell", + "redirect_document_id": false + }, { "source_path_from_root": "/articles/aks/integrate-azure.md", "redirect_url": "/azure/aks/", @@ -24173,6 +24203,21 @@ "redirect_url": "/azure/sql-database/sql-database-threat-detection-overview", "redirect_document_id": false }, + { + "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-create-manage.md", + "redirect_url": "/azure/azure-sql/managed-instance/instance-create-quickstart", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/sql-database/sql-database-single-database.md", + "redirect_url": "/azure/azure-sql/database/sql-database-paas-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/sql-database/sql-database-managed-instance.md", + "redirect_url": "/azure/azure-sql/managed-instance/sql-managed-instance-paas-overview", + "redirect_document_id": false + }, { "source_path_from_root": "/articles/sql-data-warehouse/sql-data-warehouse-backups.md", "redirect_url": "/azure/sql-data-warehouse/backup-and-restore", @@ -42947,6 +42992,16 @@ "source_path_from_root": "/articles/cognitive-services/video-indexer/video-indexer-view-edit.md", "redirect_url": "/azure/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-view-edit", "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/cognitive-services/translator/translator-how-to-signup.md", + "redirect_url": 
"/azure/cognitive-services/translator/how-to-create-translator-resource", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/cognitive-services/translator/custom-translator/terminology.md", + "redirect_url": "/azure/cognitive-services/translator/custom-translator/key-terms", + "redirect_document_id": false } ] } diff --git a/.openpublishing.redirection.sql-database.json b/.openpublishing.redirection.sql-database.json deleted file mode 100644 index 7c281b4bc466b..0000000000000 --- a/.openpublishing.redirection.sql-database.json +++ /dev/null @@ -1,2570 +0,0 @@ -{ - "redirections": [ - { - "source_path_from_root": "/articles/sql-database/index.yml", - "redirect_url": "/azure/azure-sql/", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-accelerated-database-recovery.md", - "redirect_url": "/azure/azure-sql/accelerated-database-recovery", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-azure-hybrid-benefit.md", - "redirect_url": "/azure/azure-sql/azure-hybrid-benefit", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-glossary-terms.md", - "redirect_url": "/azure/azure-sql/azure-sql-iaas-vs-paas-what-is-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-paas-vs-sql-server-iaas.md", - "redirect_url": "/azure/azure-sql/azure-sql-iaas-vs-paas-what-is-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/active-directory-interactive-connect-azure-sql-db.md", - "redirect_url": "/azure/azure-sql/database/active-directory-interactive-connect-azure-sql-db", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-active-geo-replication-portal.md", - "redirect_url": 
"/azure/azure-sql/database/active-geo-replication-configure-portal", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-active-geo-replication.md", - "redirect_url": "/azure/azure-sql/database/active-geo-replication-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-geo-replication-security-config.md", - "redirect_url": "/azure/azure-sql/database/active-geo-replication-security-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-develop-direct-route-ports-adonet-v12.md", - "redirect_url": "/azure/azure-sql/database/adonet-v12-develop-direct-route-ports", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-advanced-data-security.md", - "redirect_url": "/azure/azure-sql/database/advanced-data-security", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-insights-alerts-portal.md", - "redirect_url": "/azure/azure-sql/database/alerts-insights-configure-portal", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-always-encrypted-azure-key-vault.md", - "redirect_url": "/azure/azure-sql/database/always-encrypted-azure-key-vault-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-always-encrypted.md", - "redirect_url": "/azure/azure-sql/database/always-encrypted-certificate-store-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-client-id-keys.md", - "redirect_url": "/azure/azure-sql/database/application-authentication-get-client-id-keys", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-resource-manager-samples.md", - "redirect_url": 
"/azure/azure-sql/database/arm-templates-content-guide", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-auditing.md", - "redirect_url": "/azure/azure-sql/database/auditing-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-audit-log-format.md", - "redirect_url": "/azure/azure-sql/database/audit-log-format", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/create-auditing-storage-account-vnet-firewall.md", - "redirect_url": "/azure/azure-sql/database/audit-write-storage-account-behind-vnet-firewall", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-aad-authentication-configure.md", - "redirect_url": "/azure/azure-sql/database/authentication-aad-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-aad-authentication.md", - "redirect_url": "/azure/azure-sql/database/authentication-aad-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-ssms-mfa-authentication-configure.md", - "redirect_url": "/azure/azure-sql/database/authentication-mfa-ssms-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-ssms-mfa-authentication.md", - "redirect_url": "/azure/azure-sql/database/authentication-mfa-ssms-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-configure-failover-group.md", - "redirect_url": "/azure/azure-sql/database/auto-failover-group-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-auto-failover-group.md", - "redirect_url": "/azure/azure-sql/database/auto-failover-group-overview", - "redirect_document_id": true - }, - { - 
"source_path_from_root": "/articles/sql-database/sql-database-automated-backups.md", - "redirect_url": "/azure/azure-sql/database/automated-backups-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-automatic-tuning-email-notifications.md", - "redirect_url": "/azure/azure-sql/database/automatic-tuning-email-notifications-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-automatic-tuning-enable.md", - "redirect_url": "/azure/azure-sql/database/automatic-tuning-enable", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-automatic-tuning.md", - "redirect_url": "/azure/azure-sql/database/automatic-tuning-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-manage-automation.md", - "redirect_url": "/azure/azure-sql/database/automation-manage", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-cli-samples.md", - "redirect_url": "/azure/azure-sql/database/az-cli-script-samples-content-guide", - "redirect_document_id": true - }, - - { - "source_path_from_root": "/articles/sql-database/sql-database-business-continuity.md", - "redirect_url": "/azure/azure-sql/database/business-continuity-high-availability-disaster-recover-hadr-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-tutorial-clustering-model-build.md", - "redirect_url": "/azure/azure-sql/database/clustering-model-build-tutorial", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-tutorial-clustering-model-deploy.md", - "redirect_url": "/azure/azure-sql/database/clustering-model-deploy-tutorial", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/sql-database/sql-database-tutorial-clustering-model-prepare-data.md", - "redirect_url": "/azure/azure-sql/database/clustering-model-prepare-data-tutorial", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-conditional-access.md", - "redirect_url": "/azure/azure-sql/database/conditional-access-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-connect-excel.md", - "redirect_url": "/azure/azure-sql/database/connect-excel", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-connectivity-architecture.md", - "redirect_url": "/azure/azure-sql/database/connectivity-architecture", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-connectivity-settings.md", - "redirect_url": "/azure/azure-sql/database/connectivity-settings", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-connect-query.md", - "redirect_url": "/azure/azure-sql/database/connect-query-content-reference-guide", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-connect-query-dotnet-core.md", - "redirect_url": "/azure/azure-sql/database/connect-query-dotnet-core", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-connect-query-dotnet-visual-studio.md", - "redirect_url": "/azure/azure-sql/database/connect-query-dotnet-visual-studio", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-connect-query-go.md", - "redirect_url": "/azure/azure-sql/database/connect-query-go", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-connect-query-java.md", - "redirect_url": "/azure/azure-sql/database/connect-query-java", - 
"redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-connect-query-nodejs.md", - "redirect_url": "/azure/azure-sql/database/connect-query-nodejs", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-connect-query-php.md", - "redirect_url": "/azure/azure-sql/database/connect-query-php", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-connect-query-portal.md", - "redirect_url": "/azure/azure-sql/database/connect-query-portal", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-connect-query-python.md", - "redirect_url": "/azure/azure-sql/database/connect-query-python", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-connect-query-r.md", - "redirect_url": "/azure/azure-sql/database/connect-query-r", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-connect-query-ruby.md", - "redirect_url": "/azure/azure-sql/database/connect-query-ruby", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-connect-query-ssms.md", - "redirect_url": "/azure/azure-sql/database/connect-query-ssms", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-connect-query-vscode.md", - "redirect_url": "/azure/azure-sql/database/connect-query-vscode", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-advisor-portal.md", - "redirect_url": "/azure/azure-sql/database/database-advisor-find-recommendations-portal", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-advisor.md", - "redirect_url": "/azure/azure-sql/database/database-advisor-implement-performance-recommendations", - 
"redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-copy.md", - "redirect_url": "/azure/azure-sql/database/database-copy", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-export.md", - "redirect_url": "/azure/azure-sql/database/database-export", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-import.md", - "redirect_url": "/azure/azure-sql/database/database-import", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/import-export-from-vm.md", - "redirect_url": "/azure/azure-sql/database/database-import-export-azure-services-off", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/azure-sql-database-hangs-importing-exporting.md", - "redirect_url": "/azure/azure-sql/database/database-import-export-hang", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-data-discovery-and-classification.md", - "redirect_url": "/azure/azure-sql/database/data-discovery-and-classification-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-design-first-database-csharp.md", - "redirect_url": "/azure/azure-sql/database/design-first-database-csharp-tutorial", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-design-first-database.md", - "redirect_url": "/azure/azure-sql/database/design-first-database-tutorial", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-designing-cloud-solutions-for-disaster-recovery.md", - "redirect_url": "/azure/azure-sql/database/designing-cloud-solutions-for-disaster-recovery", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/sql-database/sql-database-develop-cplusplus-simple.md", - "redirect_url": "/azure/azure-sql/database/develop-cplusplus-simple", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-develop-overview.md", - "redirect_url": "/azure/azure-sql/database/develop-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-disaster-recovery-drills.md", - "redirect_url": "/azure/azure-sql/database/disaster-recovery-drills", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-disaster-recovery.md", - "redirect_url": "/azure/azure-sql/database/disaster-recovery-guidance", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-disaster-recovery-strategies-for-applications-with-elastic-pool.md", - "redirect_url": "/azure/azure-sql/database/disaster-recovery-strategies-for-applications-with-elastic-pool", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/dns-alias-overview.md", - "redirect_url": "/azure/azure-sql/database/dns-alias-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/dns-alias-powershell.md", - "redirect_url": "/azure/azure-sql/database/dns-alias-powershell-create", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-release-notes.md", - "redirect_url": "/azure/azure-sql/database/doc-changes-updates-release-notes", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-dynamic-data-masking-get-started-portal.md", - "redirect_url": "/azure/azure-sql/database/dynamic-data-masking-configure-portal", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-dynamic-data-masking-get-started.md", - "redirect_url": 
"/azure/azure-sql/database/dynamic-data-masking-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-convert-to-use-elastic-tools.md", - "redirect_url": "/azure/azure-sql/database/elastic-convert-to-use-elastic-tools", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-database-client-library.md", - "redirect_url": "/azure/azure-sql/database/elastic-database-client-library", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-database-perf-counters.md", - "redirect_url": "/azure/azure-sql/database/elastic-database-perf-counters", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-database-recovery-manager.md", - "redirect_url": "/azure/azure-sql/database/elastic-database-recovery-manager", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/elastic-jobs-migrate.md", - "redirect_url": "/azure/azure-sql/database/elastic-jobs-migrate", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/elastic-jobs-overview.md", - "redirect_url": "/azure/azure-sql/database/elastic-jobs-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/elastic-jobs-powershell.md", - "redirect_url": "/azure/azure-sql/database/elastic-jobs-powershell-create", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/elastic-jobs-tsql.md", - "redirect_url": "/azure/azure-sql/database/elastic-jobs-tsql-create-manage", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-pool-manage.md", - "redirect_url": "/azure/azure-sql/database/elastic-pool-manage", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/sql-database/sql-database-elastic-pool.md", - "redirect_url": "/azure/azure-sql/database/elastic-pool-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-pool-resource-management.md", - "redirect_url": "/azure/azure-sql/database/elastic-pool-resource-management", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-pool-scale.md", - "redirect_url": "/azure/azure-sql/database/elastic-pool-scale", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-query-getting-started.md", - "redirect_url": "/azure/azure-sql/database/elastic-query-getting-started", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-query-getting-started-vertical.md", - "redirect_url": "/azure/azure-sql/database/elastic-query-getting-started-vertical", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-query-horizontal-partitioning.md", - "redirect_url": "/azure/azure-sql/database/elastic-query-horizontal-partitioning", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-query-overview.md", - "redirect_url": "/azure/azure-sql/database/elastic-query-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-query-vertical-partitioning.md", - "redirect_url": "/azure/azure-sql/database/elastic-query-vertical-partitioning", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-scale-add-a-shard.md", - "redirect_url": "/azure/azure-sql/database/elastic-scale-add-a-shard", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/sql-database/sql-database-elastic-scale-configure-deploy-split-and-merge.md", - "redirect_url": "/azure/azure-sql/database/elastic-scale-configure-deploy-split-and-merge", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-scale-data-dependent-routing.md", - "redirect_url": "/azure/azure-sql/database/elastic-scale-data-dependent-routing", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-scale-faq.md", - "redirect_url": "/azure/azure-sql/database/elastic-scale-faq", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-scale-get-started.md", - "redirect_url": "/azure/azure-sql/database/elastic-scale-get-started", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-scale-glossary.md", - "redirect_url": "/azure/azure-sql/database/elastic-scale-glossary", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-scale-introduction.md", - "redirect_url": "/azure/azure-sql/database/elastic-scale-introduction", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-scale-manage-credentials.md", - "redirect_url": "/azure/azure-sql/database/elastic-scale-manage-credentials", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-scale-multishard-querying.md", - "redirect_url": "/azure/azure-sql/database/elastic-scale-multishard-querying", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-scale-overview-split-and-merge.md", - "redirect_url": "/azure/azure-sql/database/elastic-scale-overview-split-and-merge", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/sql-database/sql-database-elastic-scale-shard-map-management.md", - "redirect_url": "/azure/azure-sql/database/elastic-scale-shard-map-management", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-scale-split-merge-security-configuration.md", - "redirect_url": "/azure/azure-sql/database/elastic-scale-split-merge-security-configuration", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-scale-upgrade-client-library.md", - "redirect_url": "/azure/azure-sql/database/elastic-scale-upgrade-client-library", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-scale-use-entity-framework-applications-visual-studio.md", - "redirect_url": "/azure/azure-sql/database/elastic-scale-use-entity-framework-applications-visual-studio", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-scale-working-with-dapper.md", - "redirect_url": "/azure/azure-sql/database/elastic-scale-working-with-dapper", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-transactions-overview.md", - "redirect_url": "/azure/azure-sql/database/elastic-transactions-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-pool-failover-group-tutorial.md", - "redirect_url": "/azure/azure-sql/database/failover-group-add-elastic-pool-tutorial", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-single-database-failover-group-tutorial.md", - "redirect_url": "/azure/azure-sql/database/failover-group-add-single-database-tutorial", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-features.md", - "redirect_url": 
"/azure/azure-sql/database/features-comparison", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-file-space-management.md", - "redirect_url": "/azure/azure-sql/database/file-space-manage", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-firewall-configure.md", - "redirect_url": "/azure/azure-sql/database/firewall-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-server-level-firewall-rule.md", - "redirect_url": "/azure/azure-sql/database/firewall-create-server-level-portal-quickstart", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-gateway-migration.md", - "redirect_url": "/azure/azure-sql/database/gateway-migration", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-implement-geo-distributed-database.md", - "redirect_url": "/azure/azure-sql/database/geo-distributed-application-configure-tutorial", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-high-availability.md", - "redirect_url": "/azure/azure-sql/database/high-availability-sla", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-howto-single-database.md", - "redirect_url": "/azure/azure-sql/database/how-to-content-reference-guide", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-hyperscale-performance-diagnostics.md", - "redirect_url": "/azure/azure-sql/database/hyperscale-performance-diagnostics", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-single-index.yml", - "redirect_url": "/azure/azure-sql/database/index", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/sql-database/sql-database-intelligent-insights.md", - "redirect_url": "/azure/azure-sql/database/intelligent-insights-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-intelligent-insights-troubleshoot-performance.md", - "redirect_url": "/azure/azure-sql/database/intelligent-insights-troubleshoot-performance", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-intelligent-insights-use-diagnostics-log.md", - "redirect_url": "/azure/azure-sql/database/intelligent-insights-use-diagnostics-log", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-job-automation-overview.md", - "redirect_url": "/azure/azure-sql/database/job-automation-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-json-features.md", - "redirect_url": "/azure/azure-sql/database/json-features", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-servers.md", - "redirect_url": "/azure/azure-sql/database/logical-servers", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-manage-logins.md", - "redirect_url": "/azure/azure-sql/database/logins-create-manage", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-long-term-backup-retention-configure.md", - "redirect_url": "/azure/azure-sql/database/long-term-backup-retention-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-long-term-retention.md", - "redirect_url": "/azure/azure-sql/database/long-term-retention-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-machine-learning-services-add-r-packages.md", - "redirect_url": 
"/azure/azure-sql/database/machine-learning-services-add-r-packages", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-machine-learning-services-data-issues.md", - "redirect_url": "/azure/azure-sql/database/machine-learning-services-data-issues", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-machine-learning-services-differences.md", - "redirect_url": "/azure/azure-sql/database/machine-learning-services-differences", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-machine-learning-services-functions.md", - "redirect_url": "/azure/azure-sql/database/machine-learning-services-functions", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-machine-learning-services-overview.md", - "redirect_url": "/azure/azure-sql/database/machine-learning-services-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-manage-application-rolling-upgrade.md", - "redirect_url": "/azure/azure-sql/database/manage-application-rolling-upgrade", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-manage-after-migration.md", - "redirect_url": "/azure/azure-sql/database/manage-data-after-migrating-to-database", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-metrics-diag-logging.md", - "redirect_url": "/azure/azure-sql/database/metrics-diagnostic-telemetry-logging-streaming-export-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-dtu-to-vcore.md", - "redirect_url": "/azure/azure-sql/database/migrate-dtu-to-vcore", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/sql-database/tutorial-sqlite-db-to-azure-sql-serverless-offline.md", - "redirect_url": "/azure/azure-sql/database/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-single-database-migrate.md", - "redirect_url": "/azure/azure-sql/database/migrate-to-database-from-sql-server", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-monitoring-tuning-index.yml", - "redirect_url": "/azure/azure-sql/database/monitoring-tuning-index", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-monitoring-with-dmvs.md", - "redirect_url": "/azure/azure-sql/database/monitoring-with-dmvs", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-monitor-tune-overview.md", - "redirect_url": "/azure/azure-sql/database/monitor-tune-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-move-resources-across-regions.md", - "redirect_url": "/azure/azure-sql/database/move-resources-across-regions", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-networkaccess-overview.md", - "redirect_url": "/azure/azure-sql/database/network-access-controls-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-performance-guidance.md", - "redirect_url": "/azure/azure-sql/database/performance-guidance", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-planned-maintenance.md", - "redirect_url": "/azure/azure-sql/database/planned-maintenance", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/policy-samples.md", - "redirect_url": "/azure/azure-sql/database/policy-samples", 
- "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-powershell-samples.md", - "redirect_url": "/azure/azure-sql/database/powershell-script-content-guide", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-tutorial-predictive-model-build-compare.md", - "redirect_url": "/azure/azure-sql/database/predictive-model-build-compare-tutorial", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-tutorial-predictive-model-deploy.md", - "redirect_url": "/azure/azure-sql/database/predictive-model-deploy-tutorial", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-tutorial-predictive-model-prepare-data.md", - "redirect_url": "/azure/azure-sql/database/predictive-model-prepare-data-tutorial", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-private-endpoint-overview.md", - "redirect_url": "/azure/azure-sql/database/private-endpoint-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-purchase-models.md", - "redirect_url": "/azure/azure-sql/database/purchasing-models", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-query-performance.md", - "redirect_url": "/azure/azure-sql/database/query-performance-insight-use", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-single-database-quickstart-guide.md", - "redirect_url": "/azure/azure-sql/database/quickstart-content-reference-guide", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/quota-increase-request.md", - "redirect_url": "/azure/azure-sql/database/quota-increase-request", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/sql-database/sql-database-read-scale-out.md", - "redirect_url": "/azure/azure-sql/database/read-scale-out", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-recovery-using-backups.md", - "redirect_url": "/azure/azure-sql/database/recovery-using-backups", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/replication-to-sql-database.md", - "redirect_url": "/azure/azure-sql/database/replication-to-sql-database", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-reserved-capacity.md", - "redirect_url": "/azure/azure-sql/database/reserved-capacity-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-resource-health.md", - "redirect_url": "/azure/azure-sql/database/resource-health-to-troubleshoot-connectivity", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-dtu-resource-limits-elastic-pools.md", - "redirect_url": "/azure/azure-sql/database/resource-limits-dtu-elastic-pools", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-dtu-resource-limits-single-databases.md", - "redirect_url": "/azure/azure-sql/database/resource-limits-dtu-single-databases", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-resource-limits-database-server.md", - "redirect_url": "/azure/azure-sql/database/resource-limits-logical-server", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-vcore-resource-limits-elastic-pools.md", - "redirect_url": "/azure/azure-sql/database/resource-limits-vcore-elastic-pools", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-vcore-resource-limits-single-databases.md", - 
"redirect_url": "/azure/azure-sql/database/resource-limits-vcore-single-databases", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-quickstart-r-create-script.md", - "redirect_url": "/azure/azure-sql/database/r-script-create-quickstart", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-quickstart-r-train-score-model.md", - "redirect_url": "/azure/azure-sql/database/r-train-score-model-create-quickstart", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-dbpertenant-dr-geo-replication.md", - "redirect_url": "/azure/azure-sql/database/saas-dbpertenant-dr-geo-replication", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-dbpertenant-dr-geo-restore.md", - "redirect_url": "/azure/azure-sql/database/saas-dbpertenant-dr-geo-restore", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-dbpertenant-get-started-deploy.md", - "redirect_url": "/azure/azure-sql/database/saas-dbpertenant-get-started-deploy", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-dbpertenant-log-analytics.md", - "redirect_url": "/azure/azure-sql/database/saas-dbpertenant-log-analytics", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-dbpertenant-performance-monitoring.md", - "redirect_url": "/azure/azure-sql/database/saas-dbpertenant-performance-monitoring", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-dbpertenant-provision-and-catalog.md", - "redirect_url": "/azure/azure-sql/database/saas-dbpertenant-provision-and-catalog", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-dbpertenant-restore-single-tenant.md", - "redirect_url": 
"/azure/azure-sql/database/saas-dbpertenant-restore-single-tenant", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-dbpertenant-wingtip-app-overview.md", - "redirect_url": "/azure/azure-sql/database/saas-dbpertenant-wingtip-app-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-multitenantdb-adhoc-reporting.md", - "redirect_url": "/azure/azure-sql/database/saas-multitenantdb-adhoc-reporting", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-multitenantdb-get-started-deploy.md", - "redirect_url": "/azure/azure-sql/database/saas-multitenantdb-get-started-deploy", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-multitenantdb-performance-monitoring.md", - "redirect_url": "/azure/azure-sql/database/saas-multitenantdb-performance-monitoring", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-multitenantdb-provision-and-catalog.md", - "redirect_url": "/azure/azure-sql/database/saas-multitenantdb-provision-and-catalog", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-multitenantdb-schema-management.md", - "redirect_url": "/azure/azure-sql/database/saas-multitenantdb-schema-management", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-multitenantdb-tenant-analytics.md", - "redirect_url": "/azure/azure-sql/database/saas-multitenantdb-tenant-analytics", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-standaloneapp-get-started-deploy.md", - "redirect_url": "/azure/azure-sql/database/saas-standaloneapp-get-started-deploy", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-standaloneapp-provision-and-catalog.md", - "redirect_url": 
"/azure/azure-sql/database/saas-standaloneapp-provision-and-catalog", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-tenancy-app-design-patterns.md", - "redirect_url": "/azure/azure-sql/database/saas-tenancy-app-design-patterns", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-tenancy-cross-tenant-reporting.md", - "redirect_url": "/azure/azure-sql/database/saas-tenancy-cross-tenant-reporting", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-tenancy-elastic-tools-multi-tenant-row-level-security.md", - "redirect_url": "/azure/azure-sql/database/saas-tenancy-elastic-tools-multi-tenant-row-level-security", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-tenancy-schema-management.md", - "redirect_url": "/azure/azure-sql/database/saas-tenancy-schema-management", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-tenancy-tenant-analytics.md", - "redirect_url": "/azure/azure-sql/database/saas-tenancy-tenant-analytics", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-tenancy-tenant-analytics-adf.md", - "redirect_url": "/azure/azure-sql/database/saas-tenancy-tenant-analytics-adf", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-tenancy-video-index-wingtip-brk3120-20171011.md", - "redirect_url": "/azure/azure-sql/database/saas-tenancy-video-index-wingtip-brk3120-20171011", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-tenancy-welcome-wingtip-tickets-app.md", - "redirect_url": "/azure/azure-sql/database/saas-tenancy-welcome-wingtip-tickets-app", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/saas-tenancy-wingtip-app-guidance-tips.md", - "redirect_url": 
"/azure/azure-sql/database/saas-tenancy-wingtip-app-guidance-tips", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-scale-resources.md", - "redirect_url": "/azure/azure-sql/database/scale-resources", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-add-single-db-to-failover-group-cli.md", - "redirect_url": "/azure/azure-sql/database/scripts/add-database-to-failover-group-cli", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-add-single-db-to-failover-group-powershell.md", - "redirect_url": "/azure/azure-sql/database/scripts/add-database-to-failover-group-powershell", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-add-elastic-pool-to-failover-group-powershell.md", - "redirect_url": "/azure/azure-sql/database/scripts/add-elastic-pool-to-failover-group-powershell", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-auditing-and-threat-detection-powershell.md", - "redirect_url": "/azure/azure-sql/database/scripts/auditing-threat-detection-powershell-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-copy-database-to-new-server-powershell.md", - "redirect_url": "/azure/azure-sql/database/scripts/copy-database-to-new-server-powershell", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-create-and-configure-database-cli.md", - "redirect_url": "/azure/azure-sql/database/scripts/create-and-configure-database-cli", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-create-and-configure-database-powershell.md", - "redirect_url": 
"/azure/azure-sql/database/scripts/create-and-configure-database-powershell", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-import-from-bacpac-powershell.md", - "redirect_url": "/azure/azure-sql/database/scripts/import-from-bacpac-powershell", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-monitor-and-scale-database-cli.md", - "redirect_url": "/azure/azure-sql/database/scripts/monitor-and-scale-database-cli", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-monitor-and-scale-database-powershell.md", - "redirect_url": "/azure/azure-sql/database/scripts/monitor-and-scale-database-powershell", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-monitor-and-scale-pool-powershell.md", - "redirect_url": "/azure/azure-sql/database/scripts/monitor-and-scale-pool-powershell", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-move-database-between-pools-cli.md", - "redirect_url": "/azure/azure-sql/database/scripts/move-database-between-elastic-pools-cli", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-move-database-between-pools-powershell.md", - "redirect_url": "/azure/azure-sql/database/scripts/move-database-between-elastic-pools-powershell", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-restore-database-powershell.md", - "redirect_url": "/azure/azure-sql/database/scripts/restore-database-powershell", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-scale-pool-cli.md", - "redirect_url": "/azure/azure-sql/database/scripts/scale-pool-cli", - "redirect_document_id": true - }, 
- { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-setup-geodr-and-failover-database-powershell.md", - "redirect_url": "/azure/azure-sql/database/scripts/setup-geodr-and-failover-database-powershell", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-setup-geodr-and-failover-pool-powershell.md", - "redirect_url": "/azure/azure-sql/database/scripts/setup-geodr-and-failover-elastic-pool-powershell", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-sync-data-between-azure-onprem.md", - "redirect_url": "/azure/azure-sql/database/scripts/sql-data-sync-sync-data-between-azure-onprem", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-sync-data-between-sql-databases.md", - "redirect_url": "/azure/azure-sql/database/scripts/sql-data-sync-sync-data-between-sql-databases", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-sync-update-schema.md", - "redirect_url": "/azure/azure-sql/database/scripts/update-sync-schema-in-sync-group", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-vnet-service-endpoint-rule-powershell.md", - "redirect_url": "/azure/azure-sql/database/scripts/vnet-service-endpoint-rule-powershell-create", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-security-tutorial.md", - "redirect_url": "/azure/azure-sql/database/secure-database-tutorial", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/security-baseline.md", - "redirect_url": "/azure/azure-sql/database/security-baseline", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-security-attributes.md", - "redirect_url": 
"/azure/azure-sql/database/security-baseline", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-security-best-practice.md", - "redirect_url": "/azure/azure-sql/database/security-best-practice", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-security-controls.md", - "redirect_url": "/azure/azure-sql/database/security-controls", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-security-overview.md", - "redirect_url": "/azure/azure-sql/database/security-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-serverless.md", - "redirect_url": "/azure/azure-sql/database/serverless-tier-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-service-tier-business-critical.md", - "redirect_url": "/azure/azure-sql/database/service-tier-business-critical", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-service-tier-general-purpose.md", - "redirect_url": "/azure/azure-sql/database/service-tier-general-purpose", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-service-tier-hyperscale.md", - "redirect_url": "/azure/azure-sql/database/service-tier-hyperscale", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-service-tier-hyperscale-faq.md", - "redirect_url": "/azure/azure-sql/database/service-tier-hyperscale-frequently-asked-questions-faq", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-service-tiers-dtu.md", - "redirect_url": "/azure/azure-sql/database/service-tiers-dtu", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/sql-database/sql-database-service-tiers-general-purpose-business-critical.md", - "redirect_url": "/azure/azure-sql/database/service-tiers-general-purpose-business-critical", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-service-tiers-prs.md", - "redirect_url": "/azure/azure-sql/database/service-tiers-prs", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-service-tiers-vcore.md", - "redirect_url": "/azure/azure-sql/database/service-tiers-vcore", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-single-database-get-started-template.md", - "redirect_url": "/azure/azure-sql/database/single-database-create-arm-template-quickstart", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-single-database-get-started.md", - "redirect_url": "/azure/azure-sql/database/single-database-create-quickstart", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-single-databases-manage.md", - "redirect_url": "/azure/azure-sql/database/single-database-manage", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-single-database.md", - "redirect_url": "/azure/azure-sql/database/single-database-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-single-database-scale.md", - "redirect_url": "/azure/azure-sql/database/single-database-scale", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-spark-connector.md", - "redirect_url": "/azure/azure-sql/database/spark-connector", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-technical-overview.md", - "redirect_url": 
"/azure/azure-sql/database/sql-database-paas-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-vulnerability-assessment-rules.md", - "redirect_url": "/azure/azure-sql/database/sql-database-vulnerability-assessment-rules", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-vulnerability-assessment-storage.md", - "redirect_url": "/azure/azure-sql/database/sql-database-vulnerability-assessment-storage", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-data-sync-agent.md", - "redirect_url": "/azure/azure-sql/database/sql-data-sync-agent-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-best-practices-data-sync.md", - "redirect_url": "/azure/azure-sql/database/sql-data-sync-best-practices", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-sync-data.md", - "redirect_url": "/azure/azure-sql/database/sql-data-sync-data-sql-server-sql-database", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-sync-monitor-oms.md", - "redirect_url": "/azure/azure-sql/database/sql-data-sync-monitor-sync", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-get-started-sql-data-sync.md", - "redirect_url": "/azure/azure-sql/database/sql-data-sync-sql-server-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-troubleshoot-data-sync.md", - "redirect_url": "/azure/azure-sql/database/sql-data-sync-troubleshoot", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-update-sync-schema.md", - "redirect_url": "/azure/azure-sql/database/sql-data-sync-update-sync-schema", - 
"redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-vulnerability-assessment.md", - "redirect_url": "/azure/azure-sql/database/sql-vulnerability-assessment", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-stream-analytics.md", - "redirect_url": "/azure/azure-sql/database/stream-data-stream-analytics-integration", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-temporal-tables-retention-policy.md", - "redirect_url": "/azure/azure-sql/database/temporal-tables-retention-policy", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-threat-detection.md", - "redirect_url": "/azure/azure-sql/database/threat-detection-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-threat-detection-overview.md", - "redirect_url": "/azure/azure-sql/database/threat-detection-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-transact-sql-information.md", - "redirect_url": "/azure/azure-sql/database/transact-sql-tsql-differences-sql-server", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/transparent-data-encryption-byok-azure-sql-configure.md", - "redirect_url": "/azure/azure-sql/database/transparent-data-encryption-byok-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/transparent-data-encryption-byok-azure-sql-key-rotation.md", - "redirect_url": "/azure/azure-sql/database/transparent-data-encryption-byok-key-rotation", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/transparent-data-encryption-byok-azure-sql.md", - "redirect_url": "/azure/azure-sql/database/transparent-data-encryption-byok-overview", - 
"redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/transparent-data-encryption-byok-azure-sql-remove-tde-protector.md", - "redirect_url": "/azure/azure-sql/database/transparent-data-encryption-byok-remove-tde-protector", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/transparent-data-encryption-azure-sql.md", - "redirect_url": "/azure/azure-sql/database/transparent-data-encryption-tde-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-connectivity-issues.md", - "redirect_url": "/azure/azure-sql/database/troubleshoot-common-connectivity-issues", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/troubleshoot-connectivity-issues-microsoft-azure-sql-database.md", - "redirect_url": "/azure/azure-sql/database/troubleshoot-common-errors-issues", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-vnet-service-endpoint-rule-overview.md", - "redirect_url": "/azure/azure-sql/database/vnet-service-endpoint-rule-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-xevent-code-event-file.md", - "redirect_url": "/azure/azure-sql/database/xevent-code-event-file", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-xevent-code-ring-buffer.md", - "redirect_url": "/azure/azure-sql/database/xevent-code-ring-buffer", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-xevent-db-diff-from-svr.md", - "redirect_url": "/azure/azure-sql/database/xevent-db-diff-from-svr", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-identify-query-performance-issues.md", - "redirect_url": "/azure/azure-sql/identify-query-performance-issues", - 
"redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-in-memory-oltp-migration.md", - "redirect_url": "/azure/azure-sql/in-memory-oltp-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-in-memory-oltp-monitoring.md", - "redirect_url": "/azure/azure-sql/in-memory-oltp-monitor-space", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-in-memory.md", - "redirect_url": "/azure/azure-sql/in-memory-oltp-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-in-memory-sample.md", - "redirect_url": "/azure/azure-sql/in-memory-sample", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-load-from-csv-with-bcp.md", - "redirect_url": "/azure/azure-sql/load-from-csv-with-bcp", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-aad-security-tutorial.md", - "redirect_url": "/azure/azure-sql/managed-instance/aad-security-configure-tutorial", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-alerts.md", - "redirect_url": "/azure/azure-sql/managed-instance/alerts-create", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-create-manage.md", - "redirect_url": "/azure/azure-sql/managed-instance/api-references-create-manage-instance", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-auditing.md", - "redirect_url": "/azure/azure-sql/managed-instance/auditing-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-sync-network-configuration.md", - 
"redirect_url": "/azure/azure-sql/managed-instance/azure-app-sync-network-configuration", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-connect-app.md", - "redirect_url": "/azure/azure-sql/managed-instance/connect-application-instance", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-connection-type.md", - "redirect_url": "/azure/azure-sql/managed-instance/connection-types-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-connectivity-architecture.md", - "redirect_url": "/azure/azure-sql/managed-instance/connectivity-architecture-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-configure-vm.md", - "redirect_url": "/azure/azure-sql/managed-instance/connect-vm-instance-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-custom-dns.md", - "redirect_url": "/azure/azure-sql/managed-instance/custom-dns-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-failover-group-tutorial.md", - "redirect_url": "/azure/azure-sql/managed-instance/failover-group-add-instance-tutorial", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-faq.md", - "redirect_url": "/azure/azure-sql/managed-instance/frequently-asked-questions-faq", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-howto-managed-instance.md", - "redirect_url": "/azure/azure-sql/managed-instance/how-to-content-reference-guide", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/sql-database/sql-database-managed-instance-get-started.md", - "redirect_url": "/azure/azure-sql/managed-instance/instance-create-quickstart", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-instance-pools-how-to.md", - "redirect_url": "/azure/azure-sql/managed-instance/instance-pools-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-instance-pools.md", - "redirect_url": "/azure/azure-sql/managed-instance/instance-pools-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-long-term-backup-retention-configure.md", - "redirect_url": "/azure/azure-sql/managed-instance/long-term-backup-retention-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-find-management-endpoint-ip-address.md", - "redirect_url": "/azure/azure-sql/managed-instance/management-endpoint-find-ip-address", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-management-endpoint-verify-built-in-firewall.md", - "redirect_url": "/azure/azure-sql/managed-instance/management-endpoint-verify-built-in-firewall", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/tutorial-managed-instance-azure-active-directory-migration.md", - "redirect_url": "/azure/azure-sql/managed-instance/migrate-sql-server-users-to-instance-transact-sql-tsql-tutorial", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-migrate.md", - "redirect_url": "/azure/azure-sql/managed-instance/migrate-to-instance-from-sql-server", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-point-in-time-restore.md", - 
"redirect_url": "/azure/azure-sql/managed-instance/point-in-time-restore", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-configure-p2s.md", - "redirect_url": "/azure/azure-sql/managed-instance/point-to-site-p2s-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-public-endpoint-configure.md", - "redirect_url": "/azure/azure-sql/managed-instance/public-endpoint-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-public-endpoint-securely.md", - "redirect_url": "/azure/azure-sql/managed-instance/public-endpoint-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-quickstart-guide.md", - "redirect_url": "/azure/azure-sql/managed-instance/quickstart-content-reference-guide", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/replication-with-sql-database-managed-instance.md", - "redirect_url": "/azure/azure-sql/managed-instance/replication-between-two-instances-configure-tutorial", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-transactional-replication.md", - "redirect_url": "/azure/azure-sql/managed-instance/replication-transactional-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-configure-replication-tutorial.md", - "redirect_url": "/azure/azure-sql/managed-instance/replication-two-instances-and-sql-server-configure-tutorial", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-resource-limits.md", - "redirect_url": "/azure/azure-sql/managed-instance/resource-limits", - "redirect_document_id": 
true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-get-started-restore.md", - "redirect_url": "/azure/azure-sql/managed-instance/restore-sample-database-quickstart", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-add-managed-instance-to-failover-group-powershell.md", - "redirect_url": "/azure/azure-sql/managed-instance/scripts/add-to-failover-group-powershell", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-database-create-configure-managed-instance-powershell.md", - "redirect_url": "/azure/azure-sql/managed-instance/scripts/create-configure-managed-instance-powershell", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-managed-instance-create-powershell-azure-resource-manager-template.md", - "redirect_url": "/azure/azure-sql/managed-instance/scripts/create-powershell-azure-resource-manager-template", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/scripts/sql-managed-instance-restore-geo-backup.md", - "redirect_url": "/azure/azure-sql/managed-instance/scripts/restore-geo-backup", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/scripts/transparent-data-encryption-byok-sql-managed-instance-powershell.md", - "redirect_url": "/azure/azure-sql/managed-instance/scripts/transparent-data-encryption-byok-powershell", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance.md", - "redirect_url": "/azure/azure-sql/managed-instance/sql-managed-instance-paas-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-enabling-service-aided-subnet-configuration.md", - "redirect_url": 
"/azure/azure-sql/managed-instance/subnet-service-aided-configuration-enable", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-migrate-tde-certificate.md", - "redirect_url": "/azure/azure-sql/managed-instance/tde-certificate-migrate", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-threat-detection.md", - "redirect_url": "/azure/azure-sql/managed-instance/threat-detection-configure", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-timezone.md", - "redirect_url": "/azure/azure-sql/managed-instance/timezones-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-transact-sql-information.md", - "redirect_url": "/azure/azure-sql/managed-instance/transact-sql-tsql-differences-sql-server", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-delete-virtual-cluster.md", - "redirect_url": "/azure/azure-sql/managed-instance/virtual-cluster-delete", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-create-vnet-subnet.md", - "redirect_url": "/azure/azure-sql/managed-instance/virtual-network-subnet-create-arm-template", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-configure-vnet-subnet.md", - "redirect_url": "/azure/azure-sql/managed-instance/vnet-existing-add-subnet", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-determine-size-vnet-subnet.md", - "redirect_url": "/azure/azure-sql/managed-instance/vnet-subnet-determine-size", - "redirect_document_id": true - }, - { - "source_path_from_root": 
"/articles/sql-database/sql-database-multi-model-features.md", - "redirect_url": "/azure/azure-sql/multi-model-features", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-use-batching-to-improve-performance.md", - "redirect_url": "/azure/azure-sql/performance-improve-use-batching", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-public-data-sets.md", - "redirect_url": "/azure/azure-sql/public-data-sets", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-temporal-tables.md", - "redirect_url": "/azure/azure-sql/temporal-tables", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-migrate-your-sql-server-database.md", - "redirect_url": "/azure/dms/tutorial-sql-server-to-azure-sql", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-feature-restrictions.md", - "redirect_url": "/azure/sql-database", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-jobs-create-and-manage.md", - "redirect_url": "/azure/sql-database/elastic-jobs-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-jobs-getting-started.md", - "redirect_url": "/azure/sql-database/elastic-jobs-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-jobs-overview.md", - "redirect_url": "/azure/sql-database/elastic-jobs-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-jobs-powershell.md", - "redirect_url": "/azure/sql-database/elastic-jobs-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": 
"/articles/sql-database/sql-database-elastic-jobs-service-installation.md", - "redirect_url": "/azure/sql-database/elastic-jobs-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-jobs-uninstall.md", - "redirect_url": "/azure/sql-database/elastic-jobs-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-saas-tutorial.md", - "redirect_url": "/azure/sql-database/saas-dbpertenant-get-started-deploy", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-saas-tutorial-log-analytics.md", - "redirect_url": "/azure/sql-database/saas-dbpertenant-log-analytics", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-saas-tutorial-performance-monitoring.md", - "redirect_url": "/azure/sql-database/saas-dbpertenant-performance-monitoring", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-saas-tutorial-provision-and-catalog.md", - "redirect_url": "/azure/sql-database/saas-dbpertenant-provision-and-catalog", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-saas-tutorial-restore-single-tenant.md", - "redirect_url": "/azure/sql-database/saas-dbpertenant-restore-single-tenant", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-multi-tenant-application.md", - "redirect_url": "/azure/sql-database/saas-dbpertenant-wingtip-app-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-wtp-overview.md", - "redirect_url": "/azure/sql-database/saas-dbpertenant-wingtip-app-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-build-multi-tenant-apps.md", - "redirect_url": 
"/azure/sql-database/saas-tenancy-app-design-patterns", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-design-patterns-multi-tenancy-saas-applications.md", - "redirect_url": "/azure/sql-database/saas-tenancy-app-design-patterns", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-tools-multi-tenant-row-level-security.md", - "redirect_url": "/azure/sql-database/saas-tenancy-elastic-tools-multi-tenant-row-level-security", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-saas-tutorial-schema-management.md", - "redirect_url": "/azure/sql-database/saas-tenancy-schema-management", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-saas-tutorial-adhoc-analytics.md", - "redirect_url": "/azure/sql-database/saas-tenancy-tenant-analytics", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-saas-tutorial-tenant-analytics.md", - "redirect_url": "/azure/sql-database/saas-tenancy-tenant-analytics", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/saas-dbpertenant-wingtip-app-guidance-tips.md", - "redirect_url": "/azure/sql-database/saas-tenancy-wingtip-app-guidance-tips", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-auditing-and-threat-detection-powershell.md", - "redirect_url": "/azure/sql-database/scripts/sql-database-auditing-and-threat-detection-powershell", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-copy-powershell.md", - "redirect_url": "/azure/sql-database/scripts/sql-database-copy-database-to-new-server-powershell", - "redirect_document_id": false - }, - { - "source_path_from_root": 
"/articles/sql-database/sql-database-manage-servers-powershell.md", - "redirect_url": "/azure/sql-database/scripts/sql-database-create-and-configure-database-powershell", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-manage-single-databases-powershell.md", - "redirect_url": "/azure/sql-database/scripts/sql-database-create-and-configure-database-powershell", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-import-powershell.md", - "redirect_url": "/azure/sql-database/scripts/sql-database-import-from-bacpac-powershell", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-geo-restore-powershell.md", - "redirect_url": "/azure/sql-database/scripts/sql-database-restore-database-powershell", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-geo-replication-failover-powershell.md", - "redirect_url": "/azure/sql-database/scripts/sql-database-setup-geodr-and-failover-database-powershell", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-geo-replication-powershell.md", - "redirect_url": "/azure/sql-database/scripts/sql-database-setup-geodr-and-failover-database-powershell", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-control-access-aad-authentication-get-started.md", - "redirect_url": "/azure/sql-database/sql-database-aad-authentication-configure", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-geo-replication-failover-portal.md", - "redirect_url": "/azure/sql-database/sql-database-active-geo-replication-portal", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-geo-replication-portal.md", - "redirect_url": 
"/azure/sql-database/sql-database-active-geo-replication-portal", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-advanced-threat-protection.md", - "redirect_url": "/azure/sql-database/sql-database-advanced-data-security", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-threat-protection.md", - "redirect_url": "/azure/sql-database/sql-database-advanced-data-security", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-learn-and-adapt.md", - "redirect_url": "/azure/sql-database/sql-database-advisor", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-auditing-get-started.md", - "redirect_url": "/azure/sql-database/sql-database-auditing", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-auditing-portal.md", - "redirect_url": "/azure/sql-database/sql-database-auditing", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-auditing-powershell.md", - "redirect_url": "/azure/sql-database/sql-database-auditing", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-auditing-rest.md", - "redirect_url": "/azure/sql-database/sql-database-auditing", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-geo-replication-overview.md", - "redirect_url": "/azure/sql-database/sql-database-auto-failover-group", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-get-started-cli.md", - "redirect_url": "/azure/sql-database/sql-database-cli-samples", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-libraries.md", - "redirect_url": 
"/azure/sql-database/sql-database-connect-query#libraries", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-develop-dotnet-simple.md", - "redirect_url": "/azure/sql-database/sql-database-connect-query-dotnet", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-connect-query-dotnet.md", - "redirect_url": "/azure/sql-database/sql-database-connect-query-dotnet-visual-studio", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-develop-java-simple.md", - "redirect_url": "/azure/sql-database/sql-database-connect-query-java", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-develop-nodejs-simple.md", - "redirect_url": "/azure/sql-database/sql-database-connect-query-nodejs", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-develop-php-simple.md", - "redirect_url": "/azure/sql-database/sql-database-connect-query-php", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-develop-python-simple.md", - "redirect_url": "/azure/sql-database/sql-database-connect-query-python", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-develop-ruby-simple.md", - "redirect_url": "/azure/sql-database/sql-database-connect-query-ruby", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-security-guidelines.md", - "redirect_url": "/azure/sql-database/sql-database-control-access", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-get-started-security.md", - "redirect_url": "/azure/sql-database/sql-database-control-access-sql-authentication-get-started", - "redirect_document_id": false - }, - { - 
"source_path_from_root": "/articles/sql-database/sql-database-copy-portal.md", - "redirect_url": "/azure/sql-database/sql-database-copy", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-copy-transact-sql.md", - "redirect_url": "/azure/sql-database/sql-database-copy", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-get-started-csharp.md", - "redirect_url": "/azure/sql-database/sql-database-design-first-database-csharp", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-works-in-your-environment.md", - "redirect_url": "/azure/sql-database/sql-database-develop-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-dtu-resource-limits.md", - "redirect_url": "/azure/sql-database/sql-database-dtu-resource-limits-single-databases", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-pool-database-assessment-powershell.md", - "redirect_url": "/azure/sql-database/sql-database-elastic-pool", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-pool-guidance.md", - "redirect_url": "/azure/sql-database/sql-database-elastic-pool", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-pool-manage-csharp.md", - "redirect_url": "/azure/sql-database/sql-database-elastic-pool", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-pool-manage-portal.md", - "redirect_url": "/azure/sql-database/sql-database-elastic-pool", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-pool-manage-powershell.md", - "redirect_url": 
"/azure/sql-database/sql-database-elastic-pool", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-pool-manage-tsql.md", - "redirect_url": "/azure/sql-database/sql-database-elastic-pool", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-pool-price.md", - "redirect_url": "/azure/sql-database/sql-database-elastic-pool", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-pool-create-csharp.md", - "redirect_url": "/azure/sql-database/sql-database-elastic-pool-manage-csharp", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-pool-create-portal.md", - "redirect_url": "/azure/sql-database/sql-database-elastic-pool-manage-portal", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-get-started-elastic-pool.md", - "redirect_url": "/azure/sql-database/sql-database-elastic-pool-manage-portal", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-elastic-pool-create-powershell.md", - "redirect_url": "/azure/sql-database/sql-database-elastic-pool-manage-powershell", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-solution-quick-starts.md", - "redirect_url": "/azure/sql-database/sql-database-explore-tutorials", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-cloud-migrate-compatible-export-bacpac-sqlpackage.md", - "redirect_url": "/azure/sql-database/sql-database-export", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-cloud-migrate-compatible-export-bacpac-ssms.md", - "redirect_url": "/azure/sql-database/sql-database-export", - "redirect_document_id": false - 
}, - { - "source_path_from_root": "/articles/sql-database/sql-database-cloud-migrate-export-bacpac-ssms.md", - "redirect_url": "/azure/sql-database/sql-database-export", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-export-portal.md", - "redirect_url": "/azure/sql-database/sql-database-export", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-export-powershell.md", - "redirect_url": "/azure/sql-database/sql-database-export", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-export-sqlpackage.md", - "redirect_url": "/azure/sql-database/sql-database-export", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-export-ssms.md", - "redirect_url": "/azure/sql-database/sql-database-export", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-troubleshoot-moving-data.md", - "redirect_url": "/azure/sql-database/sql-database-faq", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-troubleshoot-permissions.md", - "redirect_url": "/azure/sql-database/sql-database-faq", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-upgrade-server-portal.md", - "redirect_url": "/azure/sql-database/sql-database-faq", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-upgrade-server-powershell.md", - "redirect_url": "/azure/sql-database/sql-database-faq", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-v12-plan-prepare-upgrade.md", - "redirect_url": "/azure/sql-database/sql-database-faq", - "redirect_document_id": false - }, - { - "source_path_from_root": 
"/articles/sql-database/sql-database-web-business-sunset-faq.md", - "redirect_url": "/azure/sql-database/sql-database-faq", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-features-index.yml", - "redirect_url": "/azure/sql-database/sql-database-features", - "redirect_document_id": "" - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-general-limitations.md", - "redirect_url": "/azure/sql-database/sql-database-features", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-v12-whats-new.md", - "redirect_url": "/azure/sql-database/sql-database-features", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-configure-firewall-settings.md", - "redirect_url": "/azure/sql-database/sql-database-firewall-configure", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-configure-firewall-settings-powershell.md", - "redirect_url": "/azure/sql-database/sql-database-firewall-configure", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-configure-firewall-settings-rest.md", - "redirect_url": "/azure/sql-database/sql-database-firewall-configure", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-configure-firewall-settings-tsql.md", - "redirect_url": "/azure/sql-database/sql-database-firewall-configure", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-geo-replication-transact-sql.md", - "redirect_url": "/azure/sql-database/sql-database-geo-replication-overview", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-explore-tutorials.md", - "redirect_url": "/azure/sql-database/sql-database-get-started", - "redirect_document_id": false - 
}, - { - "source_path_from_root": "/articles/sql-database/sql-database-manage-servers-portal.md", - "redirect_url": "/azure/sql-database/sql-database-get-started", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-get-started-backup-recovery.md", - "redirect_url": "/azure/sql-database/sql-database-get-started-backup-recovery-portal", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-cloud-migrate-compatible-import-bacpac-sqlpackage.md", - "redirect_url": "/azure/sql-database/sql-database-import", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-cloud-migrate-compatible-import-bacpac-ssms.md", - "redirect_url": "/azure/sql-database/sql-database-import", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-import-portal.md", - "redirect_url": "/azure/sql-database/sql-database-import", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-import-sqlpackage.md", - "redirect_url": "/azure/sql-database/sql-database-import", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-pools.md", - "redirect_url": "/azure/sql-database/sql-database-instance-pools", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-get-started-backup-recovery-portal.md", - "redirect_url": "/azure/sql-database/sql-database-long-term-backup-retention-configure", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-get-started-backup-recovery-powershell.md", - "redirect_url": "/azure/sql-database/sql-database-long-term-backup-retention-configure", - "redirect_document_id": false - }, - { - "source_path_from_root": 
"/articles/sql-database/sql-database-long-term-backup-retention-configure-vault.md", - "redirect_url": "/azure/sql-database/sql-database-long-term-backup-retention-configure", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-long-term-retention-delete.md", - "redirect_url": "/azure/sql-database/sql-database-long-term-backup-retention-configure", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-manage-long-term-backup-retention-portal.md", - "redirect_url": "/azure/sql-database/sql-database-long-term-backup-retention-configure", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-manage-long-term-backup-retention-powershell.md", - "redirect_url": "/azure/sql-database/sql-database-long-term-backup-retention-configure", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-restore-from-long-term-retention.md", - "redirect_url": "/azure/sql-database/sql-database-long-term-backup-retention-configure", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-view-backups-in-vault.md", - "redirect_url": "/azure/sql-database/sql-database-long-term-backup-retention-configure", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-view-backups-in-vault-powershell.md", - "redirect_url": "/azure/sql-database/sql-database-long-term-backup-retention-configure", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-configure-long-term-retention.md", - "redirect_url": "/azure/sql-database/sql-database-long-term-retention", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-index.yml", - "redirect_url": 
"/azure/sql-database/sql-database-managed-instance", - "redirect_document_id": "" - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-vnet-configuration.md", - "redirect_url": "/azure/sql-database/sql-database-managed-instance-configure-vnet-subnet", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-management-endpoint.md", - "redirect_url": "/azure/sql-database/sql-database-managed-instance-connectivity-architecture#management-endpoint", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-tutorial-portal.md", - "redirect_url": "/azure/sql-database/sql-database-managed-instance-create-tutorial-portal", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-create-tutorial-portal.md", - "redirect_url": "/azure/sql-database/sql-database-managed-instance-get-started", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-managed-instance-restore-from-backup-tutorial.md", - "redirect_url": "/azure/sql-database/sql-database-managed-instance-get-started-restore", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-control-access.md", - "redirect_url": "/azure/sql-database/sql-database-manage-logins", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-command-line-tools.md", - "redirect_url": "/azure/sql-database/sql-database-manage-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-manage-azure-ssms.md", - "redirect_url": "/azure/sql-database/sql-database-manage-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-manage-portal.md", - 
"redirect_url": "/azure/sql-database/sql-database-manage-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-manage-powershell.md", - "redirect_url": "/azure/sql-database/sql-database-manage-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-create-servers.md", - "redirect_url": "/azure/sql-database/sql-database-manage-servers-portal", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-view-update-server-settings.md", - "redirect_url": "/azure/sql-database/sql-database-manage-servers-portal", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-create-databases.md", - "redirect_url": "/azure/sql-database/sql-database-manage-single-databases-portal", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-view-update-database-settings.md", - "redirect_url": "/azure/sql-database/sql-database-manage-single-databases-portal", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-monitor-log-analytics-get-started.md", - "redirect_url": "/azure/sql-database/sql-database-metrics-diag-logging", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-performance.md", - "redirect_url": "/azure/sql-database/sql-database-monitor-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-single-database-monitor.md", - "redirect_url": "/azure/sql-database/sql-database-monitor-tune-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-troubleshoot-performance.md", - "redirect_url": "/azure/sql-database/sql-database-monitor-tune-overview", - "redirect_document_id": false - }, - { - 
"source_path_from_root": "/articles/sql-database/sql-database-paas.yml", - "redirect_url": "/azure/sql-database/sql-database-paas-index", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-performance-tutorial.md", - "redirect_url": "/azure/sql-database/sql-database-performance-guidance", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-get-started-powershell.md", - "redirect_url": "/azure/sql-database/sql-database-powershell-samples", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-service-tiers.md", - "redirect_url": "/azure/sql-database/sql-database-purchase-models", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-cloud-migrate-restore-single-table-azure-backup.md", - "redirect_url": "/azure/sql-database/sql-database-recovery-using-backups", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-point-in-time-restore.md", - "redirect_url": "/azure/sql-database/sql-database-recovery-using-backups", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-point-in-time-restore-portal.md", - "redirect_url": "/azure/sql-database/sql-database-recovery-using-backups", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-point-in-time-restore-powershell.md", - "redirect_url": "/azure/sql-database/sql-database-recovery-using-backups", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-restore-database-portal.md", - "redirect_url": "/azure/sql-database/sql-database-recovery-using-backups", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-restore-database-powershell.md", - "redirect_url": 
"/azure/sql-database/sql-database-recovery-using-backups", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-restore-deleted-database-portal.md", - "redirect_url": "/azure/sql-database/sql-database-recovery-using-backups", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-restore-deleted-database-powershell.md", - "redirect_url": "/azure/sql-database/sql-database-recovery-using-backups", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-troubleshoot-backup-and-restore.md", - "redirect_url": "/azure/sql-database/sql-database-recovery-using-backups", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-view-oldest-restore-point.md", - "redirect_url": "/azure/sql-database/sql-database-recovery-using-backups", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-resource-limits.md", - "redirect_url": "/azure/sql-database/sql-database-resource-limits-database-server", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-resource-limits-logical-server.md", - "redirect_url": "/azure/sql-database/sql-database-resource-limits-database-server", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-geo-restore.md", - "redirect_url": "/azure/sql-database/sql-database-restore-database-portal", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-geo-restore-portal.md", - "redirect_url": "/azure/sql-database/sql-database-restore-database-portal", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-scalability-index.yml", - "redirect_url": "/azure/sql-database/sql-database-scale-resources", - 
"redirect_document_id": "" - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-helps-secures-and-protects.md", - "redirect_url": "/azure/sql-database/sql-database-security-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-protect-data.md", - "redirect_url": "/azure/sql-database/sql-database-security-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-security.md", - "redirect_url": "/azure/sql-database/sql-database-security-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-security-index.yml", - "redirect_url": "/azure/sql-database/sql-database-security-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-control-access-sql-authentication-get-started.md", - "redirect_url": "/azure/sql-database/sql-database-security-tutorial", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-get-started-portal-firewall.md", - "redirect_url": "/azure/sql-database/sql-database-server-level-firewall-rule", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-logical-servers.md", - "redirect_url": "/azure/sql-database/sql-database-servers", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-server-overview.md", - "redirect_url": "/azure/sql-database/sql-database-servers", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-servers-databases.md", - "redirect_url": "/azure/sql-database/sql-database-servers", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-hyperscale.md", - "redirect_url": "/azure/sql-database/sql-database-service-tier-hyperscale", 
- "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-benchmark-overview.md", - "redirect_url": "/azure/sql-database/sql-database-service-tiers-dtu", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-what-is-a-dtu.md", - "redirect_url": "/azure/sql-database/sql-database-service-tiers-dtu", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-manage-single-databases-portal.md", - "redirect_url": "/azure/sql-database/sql-database-service-tiers-vcore", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-scale-on-the-fly.md", - "redirect_url": "/azure/sql-database/sql-database-service-tiers-vcore", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-service-tier-advisor.md", - "redirect_url": "/azure/sql-database/sql-database-service-tiers-vcore", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-get-started.md", - "redirect_url": "/azure/sql-database/sql-database-single-database-get-started", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-get-started-portal.md", - "redirect_url": "/azure/sql-database/sql-database-single-database-get-started", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-cloud-migrate.md", - "redirect_url": "/azure/sql-database/sql-database-single-database-migrate", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-cloud-migrate-compatible-using-ssms-migration-wizard.md", - "redirect_url": "/azure/sql-database/sql-database-single-database-migrate", - "redirect_document_id": false - }, - { - "source_path_from_root": 
"/articles/sql-database/sql-database-cloud-migrate-compatible-using-transactional-replication.md", - "redirect_url": "/azure/sql-database/sql-database-single-database-migrate", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-cloud-migrate-determine-compatibility-sqlpackage.md", - "redirect_url": "/azure/sql-database/sql-database-single-database-migrate", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-cloud-migrate-determine-compatibility-ssms.md", - "redirect_url": "/azure/sql-database/sql-database-single-database-migrate", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-cloud-migrate-fix-compatibility-issues.md", - "redirect_url": "/azure/sql-database/sql-database-single-database-migrate", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-cloud-migrate-fix-compatibility-issues-ssdt.md", - "redirect_url": "/azure/sql-database/sql-database-single-database-migrate", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-cloud-migrate-fix-compatibility-issues-ssms.md", - "redirect_url": "/azure/sql-database/sql-database-single-database-migrate", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-quickstart-guide.md", - "redirect_url": "/azure/sql-database/sql-database-single-database-quickstart-guide", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-scale-up.md", - "redirect_url": "/azure/sql-database/sql-database-single-database-scale", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-scale-up-powershell.md", - "redirect_url": "/azure/sql-database/sql-database-single-database-scale", - "redirect_document_id": false - }, - { - 
"source_path_from_root": "/articles/sql-database/sql-database-single-database-resources.md", - "redirect_url": "/azure/sql-database/sql-database-single-database-scale", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-manage-overview.md", - "redirect_url": "/azure/sql-database/sql-database-single-databases-manage", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-servers-databases-manage.md", - "redirect_url": "/azure/sql-database/sql-database-single-databases-manage", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-faq.md", - "redirect_url": "/azure/sql-database/sql-database-technical-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-overview.md", - "redirect_url": "/azure/sql-database/sql-database-technical-overview", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-paas-index.yml", - "redirect_url": "/azure/sql-database/sql-database-technical-overview", - "redirect_document_id": "" - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-threat-detection-get-started.md", - "redirect_url": "/azure/sql-database/sql-database-threat-detection", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-threat-detection-portal.md", - "redirect_url": "/azure/sql-database/sql-database-threat-detection", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-troubleshoot-connection.md", - "redirect_url": "/azure/sql-database/sql-database-troubleshoot-common-connection-issues", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-vcore-resource-limits.md", - "redirect_url": 
"/azure/sql-database/sql-database-vcore-resource-limits-single-databases", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-develop-error-messages.md", - "redirect_url": "/azure/sql-database/troubleshoot-connectivity-issues-microsoft-azure-sql-database", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-troubleshoot-common-connection-issues.md", - "redirect_url": "/azure/sql-database/troubleshoot-connectivity-issues-microsoft-azure-sql-database", - "redirect_document_id": true - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-operate-query-store.md", - "redirect_url": "/sql/relational-databases/performance/best-practice-with-the-query-store#Insight", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-geo-replication-failover-transact-sql.md", - "redirect_url": "/sql/t-sql/statements/alter-database-transact-sql", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-manage-single-databases-tsql.md", - "redirect_url": "/sql/t-sql/statements/alter-database-transact-sql", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-compatibility-level-query-performance-130.md", - "redirect_url": "/sql/t-sql/statements/alter-database-transact-sql-compatibility-level", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-implementation-gep.md", - "redirect_url": "http://customers.microsoft.com/story/azure-gives-gep-global-reach-and-greater-efficiency", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-implementation-daxko.md", - "redirect_url": "http://customers.microsoft.com/story/csi-used-azure-to-accelerate-its-development-cycle-and-to-enhance-its-customer-services", - 
"redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-implementation-umbraco.md", - "redirect_url": "http://customers.microsoft.com/story/umbraco-uses-azure-sql-database-to-quickly-provision-and-scale-services-for-thousands-of-tenants", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-implementation-snelstart.md", - "redirect_url": "http://customers.microsoft.com/story/with-azure-snelstart-has-rapidly-expanded-its-business-services", - "redirect_document_id": false - }, - { - "source_path_from_root": "/articles/sql-database/sql-database-customer-implementations.md", - "redirect_url": "https://customers.microsoft.com", - "redirect_document_id": false - } - ] -} diff --git a/articles/active-directory-b2c/configure-authentication-sample-spa-app.md b/articles/active-directory-b2c/configure-authentication-sample-spa-app.md index e6baf16fa2943..03fe9b9da8b7d 100644 --- a/articles/active-directory-b2c/configure-authentication-sample-spa-app.md +++ b/articles/active-directory-b2c/configure-authentication-sample-spa-app.md @@ -7,7 +7,7 @@ manager: CelesteDG ms.service: active-directory ms.workload: identity ms.topic: reference -ms.date: 03/30/2022 +ms.date: 04/30/2022 ms.author: kengaderdus ms.subservice: B2C ms.custom: "b2c-support" @@ -145,23 +145,49 @@ Your resulting code should look similar to following sample: ```javascript const msalConfig = { - auth: { - clientId: "" - authority: b2cPolicies.authorities.signUpSignIn.authority, - knownAuthorities: [b2cPolicies.authorityDomain], - }, - cache: { - cacheLocation: "localStorage", - storeAuthStateInCookie: true - } + auth: { + clientId: "", // This is the ONLY mandatory field; everything else is optional. + authority: b2cPolicies.authorities.signUpSignIn.authority, // Choose sign-up/sign-in user-flow as your default. 
+ knownAuthorities: [b2cPolicies.authorityDomain], // You must identify your tenant's domain as a known authority. + redirectUri: "http://localhost:6420", // You must register this URI on Azure Portal/App Registration. Defaults to "window.location.href". + }, + cache: { + cacheLocation: "sessionStorage", + storeAuthStateInCookie: false, + }, + system: { + loggerOptions: { + loggerCallback: (level, message, containsPii) => { + if (containsPii) { + return; + } + switch (level) { + case msal.LogLevel.Error: + console.error(message); + return; + case msal.LogLevel.Info: + console.info(message); + return; + case msal.LogLevel.Verbose: + console.debug(message); + return; + case msal.LogLevel.Warning: + console.warn(message); + return; + } + } + } + } + }; }; const loginRequest = { - scopes: ["openid", "profile"], + scopes: ["openid", ...apiConfig.b2cScopes], }; const tokenRequest = { - scopes: apiConfig.b2cScopes + scopes: [...apiConfig.b2cScopes], // e.g. ["https://fabrikamb2c.onmicrosoft.com/helloapi/demo.read"] + forceRefresh: false // Set this to "true" to skip a cached token and go to the server to get a new token }; ``` diff --git a/articles/active-directory-b2c/media/partner-gallery/asignio-logo.png b/articles/active-directory-b2c/media/partner-gallery/asignio-logo.png index 1242bc4e4a06b..2e750c2d521ea 100644 Binary files a/articles/active-directory-b2c/media/partner-gallery/asignio-logo.png and b/articles/active-directory-b2c/media/partner-gallery/asignio-logo.png differ diff --git a/articles/active-directory/conditional-access/concept-condition-filters-for-devices.md b/articles/active-directory/conditional-access/concept-condition-filters-for-devices.md index 8f2e0d75f8092..31dc70544f969 100644 --- a/articles/active-directory/conditional-access/concept-condition-filters-for-devices.md +++ b/articles/active-directory/conditional-access/concept-condition-filters-for-devices.md @@ -4,7 +4,7 @@ description: Use filter for devices in Conditional Access to enhance 
security po ms.service: active-directory ms.subservice: conditional-access ms.topic: conceptual -ms.date: 04/05/2022 +ms.date: 04/28/2022 ms.author: joflore author: MicrosoftGuyJFlo manager: karenhoran @@ -21,16 +21,18 @@ When creating Conditional Access policies, administrators have asked for the abi There are multiple scenarios that organizations can now enable using filter for devices condition. Below are some core scenarios with examples of how to use this new condition. -- Restrict access to privileged resources like Microsoft Azure Management, to privileged users, accessing from [privileged or secure admin workstations](/security/compass/privileged-access-devices). For this scenario, organizations would create two Conditional Access policies: +- **Restrict access to privileged resources**. For this example, lets say you want to allow access to Microsoft Azure Management from a user who is assigned a privilged role Global Admin, has satisfied multifactor authentication and accessing from a device that is [privileged or secure admin workstations](/security/compass/privileged-access-devices) and attested as compliant. For this scenario, organizations would create two Conditional Access policies: - Policy 1: All users with the directory role of Global administrator, accessing the Microsoft Azure Management cloud app, and for Access controls, Grant access, but require multifactor authentication and require device to be marked as compliant. - - Policy 2: All users with the directory role of Global administrator, accessing the Microsoft Azure Management cloud app, excluding a filter for devices using rule expression device.extensionAttribute1 equals SAW and for Access controls, Block. -- Block access to organization resources from devices running an unsupported Operating System version like Windows 7. 
For this scenario, organizations would create the following two Conditional Access policies: - - Policy 1: All users, accessing all cloud apps and for Access controls, Grant access, but require device to be marked as compliant or require device to be hybrid Azure AD joined. - - Policy 2: All users, accessing all cloud apps, including a filter for devices using rule expression device.operatingSystem equals Windows and device.operatingSystemVersion startsWith "6.1" and for Access controls, Block. -- Do not require multifactor authentication for specific accounts like service accounts when used on specific devices like Teams phones or Surface Hub devices. For this scenario, organizations would create the following two Conditional Access policies: + - Policy 2: All users with the directory role of Global administrator, accessing the Microsoft Azure Management cloud app, excluding a filter for devices using rule expression device.extensionAttribute1 equals SAW and for Access controls, Block. Learn how to [update extensionAttributes on an Azure AD device object](https://docs.microsoft.com/graph/api/device-update?view=graph-rest-1.0&tabs=http). +- **Block access to organization resources from devices running an unsupported Operating System**. For this example, lets say you want to block access to resources from Windows OS version older than Windows 10. For this scenario, organizations would create the following Conditional Access policy: + - All users, accessing all cloud apps, excluding a filter for devices using rule expression device.operatingSystem equals Windows and device.operatingSystemVersion startsWith "10.0" and for Access controls, Block. +- **Do not require multifactor authentication for specific accounts on specific devices**. For this example, lets say you want to not require multifactor authentication when using service accounts on specific devices like Teams phones or Surface Hub devices. 
For this scenario, organizations would create the following two Conditional Access policies: - Policy 1: All users excluding service accounts, accessing all cloud apps, and for Access controls, Grant access, but require multifactor authentication. - Policy 2: Select users and groups and include group that contains service accounts only, accessing all cloud apps, excluding a filter for devices using rule expression device.extensionAttribute2 not equals TeamsPhoneDevice and for Access controls, Block. +> [!NOTE] +> Azure AD uses device authentication to evaluate device filter rules. For devices that are unregistered with Azure AD, all device properties are considered as null values. + ## Create a Conditional Access policy Filter for devices is an option when creating a Conditional Access policy in the Azure portal or using the Microsoft Graph API. diff --git a/articles/active-directory/develop/publisher-verification-overview.md b/articles/active-directory/develop/publisher-verification-overview.md index 1c6a328160f6c..c6a61bda93198 100644 --- a/articles/active-directory/develop/publisher-verification-overview.md +++ b/articles/active-directory/develop/publisher-verification-overview.md @@ -26,9 +26,6 @@ A blue "verified" badge appears on the Azure AD consent prompt and other screens ![Consent prompt](./media/publisher-verification-overview/consent-prompt.png) -> [!NOTE] -> We recently changed the color of the "verified" badge from blue to gray. We will revert that change sometime in the last half of February 2022, so the "verified" badge will be blue. - This feature is primarily for developers building multi-tenant apps that leverage [OAuth 2.0 and OpenID Connect](active-directory-v2-protocols.md) with the [Microsoft identity platform](v2-overview.md). These apps can sign users in using OpenID Connect, or they may use OAuth 2.0 to request access to data using APIs like [Microsoft Graph](https://developer.microsoft.com/graph/). 
## Benefits diff --git a/articles/active-directory/develop/v2-protocols-oidc.md b/articles/active-directory/develop/v2-protocols-oidc.md index 195f099a461ea..734d813cd3ecb 100644 --- a/articles/active-directory/develop/v2-protocols-oidc.md +++ b/articles/active-directory/develop/v2-protocols-oidc.md @@ -284,7 +284,7 @@ Review the [UserInfo documentation](userinfo.md#calling-the-api) to look over ho When you want to sign out the user from your app, it isn't sufficient to clear your app's cookies or otherwise end the user's session. You must also redirect the user to the Microsoft identity platform to sign out. If you don't do this, the user reauthenticates to your app without entering their credentials again, because they will have a valid single sign-in session with the Microsoft identity platform. -You can redirect the user to the `end_session_endpoint` listed in the OpenID Connect metadata document: +You can redirect the user to the `end_session_endpoint` (which supports both HTTP GET and POST requests) listed in the OpenID Connect metadata document: ```HTTP GET https://login.microsoftonline.com/common/oauth2/v2.0/logout? diff --git a/articles/active-directory/develop/workload-identity-federation.md b/articles/active-directory/develop/workload-identity-federation.md index 8dc5b530fb341..50d25ad903dd8 100644 --- a/articles/active-directory/develop/workload-identity-federation.md +++ b/articles/active-directory/develop/workload-identity-federation.md @@ -29,6 +29,8 @@ Typically, a software workload (such as an application, service, script, or cont You use workload identity federation to configure an Azure AD app registration to trust tokens from an external identity provider (IdP), such as GitHub. Once that trust relationship is created, your software workload can exchange trusted tokens from the external IdP for access tokens from Microsoft identity platform. 
Your software workload then uses that access token to access the Azure AD protected resources to which the workload has been granted access. This eliminates the maintenance burden of manually managing credentials and eliminates the risk of leaking secrets or having certificates expire. ## Supported scenarios +> [!NOTE] +> Azure AD-issued tokens might not be used for federated identity flows. The following scenarios are supported for accessing Azure AD protected resources using workload identity federation: diff --git a/articles/active-directory/fundamentals/recoverability-overview.md b/articles/active-directory/fundamentals/recoverability-overview.md index 5f5af1af7cdfd..a8ec4a029bb97 100644 --- a/articles/active-directory/fundamentals/recoverability-overview.md +++ b/articles/active-directory/fundamentals/recoverability-overview.md @@ -203,7 +203,7 @@ There are several Azure Monitor workbooks that can help you to monitor configura - Directory role and group membership updates for service principals - Modified federation settings -The [Cross-tenant access activity workbook ](../reports-monitoring/workbook-cross-tenant-access-activity.md)can help you monitor which applications in external tenants your users are accessing, and which applications I your tenant external users are accessing. Use this workbook to look for anomalous changes in either inbound or outbound application access across tenants. +The [Cross-tenant access activity workbook ](../reports-monitoring/workbook-cross-tenant-access-activity.md)can help you monitor which applications in external tenants your users are accessing, and which applications in your tenant external users are accessing. Use this workbook to look for anomalous changes in either inbound or outbound application access across tenants. 
## Operational security diff --git a/articles/active-directory/fundamentals/whats-new-archive.md b/articles/active-directory/fundamentals/whats-new-archive.md index 296a46774eb39..5bb5d3eb7a23a 100644 --- a/articles/active-directory/fundamentals/whats-new-archive.md +++ b/articles/active-directory/fundamentals/whats-new-archive.md @@ -31,6 +31,247 @@ The What's new in Azure Active Directory? release notes provide information abou --- +## October 2021 + +### Limits on the number of configured API permissions for an application registration will be enforced starting in October 2021 + +**Type:** Plan for change +**Service category:** Other +**Product capability:** Developer Experience + +Sometimes, application developers configure their apps to require more permissions than it's possible to grant. To prevent this from happening, a limit on the total number of required permissions that can be configured for an app registration will be enforced. + +The total number of required permissions for any single application registration mustn't exceed 400 permissions, across all APIs. The change to enforce this limit will begin rolling out mid-October 2021. Applications exceeding the limit can't increase the number of permissions they’re configured for. The existing limit on the number of distinct APIs for which permissions are required remains unchanged and may not exceed 50 APIs. + +In the Azure portal, the required permissions are listed under API permissions for the application you wish to configure. Using Microsoft Graph or Microsoft Graph PowerShell, the required permissions are listed in the requiredResourceAccess property of an [application](/graph/api/resources/application) entity. [Learn more](../enterprise-users/directory-service-limits-restrictions.md). 
+ +--- + +### Email one-time passcode on by default change beginning rollout in November 2021 + +**Type:** Plan for change +**Service category:** B2B +**Product capability:** B2B/B2C + +Previously, we announced that starting October 31, 2021, Microsoft Azure Active Directory [email one-time passcode](../external-identities/one-time-passcode.md) authentication will become the default method for inviting accounts and tenants for B2B collaboration scenarios. However, because of deployment schedules, we'll begin rolling out on November 1, 2021. Most of the tenants will see the change rolled out in January 2022 to minimize disruptions during the holidays and deployment lock downs. After this change, Microsoft will no longer allow redemption of invitations using Azure Active Directory accounts that are unmanaged. [Learn more](../external-identities/one-time-passcode.md#frequently-asked-questions). + +--- + +### Conditional Access Guest Access Blocking Screen + +**Type:** Fixed +**Service category:** Conditional Access +**Product capability:** End User Experiences + +If there's no trust relation between a home and resource tenant, a guest user would have previously been asked to re-register their device, which would break the previous registration. However, the user would end up in a registration loop because only home tenant device registration is supported. In this specific scenario, instead of this loop, we’ve created a new conditional access blocking page. The page tells the end user that they can't get access to conditional access protected resources as a guest user. [Learn more](../external-identities/b2b-quickstart-add-guest-users-portal.md#prerequisites). 
+ +--- + +### 50105 Errors will now result in a UX error message instead of an error response to the application + +**Type:** Fixed +**Service category:** Authentications (Logins) +**Product capability:** Developer Experience + +Azure AD has fixed a bug in an error response that occurs when a user isn't assigned to an app that requires a user assignment. Previously, Azure AD would return error 50105 with the OIDC error code "interaction_required" even during interactive authentication. This would cause well-coded applications to loop indefinitely, as they do interactive authentication and receive an error telling them to do interactive authentication, which they would then do. + +The bug has been fixed, so that during non-interactive auth an "interaction_required" error will still be returned. Also, during interactive authentication an error page will be directly displayed to the user. + +For greater details, see the change notices for [Azure AD protocols](../develop/reference-breaking-changes.md#error-50105-has-been-fixed-to-not-return-interaction_required-during-interactive-authentication). + +--- + +### Public preview - New claims transformation capabilities + +**Type:** New feature +**Service category:** Enterprise Apps +**Product capability:** SSO + +The following new capabilities have been added to the claims transformations available for manipulating claims in tokens issued from Azure AD: + +- Join() on NameID. Used to be restricted to joining an email format address with a verified domain. Now Join() can be used on the NameID claim in the same way as any other claim, so NameID transforms can be used to create Windows account style NameIDs or any other string. For now if the result is an email address, the Azure AD will still validate that the domain is one that is verified in the tenant. +- Substring(). 
A new transformation in the claims configuration UI allows extraction of defined position substrings such as five characters starting at character three - substring(3,5) +- Claims transformations. These transformations can now be performed on Multi-valued attributes, and can emit multi-valued claims. Microsoft Graph can now be used to read/write multi-valued directory schema extension attributes. [Learn more](../develop/active-directory-saml-claims-customization.md). + +--- + +### Public Preview – Flagged Sign-ins + +**Type:** New feature +**Service category:** Reporting +**Product capability:** Monitoring & Reporting + +Flagged sign-ins is a feature that will increase the signal to noise ratio for user sign-ins where users need help. The functionality is intended to empower users to raise awareness about sign-in errors they want help with. Also to help admins and help desk workers find the right sign-in events quickly and efficiently. [Learn more](../reports-monitoring/overview-flagged-sign-ins.md). + +--- + +### Public preview - Device overview + +**Type:** New feature +**Service category:** Device Registration and Management +**Product capability:** Device Lifecycle Management + +The new Device Overview feature provides actionable insights about devices in your tenant. [Learn more](../devices/device-management-azure-portal.md). + +--- + +### Public preview - Azure Active Directory workload identity federation + +**Type:** New feature +**Service category:** Enterprise Apps +**Product capability:** Developer Experience + +Azure AD workload identity federation is a new capability that's in public preview. It frees developers from handling application secrets or certificates. This includes secrets in scenarios such as using GitHub Actions and building applications on Kubernetes. 
Rather than creating an application secret and using that to get tokens for that application, developers can instead use tokens provided by the respective platforms such as GitHub and Kubernetes without having to manage any secrets manually. [Learn more](../develop/workload-identity-federation.md). + +--- + +### Public Preview - Updates to Sign-in Diagnostic + +**Type:** Changed feature +**Service category:** Reporting +**Product capability:** Monitoring & Reporting + +With this update, the diagnostic covers more scenarios and is made more easily available to admins. + +New scenarios covered when using the Sign-in Diagnostic: +- Pass Through Authentication sign-in failures +- Seamless Single-Sign On sign-in failures + +Other changes include: +- Flagged Sign-ins will automatically appear for investigation when using the Sign-in Diagnostic from Diagnose and Solve. +- Sign-in Diagnostic is now available from the Enterprise Apps Diagnose and Solve blade. +- The Sign-in Diagnostic is now available in the Basic Info tab of the Sign-in Log event view for all sign-in events. [Learn more](../reports-monitoring/concept-sign-in-diagnostics-scenarios.md#supported-scenarios). + +--- + +### General Availability - Privileged Role Administrators can now create Azure AD access reviews on role-assignable groups + +**Type:** Fixed +**Service category:** Access Reviews +**Product capability:** Identity Governance + +Privileged Role Administrators can now create Azure AD access reviews on Azure AD role-assignable groups, in addition to Azure AD roles. [Learn more](../governance/deploy-access-reviews.md#who-will-create-and-manage-access-reviews).
+ +--- + +### General Availability - Azure AD single Sign on and device-based Conditional Access support in Firefox on Windows 10/11 + +**Type:** New feature +**Service category:** Authentications (Logins) +**Product capability:** SSO + +We now support native single sign-on (SSO) support and device-based Conditional Access to Firefox browser on Windows 10 and Windows Server 2019 starting in Firefox version 91. [Learn more](../conditional-access/require-managed-devices.md#prerequisites). + +--- + +### General Availability - New app indicator in My Apps + +**Type:** New feature +**Service category:** My Apps +**Product capability:** End User Experiences + +Apps that have been recently assigned to the user show up with a "new" indicator. When the app is launched or the page is refreshed, this indicator disappears. [Learn more](/azure/active-directory/user-help/my-apps-portal-end-user-access). + +--- + +### General availability - Custom domain support in Azure AD B2C + +**Type:** New feature +**Service category:** B2C - Consumer Identity Management +**Product capability:** B2B/B2C + +Azure AD B2C customers can now enable custom domains so their end-users are redirected to a custom URL domain for authentication. This is done via integration with Azure Front Door's custom domains capability. [Learn more](../../active-directory-b2c/custom-domain.md?pivots=b2c-user-flow). + +--- + +### General availability - Edge Administrator built-in role + +**Type:** New feature +**Service category:** RBAC +**Product capability:** Access Control + + +Users in this role can create and manage the enterprise site list required for Internet Explorer mode on Microsoft Edge. This role grants permissions to create, edit, and publish the site list and additionally allows access to manage support tickets. 
[Learn more](/deployedge/edge-ie-mode-cloud-site-list-mgmt) + +--- + +### General availability - Windows 365 Administrator built-in role + +**Type:** New feature +**Service category:** RBAC +**Product capability:** Access Control + +Users with this role have global permissions on Windows 365 resources, when the service is present. Additionally, this role contains the ability to manage users and devices to associate a policy, and create and manage groups. [Learn more](../roles/permissions-reference.md) + +--- + +### New Federated Apps available in Azure AD Application gallery - October 2021 + +**Type:** New feature +**Service category:** Enterprise Apps +**Product capability:** 3rd Party Integration + +In October 2021 we've added the following 10 new applications in our App gallery with Federation support: + +[Adaptive Shield](../saas-apps/adaptive-shield-tutorial.md), [SocialChorus Search](https://socialchorus.com/), [Hiretual-SSO](../saas-apps/hiretual-tutorial.md), [TeamSticker by Communitio](../saas-apps/teamsticker-by-communitio-tutorial.md), [embed signage](../saas-apps/embed-signage-tutorial.md), [JoinedUp](../saas-apps/joinedup-tutorial.md), [VECOS Releezme Locker management system](../saas-apps/vecos-releezme-locker-management-system-tutorial.md), [Altoura](../saas-apps/altoura-tutorial.md), [Dagster Cloud](../saas-apps/dagster-cloud-tutorial.md), [Qualaroo](../saas-apps/qualaroo-tutorial.md) + +You can also find the documentation of all the applications here: https://aka.ms/AppsTutorial + +For listing your application in the Azure AD app gallery, read the following article: https://aka.ms/AzureADAppRequest + +--- + +### Continuous Access Evaluation migration with Conditional Access + +**Type:** Changed feature +**Service category:** Conditional Access +**Product capability:** User Authentication + +A new user experience is available for our CAE tenants. Tenants will now access CAE as part of Conditional Access. 
Any tenants that were previously using CAE for some (but not all) user accounts under the old UX or had previously disabled the old CAE UX will now be required to undergo a one-time migration experience. [Learn more](../conditional-access/concept-continuous-access-evaluation.md#migration). + +--- + +### Improved group list blade + +**Type:** Changed feature +**Service category:** Group Management +**Product capability:** Directory + +The new group list blade offers more sort and filtering capabilities, infinite scrolling, and better performance. [Learn more](../enterprise-users/groups-members-owners-search.md). + +--- + +### General availability - Google deprecation of Gmail sign-in support on embedded webviews on September 30, 2021 + +**Type:** Changed feature +**Service category:** B2B +**Product capability:** B2B/B2C + +Google has deprecated Gmail sign-ins on Microsoft Teams mobile and custom apps that run Gmail authentications on embedded webviews on Sept. 30th, 2021. + +If you would like to request an extension, impacted customers with affected OAuth client ID(s) should have received an email from Google Developers with the following information regarding a one-time policy enforcement extension, which must be completed by Jan 31, 2022. + +To continue allowing your Gmail users to sign in and redeem, we strongly recommend that you refer to [Embedded vs System Web](../develop/msal-net-web-browsers.md#embedded-vs-system-web-ui) UI in the MSAL.NET documentation and modify your apps to use the system browser for sign-in. All MSAL SDKs use the system web-view by default. + +As a workaround, we are deploying the device login flow by October 8. Between today and then, it is likely that it may not be rolled out to all regions yet (in which case, end-users will be met with an error screen until it gets deployed to your region.)
+ +For more details on the device login flow and details on requesting extension to Google, see [Add Google as an identity provider for B2B guest users](../external-identities/google-federation.md#deprecation-of-web-view-sign-in-support). + +--- + +### Identity Governance Administrator can create and manage Azure AD access reviews of groups and applications + +**Type:** Changed feature +**Service category:** Access Reviews +**Product capability:** Identity Governance + +Identity Governance Administrator can create and manage Azure AD access reviews of groups and applications. [Learn more](../governance/deploy-access-reviews.md#who-will-create-and-manage-access-reviews). + +--- + + + + ## September 2021 ### Limits on the number of configured API permissions for an application registration will be enforced starting in October 2021 diff --git a/articles/active-directory/fundamentals/whats-new.md b/articles/active-directory/fundamentals/whats-new.md index 94a9bf9ad5bd9..4c5670eb80a54 100644 --- a/articles/active-directory/fundamentals/whats-new.md +++ b/articles/active-directory/fundamentals/whats-new.md @@ -31,6 +31,195 @@ Azure AD receives improvements on an ongoing basis. To stay up to date with the This page is updated monthly, so revisit it regularly. If you're looking for items older than six months, you can find them in [Archive for What's new in Azure Active Directory](whats-new-archive.md). + +## April 2022 + +### General Availability- Microsoft Defender for Cloud for Endpoint Signal in Identity Protection + + +**Type:** New feature +**Service category:** Identity Protection +**Product capability:** Identity Security & Protection + + +Identity Protection now integrates a signal from Microsoft Defender for Cloud for Endpoint (MDE) that will protect against PRT theft detection. To learn more, see: [What is risk? Azure AD Identity Protection | Microsoft Docs](../identity-protection/concept-identity-protection-risks.md). 
+ + +--- + +### General availability - Entitlement management 3 stages of approval + + +**Type:** Changed feature +**Service category:** Other +**Product capability:** Entitlement Management +**Clouds impacted:** Public (Microsoft 365, GCC) + + +This update extends the Azure AD entitlement management access package policy to allow a third approval stage. This will be able to be configured via the Azure portal or Microsoft Graph. For more information, see: [Change approval and requestor information settings for an access package in Azure AD entitlement management](../governance/entitlement-management-access-package-approval-policy.md). + + +--- + +### General Availability - Improvements to Azure AD Smart Lockout + + +**Type:** Changed feature +**Service category:** Identity Protection +**Product capability:** User Management +**Clouds impacted:** Public (Microsoft 365, GCC), China, US Gov(GCC-H, DOD), US Nat, US Sec + + +With a recent improvement, Smart Lockout now synchronizes the lockout state across Azure AD data centers, so the total number of failed sign-in attempts allowed before an account is locked out will match the configured lockout threshold. For more information, see: [Protect user accounts from attacks with Azure Active Directory smart lockout](../authentication/howto-password-smart-lockout.md). + + +--- + + +### Public Preview - Enabling customization capabilities for the Self-Service Password Reset (SSPR) hyperlinks, footer hyperlinks and browser icons in Company Branding. + +**Type:** New feature +**Service category:** Authentications (Logins) +**Product capability:** User Authentication + +Updating the Company Branding functionality on the Azure AD/Microsoft 365 sign-in experience to allow customizing Self Service Password Reset (SSPR) hyperlinks, footer hyperlinks and browser icon. For more information, see: [Add branding to your organization’s Azure Active Directory sign-in page](customize-branding.md). 
+ +--- + +### Public Preview - Integration of Microsoft 365 App Certification details into AAD UX and Consent Experiences + + +**Type:** New feature +**Service category:** User Access Management +**Product capability:** AuthZ/Access Delegation +**Clouds impacted:** Public (Microsoft 365, GCC) + +Microsoft 365 Certification status for an app is now available in Azure AD consent UX, and custom app consent policies. The status will later be displayed in several other Identity-owned interfaces such as enterprise apps. For more information, see: [Understanding Azure AD application consent experiences](../develop/application-consent-experience.md). + +--- + +### Public Preview - Organizations can replace all references to Microsoft on the AAD auth experience + +**Type:** New feature +**Service category:** Authentications (Logins) +**Product capability:** User Authentication + +Updating the Company Branding functionality on the Azure AD/Microsoft 365 sign-in experience to allow customizing Self Service Password Reset (SSPR) hyperlinks, footer hyperlinks and browser icon. For more information, see: [Add branding to your organization’s Azure Active Directory sign-in page](customize-branding.md). + +--- + +### Public preview - Use Azure AD access reviews to review access of B2B direct connect users in Teams shared channels + + +**Type:** New feature +**Service category:** Access Reviews +**Product capability:** Identity Governance + + +Use Azure AD access reviews to review access of B2B direct connect users in Teams shared channels. For more information, see: [Include B2B direct connect users and teams accessing Teams Shared Channels in access reviews (preview)](../governance/create-access-review.md#include-b2b-direct-connect-users-and-teams-accessing-teams-shared-channels-in-access-reviews-preview). 
+ +--- + +### Public Preview - New MS Graph APIs to configure federated settings when federated with Azure AD + +**Type:** New feature +**Service category:** MS Graph +**Product capability:** Identity Security & Protection +**Clouds impacted:** Public (Microsoft 365, GCC) + + +We're announcing the public preview of following MS Graph APIs and PowerShell cmdlets for configuring federated settings when federated with Azure AD: + + +|Action |MS Graph API |PowerShell cmdlet | +|---------|---------|---------| +|Get federation settings for a federated domain | [Get internalDomainFederation](https://docs.microsoft.com/graph/api/internaldomainfederation-get?view=graph-rest-beta) | [Get-MgDomainFederationConfiguration](https://docs.microsoft.com/powershell/module/microsoft.graph.identity.directorymanagement/get-mgdomainfederationconfiguration?view=graph-powershell-beta) | +|Create federation settings for a federated domain | [Create internalDomainFederation](https://docs.microsoft.com/graph/api/domain-post-federationconfiguration?view=graph-rest-beta) | [New-MgDomainFederationConfiguration](https://docs.microsoft.com/powershell/module/microsoft.graph.identity.directorymanagement/new-mgdomainfederationconfiguration?view=graph-powershell-beta) | +|Remove federation settings for a federated domain | [Delete internalDomainFederation](https://docs.microsoft.com/graph/api/internaldomainfederation-delete?view=graph-rest-beta) | [Remove-MgDomainFederationConfiguration](https://docs.microsoft.com/powershell/module/microsoft.graph.identity.directorymanagement/remove-mgdomainfederationconfiguration?view=graph-powershell-beta) | +|Update federation settings for a federated domain | [Update internalDomainFederation](https://docs.microsoft.com/graph/api/internaldomainfederation-update?view=graph-rest-beta) | 
[Update-MgDomainFederationConfiguration](https://docs.microsoft.com/powershell/module/microsoft.graph.identity.directorymanagement/update-mgdomainfederationconfiguration?view=graph-powershell-beta) | + + + +If using older MSOnline cmdlets ([Get-MsolDomainFederationSettings](https://docs.microsoft.com/powershell/module/msonline/get-msoldomainfederationsettings?view=azureadps-1.0) and [Set-MsolDomainFederationSettings](https://docs.microsoft.com/powershell/module/msonline/set-msoldomainfederationsettings?view=azureadps-1.0)), we highly recommend transitioning to the latest MS Graph APIs and PowerShell cmdlets. + + +For more information, see [internalDomainFederation resource type - Microsoft Graph beta | Microsoft Docs](https://docs.microsoft.com/graph/api/resources/internaldomainfederation?view=graph-rest-beta). + + +--- + +### Public Preview – Ability to force reauthentication on Intune enrollment, risky sign-ins, and risky users + +**Type:** New feature +**Service category:** RBAC role +**Product capability:** AuthZ/Access Delegation +**Clouds impacted:** Public (Microsoft 365, GCC) + +Added functionality to session controls allowing admins to reauthenticate a user on every sign-in if a user or particular sign-in event is deemed risky, or when enrolling a device in Intune. For more information, see [Configure authentication session management with conditional Access](../conditional-access/howto-conditional-access-session-lifetime.md). + +--- + +### Public Preview – Protect against by-passing of cloud Azure AD Multi-Factor Authentication when federated with Azure AD + +**Type:** New feature +**Service category:** MS Graph +**Product capability:** Identity Security & Protection +**Clouds impacted:** Public (Microsoft 365, GCC) + +We're delighted to announce a new security protection that prevents bypassing of cloud Azure AD Multi-Factor Authentication when federated with Azure AD. 
When enabled for a federated domain in your Azure AD tenant, it ensures that a compromised federated account can't bypass Azure AD Multi-Factor Authentication by imitating that a multi factor authentication has already been performed by the identity provider. The protection can be enabled via new security setting, [federatedIdpMfaBehavior](https://docs.microsoft.com/graph/api/resources/internaldomainfederation?view=graph-rest-beta#federatedidpmfabehavior-values). + +We highly recommend enabling this new protection when using Azure AD Multi-Factor Authentication as your multi factor authentication for your federated users. To learn more about the protection and how to enable it, visit [Enable protection to prevent by-passing of cloud Azure AD Multi-Factor Authentication when federated with Azure AD](https://docs.microsoft.com/windows-server/identity/ad-fs/deployment/best-practices-securing-ad-fs#enable-protection-to-prevent-by-passing-of-cloud-azure-ad-multi-factor-authentication-when-federated-with-azure-ad). 
+ +--- + +### New Federated Apps available in Azure AD Application gallery - April 2022 + +**Type:** New feature +**Service category:** Enterprise Apps +**Product capability:** Third Party Integration + +In April 2022 we added the following 24 new applications in our App gallery with Federation support +[X-1FBO](https://www.x1fbo.com/), [select Armor](https://app.clickarmor.ca/), [Smint.io Portals for SharePoint](https://www.smint.io/portals-for-sharepoint/), [Pluto](../saas-apps/pluto-tutorial.md), [ADEM](../saas-apps/adem-tutorial.md), [Smart360](../saas-apps/smart360-tutorial.md), [MessageWatcher SSO](https://messagewatcher.com/), [Beatrust](../saas-apps/beatrust-tutorial.md), [AeyeScan](https://aeyescan.com/azure_sso), [ABa Customer](https://abacustomer.com/), [Twilio Sendgrid](../saas-apps/twilio-sendgrid-tutorial.md), [Vault Platform](../saas-apps/vault-platform-tutorial.md), [Speexx](../saas-apps/speexx-tutorial.md), [Clicksign](https://app.clicksign.com/signin), [Per Angusta](../saas-apps/per-angusta-tutorial.md), [EruditAI](https://dashboard.erudit.ai/login), [MetaMoJi ClassRoom](https://business.metamoji.com/), [Numici](https://app.numici.com/), [MCB.CLOUD](https://identity.mcb.cloud/Identity/Account/Manage), [DepositLink](https://depositlink.com/external-login), [Last9](https://auth.last9.io/auth), [ParkHere Corporate](../saas-apps/parkhere-corporate-tutorial.md), [Keepabl](../saas-apps/keepabl-tutorial.md), [Swit](../saas-apps/swit-tutorial.md) + +You can also find the documentation of all the applications from here https://aka.ms/AppsTutorial. 
+ + +For listing your application in the Azure AD app gallery, please read the details here https://aka.ms/AzureADAppRequest + +--- + +### General Availability - Customer data storage for Japan customers in Japanese data centers + +**Type:** New feature +**Service category:** App Provisioning +**Product capability:** GoLocal +**Clouds impacted:** Public (Microsoft 365, GCC) + +From April 15, 2022, Microsoft began storing Azure AD’s Customer Data for new tenants with a Japan billing address within the Japanese data centers. For more information, see: [Customer data storage for Japan customers in Azure Active Directory](active-directory-data-storage-japan.md). + + +--- + + + +### Public Preview - New provisioning connectors in the Azure AD Application Gallery - April 2022 + +**Type:** New feature +**Service category:** App Provisioning +**Product capability:** Third Party Integration +**Clouds impacted:** Public (Microsoft 365, GCC) + +You can now automate creating, updating, and deleting user accounts for these newly integrated apps: +- [Adobe Identity Management (OIDC)](../saas-apps/adobe-identity-management-provisioning-oidc-tutorial.md) +- [embed signage](../saas-apps/embed-signage-provisioning-tutorial.md) +- [KnowBe4 Security Awareness Training](../saas-apps/knowbe4-security-awareness-training-provisioning-tutorial.md) +- [NordPass](../saas-apps/nordpass-provisioning-tutorial.md) + +For more information about how to better secure your organization by using automated user account provisioning, see: [Automate user provisioning to SaaS applications with Azure AD](../app-provisioning/user-provisioning.md) + + +--- + ## March 2022 @@ -42,7 +231,7 @@ This page is updated monthly, so revisit it regularly. 
If you're looking for ite **Clouds impacted:** Public (Microsoft 365, GCC) -We announced in April 2020 General Availability of our new combined registration experience, enabling users to register security information for multi-factor authentication and self-service password reset at the same time, which was available for existing customers to opt in. We're happy to announce the combined security information registration experience will be enabled to all non-enabled customers after September 30th, 2022. This change does not impact tenants created after August 15th, 2020, or tenants located in the China region. For more information, see: [Combined security information registration for Azure Active Directory overview](../authentication/concept-registration-mfa-sspr-combined.md). +We announced in April 2020 General Availability of our new combined registration experience, enabling users to register security information for multi-factor authentication and self-service password reset at the same time, which was available for existing customers to opt in. We're happy to announce the combined security information registration experience will be enabled to all non-enabled customers after September 30, 2022. This change doesn't impact tenants created after August 15, 2020, or tenants located in the China region. For more information, see: [Combined security information registration for Azure Active Directory overview](../authentication/concept-registration-mfa-sspr-combined.md). 
--- @@ -52,7 +241,7 @@ We announced in April 2020 General Availability of our new combined registration **Type:** New feature **Service category:** App Provisioning -**Product capability:** 3rd Party Integration +**Product capability:** Third Party Integration @@ -79,7 +268,7 @@ For more information about how to better secure your organization by using autom **Type:** New feature **Service category:** Reporting **Product capability:** Monitoring & Reporting -**Clouds impacted:** Public (Microsoft 365,GCC) +**Clouds impacted:** Public (Microsoft 365, GCC) Azure AD Recommendations is now in public preview. This feature provides personalized insights with actionable guidance to help you identify opportunities to implement Azure AD best practices, and optimize the state of your tenant. For more information, see: [What is Azure Active Directory recommendations](../reports-monitoring/overview-recommendations.md) @@ -91,9 +280,9 @@ Azure AD Recommendations is now in public preview. This feature provides persona ### Public Preview: Dynamic administrative unit membership for users and devices **Type:** New feature -**Service category:** RBAC +**Service category:** RBAC role **Product capability:** Access Control -**Clouds impacted:** Public (Microsoft 365,GCC) +**Clouds impacted:** Public (Microsoft 365, GCC) Administrative units now support dynamic membership rules for user and device members. Instead of manually assigning users and devices to administrative units, tenant admins can set up a query for the administrative unit. The membership will be automatically maintained by Azure AD. For more information, see:[Administrative units in Azure Active Directory](../roles/administrative-units.md). 
@@ -105,7 +294,7 @@ Administrative units now support dynamic membership rules for user and device me ### Public Preview: Devices in Administrative Units **Type:** New feature -**Service category:** RBAC +**Service category:** RBAC role **Product capability:** AuthZ/Access Delegation **Clouds impacted:** Public (Microsoft 365,GCC) @@ -120,10 +309,10 @@ Devices can now be added as members of administrative units. This enables scoped **Type:** New feature **Service category:** Enterprise Apps -**Product capability:** 3rd Party Integration +**Product capability:** Third Party Integration -In March 2022 we have added the following 29 new applications in our App gallery with Federation support: +In March 2022 we've added the following 29 new applications in our App gallery with Federation support: [Informatica Platform](../saas-apps/informatica-platform-tutorial.md), [Buttonwood Central SSO](../saas-apps/buttonwood-central-sso-tutorial.md), [Blockbax](../saas-apps/blockbax-tutorial.md), [Datto Workplace Single Sign On](../saas-apps/datto-workplace-tutorial.md), [Atlas by Workland](https://atlas.workland.com/), [Simply.Coach](https://app.simply.coach/signup), [Benevity](https://benevity.com/), [Engage Absence Management](https://engage.honeydew-health.com/users/sign_in), [LitLingo App Authentication](https://www.litlingo.com/litlingo-deployment-guide), [ADP EMEA French HR Portal mon.adp.com](../saas-apps/adp-emea-french-hr-portal-tutorial.md), [Ready Room](https://app.readyroom.net/), [Rainmaker UPSMQDEV](https://upsmqdev.rainmaker.aero/rainmaker.security.web/), [Axway CSOS](../saas-apps/axway-csos-tutorial.md), [Alloy](https://alloyapp.io/), [U.S. 
Bank Prepaid](../saas-apps/us-bank-prepaid-tutorial.md), [EdApp](https://admin.edapp.com/login), [GoSimplo](https://app.gosimplo.com/External/Microsoft/Signup), [Snow Atlas SSO](https://www.snowsoftware.io/), [Abacus.AI](https://alloyapp.io/), [Culture Shift](../saas-apps/culture-shift-tutorial.md), [StaySafe Hub](https://hub.staysafeapp.net/login), [OpenLearning](../saas-apps/openlearning-tutorial.md), [Draup, Inc](https://draup.com/platformlogin/), [Air](../saas-apps/air-tutorial.md), [Regulatory Lab](https://clientidentification.com/), [SafetyLine](https://slmonitor.com/login), [Zest](../saas-apps/zest-tutorial.md), [iGrafx Platform](../saas-apps/igrafx-platform-tutorial.md), [Tracker Software Technologies](../saas-apps/tracker-software-technologies-tutorial.md) @@ -138,7 +327,7 @@ For listing your application in the Azure AD app gallery, please read the detail ### Public Preview - New APIs for fetching transitive role assignments and role permissions **Type:** New feature -**Service category:** RBAC +**Service category:** RBAC role **Product capability:** Access Control @@ -238,7 +427,7 @@ Use multi-stage reviews to create Azure AD access reviews in sequential stages, **Type:** New feature **Service category:** Enterprise Apps -**Product capability:** 3rd Party Integration +**Product capability:** Third Party Integration In February 2022 we added the following 20 new applications in our App gallery with Federation support: @@ -413,7 +602,7 @@ For more information about how to better secure your organization by using autom In January 2022, we’ve added the following 47 new applications in our App gallery with Federation support: -[Jooto](../saas-apps/jooto-tutorial.md), [Proprli](https://app.proprli.com/), [Pace Scheduler](https://www.pacescheduler.com/accounts/login/), [DRTrack](../saas-apps/drtrack-tutorial.md), [Dining Sidekick](../saas-apps/dining-sidekick-tutorial.md), [Cryotos](https://app.cryotos.com/oauth2/authorization/azure-client), [Emergency 
Management Systems](https://secure.emsystems.com.au/), [Manifestly Checklists](../saas-apps/manifestly-checklists-tutorial.md), [eLearnPOSH](../saas-apps/elearnposh-tutorial.md), [Scuba Analytics](../saas-apps/scuba-analytics-tutorial.md), [Athena Systems Login Platform](../saas-apps/athena-systems-login-platform-tutorial.md), [TimeTrack](../saas-apps/timetrack-tutorial.md), [MiHCM](../saas-apps/mihcm-tutorial.md), [Health Note](https://www.healthnote.com/), [Active Directory SSO for DoubleYou](../saas-apps/active-directory-sso-for-doubleyou-tutorial.md), [Emplifi platform](../saas-apps/emplifi-platform-tutorial.md), [Flexera One](../saas-apps/flexera-one-tutorial.md), [Hypothesis](https://web.hypothes.is/help/authorizing-hypothesis-from-the-azure-ad-app-gallery/), [Recurly](../saas-apps/recurly-tutorial.md), [XpressDox AU Cloud](https://au.xpressdox.com/Authentication/Login.aspx), [Zoom for Intune](https://zoom.us/), [UPWARD AGENT](https://app.upward.jp/login/), [Linux Foundation ID](https://openprofile.dev/), [Asset Planner](../saas-apps/asset-planner-tutorial.md), [Kiho](https://v3.kiho.fi/index/sso), [chezie](https://app.chezie.co/), [Excelity HCM](../saas-apps/excelity-hcm-tutorial.md), [yuccaHR](https://app.yuccahr.com/), [Blue Ocean Brain](../saas-apps/blue-ocean-brain-tutorial.md), [EchoSpan](../saas-apps/echospan-tutorial.md), [Archie](../saas-apps/archie-tutorial.md), [Equifax Workforce Solutions](../saas-apps/equifax-workforce-solutions-tutorial.md), [Palantir Foundry](../saas-apps/palantir-foundry-tutorial.md), [ATP SpotLight and ChronicX](../saas-apps/atp-spotlight-and-chronicx-tutorial.md), [DigiSign](https://app.digisign.org/selfcare/sso), [mConnect](https://mconnect.skooler.com/), [BrightHR](https://login.brighthr.com/), [Mural Identity](../saas-apps/mural-identity-tutorial.md), [NordPass SSO](https://app.nordpass.com/login%20use%20%22Log%20in%20to%20business%22%20option), [CloudClarity](https://portal.cloudclarity.app/dashboard), 
[Twic](../saas-apps/twic-tutorial.md), [Eduhouse Online](https://app.eduhouse.fi/palvelu/kirjaudu/microsoft), [Bealink](../saas-apps/bealink-tutorial.md), [Time Intelligence Bot](https://teams.microsoft.com/), [SentinelOne](https://sentinelone.com/) +[Jooto](../saas-apps/jooto-tutorial.md), [Proprli](https://app.proprli.com/), [Pace Scheduler](https://www.pacescheduler.com/accounts/login/), [DRTrack](../saas-apps/drtrack-tutorial.md), [Dining Sidekick](../saas-apps/dining-sidekick-tutorial.md), [Cryotos](https://app.cryotos.com/oauth2/authorization/azure-client), [Emergency Management Systems](https://secure.emsystems.com.au/), [Manifestly Checklists](../saas-apps/manifestly-checklists-tutorial.md), [eLearnPOSH](../saas-apps/elearnposh-tutorial.md), [Scuba Analytics](../saas-apps/scuba-analytics-tutorial.md), [Athena Systems sign-in Platform](../saas-apps/athena-systems-login-platform-tutorial.md), [TimeTrack](../saas-apps/timetrack-tutorial.md), [MiHCM](../saas-apps/mihcm-tutorial.md), [Health Note](https://www.healthnote.com/), [Active Directory SSO for DoubleYou](../saas-apps/active-directory-sso-for-doubleyou-tutorial.md), [Emplifi platform](../saas-apps/emplifi-platform-tutorial.md), [Flexera One](../saas-apps/flexera-one-tutorial.md), [Hypothesis](https://web.hypothes.is/help/authorizing-hypothesis-from-the-azure-ad-app-gallery/), [Recurly](../saas-apps/recurly-tutorial.md), [XpressDox AU Cloud](https://au.xpressdox.com/Authentication/Login.aspx), [Zoom for Intune](https://zoom.us/), [UPWARD AGENT](https://app.upward.jp/login/), [Linux Foundation ID](https://openprofile.dev/), [Asset Planner](../saas-apps/asset-planner-tutorial.md), [Kiho](https://v3.kiho.fi/index/sso), [chezie](https://app.chezie.co/), [Excelity HCM](../saas-apps/excelity-hcm-tutorial.md), [yuccaHR](https://app.yuccahr.com/), [Blue Ocean Brain](../saas-apps/blue-ocean-brain-tutorial.md), [EchoSpan](../saas-apps/echospan-tutorial.md), [Archie](../saas-apps/archie-tutorial.md), [Equifax 
Workforce Solutions](../saas-apps/equifax-workforce-solutions-tutorial.md), [Palantir Foundry](../saas-apps/palantir-foundry-tutorial.md), [ATP SpotLight and ChronicX](../saas-apps/atp-spotlight-and-chronicx-tutorial.md), [DigiSign](https://app.digisign.org/selfcare/sso), [mConnect](https://mconnect.skooler.com/), [BrightHR](https://login.brighthr.com/), [Mural Identity](../saas-apps/mural-identity-tutorial.md), [NordPass SSO](https://app.nordpass.com/login%20use%20%22Log%20in%20to%20business%22%20option), [CloudClarity](https://portal.cloudclarity.app/dashboard), [Twic](../saas-apps/twic-tutorial.md), [Eduhouse Online](https://app.eduhouse.fi/palvelu/kirjaudu/microsoft), [Bealink](../saas-apps/bealink-tutorial.md), [Time Intelligence Bot](https://teams.microsoft.com/), [SentinelOne](https://sentinelone.com/) You can also find the documentation of all the applications from: https://aka.ms/AppsTutorial, @@ -609,7 +798,7 @@ New updates have been made to the Microsoft Authenticator app icon. To learn mor --- -### General availability - Azure AD single Sign on and device-based Conditional Access support in Firefox on Windows 10/11 +### General availability - Azure AD single Sign-on and device-based Conditional Access support in Firefox on Windows 10/11 **Type:** New feature **Service category:** Authentications (Logins) @@ -676,241 +865,3 @@ Updated "switch organizations" user interface in My Account. This visually impro --- -## October 2021 - -### Limits on the number of configured API permissions for an application registration will be enforced starting in October 2021 - -**Type:** Plan for change -**Service category:** Other -**Product capability:** Developer Experience - -Sometimes, application developers configure their apps to require more permissions than it's possible to grant. To prevent this from happening, a limit on the total number of required permissions that can be configured for an app registration will be enforced. 
- -The total number of required permissions for any single application registration mustn't exceed 400 permissions, across all APIs. The change to enforce this limit will begin rolling out mid-October 2021. Applications exceeding the limit can't increase the number of permissions they’re configured for. The existing limit on the number of distinct APIs for which permissions are required remains unchanged and may not exceed 50 APIs. - -In the Azure portal, the required permissions are listed under API permissions for the application you wish to configure. Using Microsoft Graph or Microsoft Graph PowerShell, the required permissions are listed in the requiredResourceAccess property of an [application](/graph/api/resources/application) entity. [Learn more](../enterprise-users/directory-service-limits-restrictions.md). - ---- - -### Email one-time passcode on by default change beginning rollout in November 2021 - -**Type:** Plan for change -**Service category:** B2B -**Product capability:** B2B/B2C - -Previously, we announced that starting October 31, 2021, Microsoft Azure Active Directory [email one-time passcode](../external-identities/one-time-passcode.md) authentication will become the default method for inviting accounts and tenants for B2B collaboration scenarios. However, because of deployment schedules, we'll begin rolling out on November 1, 2021. Most of the tenants will see the change rolled out in January 2022 to minimize disruptions during the holidays and deployment lock downs. After this change, Microsoft will no longer allow redemption of invitations using Azure Active Directory accounts that are unmanaged. [Learn more](../external-identities/one-time-passcode.md#frequently-asked-questions). 
- ---- - -### Conditional Access Guest Access Blocking Screen - -**Type:** Fixed -**Service category:** Conditional Access -**Product capability:** End User Experiences - -If there's no trust relation between a home and resource tenant, a guest user would have previously been asked to re-register their device, which would break the previous registration. However, the user would end up in a registration loop because only home tenant device registration is supported. In this specific scenario, instead of this loop, we’ve created a new conditional access blocking page. The page tells the end user that they can't get access to conditional access protected resources as a guest user. [Learn more](../external-identities/b2b-quickstart-add-guest-users-portal.md#prerequisites). - ---- - -### 50105 Errors will now result in a UX error message instead of an error response to the application - -**Type:** Fixed -**Service category:** Authentications (Logins) -**Product capability:** Developer Experience - -Azure AD has fixed a bug in an error response that occurs when a user isn't assigned to an app that requires a user assignment. Previously, Azure AD would return error 50105 with the OIDC error code "interaction_required" even during interactive authentication. This would cause well-coded applications to loop indefinitely, as they do interactive authentication and receive an error telling them to do interactive authentication, which they would then do. - -The bug has been fixed, so that during non-interactive auth an "interaction_required" error will still be returned. Also, during interactive authentication an error page will be directly displayed to the user. - -For greater details, see the change notices for [Azure AD protocols](../develop/reference-breaking-changes.md#error-50105-has-been-fixed-to-not-return-interaction_required-during-interactive-authentication). 
- ---- - -### Public preview - New claims transformation capabilities - -**Type:** New feature -**Service category:** Enterprise Apps -**Product capability:** SSO - -The following new capabilities have been added to the claims transformations available for manipulating claims in tokens issued from Azure AD: - -- Join() on NameID. Used to be restricted to joining an email format address with a verified domain. Now Join() can be used on the NameID claim in the same way as any other claim, so NameID transforms can be used to create Windows account style NameIDs or any other string. For now if the result is an email address, the Azure AD will still validate that the domain is one that is verified in the tenant. -- Substring(). A new transformation in the claims configuration UI allows extraction of defined position substrings such as five characters starting at character three - substring(3,5) -- Claims transformations. These transformations can now be performed on Multi-valued attributes, and can emit multi-valued claims. Microsoft Graph can now be used to read/write multi-valued directory schema extension attributes. [Learn more](../develop/active-directory-saml-claims-customization.md). - ---- - -### Public Preview – Flagged Sign-ins - -**Type:** New feature -**Service category:** Reporting -**Product capability:** Monitoring & Reporting - -Flagged sign-ins is a feature that will increase the signal to noise ratio for user sign-ins where users need help. The functionality is intended to empower users to raise awareness about sign-in errors they want help with. Also to help admins and help desk workers find the right sign-in events quickly and efficiently. [Learn more](../reports-monitoring/overview-flagged-sign-ins.md). 
- ---- - -### Public preview - Device overview - -**Type:** New feature -**Service category:** Device Registration and Management -**Product capability:** Device Lifecycle Management - -The new Device Overview feature provides actionable insights about devices in your tenant. [Learn more](../devices/device-management-azure-portal.md). - ---- - -### Public preview - Azure Active Directory workload identity federation - -**Type:** New feature -**Service category:** Enterprise Apps -**Product capability:** Developer Experience - -Azure AD workload identity federation is a new capability that's in public preview. It frees developers from handling application secrets or certificates. This includes secrets in scenarios such as using GitHub Actions and building applications on Kubernetes. Rather than creating an application secret and using that to get tokens for that application, developers can instead use tokens provided by the respective platforms such as GitHub and Kubernetes without having to manage any secrets manually.[Learn more](../develop/workload-identity-federation.md). - ---- - -### Public Preview - Updates to Sign-in Diagnostic - -**Type:** Changed feature -**Service category:** Reporting -**Product capability:** Monitoring & Reporting - -With this update, the diagnostic covers more scenarios and is made more easily available to admins. - -New scenarios covered when using the Sign-in Diagnostic: -- Pass Through Authentication sign-in failures -- Seamless Single-Sign On sign-in failures - -Other changes include: -- Flagged Sign-ins will automatically appear for investigation when using the Sign-in Diagnostic from Diagnose and Solve. -- Sign-in Diagnostic is now available from the Enterprise Apps Diagnose and Solve blade. -- The Sign-in Diagnostic is now available in the Basic Info tab of the Sign-in Log event view for all sign-in events. [Learn more](../reports-monitoring/concept-sign-in-diagnostics-scenarios.md#supported-scenarios). 
- ---- - -### General Availability - Privileged Role Administrators can now create Azure AD access reviews on role-assignable groups - -**Type:** Fixed -**Service category:** Access Reviews -**Product capability:** Identity Governance - -Privileged Role Administrators can now create Azure AD access reviews on Azure AD role-assignable groups, in addition to Azure AD roles. [Learn more](../governance/deploy-access-reviews.md#who-will-create-and-manage-access-reviews). - ---- - -### General Availability - Azure AD single Sign on and device-based Conditional Access support in Firefox on Windows 10/11 - -**Type:** New feature -**Service category:** Authentications (Logins) -**Product capability:** SSO - -We now support native single sign-on (SSO) support and device-based Conditional Access to Firefox browser on Windows 10 and Windows Server 2019 starting in Firefox version 91. [Learn more](../conditional-access/require-managed-devices.md#prerequisites). - ---- - -### General Availability - New app indicator in My Apps - -**Type:** New feature -**Service category:** My Apps -**Product capability:** End User Experiences - -Apps that have been recently assigned to the user show up with a "new" indicator. When the app is launched or the page is refreshed, this indicator disappears. [Learn more](/azure/active-directory/user-help/my-apps-portal-end-user-access). - ---- - -### General availability - Custom domain support in Azure AD B2C - -**Type:** New feature -**Service category:** B2C - Consumer Identity Management -**Product capability:** B2B/B2C - -Azure AD B2C customers can now enable custom domains so their end-users are redirected to a custom URL domain for authentication. This is done via integration with Azure Front Door's custom domains capability. [Learn more](../../active-directory-b2c/custom-domain.md?pivots=b2c-user-flow). 
- ---- - -### General availability - Edge Administrator built-in role - -**Type:** New feature -**Service category:** RBAC -**Product capability:** Access Control - - -Users in this role can create and manage the enterprise site list required for Internet Explorer mode on Microsoft Edge. This role grants permissions to create, edit, and publish the site list and additionally allows access to manage support tickets. [Learn more](/deployedge/edge-ie-mode-cloud-site-list-mgmt) - ---- - -### General availability - Windows 365 Administrator built-in role - -**Type:** New feature -**Service category:** RBAC -**Product capability:** Access Control - -Users with this role have global permissions on Windows 365 resources, when the service is present. Additionally, this role contains the ability to manage users and devices to associate a policy, and create and manage groups. [Learn more](../roles/permissions-reference.md) - ---- - -### New Federated Apps available in Azure AD Application gallery - October 2021 - -**Type:** New feature -**Service category:** Enterprise Apps -**Product capability:** 3rd Party Integration - -In October 2021 we've added the following 10 new applications in our App gallery with Federation support: - -[Adaptive Shield](../saas-apps/adaptive-shield-tutorial.md), [SocialChorus Search](https://socialchorus.com/), [Hiretual-SSO](../saas-apps/hiretual-tutorial.md), [TeamSticker by Communitio](../saas-apps/teamsticker-by-communitio-tutorial.md), [embed signage](../saas-apps/embed-signage-tutorial.md), [JoinedUp](../saas-apps/joinedup-tutorial.md), [VECOS Releezme Locker management system](../saas-apps/vecos-releezme-locker-management-system-tutorial.md), [Altoura](../saas-apps/altoura-tutorial.md), [Dagster Cloud](../saas-apps/dagster-cloud-tutorial.md), [Qualaroo](../saas-apps/qualaroo-tutorial.md) - -You can also find the documentation of all the applications here: https://aka.ms/AppsTutorial - -For listing your application in the Azure AD app 
gallery, read the following article: https://aka.ms/AzureADAppRequest - ---- - -### Continuous Access Evaluation migration with Conditional Access - -**Type:** Changed feature -**Service category:** Conditional Access -**Product capability:** User Authentication - -A new user experience is available for our CAE tenants. Tenants will now access CAE as part of Conditional Access. Any tenants that were previously using CAE for some (but not all) user accounts under the old UX or had previously disabled the old CAE UX will now be required to undergo a one time migration experience.[Learn more](../conditional-access/concept-continuous-access-evaluation.md#migration). - ---- - -### Improved group list blade - -**Type:** Changed feature -**Service category:** Group Management -**Product capability:** Directory - -The new group list blade offers more sort and filtering capabilities, infinite scrolling, and better performance. [Learn more](../enterprise-users/groups-members-owners-search.md). - ---- - -### General availability - Google deprecation of Gmail sign-in support on embedded webviews on September 30, 2021 - -**Type:** Changed feature -**Service category:** B2B -**Product capability:** B2B/B2C - -Google has deprecated Gmail sign-ins on Microsoft Teams mobile and custom apps that run Gmail authentications on embedded webviews on Sept. 30th, 2021. - -If you would like to request an extension, impacted customers with affected OAuth client ID(s) should have received an email from Google Developers with the following information regarding a one-time policy enforcement extension, which must be completed by Jan 31, 2022. - -To continue allowing your Gmail users to sign in and redeem, we strongly recommend that you refer to [Embedded vs System Web](../develop/msal-net-web-browsers.md#embedded-vs-system-web-ui) UI in the MSAL.NET documentation and modify your apps to use the system browser for sign-in. All MSAL SDKs use the system web-view by default. 
- -As a workaround, we are deploying the device login flow by October 8. Between today and until then, it is likely that it may not be rolled out to all regions yet (in which case, end-users will be met with an error screen until it gets deployed to your region.) - -For more details on the device login flow and details on requesting extension to Google, see [Add Google as an identity provider for B2B guest users](../external-identities/google-federation.md#deprecation-of-web-view-sign-in-support). - ---- - -### Identity Governance Administrator can create and manage Azure AD access reviews of groups and applications - -**Type:** Changed feature -**Service category:** Access Reviews -**Product capability:** Identity Governance - -Identity Governance Administrator can create and manage Azure AD access reviews of groups and applications. [Learn more](../governance/deploy-access-reviews.md#who-will-create-and-manage-access-reviews). - ---- - diff --git a/articles/active-directory/governance/entitlement-management-access-package-incompatible.md b/articles/active-directory/governance/entitlement-management-access-package-incompatible.md index 0ccb539a50eda..d58b0b190b298 100644 --- a/articles/active-directory/governance/entitlement-management-access-package-incompatible.md +++ b/articles/active-directory/governance/entitlement-management-access-package-incompatible.md @@ -124,6 +124,10 @@ Follow these steps to view the list of users who have assignments to two access ### Identifying users who already have incompatible access programmatically +You can retrieve assignments to an access package using Microsoft Graph, that are scoped to just those users who also have an assignment to another access package. 
A user in an administrative role with an application that has the delegated `EntitlementManagement.Read.All` or `EntitlementManagement.ReadWrite.All` permission can call the API to [list additional access](/graph/api/accesspackageassignment-additionalaccess?view=graph-rest-beta&preserve-view=true). + +### Identifying users who already have incompatible access using PowerShell + You can also query the users who have assignments to an access package with the `Get-MgEntitlementManagementAccessPackageAssignment` cmdlet from the [Microsoft Graph PowerShell cmdlets for Identity Governance](https://www.powershellgallery.com/packages/Microsoft.Graph.Identity.Governance/) module version 1.6.0 or later. For example, if you have two access packages, one with ID `29be137f-b006-426c-b46a-0df3d4e25ccd` and the other with ID `cce10272-68d8-4482-8ba3-a5965c86cfe5`, then you could retrieve the users who have assignments to the first access package, and then compare them to the users who have assignments to the second access package. You can also report the users who have assignments delivered to both, using a PowerShell script similar to the following: diff --git a/articles/active-directory/index.yml b/articles/active-directory/index.yml index f1581c1d90942..0d45f162f4322 100644 --- a/articles/active-directory/index.yml +++ b/articles/active-directory/index.yml @@ -12,7 +12,7 @@ metadata: ms.collection: M365-identity-device-management author: rolyon ms.author: rolyon - manager: karenhoran + manager: CelesteDG ms.date: 01/25/2022 highlightedContent: @@ -322,4 +322,4 @@ additionalContent: url: /powershell/module/azuread/ - title: Azure CLI commands for Azure AD summary: Find the Azure AD commands in the CLI reference. 
- url: /cli/azure/ad \ No newline at end of file + url: /cli/azure/ad diff --git a/articles/active-directory/saas-apps/datto-file-protection-tutorial.md b/articles/active-directory/saas-apps/datto-file-protection-tutorial.md new file mode 100644 index 0000000000000..b9ab99ba4a1e3 --- /dev/null +++ b/articles/active-directory/saas-apps/datto-file-protection-tutorial.md @@ -0,0 +1,144 @@ +--- +title: 'Tutorial: Azure AD SSO integration with Datto File Protection Single Sign On' +description: Learn how to configure single sign-on between Azure Active Directory and Datto File Protection Single Sign On. +services: active-directory +author: jeevansd +manager: CelesteDG +ms.reviewer: CelesteDG +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 04/13/2022 +ms.author: jeedes + +--- + +# Tutorial: Azure AD SSO integration with Datto File Protection Single Sign On + +In this tutorial, you'll learn how to integrate Datto File Protection Single Sign On with Azure Active Directory (Azure AD). When you integrate Datto File Protection Single Sign On with Azure AD, you can: + +* Control in Azure AD who has access to Datto File Protection Single Sign On. +* Enable your users to be automatically signed-in to Datto File Protection Single Sign On with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. + +## Prerequisites + +To get started, you need the following items: + +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* Datto File Protection Single Sign On enabled subscription. +* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). + +## Scenario description + +In this tutorial, you configure and test Azure AD SSO in a test environment. 
+ +* Datto File Protection Single Sign On supports **SP** and **IDP** initiated SSO. + +## Add Datto File Protection Single Sign On from the gallery + +To configure the integration of Datto File Protection Single Sign On into Azure AD, you need to add Datto File Protection Single Sign On from the gallery to your list of managed SaaS apps. + +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **Datto File Protection Single Sign On** in the search box. +1. Select **Datto File Protection Single Sign On** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. + +## Configure and test Azure AD SSO for Datto File Protection Single Sign On + +Configure and test Azure AD SSO with Datto File Protection Single Sign On using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in Datto File Protection Single Sign On. + +To configure and test Azure AD SSO with Datto File Protection Single Sign On, perform the following steps: + +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. **[Configure Datto File Protection Single Sign On SSO](#configure-datto-file-protection-single-sign-on-sso)** - to configure the single sign-on settings on application side. + 1. 
**[Create Datto File Protection Single Sign On test user](#create-datto-file-protection-single-sign-on-test-user)** - to have a counterpart of B.Simon in Datto File Protection Single Sign On that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. + +## Configure Azure AD SSO + +Follow these steps to enable Azure AD SSO in the Azure portal. + +1. In the Azure portal, on the **Datto File Protection Single Sign On** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic SAML Configuration.](common/edit-urls.png "Basic Configuration") + +1. On the **Basic SAML Configuration** section, the user does not have to perform any step as the app is already pre-integrated with Azure. + +1. On the **Basic SAML Configuration** section, if you wish to configure the application in **SP** initiated mode then perform the following steps: + + a. In the **Identifier** textbox, type the URL: + `https://saml.fileprotection.datto.com/singlesignon/saml/metadata` + + b. In the **Reply URL** textbox, type the URL: + `https://saml.fileprotection.datto.com/singlesignon/saml/SSO` + + c. In the **Sign on URL** textbox, type a URL using the following pattern: + `https://.fileprotection.datto.com` + + > [!NOTE] + > This value is not real. Update this value with the actual Sign on URL. Contact [Datto File Protection Single Sign On Client support team](mailto:ms-sso-support@ot.soonr.com) to get this value. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. + +1. 
On the **Set up single sign-on with SAML** page, in the **SAML Signing Certificate** section, click the copy button to copy the **App Federation Metadata Url** and save it on your computer.
+ +## Configure Datto File Protection Single Sign On SSO + +To configure single sign-on on **Datto File Protection Single Sign On** side, you need to send the **App Federation Metadata Url** to [Datto File Protection Single Sign On support team](mailto:ms-sso-support@ot.soonr.com). They set this setting to have the SAML SSO connection set properly on both sides. + +### Create Datto File Protection Single Sign On test user + +In this section, you create a user called Britta Simon in Datto File Protection Single Sign On. Work with [Datto File Protection Single Sign On support team](mailto:ms-sso-support@ot.soonr.com) to add the users in the Datto File Protection Single Sign On platform. Users must be created and activated before you use single sign-on. + +## Test SSO + +In this section, you test your Azure AD single sign-on configuration with following options. + +#### SP initiated: + +* Click on **Test this application** in Azure portal. This will redirect to Datto File Protection Single Sign On Sign on URL where you can initiate the login flow. + +* Go to Datto File Protection Single Sign On Sign-on URL directly and initiate the login flow from there. + +#### IDP initiated: + +* Click on **Test this application** in Azure portal and you should be automatically signed in to the Datto File Protection Single Sign On for which you set up the SSO. + +You can also use Microsoft My Apps to test the application in any mode. When you click the Datto File Protection Single Sign On tile in the My Apps, if configured in SP mode you would be redirected to the application sign on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the Datto File Protection Single Sign On for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](../user-help/my-apps-portal-end-user-access.md). 
+ +## Next steps + +Once you configure Datto File Protection Single Sign On you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Cloud App Security](/cloud-app-security/proxy-deployment-aad). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/debroome-brand-portal-tutorial.md b/articles/active-directory/saas-apps/debroome-brand-portal-tutorial.md new file mode 100644 index 0000000000000..e8dde53db2572 --- /dev/null +++ b/articles/active-directory/saas-apps/debroome-brand-portal-tutorial.md @@ -0,0 +1,155 @@ +--- +title: 'Tutorial: Azure AD SSO integration with deBroome Brand Portal' +description: Learn how to configure single sign-on between Azure Active Directory and deBroome Brand Portal. +services: active-directory +author: jeevansd +manager: CelesteDG +ms.reviewer: celested +ms.service: active-directory +ms.subservice: saas-app-tutorial +ms.workload: identity +ms.topic: tutorial +ms.date: 04/29/2022 +ms.author: jeedes +--- + +# Tutorial: Azure AD SSO integration with deBroome Brand Portal + +In this tutorial, you'll learn how to integrate deBroome Brand Portal with Azure Active Directory (Azure AD). When you integrate deBroome Brand Portal with Azure AD, you can: + +* Control in Azure AD who has access to deBroome Brand Portal. +* Enable your users to be automatically signed-in to deBroome Brand Portal with their Azure AD accounts. +* Manage your accounts in one central location - the Azure portal. + +## Prerequisites + +To get started, you need the following items: + +* An Azure AD subscription. If you don't have a subscription, you can get a [free account](https://azure.microsoft.com/free/). +* deBroome Brand Portal single sign-on (SSO) enabled subscription. 
+* Along with Cloud Application Administrator, Application Administrator can also add or manage applications in Azure AD. +For more information, see [Azure built-in roles](../roles/permissions-reference.md). + +## Scenario description + +In this tutorial, you configure and test Azure AD SSO in a test environment. + +* deBroome Brand Portal supports **SP and IDP** initiated SSO. +* deBroome Brand Portal supports **Just In Time** user provisioning. + +## Add deBroome Brand Portal from the gallery + +To configure the integration of deBroome Brand Portal into Azure AD, you need to add deBroome Brand Portal from the gallery to your list of managed SaaS apps. + +1. Sign in to the Azure portal using either a work or school account, or a personal Microsoft account. +1. On the left navigation pane, select the **Azure Active Directory** service. +1. Navigate to **Enterprise Applications** and then select **All Applications**. +1. To add new application, select **New application**. +1. In the **Add from the gallery** section, type **deBroome Brand Portal** in the search box. +1. Select **deBroome Brand Portal** from results panel and then add the app. Wait a few seconds while the app is added to your tenant. + +## Configure and test Azure AD SSO for deBroome Brand Portal + +Configure and test Azure AD SSO with deBroome Brand Portal using a test user called **B.Simon**. For SSO to work, you need to establish a link relationship between an Azure AD user and the related user in deBroome Brand Portal. + +To configure and test Azure AD SSO with deBroome Brand Portal, perform the following steps: + +1. **[Configure Azure AD SSO](#configure-azure-ad-sso)** - to enable your users to use this feature. + 1. **[Create an Azure AD test user](#create-an-azure-ad-test-user)** - to test Azure AD single sign-on with B.Simon. + 1. **[Assign the Azure AD test user](#assign-the-azure-ad-test-user)** - to enable B.Simon to use Azure AD single sign-on. +1. 
**[Configure deBroome Brand Portal SSO](#configure-debroome-brand-portal-sso)** - to configure the single sign-on settings on application side. + 1. **[Create deBroome Brand Portal test user](#create-debroome-brand-portal-test-user)** - to have a counterpart of B.Simon in deBroome Brand Portal that is linked to the Azure AD representation of user. +1. **[Test SSO](#test-sso)** - to verify whether the configuration works. + +## Configure Azure AD SSO + +Follow these steps to enable Azure AD SSO in the Azure portal. + +1. In the Azure portal, on the **deBroome Brand Portal** application integration page, find the **Manage** section and select **single sign-on**. +1. On the **Select a single sign-on method** page, select **SAML**. +1. On the **Set up single sign-on with SAML** page, click the pencil icon for **Basic SAML Configuration** to edit the settings. + + ![Screenshot shows to edit Basic SAML Configuration.](common/edit-urls.png "Basic Configuration") + +1. On the **Basic SAML Configuration** section, perform the following steps: + + a. In the **Identifier** textbox, type a URL using the following pattern: + `https:///rv2/saml2/metadata` + + b. In the **Reply URL** textbox, type a URL using the following pattern: + `https:///rv2/saml2/acs` + +1. Click **Set additional URLs** and perform the following step if you wish to configure the application in **SP** initiated mode: + + In the **Sign-on URL** text box, type a URL using the following pattern: + `https:///sso` + + > [!NOTE] + > These values are not real. Update these values with the actual Identifier, Reply URL and Sign-on URL. Contact [deBroome Brand Portal Client support team](mailto:support@debroome.com) to get these values. You can also refer to the patterns shown in the **Basic SAML Configuration** section in the Azure portal. + +1. 
deBroome Brand Portal application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes. + + ![image](common/default-attributes.png) + +1. In addition to above, deBroome Brand Portal application expects few more attributes to be passed back in SAML response, which are shown below. These attributes are also pre populated but you can review them as per your requirements. + + | Name | Source Attribute| + | ------------ | --------- | + | firstName | user.givenname | + | lastName | user.surname | + +1. On the **Set up single sign-on with SAML** page, In the **SAML Signing Certificate** section, click copy button to copy **App Federation Metadata Url** and save it on your computer. + + ![The Certificate download link](common/copy-metadataurl.png) + +### Create an Azure AD test user + +In this section, you'll create a test user in the Azure portal called B.Simon. + +1. From the left pane in the Azure portal, select **Azure Active Directory**, select **Users**, and then select **All users**. +1. Select **New user** at the top of the screen. +1. In the **User** properties, follow these steps: + 1. In the **Name** field, enter `B.Simon`. + 1. In the **User name** field, enter the username@companydomain.extension. For example, `B.Simon@contoso.com`. + 1. Select the **Show password** check box, and then write down the value that's displayed in the **Password** box. + 1. Click **Create**. + +### Assign the Azure AD test user + +In this section, you'll enable B.Simon to use Azure single sign-on by granting access to deBroome Brand Portal. + +1. In the Azure portal, select **Enterprise Applications**, and then select **All applications**. +1. In the applications list, select **deBroome Brand Portal**. +1. In the app's overview page, find the **Manage** section and select **Users and groups**. +1. 
Select **Add user**, then select **Users and groups** in the **Add Assignment** dialog. +1. In the **Users and groups** dialog, select **B.Simon** from the Users list, then click the **Select** button at the bottom of the screen. +1. If you're expecting any role value in the SAML assertion, in the **Select Role** dialog, select the appropriate role for the user from the list and then click the **Select** button at the bottom of the screen. +1. In the **Add Assignment** dialog, click the **Assign** button. + +## Configure deBroome Brand Portal SSO + +To configure single sign-on on **deBroome Brand Portal** side, you need to send the **App Federation Metadata Url** to [deBroome Brand Portal support team](mailto:support@debroome.com). They set this setting to have the SAML SSO connection set properly on both sides. + +### Create deBroome Brand Portal test user + +In this section, a user called B.Simon is created in deBroome Brand Portal. deBroome Brand Portal supports just-in-time user provisioning, which is enabled by default. There is no action item for you in this section. If a user doesn't already exist in deBroome Brand Portal, a new one is created after authentication. + +## Test SSO + +In this section, you test your Azure AD single sign-on configuration with following options. + +#### SP initiated: + +* Click on **Test this application** in Azure portal. This will redirect to deBroome Brand Portal Sign on URL where you can initiate the login flow. + +* Go to deBroome Brand Portal Sign-on URL directly and initiate the login flow from there. + +#### IDP initiated: + +* Click on **Test this application** in Azure portal and you should be automatically signed in to the deBroome Brand Portal for which you set up the SSO. + +You can also use Microsoft My Apps to test the application in any mode. 
When you click the deBroome Brand Portal tile in the My Apps, if configured in SP mode you would be redirected to the application sign-on page for initiating the login flow and if configured in IDP mode, you should be automatically signed in to the deBroome Brand Portal for which you set up the SSO. For more information about the My Apps, see [Introduction to the My Apps](https://support.microsoft.com/account-billing/sign-in-and-start-apps-from-the-my-apps-portal-2f3b1bae-0e5a-4a86-a33e-876fbd2a4510). + +## Next steps + +Once you configure deBroome Brand Portal you can enforce session control, which protects exfiltration and infiltration of your organization’s sensitive data in real time. Session control extends from Conditional Access. [Learn how to enforce session control with Microsoft Defender for Cloud Apps](/cloud-app-security/proxy-deployment-aad). \ No newline at end of file diff --git a/articles/active-directory/saas-apps/goalquest-tutorial.md b/articles/active-directory/saas-apps/goalquest-tutorial.md index 5a5f2e3139f64..abe182c8d2280 100644 --- a/articles/active-directory/saas-apps/goalquest-tutorial.md +++ b/articles/active-directory/saas-apps/goalquest-tutorial.md @@ -9,7 +9,7 @@ ms.service: active-directory ms.subservice: saas-app-tutorial ms.workload: identity ms.topic: tutorial -ms.date: 04/13/2022 +ms.date: 04/29/2022 ms.author: jeedes --- @@ -73,11 +73,11 @@ Follow these steps to enable Azure AD SSO in the Azure portal. 1. On the **Basic SAML Configuration** section, the application is pre-configured and the necessary URLs are already pre-populated with Azure. The user needs to save the configuration by clicking the **Save** button. -1. Airtable application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes. +1. 
GoalQuest application expects the SAML assertions in a specific format, which requires you to add custom attribute mappings to your SAML token attributes configuration. The following screenshot shows the list of default attributes. ![Screenshot that shows attributes configuration image.](common/default-attributes.png "Image") -1. In addition to above, Airtable application expects few more attributes to be passed back in SAML response, which are shown below. These attributes are also pre populated but you can review them as per your requirements. +1. In addition to above, GoalQuest application expects few more attributes to be passed back in SAML response, which are shown below. These attributes are also pre populated but you can review them as per your requirements. | Name | Source Attribute| | ------- | --------- | diff --git a/articles/active-directory/saas-apps/toc.yml b/articles/active-directory/saas-apps/toc.yml index 59254f1c8ee38..d2bca18e46a53 100644 --- a/articles/active-directory/saas-apps/toc.yml +++ b/articles/active-directory/saas-apps/toc.yml @@ -601,10 +601,14 @@ href: datasite-tutorial.md - name: Datava Enterprise Service Platform href: datava-enterprise-service-platform-tutorial.md + - name: Datto File Protection Single Sign On + href: datto-file-protection-tutorial.md - name: Datto Workplace Single Sign On href: datto-workplace-tutorial.md - name: Dealpath href: dealpath-tutorial.md + - name: deBroome Brand Portal + href: debroome-brand-portal-tutorial.md - name: Degreed href: degreed-tutorial.md - name: Deputy diff --git a/articles/aks/TOC.yml b/articles/aks/TOC.yml index 0e4dbadaf3f09..1daf0b64a2b8f 100644 --- a/articles/aks/TOC.yml +++ b/articles/aks/TOC.yml @@ -16,18 +16,24 @@ - name: Quickstarts expanded: true items: - - name: Deploy an AKS Cluster + - name: Deploy a Linux-based AKS Cluster expanded: true items: - name: Use the Azure CLI - href: kubernetes-walkthrough.md + href: learn/quick-kubernetes-deploy-cli.md - name: Use Azure 
PowerShell - href: kubernetes-walkthrough-powershell.md + href: learn/quick-kubernetes-deploy-powershell.md - name: Use the Azure portal - href: kubernetes-walkthrough-portal.md + href: learn/quick-kubernetes-deploy-portal.md - name: Use ARM template displayName: Resource Manager - href: kubernetes-walkthrough-rm-template.md + href: learn/quick-kubernetes-deploy-rm-template.md + - name: Deploy a Windows-based AKS Cluster + items: + - name: Use the Azure CLI + href: learn/quick-windows-container-deploy-cli.md + - name: Use Azure PowerShell + href: learn/quick-windows-container-deploy-powershell.md - name: Develop applications expanded: true items: @@ -397,10 +403,6 @@ maintainContext: true - name: Use Windows Server containers items: - - name: Create an AKS cluster - href: windows-container-cli.md - - name: Create an AKS cluster with PowerShell - href: windows-container-powershell.md - name: Connect remotely href: rdp.md - name: Windows Server containers FAQ diff --git a/articles/aks/azure-disk-csi.md b/articles/aks/azure-disk-csi.md index 51baeb5173aa7..edd748720e769 100644 --- a/articles/aks/azure-disk-csi.md +++ b/articles/aks/azure-disk-csi.md @@ -262,7 +262,7 @@ Filesystem Size Used Avail Use% Mounted on ## Windows containers -The Azure disk CSI driver also supports Windows nodes and containers. If you want to use Windows containers, follow the [Windows containers tutorial](windows-container-cli.md) to add a Windows node pool. +The Azure disk CSI driver also supports Windows nodes and containers. If you want to use Windows containers, follow the [Windows containers quickstart][aks-quickstart-cli] to add a Windows node pool. After you have a Windows node pool, you can now use the built-in storage classes like `managed-csi`. 
You can deploy an example [Windows-based stateful set](https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/deploy/example/windows/statefulset.yaml) that saves timestamps into the file `data.txt` by deploying the following command with the [kubectl apply][kubectl-apply] command: @@ -306,8 +306,8 @@ $ kubectl exec -it busybox-azuredisk-0 -- cat c:\mnt\azuredisk\data.txt # on Win [az-snapshot-create]: /cli/azure/snapshot#az_snapshot_create [az-disk-create]: /cli/azure/disk#az_disk_create [az-disk-show]: /cli/azure/disk#az_disk_show -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md [install-azure-cli]: /cli/azure/install-azure-cli [operator-best-practices-storage]: operator-best-practices-storage.md [concepts-storage]: concepts-storage.md diff --git a/articles/aks/azure-disk-volume.md b/articles/aks/azure-disk-volume.md index b5b7d64d3e0b2..a18138c263f8e 100644 --- a/articles/aks/azure-disk-volume.md +++ b/articles/aks/azure-disk-volume.md @@ -20,7 +20,7 @@ For more information on Kubernetes volumes, see [Storage options for application ## Before you begin -This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. You also need the Azure CLI version 2.0.59 or later installed and configured. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. 
@@ -158,8 +158,9 @@ For more information about AKS clusters interact with Azure disks, see the [Kube [az-disk-create]: /cli/azure/disk#az_disk_create [az-group-list]: /cli/azure/group#az_group_list [az-resource-show]: /cli/azure/resource#az_resource_show -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [az-aks-show]: /cli/azure/aks#az_aks_show [install-azure-cli]: /cli/azure/install-azure-cli [azure-files-volume]: azure-files-volume.md diff --git a/articles/aks/azure-disks-dynamic-pv.md b/articles/aks/azure-disks-dynamic-pv.md index 44432da99265d..4faceef89909b 100644 --- a/articles/aks/azure-disks-dynamic-pv.md +++ b/articles/aks/azure-disks-dynamic-pv.md @@ -21,7 +21,7 @@ For more information on Kubernetes volumes, see [Storage options for application ## Before you begin -This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. You also need the Azure CLI version 2.0.59 or later installed and configured. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. @@ -283,8 +283,9 @@ Learn more about Kubernetes persistent volumes using Azure disks. 
[az-snapshot-create]: /cli/azure/snapshot#az_snapshot_create [az-disk-create]: /cli/azure/disk#az_disk_create [az-disk-show]: /cli/azure/disk#az_disk_show -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [install-azure-cli]: /cli/azure/install-azure-cli [operator-best-practices-storage]: operator-best-practices-storage.md [concepts-storage]: concepts-storage.md diff --git a/articles/aks/azure-files-csi.md b/articles/aks/azure-files-csi.md index 389e175a1116d..aef94c285df15 100644 --- a/articles/aks/azure-files-csi.md +++ b/articles/aks/azure-files-csi.md @@ -322,7 +322,7 @@ accountname.file.core.windows.net:/accountname/pvc-fa72ec43-ae64-42e4-a8a2-55660 ## Windows containers -The Azure Files CSI driver also supports Windows nodes and containers. If you want to use Windows containers, follow the [Windows containers tutorial](windows-container-cli.md) to add a Windows node pool. +The Azure Files CSI driver also supports Windows nodes and containers. If you want to use Windows containers, follow the [Windows containers quickstart](./learn/quick-windows-container-deploy-cli.md) to add a Windows node pool. After you have a Windows node pool, use the built-in storage classes like `azurefile-csi` or create custom ones. 
You can deploy an example [Windows-based stateful set](https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/deploy/example/windows/statefulset.yaml) that saves timestamps into a file `data.txt` by deploying the following command with the [kubectl apply][kubectl-apply] command: @@ -366,8 +366,9 @@ $ kubectl exec -it busybox-azurefile-0 -- cat c:\mnt\azurefile\data.txt # on Win [az-snapshot-create]: /cli/azure/snapshot#az_snapshot_create [az-disk-create]: /cli/azure/disk#az_disk_create [az-disk-show]: /cli/azure/disk#az_disk_show -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [install-azure-cli]: /cli/azure/install-azure-cli [operator-best-practices-storage]: operator-best-practices-storage.md [concepts-storage]: concepts-storage.md diff --git a/articles/aks/azure-files-dynamic-pv.md b/articles/aks/azure-files-dynamic-pv.md index 6643af5c5610a..220d276fa0082 100644 --- a/articles/aks/azure-files-dynamic-pv.md +++ b/articles/aks/azure-files-dynamic-pv.md @@ -18,7 +18,7 @@ For more information on Kubernetes volumes, see [Storage options for application ## Before you begin -This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. You also need the Azure CLI version 2.0.59 or later installed and configured. Run `az --version` to find the version. 
If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. @@ -223,8 +223,9 @@ Learn more about Kubernetes persistent volumes using Azure Files. [az-storage-key-list]: /cli/azure/storage/account/keys#az_storage_account_keys_list [az-storage-share-create]: /cli/azure/storage/share#az_storage_share_create [mount-options]: #mount-options -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [install-azure-cli]: /cli/azure/install-azure-cli [az-aks-show]: /cli/azure/aks#az_aks_show [storage-skus]: ../storage/common/storage-redundancy.md diff --git a/articles/aks/azure-files-volume.md b/articles/aks/azure-files-volume.md index a43f14e21a860..7fbf1c2c5c6e6 100644 --- a/articles/aks/azure-files-volume.md +++ b/articles/aks/azure-files-volume.md @@ -19,7 +19,7 @@ For more information on Kubernetes volumes, see [Storage options for application ## Before you begin -This article assumes that you have an existing AKS 1.21 or above cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. You also need the Azure CLI version 2.0.59 or later installed and configured. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. 
@@ -215,8 +215,9 @@ For associated best practices, see [Best practices for storage and backups in AK [CSI driver parameters]: https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/driver-parameters.md#static-provisionbring-your-own-file-share -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [install-azure-cli]: /cli/azure/install-azure-cli [operator-best-practices-storage]: operator-best-practices-storage.md [concepts-storage]: concepts-storage.md diff --git a/articles/aks/azure-hpc-cache.md b/articles/aks/azure-hpc-cache.md index 8649340a3430f..975736b50bf45 100644 --- a/articles/aks/azure-hpc-cache.md +++ b/articles/aks/azure-hpc-cache.md @@ -16,7 +16,7 @@ ms.date: 09/08/2021 ## Before you begin -This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. > [!IMPORTANT] > Your AKS cluster must be [in a region that supports Azure HPC Cache][hpc-cache-regions]. @@ -331,8 +331,9 @@ We'd love to hear from you! Please send any feedback or questions to [!IMPORTANT] > Your AKS cluster must also be [in a region that supports Azure NetApp Files][anf-regions]. @@ -520,8 +520,9 @@ For more details on using Azure tags, see [Use Azure tags in Azure Kubernetes Se * For more information on Azure NetApp Files, see [What is Azure NetApp Files][anf]. 
-[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [aks-nfs]: azure-nfs-volume.md [anf]: ../azure-netapp-files/azure-netapp-files-introduction.md [anf-delegate-subnet]: ../azure-netapp-files/azure-netapp-files-delegate-subnet.md diff --git a/articles/aks/azure-nfs-volume.md b/articles/aks/azure-nfs-volume.md index 924d7b5b3a4e3..e5d6998e39a64 100644 --- a/articles/aks/azure-nfs-volume.md +++ b/articles/aks/azure-nfs-volume.md @@ -10,13 +10,14 @@ ms.author: obboms --- # Manually create and use an NFS (Network File System) Linux Server volume with Azure Kubernetes Service (AKS) -Sharing data between containers is often a necessary component of container-based services and applications. You usually have various pods that need access to the same information on an external persistent volume. +Sharing data between containers is often a necessary component of container-based services and applications. You usually have various pods that need access to the same information on an external persistent volume. While Azure files are an option, creating an NFS Server on an Azure VM is another form of persistent shared storage. This article will show you how to create an NFS Server on an Ubuntu virtual machine. And also give your AKS containers access to this shared file system. ## Before you begin -This article assumes that you have an existing AKS Cluster. If you need an AKS Cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. + +This article assumes that you have an existing AKS cluster. 
If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. Your AKS Cluster will need to live in the same or peered virtual networks as the NFS Server. The cluster must be created in an existing VNET, which can be the same VNET as your VM. @@ -162,6 +163,7 @@ For associated best practices, see [Best practices for storage and backups in AK [peer-virtual-networks]: ../virtual-network/tutorial-connect-virtual-networks-portal.md -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [operator-best-practices-storage]: operator-best-practices-storage.md diff --git a/articles/aks/best-practices.md b/articles/aks/best-practices.md index b173677e3f10e..15c720388443d 100644 --- a/articles/aks/best-practices.md +++ b/articles/aks/best-practices.md @@ -76,4 +76,9 @@ To help understand some of the features and components of these best practices, ## Next steps -If you need to get started with AKS, follow one of the quickstarts to deploy an Azure Kubernetes Service (AKS) cluster using the [Azure CLI](kubernetes-walkthrough.md) or [Azure portal](kubernetes-walkthrough-portal.md). +If you need to get started with AKS, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. 
+ + +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md \ No newline at end of file diff --git a/articles/aks/cluster-configuration.md b/articles/aks/cluster-configuration.md index 4061fcf26e1b2..1d0fbd55a224f 100644 --- a/articles/aks/cluster-configuration.md +++ b/articles/aks/cluster-configuration.md @@ -33,7 +33,7 @@ By using `containerd` for AKS nodes, pod startup latency improves and node resou > [!IMPORTANT] > Clusters with Linux node pools created on Kubernetes v1.19 or greater default to `containerd` for its container runtime. Clusters with node pools on a earlier supported Kubernetes versions receive Docker for their container runtime. Linux node pools will be updated to `containerd` once the node pool Kubernetes version is updated to a version that supports `containerd`. You can still use Docker node pools and clusters on older supported versions until those fall off support. > -> Using `containerd` with Windows Server 2019 node pools is generally available, although the default for node pools created on Kubernetes v1.22 and earlier is still Docker. For more details, see [Add a Windows Server node pool with `containerd`][aks-add-np-containerd]. +> Using `containerd` with Windows Server 2019 node pools is generally available, although the default for node pools created on Kubernetes v1.22 and earlier is still Docker. For more details, see [Add a Windows Server node pool with `containerd`][aks-add-np-containerd]. > > It is highly recommended to test your workloads on AKS node pools with `containerd` prior to using clusters with a Kubernetes version that supports `containerd` for your node pools.
diff --git a/articles/aks/concepts-scale.md b/articles/aks/concepts-scale.md index 8e00c00d654bb..81bd76efb3025 100644 --- a/articles/aks/concepts-scale.md +++ b/articles/aks/concepts-scale.md @@ -101,7 +101,7 @@ For more information on core Kubernetes and AKS concepts, see the following arti [virtual-kubelet]: https://virtual-kubelet.io/ -[aks-quickstart]: kubernetes-walkthrough.md +[aks-quickstart]: ./learn/quick-kubernetes-deploy-cli.md [aks-hpa]: tutorial-kubernetes-scale.md#autoscale-pods [aks-scale]: tutorial-kubernetes-scale.md [aks-manually-scale-pods]: tutorial-kubernetes-scale.md#manually-scale-pods @@ -112,4 +112,4 @@ For more information on core Kubernetes and AKS concepts, see the following arti [aks-concepts-storage]: concepts-storage.md [aks-concepts-identity]: concepts-identity.md [aks-concepts-network]: concepts-network.md -[virtual-nodes-cli]: virtual-nodes-cli.md +[virtual-nodes-cli]: virtual-nodes-cli.md \ No newline at end of file diff --git a/articles/aks/concepts-security.md b/articles/aks/concepts-security.md index 1b068f7de59e9..221df0bda510a 100644 --- a/articles/aks/concepts-security.md +++ b/articles/aks/concepts-security.md @@ -63,7 +63,7 @@ When an AKS cluster is created or scaled up, the nodes are automatically deploye > [!NOTE] > AKS clusters using: -> * Kubernetes version 1.19 and greater for Linux node pools use `containerd` as its container runtime. Using `containerd` with Windows Server 2019 node pools is currently in preview. For more details, see [Add a Windows Server node pool with `containerd`][aks-add-np-containerd]. +> * Kubernetes version 1.19 and greater for Linux node pools use `containerd` as its container runtime. Using `containerd` with Windows Server 2019 node pools is currently in preview. For more details, see [Add a Windows Server node pool with `containerd`][aks-add-np-containerd]. > * Kubernetes prior to v1.19 for Linux node pools use Docker as its container runtime.
For Windows Server 2019 node pools, Docker is the default container runtime. ### Node security patches diff --git a/articles/aks/control-kubeconfig-access.md b/articles/aks/control-kubeconfig-access.md index 5efcfba2b654e..119958571cb7f 100644 --- a/articles/aks/control-kubeconfig-access.md +++ b/articles/aks/control-kubeconfig-access.md @@ -15,7 +15,7 @@ This article shows you how to assign Azure roles that limit who can get the conf ## Before you begin -This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. This article also requires that you are running the Azure CLI version 2.0.65 or later. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][azure-cli-install]. 
@@ -154,8 +154,9 @@ For enhanced security on access to AKS clusters, [integrate Azure Active Directo [kubectl-config-view]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#config -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [azure-cli-install]: /cli/azure/install-azure-cli [az-aks-get-credentials]: /cli/azure/aks#az_aks_get_credentials [azure-rbac]: ../role-based-access-control/overview.md diff --git a/articles/aks/coredns-custom.md b/articles/aks/coredns-custom.md index 0a81a45c43db8..740a93388db0b 100644 --- a/articles/aks/coredns-custom.md +++ b/articles/aks/coredns-custom.md @@ -23,7 +23,7 @@ This article shows you how to use ConfigMaps for basic customization options of ## Before you begin -This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. When creating a configuration like the examples below, your names in the *data* section must end in either *.server* or *.override*. This naming convention is defined in the default AKS CoreDNS Configmap which you can view using the `kubectl get configmaps --namespace=kube-system coredns -o yaml` command.
@@ -224,5 +224,6 @@ To learn more about core network concepts, see [Network concepts for application [concepts-network]: concepts-network.md -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md diff --git a/articles/aks/faq.md b/articles/aks/faq.md index d1413867c961e..922c7a6e295fc 100644 --- a/articles/aks/faq.md +++ b/articles/aks/faq.md @@ -133,7 +133,7 @@ Label: ```"admissions.enforcer/disabled": "true"``` or Annotation: ```"admission ## Can I run Windows Server containers on AKS? -Yes, Windows Server containers are available on AKS. To run Windows Server containers in AKS, you create a node pool that runs Windows Server as the guest OS. Windows Server containers can use only Windows Server 2019. To get started, see [Create an AKS cluster with a Windows Server node pool][aks-windows-cli]. +Yes, Windows Server containers are available on AKS. To run Windows Server containers in AKS, you create a node pool that runs Windows Server as the guest OS. Windows Server containers can use only Windows Server 2019. To get started, see [Create an AKS cluster with a Windows Server node pool](./learn/quick-windows-container-deploy-cli.md). Windows Server support for node pool includes some limitations that are part of the upstream Windows Server in Kubernetes project. For more information on these limitations, see [Windows Server containers in AKS limitations][aks-windows-limitations]. diff --git a/articles/aks/gpu-cluster.md b/articles/aks/gpu-cluster.md index 7dcee412fc46e..8fedda98ac6c5 100644 --- a/articles/aks/gpu-cluster.md +++ b/articles/aks/gpu-cluster.md @@ -19,7 +19,7 @@ Currently, using GPU-enabled node pools is only available for Linux node pools. 
## Before you begin -This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see [Quickstart: Deploy an Azure Kubernetes Service cluster using the Azure CLI][aks-quickstart]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. You also need the Azure CLI version 2.0.64 or later installed and configured. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. @@ -428,7 +428,9 @@ For information on using Azure Kubernetes Service with Azure Machine Learning, s [az-group-create]: /cli/azure/group#az_group_create [az-aks-create]: /cli/azure/aks#az_aks_create [az-aks-get-credentials]: /cli/azure/aks#az_aks_get_credentials -[aks-quickstart]: kubernetes-walkthrough.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [aks-spark]: spark-job.md [gpu-skus]: ../virtual-machines/sizes-gpu.md [install-azure-cli]: /cli/azure/install-azure-cli diff --git a/articles/aks/index.yml b/articles/aks/index.yml index 32361ec18c95c..c29fd69f02569 100644 --- a/articles/aks/index.yml +++ b/articles/aks/index.yml @@ -55,11 +55,13 @@ landingContent: - linkListType: quickstart links: - text: Azure CLI - url: kubernetes-walkthrough.md + url: ./learn/quick-kubernetes-deploy-cli.md + - text: Azure PowerShell + url: ./learn/quick-kubernetes-deploy-powershell.md - text: Azure Portal - url: kubernetes-walkthrough-portal.md + url: ./learn/quick-kubernetes-deploy-portal.md - text: Resource Manager template - url: kubernetes-walkthrough-rm-template.md + url: ./learn/quick-kubernetes-deploy-rm-template.md # Card - title: Develop and debug applications 
diff --git a/articles/aks/ingress-internal-ip.md b/articles/aks/ingress-internal-ip.md index e0142f94f3a76..91af0880f3812 100644 --- a/articles/aks/ingress-internal-ip.md +++ b/articles/aks/ingress-internal-ip.md @@ -479,9 +479,9 @@ You can also: [aks-http-app-routing]: http-application-routing.md [aks-ingress-own-tls]: ingress-own-tls.md [client-source-ip]: concepts-network.md#ingress-controllers -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-powershell]: kubernetes-walkthrough-powershell.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md [aks-configure-kubenet-networking]: configure-kubenet.md [aks-configure-advanced-networking]: configure-azure-cni.md [aks-supported versions]: supported-kubernetes-versions.md diff --git a/articles/aks/ingress-static-ip.md b/articles/aks/ingress-static-ip.md index ce3005d6f417e..0dea8970fecf5 100644 --- a/articles/aks/ingress-static-ip.md +++ b/articles/aks/ingress-static-ip.md @@ -676,9 +676,9 @@ You can also: [aks-ingress-tls]: ingress-tls.md [aks-http-app-routing]: http-application-routing.md [aks-ingress-own-tls]: ingress-own-tls.md -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-powershell]: kubernetes-walkthrough-powershell.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md [client-source-ip]: concepts-network.md#ingress-controllers [aks-static-ip]: static-ip.md [aks-supported versions]: supported-kubernetes-versions.md diff --git a/articles/aks/ingress-tls.md b/articles/aks/ingress-tls.md index bc518cd4dfa54..797cc676ee0ac 100644 --- 
a/articles/aks/ingress-tls.md +++ b/articles/aks/ingress-tls.md @@ -702,9 +702,9 @@ You can also: [aks-ingress-basic]: ingress-basic.md [aks-http-app-routing]: http-application-routing.md [aks-ingress-own-tls]: ingress-own-tls.md -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-powershell]: kubernetes-walkthrough-powershell.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md [client-source-ip]: concepts-network.md#ingress-controllers [install-azure-cli]: /cli/azure/install-azure-cli [aks-supported versions]: supported-kubernetes-versions.md diff --git a/articles/aks/internal-lb.md b/articles/aks/internal-lb.md index 1e0657a75c42c..68ca708437c98 100644 --- a/articles/aks/internal-lb.md +++ b/articles/aks/internal-lb.md @@ -19,7 +19,7 @@ To restrict access to your applications in Azure Kubernetes Service (AKS), you c ## Before you begin -This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. You also need the Azure CLI version 2.0.59 or later installed and configured. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. 
@@ -151,8 +151,9 @@ Learn more about Kubernetes services at the [Kubernetes services documentation][ [az-role-assignment-create]: /cli/azure/role/assignment#az_role_assignment_create [azure-lb-comparison]: ../load-balancer/skus.md [use-kubenet]: configure-kubenet.md -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [install-azure-cli]: /cli/azure/install-azure-cli [aks-sp]: kubernetes-service-principal.md#delegate-access-to-other-azure-resources [different-subnet]: #specify-a-different-subnet \ No newline at end of file diff --git a/articles/aks/intro-kubernetes.md b/articles/aks/intro-kubernetes.md index e3f280933162c..d5600d4d26487 100644 --- a/articles/aks/intro-kubernetes.md +++ b/articles/aks/intro-kubernetes.md @@ -13,12 +13,12 @@ ms.custom: mvc Azure Kubernetes Service (AKS) simplifies deploying a managed Kubernetes cluster in Azure by offloading the operational overhead to Azure. As a hosted Kubernetes service, Azure handles critical tasks, like health monitoring and maintenance. Since Kubernetes masters are managed by Azure, you only manage and maintain the agent nodes. Thus, AKS is free; you only pay for the agent nodes within your clusters, not for the masters. 
You can create an AKS cluster using: -* [The Azure CLI](kubernetes-walkthrough.md) -* [The Azure portal](kubernetes-walkthrough-portal.md) -* [Azure PowerShell](kubernetes-walkthrough-powershell.md) -* Using template-driven deployment options, like [Azure Resource Manager templates](kubernetes-walkthrough-rm-template.md), [Bicep](../azure-resource-manager/bicep/overview.md) and Terraform +* [The Azure CLI][aks-quickstart-cli] +* [The Azure portal][aks-quickstart-portal] +* [Azure PowerShell][aks-quickstart-powershell] +* Using template-driven deployment options, like [Azure Resource Manager templates][aks-quickstart-template], [Bicep](../azure-resource-manager/bicep/overview.md) and Terraform. -When you deploy an AKS cluster, the Kubernetes master and all nodes are deployed and configured for you. Advanced networking, Azure Active Directory (Azure AD) integration, monitoring, and other features can be configured during the deployment process. +When you deploy an AKS cluster, the Kubernetes master and all nodes are deployed and configured for you. Advanced networking, Azure Active Directory (Azure AD) integration, monitoring, and other features can be configured during the deployment process. For more information on Kubernetes basics, see [Kubernetes core concepts for AKS][concepts-clusters-workloads]. @@ -146,7 +146,7 @@ AKS is compliant with SOC, ISO, PCI DSS, and HIPAA. For more information, see [O Learn more about deploying and managing AKS with the Azure CLI Quickstart. > [!div class="nextstepaction"] -> [Deploy an AKS Cluster using Azure CLI][aks-cli] +> [Deploy an AKS Cluster using Azure CLI][aks-quickstart-cli] [aks-engine]: https://github.com/Azure/aks-engine @@ -156,11 +156,13 @@ Learn more about deploying and managing AKS with the Azure CLI Quickstart. 
[acr-docs]: ../container-registry/container-registry-intro.md [aks-aad]: ./azure-ad-integration-cli.md -[aks-cli]: ./kubernetes-walkthrough.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md +[aks-quickstart-template]: ./learn/quick-kubernetes-deploy-rm-template.md [aks-gpu]: ./gpu-cluster.md [aks-http-routing]: ./http-application-routing.md [aks-networking]: ./concepts-network.md -[aks-portal]: ./kubernetes-walkthrough-portal.md [aks-scale]: ./tutorial-kubernetes-scale.md [aks-upgrade]: ./upgrade-cluster.md [azure-dev-spaces]: /previous-versions/azure/dev-spaces/ diff --git a/articles/aks/kubelet-logs.md b/articles/aks/kubelet-logs.md index 67b8a036dbe3d..9e3ec94b3a233 100644 --- a/articles/aks/kubelet-logs.md +++ b/articles/aks/kubelet-logs.md @@ -17,7 +17,7 @@ This article shows you how you can use `journalctl` to view the *kubelet* logs o ## Before you begin -This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. 
## Create an SSH connection @@ -73,7 +73,8 @@ If you need additional troubleshooting information from the Kubernetes master, s [aks-ssh]: ssh.md [aks-master-logs]: monitor-aks-reference.md#resource-logs -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [aks-master-logs]: monitor-aks-reference.md#resource-logs [azure-container-logs]: ../azure-monitor/containers/container-insights-overview.md diff --git a/articles/aks/kubernetes-helm.md b/articles/aks/kubernetes-helm.md index f933f0ece9a1d..934dd311ff98f 100644 --- a/articles/aks/kubernetes-helm.md +++ b/articles/aks/kubernetes-helm.md @@ -18,7 +18,7 @@ This article shows you how to configure and use Helm in a Kubernetes cluster on ## Before you begin -This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. In addition, this article assumes you have an existing AKS cluster with an integrated ACR. For more details on creating an AKS cluster with an integrated ACR, see [Authenticate with Azure Container Registry from Azure Kubernetes Service][aks-integrated-acr]. 
@@ -235,6 +235,7 @@ For more information about managing Kubernetes application deployments with Helm [acr-helm]: ../container-registry/container-registry-helm-repos.md [aks-integrated-acr]: cluster-container-registry-integration.md?tabs=azure-cli#create-a-new-aks-cluster-with-acr-integration -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [taints]: operator-best-practices-advanced-scheduler.md \ No newline at end of file diff --git a/articles/aks/kubernetes-portal.md b/articles/aks/kubernetes-portal.md index cfe45677aa198..1a9790f82b58f 100644 --- a/articles/aks/kubernetes-portal.md +++ b/articles/aks/kubernetes-portal.md @@ -14,7 +14,7 @@ The Kubernetes resource view from the Azure portal replaces the AKS dashboard ad ## Prerequisites -To view Kubernetes resources in the Azure portal, you need an AKS cluster. Any cluster is supported, but if using Azure Active Directory (Azure AD) integration, your cluster must use [AKS-managed Azure AD integration][aks-managed-aad]. If your cluster uses legacy Azure AD, you can upgrade your cluster in the portal or with the [Azure CLI][cli-aad-upgrade]. You can also [use the Azure portal][portal-cluster] to create a new AKS cluster. +To view Kubernetes resources in the Azure portal, you need an AKS cluster. Any cluster is supported, but if using Azure Active Directory (Azure AD) integration, your cluster must use [AKS-managed Azure AD integration][aks-managed-aad]. If your cluster uses legacy Azure AD, you can upgrade your cluster in the portal or with the [Azure CLI][cli-aad-upgrade]. You can also [use the Azure portal][aks-quickstart-portal] to create a new AKS cluster. 
## View Kubernetes resources @@ -30,11 +30,11 @@ To see the Kubernetes resources, navigate to your AKS cluster in the Azure porta ### Deploy an application -In this example, we'll use our sample AKS cluster to deploy the Azure Vote application from the [AKS quickstart][portal-quickstart]. +In this example, we'll use our sample AKS cluster to deploy the Azure Vote application from the [AKS quickstart][aks-quickstart-portal]. 1. Select **Add** from any of the resource views (Namespace, Workloads, Services and ingresses, Storage, or Configuration). -1. Paste the YAML for the Azure Vote application from the [AKS quickstart][portal-quickstart]. -1. Select **Add** at the bottom of the YAML editor to deploy the application. +1. Paste the YAML for the Azure Vote application from the [AKS quickstart][aks-quickstart-portal]. +1. Select **Add** at the bottom of the YAML editor to deploy the application. Once the YAML file is added, the resource viewer shows both Kubernetes services that were created: the internal service (azure-vote-back), and the external service (azure-vote-front) to access the Azure Vote application. The external service includes a linked external IP address so you can easily view the application in your browser. @@ -42,7 +42,7 @@ Once the YAML file is added, the resource viewer shows both Kubernetes services ### Monitor deployment insights -AKS clusters with [Azure Monitor for containers][enable-monitor] enabled can quickly view deployment and other insights. From the Kubernetes resources view, users can see the live status of individual deployments, including CPU and memory usage, as well as transition to Azure monitor for more in-depth information about specific nodes and containers. Here's an example of deployment insights from a sample AKS cluster: +AKS clusters with [Container insights][enable-monitor] enabled can quickly view deployment and other insights. 
From the Kubernetes resources view, users can see the live status of individual deployments, including CPU and memory usage, as well as transition to Azure monitor for more in-depth information about specific nodes and containers. Here's an example of deployment insights from a sample AKS cluster: :::image type="content" source="media/kubernetes-portal/deployment-insights.png" alt-text="Deployment insights displayed in the Azure portal." lightbox="media/kubernetes-portal/deployment-insights.png"::: @@ -91,9 +91,9 @@ This article showed you how to access Kubernetes resources for your AKS cluster. [concepts-identity]: concepts-identity.md -[portal-quickstart]: kubernetes-walkthrough-portal.md#run-the-application +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md [deployments]: concepts-clusters-workloads.md#deployments-and-yaml-manifests [aks-managed-aad]: managed-aad.md [cli-aad-upgrade]: managed-aad.md#upgrading-to-aks-managed-azure-ad-integration [enable-monitor]: ../azure-monitor/containers/container-insights-enable-existing-clusters.md -[portal-cluster]: kubernetes-walkthrough-portal.md diff --git a/articles/aks/kubernetes-walkthrough-portal.md b/articles/aks/kubernetes-walkthrough-portal.md deleted file mode 100644 index 80dcc8bfe8262..0000000000000 --- a/articles/aks/kubernetes-walkthrough-portal.md +++ /dev/null @@ -1,322 +0,0 @@ ---- -title: 'Quickstart: Deploy an AKS cluster by using the Azure portal' -titleSuffix: Azure Kubernetes Service -description: Learn how to quickly create a Kubernetes cluster, deploy an application, and monitor performance in Azure Kubernetes Service (AKS) using the Azure portal. 
-services: container-service -ms.topic: quickstart -ms.date: 1/13/2022 -ms.custom: mvc, seo-javascript-october2019, contperf-fy21q3, mode-ui -#Customer intent: As a developer or cluster operator, I want to quickly create an AKS cluster and deploy an application so that I can see how to run and monitor applications using the managed Kubernetes service in Azure. ---- - -# Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster using the Azure portal - -Azure Kubernetes Service (AKS) is a managed Kubernetes service that lets you quickly deploy and manage clusters. In this quickstart, you will: -* Deploy an AKS cluster using the Azure portal. -* Run a multi-container application with a web front-end and a Redis instance in the cluster. -* Monitor the health of the cluster and pods that run your application. - -:::image type="content" source="media/container-service-kubernetes-walkthrough/azure-voting-application.png" alt-text="Image of browsing to Azure Vote sample application"::: - -This quickstart assumes a basic understanding of Kubernetes concepts. For more information, see [Kubernetes core concepts for Azure Kubernetes Service (AKS)][kubernetes-concepts]. - -If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. - -## Prerequisites - -Sign in to the Azure portal at [https://portal.azure.com](https://portal.azure.com). - -## Create an AKS cluster - -1. On the Azure portal menu or from the **Home** page, select **Create a resource**. - -2. Select **Containers** > **Kubernetes Service**. - -3. On the **Basics** page, configure the following options: - - **Project details**: - * Select an Azure **Subscription**. - * Select or create an Azure **Resource group**, such as *myResourceGroup*. - - **Cluster details**: - * Ensure the **Preset configuration** is *Standard ($$)*.
For more details on preset configurations, see [Cluster configuration presets in the Azure portal][preset-config]. - * Enter a **Kubernetes cluster name**, such as *myAKSCluster*. - * Select a **Region** and **Kubernetes version** for the AKS cluster. - - **Primary node pool**: - * Leave the default values selected. - - :::image type="content" source="media/kubernetes-walkthrough-portal/create-cluster-basics.png" alt-text="Create AKS cluster - provide basic information"::: - - > [!NOTE] - > You can change the preset configuration when creating your cluster by selecting *View all preset configurations* and choosing a different option. - > :::image type="content" source="media/kubernetes-walkthrough-portal/cluster-preset-options.png" alt-text="Create AKS cluster - portal preset options"::: - -4. Select **Next: Node pools** when complete. - -5. Keep the default **Node pools** options. At the bottom of the screen, click **Next: Authentication**. - > [!CAUTION] - > Newly created Azure AD service principals may take several minutes to propagate and become available, causing "service principal not found" errors and validation failures in Azure portal. If you hit this bump, please visit [our troubleshooting article](troubleshooting.md#received-an-error-saying-my-service-principal-wasnt-found-or-is-invalid-when-i-try-to-create-a-new-cluster) for mitigation. - -6. On the **Authentication** page, configure the following options: - - Create a new cluster identity by either: - * Leaving the **Authentication** field with **System-assigned managed identity**, or - * Choosing **Service Principal** to use a service principal. - * Select *(new) default service principal* to create a default service principal, or - * Select *Configure service principal* to use an existing one. You will need to provide the existing principal's SPN client ID and secret. 
- - Enable the Kubernetes role-based access control (Kubernetes RBAC) option to provide more fine-grained control over access to the Kubernetes resources deployed in your AKS cluster. - - By default, *Basic* networking is used, and Azure Monitor for containers is enabled. - -7. Click **Review + create** and then **Create** when validation completes. - - -8. It takes a few minutes to create the AKS cluster. When your deployment is complete, navigate to your resource by either: - * Clicking **Go to resource**, or - * Browsing to the AKS cluster resource group and selecting the AKS resource. - * Per example cluster dashboard below: browsing for *myResourceGroup* and selecting *myAKSCluster* resource. - - :::image type="content" source="media/kubernetes-walkthrough-portal/aks-portal-dashboard.png" alt-text="Example AKS dashboard in the Azure portal"::: - -## Connect to the cluster - -To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. - -1. Open Cloud Shell using the `>_` button on the top of the Azure portal. - - ![Open the Azure Cloud Shell in the portal](media/kubernetes-walkthrough-portal/aks-cloud-shell.png) - - > [!NOTE] - > To perform these operations in a local shell installation: - > 1. Verify Azure CLI is installed. - > 2. Connect to Azure via the `az login` command. - -2. Configure `kubectl` to connect to your Kubernetes cluster using the [az aks get-credentials][az-aks-get-credentials] command. The following command downloads credentials and configures the Kubernetes CLI to use them. - - ```azurecli - az aks get-credentials --resource-group myResourceGroup --name myAKSCluster - ``` - -3. Verify the connection to your cluster using `kubectl get` to return a list of the cluster nodes. - - ```console - kubectl get nodes - ``` - - Output shows the single node created in the previous steps. 
Make sure the node status is *Ready*: - - ```output - NAME STATUS ROLES AGE VERSION - aks-agentpool-12345678-vmss000000 Ready agent 23m v1.19.11 - aks-agentpool-12345678-vmss000001 Ready agent 24m v1.19.11 - ``` - -## Run the application - -A Kubernetes manifest file defines a cluster's desired state, like which container images to run. - -In this quickstart, you will use a manifest to create all objects needed to run the Azure Vote application. This manifest includes two Kubernetes deployments: -* The sample Azure Vote Python applications. -* A Redis instance. - -Two Kubernetes Services are also created: -* An internal service for the Redis instance. -* An external service to access the Azure Vote application from the internet. - -1. In the Cloud Shell, use an editor to create a file named `azure-vote.yaml`, such as: - * `code azure-vote.yaml` - * `nano azure-vote.yaml`, or - * `vi azure-vote.yaml`. - -1. Copy in the following YAML definition: - - ```yaml - apiVersion: apps/v1 - kind: Deployment - metadata: - name: azure-vote-back - spec: - replicas: 1 - selector: - matchLabels: - app: azure-vote-back - template: - metadata: - labels: - app: azure-vote-back - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: azure-vote-back - image: mcr.microsoft.com/oss/bitnami/redis:6.0.8 - env: - - name: ALLOW_EMPTY_PASSWORD - value: "yes" - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 250m - memory: 256Mi - ports: - - containerPort: 6379 - name: redis - --- - apiVersion: v1 - kind: Service - metadata: - name: azure-vote-back - spec: - ports: - - port: 6379 - selector: - app: azure-vote-back - --- - apiVersion: apps/v1 - kind: Deployment - metadata: - name: azure-vote-front - spec: - replicas: 1 - selector: - matchLabels: - app: azure-vote-front - template: - metadata: - labels: - app: azure-vote-front - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: azure-vote-front - image: 
mcr.microsoft.com/azuredocs/azure-vote-front:v1 - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 250m - memory: 256Mi - ports: - - containerPort: 80 - env: - - name: REDIS - value: "azure-vote-back" - --- - apiVersion: v1 - kind: Service - metadata: - name: azure-vote-front - spec: - type: LoadBalancer - ports: - - port: 80 - selector: - app: azure-vote-front - ``` - -1. Deploy the application using the `kubectl apply` command and specify the name of your YAML manifest: - - ```console - kubectl apply -f azure-vote.yaml - ``` - - Output shows the successfully created deployments and services: - - ```output - deployment "azure-vote-back" created - service "azure-vote-back" created - deployment "azure-vote-front" created - service "azure-vote-front" created - ``` - -## Test the application - -When the application runs, a Kubernetes service exposes the application front end to the internet. This process can take a few minutes to complete. - -To monitor progress, use the `kubectl get service` command with the `--watch` argument. - -```console -kubectl get service azure-vote-front --watch -``` - -The **EXTERNAL-IP** output for the `azure-vote-front` service will initially show as *pending*. - -```output -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -azure-vote-front LoadBalancer 10.0.37.27 80:30572/TCP 6s -``` - -Once the **EXTERNAL-IP** address changes from *pending* to an actual public IP address, use `CTRL-C` to stop the `kubectl` watch process. The following example output shows a valid public IP address assigned to the service: - - -```output -azure-vote-front LoadBalancer 10.0.37.27 52.179.23.131 80:30572/TCP 2m -``` - -To see the Azure Vote app in action, open a web browser to the external IP address of your service. 
- -:::image type="content" source="media/container-service-kubernetes-walkthrough/azure-voting-application.png" alt-text="Image of browsing to Azure Vote sample application"::: - -## Monitor health and logs - -When you created the cluster, Azure Monitor for containers was enabled. Azure Monitor for containers provides health metrics for both the AKS cluster and pods running on the cluster. - -Metric data takes a few minutes to populate in the Azure portal. To see current health status, uptime, and resource usage for the Azure Vote pods: - -1. Browse back to the AKS resource in the Azure portal. -1. Under **Monitoring** on the left-hand side, choose **Insights**. -1. Across the top, choose **+ Add Filter**. -1. Select **Namespace** as the property, then choose *All but kube-system*. -1. Select **Containers** to view them. - -The `azure-vote-back` and `azure-vote-front` containers will display, as shown in the following example: - -:::image type="content" source="media/kubernetes-walkthrough-portal/monitor-containers.png" alt-text="View the health of running containers in AKS"::: - -To view logs for the `azure-vote-front` pod, select **View in Log Analytics** from the top of the *azure-vote-front | Overview* area on the right side. These logs include the *stdout* and *stderr* streams from the container. - -:::image type="content" source="media/kubernetes-walkthrough-portal/monitor-container-logs.png" alt-text="View the containers logs in AKS"::: - -## Delete cluster - -To avoid Azure charges, clean up your unnecessary resources. Select the **Delete** button on the AKS cluster dashboard. You can also use the [az aks delete][az-aks-delete] command in the Cloud Shell: - -```azurecli -az aks delete --resource-group myResourceGroup --name myAKSCluster --yes --no-wait -``` -> [!NOTE] -> When you delete the cluster, the Azure Active Directory service principal used by the AKS cluster is not removed.
For steps on how to remove the service principal, see [AKS service principal considerations and deletion][sp-delete]. -> -> If you used a managed identity, the identity is managed by the platform and does not require removal. - -## Get the code - -Pre-existing container images were used in this quickstart to create a Kubernetes deployment. The related application code, Dockerfile, and Kubernetes manifest file are [available on GitHub.][azure-vote-app] - -## Next steps - -In this quickstart, you deployed a Kubernetes cluster and then deployed a multi-container application to it. Access the Kubernetes web dashboard for your AKS cluster. - -To learn more about AKS by walking through a complete example, including building an application, deploying from Azure Container Registry, updating a running application, and scaling and upgrading your cluster, continue to the Kubernetes cluster tutorial. - -> [!div class="nextstepaction"] -> [AKS tutorial][aks-tutorial] - - -[azure-vote-app]: https://github.com/Azure-Samples/azure-voting-app-redis.git -[kubectl]: https://kubernetes.io/docs/user-guide/kubectl/ -[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply -[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get -[kubernetes-documentation]: https://kubernetes.io/docs/home/ - - -[kubernetes-concepts]: concepts-clusters-workloads.md -[az-aks-get-credentials]: /cli/azure/aks#az_aks_get_credentials -[az-aks-delete]: /cli/azure/aks#az_aks_delete -[aks-monitor]: ../azure-monitor/containers/container-insights-overview.md -[aks-network]: ./concepts-network.md -[aks-tutorial]: ./tutorial-kubernetes-prepare-app.md -[http-routing]: ./http-application-routing.md -[preset-config]: ./quotas-skus-regions.md#cluster-configuration-presets-in-the-azure-portal -[sp-delete]: kubernetes-service-principal.md#additional-considerations diff --git a/articles/aks/kubernetes-walkthrough-powershell.md 
b/articles/aks/kubernetes-walkthrough-powershell.md deleted file mode 100644 index 5930568e754d8..0000000000000 --- a/articles/aks/kubernetes-walkthrough-powershell.md +++ /dev/null @@ -1,303 +0,0 @@ ---- -title: 'Quickstart: Deploy an AKS cluster by using PowerShell' -description: Learn how to quickly create a Kubernetes cluster and deploy an application in Azure Kubernetes Service (AKS) using PowerShell. -services: container-service -ms.topic: quickstart -ms.date: 01/13/2022 -ms.custom: devx-track-azurepowershell, mode-api -#Customer intent: As a developer or cluster operator, I want to quickly create an AKS cluster and deploy an application so that I can see how to run applications using the managed Kubernetes service in Azure. ---- - -# Quickstart: Deploy an Azure Kubernetes Service cluster using PowerShell - -Azure Kubernetes Service (AKS) is a managed Kubernetes service that lets you quickly deploy and manage clusters. In this quickstart, you will: -* Deploy an AKS cluster using PowerShell. -* Run a multi-container application with a web front-end and a Redis instance in the cluster. - -To learn more about creating a Windows Server node pool, see -[Create an AKS cluster that supports Windows Server containers][windows-container-powershell]. - -![Voting app deployed in Azure Kubernetes Service](./media/kubernetes-walkthrough-powershell/voting-app-deployed-in-azure-kubernetes-service.png) - -This quickstart assumes a basic understanding of Kubernetes concepts. For more information, see -[Kubernetes core concepts for Azure Kubernetes Service (AKS)][kubernetes-concepts]. - -## Prerequisites - -If you don't have an Azure subscription, create a [free](https://azure.microsoft.com/free/) account before you begin. - -If you're running PowerShell locally, install the Az PowerShell module and connect to your Azure account using the [Connect-AzAccount](/powershell/module/az.accounts/Connect-AzAccount) cmdlet. 
For more information about installing the Az PowerShell module, see [Install Azure PowerShell][install-azure-powershell]. - -[!INCLUDE [cloud-shell-try-it](../../includes/cloud-shell-try-it.md)] - -If you have multiple Azure subscriptions, select the appropriate subscription ID in which the resources should be billed using the -[Set-AzContext](/powershell/module/az.accounts/set-azcontext) cmdlet. - -```azurepowershell-interactive -Set-AzContext -SubscriptionId 00000000-0000-0000-0000-000000000000 -``` - -## Create a resource group - -An [Azure resource group](../azure-resource-manager/management/overview.md) is a logical group in which Azure resources are deployed and managed. When you create a resource group, you will be prompted to specify a location. This location is: -* The storage location of your resource group metadata. -* Where your resources will run in Azure if you don't specify another region during resource creation. - -The following example creates a resource group named **myResourceGroup** in the **eastus** region. - -Create a resource group using the [New-AzResourceGroup][new-azresourcegroup] -cmdlet. - -```azurepowershell-interactive -New-AzResourceGroup -Name myResourceGroup -Location eastus -``` - -Output for successfully created resource group: - -```plaintext -ResourceGroupName : myResourceGroup -Location : eastus -ProvisioningState : Succeeded -Tags : -ResourceId : /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup -``` - -## Create AKS cluster - -1. Generate an SSH key pair using the `ssh-keygen` command-line utility. For more details, see: - * [Quick steps: Create and use an SSH public-private key pair for Linux VMs in Azure](../virtual-machines/linux/mac-create-ssh-keys.md) - * [How to use SSH keys with Windows on Azure](../virtual-machines/linux/ssh-from-windows.md) - -1. Create an AKS cluster using the [New-AzAksCluster][new-azakscluster] cmdlet. 
- - The following example creates a cluster named **myAKSCluster** with one node. - - ```azurepowershell-interactive - New-AzAksCluster -ResourceGroupName myResourceGroup -Name myAKSCluster -NodeCount 1 - ``` - -After a few minutes, the command completes and returns information about the cluster. - -> [!NOTE] -> When you create an AKS cluster, a second resource group is automatically created to store the AKS resources. For more information, see [Why are two resource groups created with AKS?](./faq.md#why-are-two-resource-groups-created-with-aks) - -## Connect to the cluster - -To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. - -1. Install `kubectl` locally using the `Install-AzAksKubectl` cmdlet: - - ```azurepowershell - Install-AzAksKubectl - ``` - -2. Configure `kubectl` to connect to your Kubernetes cluster using the [Import-AzAksCredential][import-azakscredential] cmdlet. The following cmdlet downloads credentials and configures the Kubernetes CLI to use them. - - ```azurepowershell-interactive - Import-AzAksCredential -ResourceGroupName myResourceGroup -Name myAKSCluster - ``` - -3. Verify the connection to your cluster using the [kubectl get][kubectl-get] command. This command returns a list of the cluster nodes. - - ```azurepowershell-interactive - kubectl get nodes - ``` - - Output shows the single node created in the previous steps. Make sure the node status is *Ready*: - - ```plaintext - NAME STATUS ROLES AGE VERSION - aks-nodepool1-31718369-0 Ready agent 6m44s v1.15.10 - ``` - -## Run the application - -A [Kubernetes manifest file][kubernetes-deployment] defines a cluster's desired state, such as which container images to run. - -In this quickstart, you will use a manifest to create all objects needed to run the [Azure Vote application][azure-vote-app]. 
This manifest includes two [Kubernetes deployments][kubernetes-deployment]: -* The sample Azure Vote Python applications. -* A Redis instance. - -Two [Kubernetes Services][kubernetes-service] are also created: -* An internal service for the Redis instance. -* An external service to access the Azure Vote application from the internet. - -1. Create a file named `azure-vote.yaml`. - * If you use the Azure Cloud Shell, this file can be created using `vi` or `nano` as if working on a virtual or physical system -1. Copy in the following YAML definition: - - ```yaml - apiVersion: apps/v1 - kind: Deployment - metadata: - name: azure-vote-back - spec: - replicas: 1 - selector: - matchLabels: - app: azure-vote-back - template: - metadata: - labels: - app: azure-vote-back - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: azure-vote-back - image: mcr.microsoft.com/oss/bitnami/redis:6.0.8 - env: - - name: ALLOW_EMPTY_PASSWORD - value: "yes" - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 250m - memory: 256Mi - ports: - - containerPort: 6379 - name: redis - --- - apiVersion: v1 - kind: Service - metadata: - name: azure-vote-back - spec: - ports: - - port: 6379 - selector: - app: azure-vote-back - --- - apiVersion: apps/v1 - kind: Deployment - metadata: - name: azure-vote-front - spec: - replicas: 1 - selector: - matchLabels: - app: azure-vote-front - template: - metadata: - labels: - app: azure-vote-front - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: azure-vote-front - image: mcr.microsoft.com/azuredocs/azure-vote-front:v1 - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 250m - memory: 256Mi - ports: - - containerPort: 80 - env: - - name: REDIS - value: "azure-vote-back" - --- - apiVersion: v1 - kind: Service - metadata: - name: azure-vote-front - spec: - type: LoadBalancer - ports: - - port: 80 - selector: - app: azure-vote-front - ``` - -1. 
Deploy the application using the [kubectl apply][kubectl-apply] command and specify the name of your YAML manifest: - - ```azurepowershell-interactive - kubectl apply -f azure-vote.yaml - ``` - - Output shows the successfully created deployments and services: - - ```plaintext - deployment.apps/azure-vote-back created - service/azure-vote-back created - deployment.apps/azure-vote-front created - service/azure-vote-front created - ``` - -## Test the application - -When the application runs, a Kubernetes service exposes the application front end to the internet. This process can take a few minutes to complete. - -Monitor progress using the [kubectl get service][kubectl-get] command with the `--watch` argument. - -```azurepowershell-interactive -kubectl get service azure-vote-front --watch -``` - -The **EXTERNAL-IP** output for the `azure-vote-front` service will initially show as *pending*. - -```plaintext -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -azure-vote-front LoadBalancer 10.0.37.27 80:30572/TCP 6s -``` - -Once the **EXTERNAL-IP** address changes from *pending* to an actual public IP address, use `CTRL-C` to stop the `kubectl` watch process. The following example output shows a valid public IP address assigned to the service: - -```plaintext -azure-vote-front LoadBalancer 10.0.37.27 52.179.23.131 80:30572/TCP 2m -``` - -To see the Azure Vote app in action, open a web browser to the external IP address of your service. - -![Voting app deployed in Azure Kubernetes Service](./media/kubernetes-walkthrough-powershell/voting-app-deployed-in-azure-kubernetes-service.png) - -## Delete the cluster - -To avoid Azure charges, clean up your unnecessary resources. Use the [Remove-AzResourceGroup][remove-azresourcegroup] cmdlet to remove the resource group, container service, and all related resources. 
- -```azurepowershell-interactive -Remove-AzResourceGroup -Name myResourceGroup -``` - -> [!NOTE] -> When you delete the cluster, the Azure Active Directory service principal used by the AKS cluster is not removed. For steps on how to remove the service principal, see [AKS service principal considerations and deletion][sp-delete]. -> -> If you used a managed identity, the identity is managed by the platform and does not require removal. - -## Get the code - -Pre-existing container images were used in this quickstart to create a Kubernetes deployment. The related application code, Dockerfile, and Kubernetes manifest file are [available on GitHub.][azure-vote-app] - -## Next steps - -In this quickstart, you deployed a Kubernetes cluster and then deployed a multi-container application to it. - -To learn more about AKS, and walk through a complete code to deployment example, continue to the Kubernetes cluster tutorial. - -> [!div class="nextstepaction"] -> [AKS tutorial][aks-tutorial] - - -[kubectl]: https://kubernetes.io/docs/user-guide/kubectl/ -[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get -[azure-dev-spaces]: /previous-versions/azure/dev-spaces/ -[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply -[azure-vote-app]: https://github.com/Azure-Samples/azure-voting-app-redis.git - - -[windows-container-powershell]: windows-container-powershell.md -[kubernetes-concepts]: concepts-clusters-workloads.md -[install-azure-powershell]: /powershell/azure/install-az-ps -[new-azresourcegroup]: /powershell/module/az.resources/new-azresourcegroup -[new-azakscluster]: /powershell/module/az.aks/new-azakscluster -[import-azakscredential]: /powershell/module/az.aks/import-azakscredential -[kubernetes-deployment]: concepts-clusters-workloads.md#deployments-and-yaml-manifests -[kubernetes-service]: concepts-network.md#services -[remove-azresourcegroup]: 
/powershell/module/az.resources/remove-azresourcegroup -[sp-delete]: kubernetes-service-principal.md#additional-considerations -[aks-tutorial]: ./tutorial-kubernetes-prepare-app.md diff --git a/articles/aks/kubernetes-walkthrough-rm-template.md b/articles/aks/kubernetes-walkthrough-rm-template.md deleted file mode 100644 index d642bc7506c60..0000000000000 --- a/articles/aks/kubernetes-walkthrough-rm-template.md +++ /dev/null @@ -1,310 +0,0 @@ ---- -title: Quickstart - Create an Azure Kubernetes Service (AKS) cluster -description: Learn how to quickly create a Kubernetes cluster using an Azure Resource Manager template and deploy an application in Azure Kubernetes Service (AKS) -services: container-service -ms.topic: quickstart -ms.date: 03/15/2021 -ms.custom: mvc, subject-armqs, devx-track-azurecli, mode-arm -#Customer intent: As a developer or cluster operator, I want to quickly create an AKS cluster and deploy an application so that I can see how to run applications using the managed Kubernetes service in Azure. ---- - -# Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster using an ARM template - -Azure Kubernetes Service (AKS) is a managed Kubernetes service that lets you quickly deploy and manage clusters. In this quickstart, you will: -* Deploy an AKS cluster using an Azure Resource Manager template. -* Run a multi-container application with a web front-end and a Redis instance in the cluster. - -![Image of browsing to Azure Vote](media/container-service-kubernetes-walkthrough/azure-voting-application.png) - -[!INCLUDE [About Azure Resource Manager](../../includes/resource-manager-quickstart-introduction.md)] - -This quickstart assumes a basic understanding of Kubernetes concepts. For more information, see [Kubernetes core concepts for Azure Kubernetes Service (AKS)][kubernetes-concepts]. - -If your environment meets the prerequisites and you're familiar with using ARM templates, select the **Deploy to Azure** button. 
The template will open in the Azure portal. - -[![Deploy to Azure](../media/template-deployments/deploy-to-azure.svg)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-quickstart-templates%2Fmaster%2Fquickstarts%2Fmicrosoft.kubernetes%2Faks%2Fazuredeploy.json) - -[!INCLUDE [quickstarts-free-trial-note](../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../includes/azure-cli-prepare-your-environment.md)] - -- This article requires version 2.0.61 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. - -- To create an AKS cluster using a Resource Manager template, you provide an SSH public key. If you need this resource, see the following section; otherwise skip to the [Review the template](#review-the-template) section. - -### Create an SSH key pair - -To access AKS nodes, you connect using an SSH key pair (public and private), which you generate using the `ssh-keygen` command. By default, these files are created in the *~/.ssh* directory. Running the `ssh-keygen` command will overwrite any SSH key pair with the same name already existing in the given location. - -1. Go to [https://shell.azure.com](https://shell.azure.com) to open Cloud Shell in your browser. - -1. Run the `ssh-keygen` command. The following example creates an SSH key pair using RSA encryption and a bit length of 4096: - - ```console - ssh-keygen -t rsa -b 4096 - ``` - -For more information about creating SSH keys, see [Create and manage SSH keys for authentication in Azure][ssh-keys]. - -## Review the template - -The template used in this quickstart is from [Azure Quickstart templates](https://azure.microsoft.com/resources/templates/aks/). - -:::code language="json" source="~/quickstart-templates/quickstarts/microsoft.kubernetes/aks/azuredeploy.json"::: - -For more AKS samples, see the [AKS quickstart templates][aks-quickstart-templates] site. 
- -## Deploy the template - -1. Select the following button to sign in to Azure and open a template. - - [![Deploy to Azure](../media/template-deployments/deploy-to-azure.svg)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-quickstart-templates%2Fmaster%2Fquickstarts%2Fmicrosoft.kubernetes%2Faks%2Fazuredeploy.json) - -2. Select or enter the following values. - - For this quickstart, leave the default values for the *OS Disk Size GB*, *Agent Count*, *Agent VM Size*, *OS Type*, and *Kubernetes Version*. Provide your own values for the following template parameters: - - * **Subscription**: Select an Azure subscription. - * **Resource group**: Select **Create new**. Enter a unique name for the resource group, such as *myResourceGroup*, then choose **OK**. - * **Location**: Select a location, such as **East US**. - * **Cluster name**: Enter a unique name for the AKS cluster, such as *myAKSCluster*. - * **DNS prefix**: Enter a unique DNS prefix for your cluster, such as *myakscluster*. - * **Linux Admin Username**: Enter a username to connect using SSH, such as *azureuser*. - * **SSH RSA Public Key**: Copy and paste the *public* part of your SSH key pair (by default, the contents of *~/.ssh/id_rsa.pub*). - - ![Resource Manager template to create an Azure Kubernetes Service cluster in the portal](./media/kubernetes-walkthrough-rm-template/create-aks-cluster-using-template-portal.png) - -3. Select **Review + Create**. - -It takes a few minutes to create the AKS cluster. Wait for the cluster to be successfully deployed before you move on to the next step. - -## Validate the deployment - -### Connect to the cluster - -To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. - -1. Install `kubectl` locally using the [az aks install-cli][az-aks-install-cli] command: - - ```azurecli - az aks install-cli - ``` - -2. 
Configure `kubectl` to connect to your Kubernetes cluster using the [az aks get-credentials][az-aks-get-credentials] command. This command downloads credentials and configures the Kubernetes CLI to use them. - - ```azurecli-interactive - az aks get-credentials --resource-group myResourceGroup --name myAKSCluster - ``` - -3. Verify the connection to your cluster using the [kubectl get][kubectl-get] command. This command returns a list of the cluster nodes. - - ```console - kubectl get nodes - ``` - - Output shows the nodes created in the previous steps. Make sure that the status for all the nodes is *Ready*: - - ```output - NAME STATUS ROLES AGE VERSION - aks-agentpool-41324942-0 Ready agent 6m44s v1.12.6 - aks-agentpool-41324942-1 Ready agent 6m46s v1.12.6 - aks-agentpool-41324942-2 Ready agent 6m45s v1.12.6 - ``` - -### Run the application - -A [Kubernetes manifest file][kubernetes-deployment] defines a cluster's desired state, such as which container images to run. - -In this quickstart, you will use a manifest to create all objects needed to run the [Azure Vote application][azure-vote-app]. This manifest includes two [Kubernetes deployments][kubernetes-deployment]: -* The sample Azure Vote Python applications. -* A Redis instance. - -Two [Kubernetes Services][kubernetes-service] are also created: -* An internal service for the Redis instance. -* An external service to access the Azure Vote application from the internet. - -1. Create a file named `azure-vote.yaml`. - * If you use the Azure Cloud Shell, this file can be created using `vi` or `nano` as if working on a virtual or physical system -1. 
Copy in the following YAML definition: - - ```yaml - apiVersion: apps/v1 - kind: Deployment - metadata: - name: azure-vote-back - spec: - replicas: 1 - selector: - matchLabels: - app: azure-vote-back - template: - metadata: - labels: - app: azure-vote-back - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: azure-vote-back - image: mcr.microsoft.com/oss/bitnami/redis:6.0.8 - env: - - name: ALLOW_EMPTY_PASSWORD - value: "yes" - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 250m - memory: 256Mi - ports: - - containerPort: 6379 - name: redis - --- - apiVersion: v1 - kind: Service - metadata: - name: azure-vote-back - spec: - ports: - - port: 6379 - selector: - app: azure-vote-back - --- - apiVersion: apps/v1 - kind: Deployment - metadata: - name: azure-vote-front - spec: - replicas: 1 - selector: - matchLabels: - app: azure-vote-front - template: - metadata: - labels: - app: azure-vote-front - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: azure-vote-front - image: mcr.microsoft.com/azuredocs/azure-vote-front:v1 - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 250m - memory: 256Mi - ports: - - containerPort: 80 - env: - - name: REDIS - value: "azure-vote-back" - --- - apiVersion: v1 - kind: Service - metadata: - name: azure-vote-front - spec: - type: LoadBalancer - ports: - - port: 80 - selector: - app: azure-vote-front - ``` - -1. Deploy the application using the [kubectl apply][kubectl-apply] command and specify the name of your YAML manifest: - - ```console - kubectl apply -f azure-vote.yaml - ``` - - Output shows the successfully created deployments and services: - - ```output - deployment "azure-vote-back" created - service "azure-vote-back" created - deployment "azure-vote-front" created - service "azure-vote-front" created - ``` - -### Test the application - -When the application runs, a Kubernetes service exposes the application front end to the internet. 
This process can take a few minutes to complete. - -Monitor progress using the [kubectl get service][kubectl-get] command with the `--watch` argument. - -```console -kubectl get service azure-vote-front --watch -``` - -The **EXTERNAL-IP** output for the `azure-vote-front` service will initially show as *pending*. - -```output -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -azure-vote-front LoadBalancer 10.0.37.27 80:30572/TCP 6s -``` - -Once the **EXTERNAL-IP** address changes from *pending* to an actual public IP address, use `CTRL-C` to stop the `kubectl` watch process. The following example output shows a valid public IP address assigned to the service: - -```output -azure-vote-front LoadBalancer 10.0.37.27 52.179.23.131 80:30572/TCP 2m -``` - -To see the Azure Vote app in action, open a web browser to the external IP address of your service. - -![Image of browsing to Azure Vote](media/container-service-kubernetes-walkthrough/azure-voting-application.png) - -## Clean up resources - -To avoid Azure charges, clean up your unnecessary resources. Use the [az group delete][az-group-delete] command to remove the resource group, container service, and all related resources. - -```azurecli-interactive -az group delete --name myResourceGroup --yes --no-wait -``` - -> [!NOTE] -> When you delete the cluster, the Azure Active Directory service principal used by the AKS cluster is not removed. For steps on how to remove the service principal, see [AKS service principal considerations and deletion][sp-delete]. -> -> If you used a managed identity, the identity is managed by the platform and does not require removal. - -## Get the code - -Pre-existing container images were used in this quickstart to create a Kubernetes deployment. The related application code, Dockerfile, and Kubernetes manifest file are [available on GitHub.][azure-vote-app] - -## Next steps - -In this quickstart, you deployed a Kubernetes cluster and then deployed a multi-container application to it. 
- -To learn more about AKS, and walk through a complete code to deployment example, continue to the Kubernetes cluster tutorial. - -> [!div class="nextstepaction"] -> [AKS tutorial][aks-tutorial] - - -[azure-vote-app]: https://github.com/Azure-Samples/azure-voting-app-redis.git -[kubectl]: https://kubernetes.io/docs/user-guide/kubectl/ -[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply -[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get -[azure-dev-spaces]: /previous-versions/azure/dev-spaces/ -[aks-quickstart-templates]: https://azure.microsoft.com/resources/templates/?term=Azure+Kubernetes+Service - - -[kubernetes-concepts]: concepts-clusters-workloads.md -[aks-monitor]: ../azure-monitor/containers/container-insights-onboard.md -[aks-tutorial]: ./tutorial-kubernetes-prepare-app.md -[az-aks-browse]: /cli/azure/aks#az_aks_browse -[az-aks-create]: /cli/azure/aks#az_aks_create -[az-aks-get-credentials]: /cli/azure/aks#az_aks_get_credentials -[az-aks-install-cli]: /cli/azure/aks#az_aks_install_cli -[az-group-create]: /cli/azure/group#az_group_create -[az-group-delete]: /cli/azure/group#az_group_delete -[azure-cli-install]: /cli/azure/install-azure-cli -[sp-delete]: kubernetes-service-principal.md#additional-considerations -[azure-portal]: https://portal.azure.com -[kubernetes-deployment]: concepts-clusters-workloads.md#deployments-and-yaml-manifests -[kubernetes-service]: concepts-network.md#services -[ssh-keys]: ../virtual-machines/linux/create-ssh-keys-detailed.md -[az-ad-sp-create-for-rbac]: /cli/azure/ad/sp#az_ad_sp_create_for_rbac diff --git a/articles/aks/kubernetes-walkthrough.md b/articles/aks/kubernetes-walkthrough.md deleted file mode 100644 index 685a2f0596040..0000000000000 --- a/articles/aks/kubernetes-walkthrough.md +++ /dev/null @@ -1,322 +0,0 @@ ---- -title: 'Quickstart: Deploy an AKS cluster by using Azure CLI' -description: Learn how to quickly create a Kubernetes 
cluster, deploy an application, and monitor performance in Azure Kubernetes Service (AKS) using the Azure CLI. -services: container-service -ms.topic: quickstart -ms.date: 01/18/2022 -ms.custom: H1Hack27Feb2017, mvc, devcenter, seo-javascript-september2019, seo-javascript-october2019, seo-python-october2019, devx-track-azurecli, contperf-fy21q1, mode-api -#Customer intent: As a developer or cluster operator, I want to quickly create an AKS cluster and deploy an application so that I can see how to run and monitor applications using the managed Kubernetes service in Azure. ---- - -# Quickstart: Deploy an Azure Kubernetes Service cluster using the Azure CLI - -Azure Kubernetes Service (AKS) is a managed Kubernetes service that lets you quickly deploy and manage clusters. In this quickstart, you will: -* Deploy an AKS cluster using the Azure CLI. -* Run a multi-container application with a web front-end and a Redis instance in the cluster. -* Monitor the health of the cluster and pods that run your application. - - ![Voting app deployed in Azure Kubernetes Service](./media/container-service-kubernetes-walkthrough/voting-app-deployed-in-azure-kubernetes-service.png) - -This quickstart assumes a basic understanding of Kubernetes concepts. For more information, see [Kubernetes core concepts for Azure Kubernetes Service (AKS)][kubernetes-concepts]. - -[!INCLUDE [quickstarts-free-trial-note](../../includes/quickstarts-free-trial-note.md)] - -To learn more about creating a Windows Server node pool, see [Create an AKS cluster that supports Windows Server containers][windows-container-cli]. - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../includes/azure-cli-prepare-your-environment.md)] - -- This article requires version 2.0.64 or greater of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. -- The identity you are using to create your cluster has the appropriate minimum permissions. 
For more details on access and identity for AKS, see [Access and identity options for Azure Kubernetes Service (AKS)](concepts-identity.md). -- Verify *Microsoft.OperationsManagement* and *Microsoft.OperationalInsights* are registered on your subscription. To check the registration status: - - ```azurecli - az provider show -n Microsoft.OperationsManagement -o table - az provider show -n Microsoft.OperationalInsights -o table - ``` - - If they are not registered, register *Microsoft.OperationsManagement* and *Microsoft.OperationalInsights* using: - - ```azurecli - az provider register --namespace Microsoft.OperationsManagement - az provider register --namespace Microsoft.OperationalInsights - ``` - -> [!NOTE] -> Run the commands as administrator if you plan to run the commands in this quickstart locally instead of in Azure Cloud Shell. - -## Create a resource group - -An [Azure resource group](../azure-resource-manager/management/overview.md) is a logical group in which Azure resources are deployed and managed. When you create a resource group, you will be prompted to specify a location. This location is: -* The storage location of your resource group metadata. -* Where your resources will run in Azure if you don't specify another region during resource creation. - -The following example creates a resource group named *myResourceGroup* in the *eastus* location. - -Create a resource group using the [az group create][az-group-create] command. 
- - -```azurecli-interactive -az group create --name myResourceGroup --location eastus -``` - -Output for successfully created resource group: - -```json -{ - "id": "/subscriptions//resourceGroups/myResourceGroup", - "location": "eastus", - "managedBy": null, - "name": "myResourceGroup", - "properties": { - "provisioningState": "Succeeded" - }, - "tags": null -} -``` - -## Create AKS cluster - -Create an AKS cluster using the [az aks create][az-aks-create] command with the *--enable-addons monitoring* parameter to enable [Azure Monitor container insights][azure-monitor-containers]. The following example creates a cluster named *myAKSCluster* with one node: - -```azurecli-interactive -az aks create --resource-group myResourceGroup --name myAKSCluster --node-count 1 --enable-addons monitoring --generate-ssh-keys -``` - -After a few minutes, the command completes and returns JSON-formatted information about the cluster. - -> [!NOTE] -> When you create an AKS cluster, a second resource group is automatically created to store the AKS resources. For more information, see [Why are two resource groups created with AKS?](./faq.md#why-are-two-resource-groups-created-with-aks) - -## Connect to the cluster - -To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. - -1. Install `kubectl` locally using the [az aks install-cli][az-aks-install-cli] command: - - ```azurecli - az aks install-cli - ``` - -2. Configure `kubectl` to connect to your Kubernetes cluster using the [az aks get-credentials][az-aks-get-credentials] command. The following command: - * Downloads credentials and configures the Kubernetes CLI to use them. - * Uses `~/.kube/config`, the default location for the [Kubernetes configuration file][kubeconfig-file]. Specify a different location for your Kubernetes configuration file using *--file*. 
- - - ```azurecli-interactive - az aks get-credentials --resource-group myResourceGroup --name myAKSCluster - ``` - -3. Verify the connection to your cluster using the [kubectl get][kubectl-get] command. This command returns a list of the cluster nodes. - - ```azurecli-interactive - kubectl get nodes - ``` - - Output shows the single node created in the previous steps. Make sure the node status is *Ready*: - - ```output - NAME STATUS ROLES AGE VERSION - aks-nodepool1-31718369-0 Ready agent 6m44s v1.12.8 - ``` - -## Run the application - -A [Kubernetes manifest file][kubernetes-deployment] defines a cluster's desired state, such as which container images to run. - -In this quickstart, you will use a manifest to create all objects needed to run the [Azure Vote application][azure-vote-app]. This manifest includes two [Kubernetes deployments][kubernetes-deployment]: -* The sample Azure Vote Python applications. -* A Redis instance. - -Two [Kubernetes Services][kubernetes-service] are also created: -* An internal service for the Redis instance. -* An external service to access the Azure Vote application from the internet. - -1. Create a file named `azure-vote.yaml`. - * If you use the Azure Cloud Shell, this file can be created using `code`, `vi`, or `nano` as if working on a virtual or physical system -1. 
Copy in the following YAML definition: - - ```yaml - apiVersion: apps/v1 - kind: Deployment - metadata: - name: azure-vote-back - spec: - replicas: 1 - selector: - matchLabels: - app: azure-vote-back - template: - metadata: - labels: - app: azure-vote-back - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: azure-vote-back - image: mcr.microsoft.com/oss/bitnami/redis:6.0.8 - env: - - name: ALLOW_EMPTY_PASSWORD - value: "yes" - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 250m - memory: 256Mi - ports: - - containerPort: 6379 - name: redis - --- - apiVersion: v1 - kind: Service - metadata: - name: azure-vote-back - spec: - ports: - - port: 6379 - selector: - app: azure-vote-back - --- - apiVersion: apps/v1 - kind: Deployment - metadata: - name: azure-vote-front - spec: - replicas: 1 - selector: - matchLabels: - app: azure-vote-front - template: - metadata: - labels: - app: azure-vote-front - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: azure-vote-front - image: mcr.microsoft.com/azuredocs/azure-vote-front:v1 - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 250m - memory: 256Mi - ports: - - containerPort: 80 - env: - - name: REDIS - value: "azure-vote-back" - --- - apiVersion: v1 - kind: Service - metadata: - name: azure-vote-front - spec: - type: LoadBalancer - ports: - - port: 80 - selector: - app: azure-vote-front - ``` - -1. Deploy the application using the [kubectl apply][kubectl-apply] command and specify the name of your YAML manifest: - - ```console - kubectl apply -f azure-vote.yaml - ``` - - Output shows the successfully created deployments and services: - - ```output - deployment "azure-vote-back" created - service "azure-vote-back" created - deployment "azure-vote-front" created - service "azure-vote-front" created - ``` - -## Test the application - -When the application runs, a Kubernetes service exposes the application front end to the internet. 
This process can take a few minutes to complete. - -Monitor progress using the [kubectl get service][kubectl-get] command with the `--watch` argument. - -```azurecli-interactive -kubectl get service azure-vote-front --watch -``` - -The **EXTERNAL-IP** output for the `azure-vote-front` service will initially show as *pending*. - -```output -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -azure-vote-front LoadBalancer 10.0.37.27 80:30572/TCP 6s -``` - -Once the **EXTERNAL-IP** address changes from *pending* to an actual public IP address, use `CTRL-C` to stop the `kubectl` watch process. The following example output shows a valid public IP address assigned to the service: - -```output -azure-vote-front LoadBalancer 10.0.37.27 52.179.23.131 80:30572/TCP 2m -``` - -To see the Azure Vote app in action, open a web browser to the external IP address of your service. - -![Voting app deployed in Azure Kubernetes Service](./media/container-service-kubernetes-walkthrough/voting-app-deployed-in-azure-kubernetes-service.png) - -View the cluster nodes' and pods' health metrics captured by [Azure Monitor container insights][azure-monitor-containers] in the Azure portal. - -## Delete the cluster - -To avoid Azure charges, clean up your unnecessary resources. Use the [az group delete][az-group-delete] command to remove the resource group, container service, and all related resources. - -```azurecli-interactive -az group delete --name myResourceGroup --yes --no-wait -``` - -> [!NOTE] -> If the AKS cluster was created with system-assigned managed identity (default identity option used in this quickstart), the identity is managed by the platform and does not require removal. -> -> If the AKS cluster was created with service principal as the identity option instead, then when you delete the cluster, the service principal used by the AKS cluster is not removed. For steps on how to remove the service principal, see [AKS service principal considerations and deletion][sp-delete]. 
- -## Get the code - -Pre-existing container images were used in this quickstart to create a Kubernetes deployment. The related application code, Dockerfile, and Kubernetes manifest file are [available on GitHub.][azure-vote-app] - -## Next steps - -In this quickstart, you deployed a Kubernetes cluster and then deployed a multi-container application to it. - -To learn more about AKS, and walk through a complete code to deployment example, continue to the Kubernetes cluster tutorial. - -> [!div class="nextstepaction"] -> [AKS tutorial][aks-tutorial] - -This quickstart is for introductory purposes. For guidance on a creating full solutions with AKS for production, see [AKS solution guidance][aks-solution-guidance]. - - -[azure-vote-app]: https://github.com/Azure-Samples/azure-voting-app-redis.git -[kubectl]: https://kubernetes.io/docs/user-guide/kubectl/ -[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply -[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get -[kubeconfig-file]: https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ - - -[kubernetes-concepts]: concepts-clusters-workloads.md -[aks-monitor]: ../azure-monitor/containers/container-insights-onboard.md -[aks-tutorial]: ./tutorial-kubernetes-prepare-app.md -[az-aks-browse]: /cli/azure/aks#az-aks-browse -[az-aks-create]: /cli/azure/aks#az-aks-create -[az-aks-get-credentials]: /cli/azure/aks#az-aks-get-credentials -[az-aks-install-cli]: /cli/azure/aks#az-aks-install-cli -[az-group-create]: /cli/azure/group#az-group-create -[az-group-delete]: /cli/azure/group#az-group-delete -[azure-cli-install]: /cli/azure/install_azure_cli -[azure-monitor-containers]: ../azure-monitor/containers/container-insights-overview.md -[sp-delete]: kubernetes-service-principal.md#additional-considerations -[azure-portal]: https://portal.azure.com -[kubernetes-deployment]: 
concepts-clusters-workloads.md#deployments-and-yaml-manifests -[kubernetes-service]: concepts-network.md#services -[windows-container-cli]: windows-container-cli.md -[aks-solution-guidance]: /azure/architecture/reference-architectures/containers/aks-start-here?WT.mc_id=AKSDOCSPAGE \ No newline at end of file diff --git a/articles/aks/media/kubernetes-walkthrough/monitor-container-logs.png b/articles/aks/learn/media/quick-kubernetes-deploy-cli/monitor-container-logs.png similarity index 100% rename from articles/aks/media/kubernetes-walkthrough/monitor-container-logs.png rename to articles/aks/learn/media/quick-kubernetes-deploy-cli/monitor-container-logs.png diff --git a/articles/aks/media/kubernetes-walkthrough/monitor-containers.png b/articles/aks/learn/media/quick-kubernetes-deploy-cli/monitor-containers.png similarity index 100% rename from articles/aks/media/kubernetes-walkthrough/monitor-containers.png rename to articles/aks/learn/media/quick-kubernetes-deploy-cli/monitor-containers.png diff --git a/articles/aks/learn/media/quick-kubernetes-deploy-portal/aks-cloud-shell.png b/articles/aks/learn/media/quick-kubernetes-deploy-portal/aks-cloud-shell.png new file mode 100644 index 0000000000000..8a7682bb5eb56 Binary files /dev/null and b/articles/aks/learn/media/quick-kubernetes-deploy-portal/aks-cloud-shell.png differ diff --git a/articles/aks/learn/media/quick-kubernetes-deploy-portal/aks-portal-dashboard.png b/articles/aks/learn/media/quick-kubernetes-deploy-portal/aks-portal-dashboard.png new file mode 100644 index 0000000000000..d4badabbbf44e Binary files /dev/null and b/articles/aks/learn/media/quick-kubernetes-deploy-portal/aks-portal-dashboard.png differ diff --git a/articles/aks/media/kubernetes-walkthrough-powershell/azure-voting-application.png b/articles/aks/learn/media/quick-kubernetes-deploy-portal/azure-voting-application.png similarity index 100% rename from articles/aks/media/kubernetes-walkthrough-powershell/azure-voting-application.png rename 
to articles/aks/learn/media/quick-kubernetes-deploy-portal/azure-voting-application.png diff --git a/articles/aks/media/kubernetes-walkthrough-portal/cluster-preset-options.png b/articles/aks/learn/media/quick-kubernetes-deploy-portal/cluster-preset-options.png similarity index 100% rename from articles/aks/media/kubernetes-walkthrough-portal/cluster-preset-options.png rename to articles/aks/learn/media/quick-kubernetes-deploy-portal/cluster-preset-options.png diff --git a/articles/aks/learn/media/quick-kubernetes-deploy-portal/create-cluster-basics.png b/articles/aks/learn/media/quick-kubernetes-deploy-portal/create-cluster-basics.png new file mode 100644 index 0000000000000..ad21cf1e3b8f3 Binary files /dev/null and b/articles/aks/learn/media/quick-kubernetes-deploy-portal/create-cluster-basics.png differ diff --git a/articles/aks/media/kubernetes-walkthrough-powershell/voting-app-deployed-in-azure-kubernetes-service.png b/articles/aks/learn/media/quick-kubernetes-deploy-powershell/azure-voting-application.png similarity index 100% rename from articles/aks/media/kubernetes-walkthrough-powershell/voting-app-deployed-in-azure-kubernetes-service.png rename to articles/aks/learn/media/quick-kubernetes-deploy-powershell/azure-voting-application.png diff --git a/articles/aks/learn/media/quick-kubernetes-deploy-powershell/voting-app-deployed-in-azure-kubernetes-service.png b/articles/aks/learn/media/quick-kubernetes-deploy-powershell/voting-app-deployed-in-azure-kubernetes-service.png new file mode 100644 index 0000000000000..ab9bf126b784b Binary files /dev/null and b/articles/aks/learn/media/quick-kubernetes-deploy-powershell/voting-app-deployed-in-azure-kubernetes-service.png differ diff --git a/articles/aks/media/kubernetes-walkthrough-rm-template/create-aks-cluster-using-template-portal.png b/articles/aks/learn/media/quick-kubernetes-deploy-rm-template/create-aks-cluster-using-template-portal.png similarity index 100% rename from 
articles/aks/media/kubernetes-walkthrough-rm-template/create-aks-cluster-using-template-portal.png rename to articles/aks/learn/media/quick-kubernetes-deploy-rm-template/create-aks-cluster-using-template-portal.png diff --git a/articles/aks/media/kubernetes-walkthrough-rm-template/deploy-to-azure.png b/articles/aks/learn/media/quick-kubernetes-deploy-rm-template/deploy-to-azure.png similarity index 100% rename from articles/aks/media/kubernetes-walkthrough-rm-template/deploy-to-azure.png rename to articles/aks/learn/media/quick-kubernetes-deploy-rm-template/deploy-to-azure.png diff --git a/articles/aks/media/windows-container-powershell/asp-net-sample-app.png b/articles/aks/learn/media/quick-windows-container-deploy-cli/asp-net-sample-app.png similarity index 100% rename from articles/aks/media/windows-container-powershell/asp-net-sample-app.png rename to articles/aks/learn/media/quick-windows-container-deploy-cli/asp-net-sample-app.png diff --git a/articles/aks/media/windows-container/asp-net-sample-app.png b/articles/aks/learn/media/quick-windows-container-deploy-powershell/asp-net-sample-app.png similarity index 100% rename from articles/aks/media/windows-container/asp-net-sample-app.png rename to articles/aks/learn/media/quick-windows-container-deploy-powershell/asp-net-sample-app.png diff --git a/articles/aks/learn/quick-kubernetes-deploy-cli.md b/articles/aks/learn/quick-kubernetes-deploy-cli.md new file mode 100644 index 0000000000000..0d1acdb7ff43f --- /dev/null +++ b/articles/aks/learn/quick-kubernetes-deploy-cli.md @@ -0,0 +1,320 @@ +--- +title: 'Quickstart: Deploy an AKS cluster by using Azure CLI' +description: Learn how to quickly create a Kubernetes cluster, deploy an application, and monitor performance in Azure Kubernetes Service (AKS) using the Azure CLI. 
+services: container-service +ms.topic: quickstart +ms.date: 04/29/2022 +ms.custom: H1Hack27Feb2017, mvc, devcenter, seo-javascript-september2019, seo-javascript-october2019, seo-python-october2019, devx-track-azurecli, contperf-fy21q1, mode-api +#Customer intent: As a developer or cluster operator, I want to quickly create an AKS cluster and deploy an application so that I can see how to run and monitor applications using the managed Kubernetes service in Azure. +--- + +# Quickstart: Deploy an Azure Kubernetes Service cluster using the Azure CLI + +Azure Kubernetes Service (AKS) is a managed Kubernetes service that lets you quickly deploy and manage clusters. In this quickstart, you will: + +* Deploy an AKS cluster using the Azure CLI. +* Run a sample multi-container application with a web front-end and a Redis instance in the cluster. + +:::image type="content" source="media/quick-kubernetes-deploy-portal/azure-voting-application.png" alt-text="Screenshot of browsing to Azure Vote sample application."::: + +This quickstart assumes a basic understanding of Kubernetes concepts. For more information, see [Kubernetes core concepts for Azure Kubernetes Service (AKS)][kubernetes-concepts]. + +[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] + +To learn more about creating a Windows Server node pool, see [Create an AKS cluster that supports Windows Server containers](quick-windows-container-deploy-cli.md). + +[!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] + +- This article requires version 2.0.64 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. + +- The identity you are using to create your cluster has the appropriate minimum permissions. For more details on access and identity for AKS, see [Access and identity options for Azure Kubernetes Service (AKS)](../concepts-identity.md). 
+ +- If you have multiple Azure subscriptions, select the appropriate subscription ID in which the resources should be billed using the +[Az account](/cli/azure/account) command. + +- Verify *Microsoft.OperationsManagement* and *Microsoft.OperationalInsights* are registered on your subscription. To check the registration status: + + ```azurecli + az provider show -n Microsoft.OperationsManagement -o table + az provider show -n Microsoft.OperationalInsights -o table + ``` + + If they are not registered, register *Microsoft.OperationsManagement* and *Microsoft.OperationalInsights* using: + + ```azurecli + az provider register --namespace Microsoft.OperationsManagement + az provider register --namespace Microsoft.OperationalInsights + ``` + +> [!NOTE] +> Run the commands with administrative privileges if you plan to run the commands in this quickstart locally instead of in Azure Cloud Shell. + +## Create a resource group + +An [Azure resource group](../../azure-resource-manager/management/overview.md) is a logical group in which Azure resources are deployed and managed. When you create a resource group, you are prompted to specify a location. This location is: + +* The storage location of your resource group metadata. +* Where your resources will run in Azure if you don't specify another region during resource creation. + +The following example creates a resource group named *myResourceGroup* in the *eastus* location. + +Create a resource group using the [az group create][az-group-create] command. 
+ +```azurecli-interactive +az group create --name myResourceGroup --location eastus +``` + +The following output example resembles successful creation of the resource group: + +```json +{ + "id": "/subscriptions//resourceGroups/myResourceGroup", + "location": "eastus", + "managedBy": null, + "name": "myResourceGroup", + "properties": { + "provisioningState": "Succeeded" + }, + "tags": null +} +``` + +## Create AKS cluster + +Create an AKS cluster using the [az aks create][az-aks-create] command with the *--enable-addons monitoring* parameter to enable [Container insights][azure-monitor-containers]. The following example creates a cluster named *myAKSCluster* with one node: + +```azurecli-interactive +az aks create --resource-group myResourceGroup --name myAKSCluster --node-count 1 --enable-addons monitoring --generate-ssh-keys +``` + +After a few minutes, the command completes and returns JSON-formatted information about the cluster. + +> [!NOTE] +> When you create an AKS cluster, a second resource group is automatically created to store the AKS resources. For more information, see [Why are two resource groups created with AKS?](../faq.md#why-are-two-resource-groups-created-with-aks) + +## Connect to the cluster + +To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. + +1. Install `kubectl` locally using the [az aks install-cli][az-aks-install-cli] command: + + ```azurecli + az aks install-cli + ``` + +2. Configure `kubectl` to connect to your Kubernetes cluster using the [az aks get-credentials][az-aks-get-credentials] command. The following command: + * Downloads credentials and configures the Kubernetes CLI to use them. + * Uses `~/.kube/config`, the default location for the [Kubernetes configuration file][kubeconfig-file]. Specify a different location for your Kubernetes configuration file using *--file* argument. 
+ + ```azurecli-interactive + az aks get-credentials --resource-group myResourceGroup --name myAKSCluster + ``` + +3. Verify the connection to your cluster using the [kubectl get][kubectl-get] command. This command returns a list of the cluster nodes. + + ```azurecli-interactive + kubectl get nodes + ``` + + The following output example shows the single node created in the previous steps. Make sure the node status is *Ready*: + + ```output + NAME STATUS ROLES AGE VERSION + aks-nodepool1-31718369-0 Ready agent 6m44s v1.12.8 + ``` + +## Deploy the application + +A [Kubernetes manifest file][kubernetes-deployment] defines a cluster's desired state, such as which container images to run. + +In this quickstart, you will use a manifest to create all objects needed to run the [Azure Vote application][azure-vote-app]. This manifest includes two [Kubernetes deployments][kubernetes-deployment]: + +* The sample Azure Vote Python applications. +* A Redis instance. + +Two [Kubernetes Services][kubernetes-service] are also created: + +* An internal service for the Redis instance. +* An external service to access the Azure Vote application from the internet. + +1. Create a file named `azure-vote.yaml`. + * If you use the Azure Cloud Shell, this file can be created using `code`, `vi`, or `nano` as if working on a virtual or physical system +1. 
Copy in the following YAML definition: + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: azure-vote-back + spec: + replicas: 1 + selector: + matchLabels: + app: azure-vote-back + template: + metadata: + labels: + app: azure-vote-back + spec: + nodeSelector: + "kubernetes.io/os": linux + containers: + - name: azure-vote-back + image: mcr.microsoft.com/oss/bitnami/redis:6.0.8 + env: + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + ports: + - containerPort: 6379 + name: redis + --- + apiVersion: v1 + kind: Service + metadata: + name: azure-vote-back + spec: + ports: + - port: 6379 + selector: + app: azure-vote-back + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: azure-vote-front + spec: + replicas: 1 + selector: + matchLabels: + app: azure-vote-front + template: + metadata: + labels: + app: azure-vote-front + spec: + nodeSelector: + "kubernetes.io/os": linux + containers: + - name: azure-vote-front + image: mcr.microsoft.com/azuredocs/azure-vote-front:v1 + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + ports: + - containerPort: 80 + env: + - name: REDIS + value: "azure-vote-back" + --- + apiVersion: v1 + kind: Service + metadata: + name: azure-vote-front + spec: + type: LoadBalancer + ports: + - port: 80 + selector: + app: azure-vote-front + ``` + +1. 
Deploy the application using the [kubectl apply][kubectl-apply] command and specify the name of your YAML manifest: + + ```console + kubectl apply -f azure-vote.yaml + ``` + + The following example resembles output showing the successfully created deployments and services: + + ```output + deployment "azure-vote-back" created + service "azure-vote-back" created + deployment "azure-vote-front" created + service "azure-vote-front" created + ``` + +## Test the application + +When the application runs, a Kubernetes service exposes the application front end to the internet. This process can take a few minutes to complete. + +Monitor progress using the [kubectl get service][kubectl-get] command with the `--watch` argument. + +```azurecli-interactive +kubectl get service azure-vote-front --watch +``` + +The **EXTERNAL-IP** output for the `azure-vote-front` service will initially show as *pending*. + +```output +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +azure-vote-front LoadBalancer 10.0.37.27 80:30572/TCP 6s +``` + +Once the **EXTERNAL-IP** address changes from *pending* to an actual public IP address, use `CTRL-C` to stop the `kubectl` watch process. The following example output shows a valid public IP address assigned to the service: + +```output +azure-vote-front LoadBalancer 10.0.37.27 52.179.23.131 80:30572/TCP 2m +``` + +To see the Azure Vote app in action, open a web browser to the external IP address of your service. + +:::image type="content" source="media/quick-kubernetes-deploy-portal/azure-voting-application.png" alt-text="Screenshot of browsing to Azure Vote sample application."::: + +## Delete the cluster + +To avoid Azure charges, if you don't plan on going through the tutorials that follow, clean up your unnecessary resources. Use the [az group delete][az-group-delete] command to remove the resource group, container service, and all related resources. 
+ +```azurecli-interactive +az group delete --name myResourceGroup --yes --no-wait +``` + +> [!NOTE] +> The AKS cluster was created with system-assigned managed identity (default identity option used in this quickstart); the identity is managed by the platform and does not require removal. + +## Next steps + +In this quickstart, you deployed a Kubernetes cluster and then deployed a simple multi-container application to it. + +To learn more about AKS, and walk through a complete code to deployment example, continue to the Kubernetes cluster tutorial. + +> [!div class="nextstepaction"] +> [AKS tutorial][aks-tutorial] + +This quickstart is for introductory purposes. For guidance on creating full solutions with AKS for production, see [AKS solution guidance][aks-solution-guidance]. + + +[azure-vote-app]: https://github.com/Azure-Samples/azure-voting-app-redis.git +[kubectl]: https://kubernetes.io/docs/user-guide/kubectl/ +[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply +[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get +[kubeconfig-file]: https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ + + +[kubernetes-concepts]: ../concepts-clusters-workloads.md +[aks-monitor]: ../../azure-monitor/containers/container-insights-onboard.md +[aks-tutorial]: ../tutorial-kubernetes-prepare-app.md +[az-aks-browse]: /cli/azure/aks#az-aks-browse +[az-aks-create]: /cli/azure/aks#az-aks-create +[az-aks-get-credentials]: /cli/azure/aks#az-aks-get-credentials +[az-aks-install-cli]: /cli/azure/aks#az-aks-install-cli +[az-group-create]: /cli/azure/group#az-group-create +[az-group-delete]: /cli/azure/group#az-group-delete +[azure-cli-install]: /cli/azure/install_azure_cli +[azure-monitor-containers]: ../../azure-monitor/containers/container-insights-overview.md +[sp-delete]: ../kubernetes-service-principal.md#additional-considerations +[azure-portal]:
https://portal.azure.com +[kubernetes-deployment]: ../concepts-clusters-workloads.md#deployments-and-yaml-manifests +[kubernetes-service]: ../concepts-network.md#services +[windows-container-cli]: ../windows-container-cli.md +[aks-solution-guidance]: /azure/architecture/reference-architectures/containers/aks-start-here?WT.mc_id=AKSDOCSPAGE \ No newline at end of file diff --git a/articles/aks/learn/quick-kubernetes-deploy-portal.md b/articles/aks/learn/quick-kubernetes-deploy-portal.md new file mode 100644 index 0000000000000..f3881b619084e --- /dev/null +++ b/articles/aks/learn/quick-kubernetes-deploy-portal.md @@ -0,0 +1,298 @@ +--- +title: 'Quickstart: Deploy an AKS cluster by using the Azure portal' +titleSuffix: Azure Kubernetes Service +description: Learn how to quickly create a Kubernetes cluster, deploy an application, and monitor performance in Azure Kubernetes Service (AKS) using the Azure portal. +services: container-service +ms.topic: quickstart +ms.date: 04/29/2022 +ms.custom: mvc, seo-javascript-october2019, contperf-fy21q3, mode-ui +#Customer intent: As a developer or cluster operator, I want to quickly create an AKS cluster and deploy an application so that I can see how to run and monitor applications using the managed Kubernetes service in Azure. +--- + +# Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster using the Azure portal + +Azure Kubernetes Service (AKS) is a managed Kubernetes service that lets you quickly deploy and manage clusters. In this quickstart, you will: + +* Deploy an AKS cluster using the Azure portal. +* Run a sample multi-container application with a web front-end and a Redis instance in the cluster. + +:::image type="content" source="media/quick-kubernetes-deploy-portal/azure-voting-application.png" alt-text="Screenshot of browsing to Azure Vote sample application."::: + +This quickstart assumes a basic understanding of Kubernetes concepts. 
For more information, see [Kubernetes core concepts for Azure Kubernetes Service (AKS)][kubernetes-concepts]. + +## Prerequisites + +[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] + +- If you are unfamiliar with using the Bash environment in Azure Cloud Shell, review [Overview of Azure Cloud Shell](../../cloud-shell/overview.md). + +- The identity you are using to create your cluster has the appropriate minimum permissions. For more details on access and identity for AKS, see [Access and identity options for Azure Kubernetes Service (AKS)](../concepts-identity.md). + +## Create an AKS cluster + +1. Sign in to the [Azure portal](https://portal.azure.com). + +2. On the Azure portal menu or from the **Home** page, select **Create a resource**. + +3. Select **Containers** > **Kubernetes Service**. + +4. On the **Basics** page, configure the following options: + + - **Project details**: + * Select an Azure **Subscription**. + * Select or create an Azure **Resource group**, such as *myResourceGroup*. + - **Cluster details**: + * Ensure the **Preset configuration** is *Standard ($$)*. For more details on preset configurations, see [Cluster configuration presets in the Azure portal][preset-config]. + * Enter a **Kubernetes cluster name**, such as *myAKSCluster*. + * Select a **Region** for the AKS cluster, and leave the default value selected for **Kubernetes version**. + * Select **99.5%** for **API server availability**. + - **Primary node pool**: + * Leave the default values selected. + + :::image type="content" source="media/quick-kubernetes-deploy-portal/create-cluster-basics.png" alt-text="Screenshot of Create AKS cluster - provide basic information."::: + + > [!NOTE] + > You can change the preset configuration when creating your cluster by selecting *Learn more and compare presets* and choosing a different option.
+ > :::image type="content" source="media/quick-kubernetes-deploy-portal/cluster-preset-options.png" alt-text="Screenshot of Create AKS cluster - portal preset options."::: + +5. Select **Next: Node pools** when complete. + +6. Keep the default **Node pools** options. At the bottom of the screen, click **Next: Access**. + +7. On the **Access** page, configure the following options: + + - The default value for **Resource identity** is **System-assigned managed identity**. Managed identities provide an identity for applications to use when connecting to resources that support Azure Active Directory (Azure AD) authentication. For more details about managed identities, see [What are managed identities for Azure resources?](../../active-directory/managed-identities-azure-resources/overview.md). + - The Kubernetes role-based access control (RBAC) option is the default value to provide more fine-grained control over access to the Kubernetes resources deployed in your AKS cluster. + + By default, *Basic* networking is used, and [Container insights](../../azure-monitor/containers/container-insights-overview.md) is enabled. + +8. Click **Review + create**. When you navigate to the **Review + create** tab, Azure runs validation on the settings that you have chosen. If validation passes, you can proceed to create the AKS cluster by selecting **Create**. If validation fails, then it indicates which settings need to be modified. + +9. It takes a few minutes to create the AKS cluster. When your deployment is complete, navigate to your resource by either: + * Selecting **Go to resource**, or + * Browsing to the AKS cluster resource group and selecting the AKS resource. In this example you browse for *myResourceGroup* and select the resource *myAKSCluster*. 
+ + :::image type="content" source="media/quick-kubernetes-deploy-portal/aks-portal-dashboard.png" alt-text="Screenshot of AKS dashboard in the Azure portal."::: + +## Connect to the cluster + +To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. If you are unfamiliar with the Cloud Shell, review [Overview of Azure Cloud Shell](../../cloud-shell/overview.md). + +1. Open Cloud Shell using the `>_` button on the top of the Azure portal. + + :::image type="content" source="media/quick-kubernetes-deploy-portal/aks-cloud-shell.png" alt-text="Screenshot of Open the Azure Cloud Shell in the portal option."::: + + > [!NOTE] + > To perform these operations in a local shell installation: + > + > 1. Verify Azure CLI is installed. + > 2. Connect to Azure via the `az login` command. + +2. Configure `kubectl` to connect to your Kubernetes cluster using the [az aks get-credentials][az-aks-get-credentials] command. The following command downloads credentials and configures the Kubernetes CLI to use them. + + ```azurecli + az aks get-credentials --resource-group myResourceGroup --name myAKSCluster + ``` + +3. Verify the connection to your cluster using `kubectl get` to return a list of the cluster nodes. + + ```console + kubectl get nodes + ``` + + Output shows the single node created in the previous steps. Make sure the node status is *Ready*: + + ```output + NAME STATUS ROLES AGE VERSION + aks-agentpool-12345678-vmss000000 Ready agent 23m v1.19.11 + aks-agentpool-12345678-vmss000001 Ready agent 24m v1.19.11 + ``` + +## Deploy the application + +A Kubernetes manifest file defines a cluster's desired state, like which container images to run. + +In this quickstart, you will use a manifest to create all objects needed to run the Azure Vote application. This manifest includes two Kubernetes deployments: + +* The sample Azure Vote Python applications. +* A Redis instance. 
+ +Two Kubernetes Services are also created: + +* An internal service for the Redis instance. +* An external service to access the Azure Vote application from the internet. + +1. In the Cloud Shell, use an editor to create a file named `azure-vote.yaml`, such as: + * `code azure-vote.yaml` + * `nano azure-vote.yaml`, or + * `vi azure-vote.yaml`. + +1. Copy in the following YAML definition: + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: azure-vote-back + spec: + replicas: 1 + selector: + matchLabels: + app: azure-vote-back + template: + metadata: + labels: + app: azure-vote-back + spec: + nodeSelector: + "kubernetes.io/os": linux + containers: + - name: azure-vote-back + image: mcr.microsoft.com/oss/bitnami/redis:6.0.8 + env: + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + ports: + - containerPort: 6379 + name: redis + --- + apiVersion: v1 + kind: Service + metadata: + name: azure-vote-back + spec: + ports: + - port: 6379 + selector: + app: azure-vote-back + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: azure-vote-front + spec: + replicas: 1 + selector: + matchLabels: + app: azure-vote-front + template: + metadata: + labels: + app: azure-vote-front + spec: + nodeSelector: + "kubernetes.io/os": linux + containers: + - name: azure-vote-front + image: mcr.microsoft.com/azuredocs/azure-vote-front:v1 + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + ports: + - containerPort: 80 + env: + - name: REDIS + value: "azure-vote-back" + --- + apiVersion: v1 + kind: Service + metadata: + name: azure-vote-front + spec: + type: LoadBalancer + ports: + - port: 80 + selector: + app: azure-vote-front + ``` + +1. 
Deploy the application using the `kubectl apply` command and specify the name of your YAML manifest: + + ```console + kubectl apply -f azure-vote.yaml + ``` + + Output shows the successfully created deployments and services: + + ```output + deployment "azure-vote-back" created + service "azure-vote-back" created + deployment "azure-vote-front" created + service "azure-vote-front" created + ``` + +## Test the application + +When the application runs, a Kubernetes service exposes the application front end to the internet. This process can take a few minutes to complete. + +To monitor progress, use the `kubectl get service` command with the `--watch` argument. + +```console +kubectl get service azure-vote-front --watch +``` + +The **EXTERNAL-IP** output for the `azure-vote-front` service will initially show as *pending*. + +```output +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +azure-vote-front LoadBalancer 10.0.37.27 80:30572/TCP 6s +``` + +Once the **EXTERNAL-IP** address changes from *pending* to an actual public IP address, use `CTRL-C` to stop the `kubectl` watch process. The following example output shows a valid public IP address assigned to the service: + +```output +azure-vote-front LoadBalancer 10.0.37.27 52.179.23.131 80:30572/TCP 2m +``` + +To see the Azure Vote app in action, open a web browser to the external IP address of your service. + +:::image type="content" source="media/quick-kubernetes-deploy-portal/azure-voting-application.png" alt-text="Screenshot of browsing to Azure Vote sample application."::: + +## Delete cluster + +To avoid Azure charges, if you don't plan on going through the tutorials that follow, clean up your unnecessary resources. Select the **Delete** button on the AKS cluster dashboard. 
You can also use the [az aks delete][az-aks-delete] command in the Cloud Shell: + +```azurecli +az aks delete --resource-group myResourceGroup --name myAKSCluster --yes --no-wait +``` + +> [!NOTE] +> When you delete the cluster, the system-assigned managed identity is managed by the platform and does not require removal. + +## Next steps + +In this quickstart, you deployed a Kubernetes cluster and then deployed a sample multi-container application to it. + +To learn more about AKS by walking through a complete example, including building an application, deploying from Azure Container Registry, updating a running application, and scaling and upgrading your cluster, continue to the Kubernetes cluster tutorial. + +> [!div class="nextstepaction"] +> [AKS tutorial][aks-tutorial] + + +[azure-vote-app]: https://github.com/Azure-Samples/azure-voting-app-redis.git +[kubectl]: https://kubernetes.io/docs/user-guide/kubectl/ +[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply +[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get +[kubernetes-documentation]: https://kubernetes.io/docs/home/ + + +[kubernetes-concepts]: ../concepts-clusters-workloads.md +[az-aks-get-credentials]: /cli/azure/aks#az_aks_get_credentials +[az-aks-delete]: /cli/azure/aks#az_aks_delete +[aks-monitor]: ../../azure-monitor/containers/container-insights-overview.md +[aks-network]: ../concepts-network.md +[aks-tutorial]: ../tutorial-kubernetes-prepare-app.md +[http-routing]: ../http-application-routing.md +[preset-config]: ../quotas-skus-regions.md#cluster-configuration-presets-in-the-azure-portal +[sp-delete]: ../kubernetes-service-principal.md#additional-considerations \ No newline at end of file diff --git a/articles/aks/learn/quick-kubernetes-deploy-powershell.md b/articles/aks/learn/quick-kubernetes-deploy-powershell.md new file mode 100644 index 0000000000000..e255e536f8e1b --- /dev/null +++
b/articles/aks/learn/quick-kubernetes-deploy-powershell.md @@ -0,0 +1,302 @@ +--- +title: 'Quickstart: Deploy an AKS cluster by using PowerShell' +description: Learn how to quickly create a Kubernetes cluster and deploy an application in Azure Kubernetes Service (AKS) using PowerShell. +services: container-service +ms.topic: quickstart +ms.date: 04/29/2022 +ms.custom: devx-track-azurepowershell, mode-api +#Customer intent: As a developer or cluster operator, I want to quickly create an AKS cluster and deploy an application so that I can see how to run applications using the managed Kubernetes service in Azure. +--- + +# Quickstart: Deploy an Azure Kubernetes Service cluster using PowerShell + +Azure Kubernetes Service (AKS) is a managed Kubernetes service that lets you quickly deploy and manage clusters. In this quickstart, you will: + +* Deploy an AKS cluster using PowerShell. +* Run a sample multi-container application with a web front-end and a Redis instance in the cluster. + +:::image type="content" source="media/quick-kubernetes-deploy-portal/azure-voting-application.png" alt-text="Screenshot of browsing to Azure Vote sample application."::: + +This quickstart assumes a basic understanding of Kubernetes concepts. For more information, see [Kubernetes core concepts for Azure Kubernetes Service (AKS)][kubernetes-concepts]. + +## Prerequisites + +[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] + +- If you're running PowerShell locally, install the Az PowerShell module and connect to your Azure account using the [Connect-AzAccount](/powershell/module/az.accounts/Connect-AzAccount) cmdlet. For more information about installing the Az PowerShell module, see [Install Azure PowerShell][install-azure-powershell]. + +- The identity you are using to create your cluster has the appropriate minimum permissions. 
For more details on access and identity for AKS, see [Access and identity options for Azure Kubernetes Service (AKS)](../concepts-identity.md). + +- If you have multiple Azure subscriptions, select the appropriate subscription ID in which the resources should be billed using the +[Set-AzContext](/powershell/module/az.accounts/set-azcontext) cmdlet. + + ```azurepowershell-interactive + Set-AzContext -SubscriptionId 00000000-0000-0000-0000-000000000000 + ``` + +[!INCLUDE [cloud-shell-try-it](../../../includes/cloud-shell-try-it.md)] + +## Create a resource group + +An [Azure resource group](../../azure-resource-manager/management/overview.md) is a logical group in which Azure resources are deployed and managed. When you create a resource group, you will be prompted to specify a location. This location is: + +* The storage location of your resource group metadata. +* Where your resources will run in Azure if you don't specify another region during resource creation. + +The following example creates a resource group named *myResourceGroup* in the *eastus* region. + +Create a resource group using the [New-AzResourceGroup][new-azresourcegroup] +cmdlet. + +```azurepowershell-interactive +New-AzResourceGroup -Name myResourceGroup -Location eastus +``` + +The following output example resembles successful creation of the resource group: + +```plaintext +ResourceGroupName : myResourceGroup +Location : eastus +ProvisioningState : Succeeded +Tags : +ResourceId : /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup +``` + +## Create AKS cluster + +Create an AKS cluster using the [New-AzAksCluster][new-azakscluster] cmdlet with the *--WorkspaceResourceId* parameter to enable [Azure Monitor container insights][azure-monitor-containers]. + +1. Generate an SSH key pair using the `ssh-keygen` command-line utility. 
For more details, see: + * [Quick steps: Create and use an SSH public-private key pair for Linux VMs in Azure](../../virtual-machines/linux/mac-create-ssh-keys.md) + * [How to use SSH keys with Windows on Azure](../../virtual-machines/linux/ssh-from-windows.md) + +1. Create an AKS cluster named **myAKSCluster** with one node. + + ```azurepowershell-interactive + New-AzAksCluster -ResourceGroupName myResourceGroup -Name myAKSCluster -NodeCount 1 + ``` + +After a few minutes, the command completes and returns information about the cluster. + +> [!NOTE] +> When you create an AKS cluster, a second resource group is automatically created to store the AKS resources. For more information, see [Why are two resource groups created with AKS?](../faq.md#why-are-two-resource-groups-created-with-aks) + +## Connect to the cluster + +To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. + +1. Install `kubectl` locally using the `Install-AzAksKubectl` cmdlet: + + ```azurepowershell + Install-AzAksKubectl + ``` + +2. Configure `kubectl` to connect to your Kubernetes cluster using the [Import-AzAksCredential][import-azakscredential] cmdlet. The following cmdlet downloads credentials and configures the Kubernetes CLI to use them. + + ```azurepowershell-interactive + Import-AzAksCredential -ResourceGroupName myResourceGroup -Name myAKSCluster + ``` + +3. Verify the connection to your cluster using the [kubectl get][kubectl-get] command. This command returns a list of the cluster nodes. + + ```azurepowershell-interactive + kubectl get nodes + ``` + + The following output example shows the single node created in the previous steps. 
Make sure the node status is *Ready*: + + ```plaintext + NAME STATUS ROLES AGE VERSION + aks-nodepool1-31718369-0 Ready agent 6m44s v1.15.10 + ``` + +## Deploy the application + +A [Kubernetes manifest file][kubernetes-deployment] defines a cluster's desired state, such as which container images to run. + +In this quickstart, you will use a manifest to create all objects needed to run the [Azure Vote application][azure-vote-app]. This manifest includes two [Kubernetes deployments][kubernetes-deployment]: + +* The sample Azure Vote Python applications. +* A Redis instance. + +Two [Kubernetes Services][kubernetes-service] are also created: + +* An internal service for the Redis instance. +* An external service to access the Azure Vote application from the internet. + +1. Create a file named `azure-vote.yaml`. + * If you use the Azure Cloud Shell, this file can be created using `vi` or `nano` as if working on a virtual or physical system +1. Copy in the following YAML definition: + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: azure-vote-back + spec: + replicas: 1 + selector: + matchLabels: + app: azure-vote-back + template: + metadata: + labels: + app: azure-vote-back + spec: + nodeSelector: + "kubernetes.io/os": linux + containers: + - name: azure-vote-back + image: mcr.microsoft.com/oss/bitnami/redis:6.0.8 + env: + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + ports: + - containerPort: 6379 + name: redis + --- + apiVersion: v1 + kind: Service + metadata: + name: azure-vote-back + spec: + ports: + - port: 6379 + selector: + app: azure-vote-back + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: azure-vote-front + spec: + replicas: 1 + selector: + matchLabels: + app: azure-vote-front + template: + metadata: + labels: + app: azure-vote-front + spec: + nodeSelector: + "kubernetes.io/os": linux + containers: + - name: azure-vote-front + 
image: mcr.microsoft.com/azuredocs/azure-vote-front:v1 + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + ports: + - containerPort: 80 + env: + - name: REDIS + value: "azure-vote-back" + --- + apiVersion: v1 + kind: Service + metadata: + name: azure-vote-front + spec: + type: LoadBalancer + ports: + - port: 80 + selector: + app: azure-vote-front + ``` + +1. Deploy the application using the [kubectl apply][kubectl-apply] command and specify the name of your YAML manifest: + + ```azurepowershell-interactive + kubectl apply -f azure-vote.yaml + ``` + + The following example resembles output showing the successfully created deployments and services: + + ```plaintext + deployment.apps/azure-vote-back created + service/azure-vote-back created + deployment.apps/azure-vote-front created + service/azure-vote-front created + ``` + +## Test the application + +When the application runs, a Kubernetes service exposes the application front end to the internet. This process can take a few minutes to complete. + +Monitor progress using the [kubectl get service][kubectl-get] command with the `--watch` argument. + +```azurepowershell-interactive +kubectl get service azure-vote-front --watch +``` + +The **EXTERNAL-IP** output for the `azure-vote-front` service will initially show as *pending*. + +```plaintext +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +azure-vote-front LoadBalancer 10.0.37.27 80:30572/TCP 6s +``` + +Once the **EXTERNAL-IP** address changes from *pending* to an actual public IP address, use `CTRL-C` to stop the `kubectl` watch process. The following example output shows a valid public IP address assigned to the service: + +```plaintext +azure-vote-front LoadBalancer 10.0.37.27 52.179.23.131 80:30572/TCP 2m +``` + +To see the Azure Vote app in action, open a web browser to the external IP address of your service. 
+ +:::image type="content" source="media/quick-kubernetes-deploy-portal/azure-voting-application.png" alt-text="Screenshot of browsing to Azure Vote sample application."::: + +## Delete the cluster + +To avoid Azure charges, if you don't plan on going through the tutorials that follow, clean up your unnecessary resources. Use the [Remove-AzResourceGroup][remove-azresourcegroup] cmdlet to remove the resource group, container service, and all related resources. + +```azurepowershell-interactive +Remove-AzResourceGroup -Name myResourceGroup +``` + +> [!NOTE] +> The AKS cluster was created with system-assigned managed identity (default identity option used in this quickstart), the identity is managed by the platform and does not require removal. + +## Next steps + +In this quickstart, you deployed a Kubernetes cluster and then deployed a sample multi-container application to it. + +To learn more about AKS, and walk through a complete code to deployment example, continue to the Kubernetes cluster tutorial. 
+ +> [!div class="nextstepaction"] +> [AKS tutorial][aks-tutorial] + + +[azure-monitor-containers]: ../../azure-monitor/containers/container-insights-overview.md +[kubectl]: https://kubernetes.io/docs/user-guide/kubectl/ +[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get +[azure-dev-spaces]: /previous-versions/azure/dev-spaces/ +[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply +[azure-vote-app]: https://github.com/Azure-Samples/azure-voting-app-redis.git + + +[windows-container-powershell]: ../windows-container-powershell.md +[kubernetes-concepts]: ../concepts-clusters-workloads.md +[aks-monitor]: ../../azure-monitor/containers/container-insights-onboard.md +[install-azure-powershell]: /powershell/azure/install-az-ps +[new-azresourcegroup]: /powershell/module/az.resources/new-azresourcegroup +[new-azakscluster]: /powershell/module/az.aks/new-azakscluster +[import-azakscredential]: /powershell/module/az.aks/import-azakscredential +[kubernetes-deployment]: ../concepts-clusters-workloads.md#deployments-and-yaml-manifests +[azure-monitor-containers]: ../../azure-monitor/containers/container-insights-overview.md +[kubernetes-service]: ../concepts-network.md#services +[remove-azresourcegroup]: /powershell/module/az.resources/remove-azresourcegroup +[sp-delete]: ../kubernetes-service-principal.md#additional-considerations +[aks-tutorial]: ../tutorial-kubernetes-prepare-app.md diff --git a/articles/aks/learn/quick-kubernetes-deploy-rm-template.md b/articles/aks/learn/quick-kubernetes-deploy-rm-template.md new file mode 100644 index 0000000000000..5947c5da4fa7b --- /dev/null +++ b/articles/aks/learn/quick-kubernetes-deploy-rm-template.md @@ -0,0 +1,311 @@ +--- +title: Quickstart - Create an Azure Kubernetes Service (AKS) cluster +description: Learn how to quickly create a Kubernetes cluster using an Azure Resource Manager template and deploy an application in Azure Kubernetes Service 
(AKS) +services: container-service +ms.topic: quickstart +ms.date: 04/29/2021 +ms.custom: mvc, subject-armqs, devx-track-azurecli, mode-arm +#Customer intent: As a developer or cluster operator, I want to quickly create an AKS cluster and deploy an application so that I can see how to run applications using the managed Kubernetes service in Azure. +--- + +# Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster using an ARM template + +Azure Kubernetes Service (AKS) is a managed Kubernetes service that lets you quickly deploy and manage clusters. In this quickstart, you will: + +* Deploy an AKS cluster using an Azure Resource Manager template. +* Run a sample multi-container application with a web front-end and a Redis instance in the cluster. + +:::image type="content" source="media/quick-kubernetes-deploy-portal/azure-voting-application.png" alt-text="Screenshot of browsing to Azure Vote sample application."::: + +[!INCLUDE [About Azure Resource Manager](../../../includes/resource-manager-quickstart-introduction.md)] + +This quickstart assumes a basic understanding of Kubernetes concepts. For more information, see [Kubernetes core concepts for Azure Kubernetes Service (AKS)][kubernetes-concepts]. + +If your environment meets the prerequisites and you're familiar with using ARM templates, select the **Deploy to Azure** button. The template will open in the Azure portal. + +[![Deploy to Azure](../../media/template-deployments/deploy-to-azure.svg)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-quickstart-templates%2Fmaster%2Fquickstarts%2Fmicrosoft.kubernetes%2Faks%2Fazuredeploy.json) + +[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] + +[!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] + +- This article requires version 2.0.64 or later of the Azure CLI. 
If using Azure Cloud Shell, the latest version is already installed. + +- To create an AKS cluster using a Resource Manager template, you provide an SSH public key. If you need this resource, see the following section; otherwise skip to the [Review the template](#review-the-template) section. + +- The identity you are using to create your cluster has the appropriate minimum permissions. For more details on access and identity for AKS, see [Access and identity options for Azure Kubernetes Service (AKS)](../concepts-identity.md). + +- To deploy a Bicep file or ARM template, you need write access on the resources you're deploying and access to all operations on the Microsoft.Resources/deployments resource type. For example, to deploy a virtual machine, you need Microsoft.Compute/virtualMachines/write and Microsoft.Resources/deployments/* permissions. For a list of roles and permissions, see [Azure built-in roles](../../role-based-access-control/built-in-roles.md). + +### Create an SSH key pair + +To access AKS nodes, you connect using an SSH key pair (public and private), which you generate using the `ssh-keygen` command. By default, these files are created in the *~/.ssh* directory. Running the `ssh-keygen` command will overwrite any SSH key pair with the same name already existing in the given location. + +1. Go to [https://shell.azure.com](https://shell.azure.com) to open Cloud Shell in your browser. + +1. Run the `ssh-keygen` command. The following example creates an SSH key pair using RSA encryption and a bit length of 4096: + + ```console + ssh-keygen -t rsa -b 4096 + ``` + +For more information about creating SSH keys, see [Create and manage SSH keys for authentication in Azure][ssh-keys]. + +## Review the template + +The template used in this quickstart is from [Azure Quickstart templates](https://azure.microsoft.com/resources/templates/aks/). 
+ +:::code language="json" source="~/quickstart-templates/quickstarts/microsoft.kubernetes/aks/azuredeploy.json"::: + +For more AKS samples, see the [AKS quickstart templates][aks-quickstart-templates] site. + +## Deploy the template + +1. Select the following button to sign in to Azure and open a template. + + [![Deploy to Azure](../../media/template-deployments/deploy-to-azure.svg)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-quickstart-templates%2Fmaster%2Fquickstarts%2Fmicrosoft.kubernetes%2Faks%2Fazuredeploy.json) + +2. Select or enter the following values. + + For this quickstart, leave the default values for the *OS Disk Size GB*, *Agent Count*, *Agent VM Size*, *OS Type*, and *Kubernetes Version*. Provide your own values for the following template parameters: + + * **Subscription**: Select an Azure subscription. + * **Resource group**: Select **Create new**. Enter a unique name for the resource group, such as *myResourceGroup*, then choose **OK**. + * **Location**: Select a location, such as **East US**. + * **Cluster name**: Enter a unique name for the AKS cluster, such as *myAKSCluster*. + * **DNS prefix**: Enter a unique DNS prefix for your cluster, such as *myakscluster*. + * **Linux Admin Username**: Enter a username to connect using SSH, such as *azureuser*. + * **SSH RSA Public Key**: Copy and paste the *public* part of your SSH key pair (by default, the contents of *~/.ssh/id_rsa.pub*). + + :::image type="content" source="./media/quick-kubernetes-deploy-rm-template/create-aks-cluster-using-template-portal.png" alt-text="Screenshot of Resource Manager template to create an Azure Kubernetes Service cluster in the portal."::: + +3. Select **Review + Create**. + +It takes a few minutes to create the AKS cluster. Wait for the cluster to be successfully deployed before you move on to the next step. 
+ +## Validate the deployment + +### Connect to the cluster + +To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. + +1. Install `kubectl` locally using the [az aks install-cli][az-aks-install-cli] command: + + ```azurecli + az aks install-cli + ``` + +2. Configure `kubectl` to connect to your Kubernetes cluster using the [az aks get-credentials][az-aks-get-credentials] command. This command downloads credentials and configures the Kubernetes CLI to use them. + + ```azurecli-interactive + az aks get-credentials --resource-group myResourceGroup --name myAKSCluster + ``` + +3. Verify the connection to your cluster using the [kubectl get][kubectl-get] command. This command returns a list of the cluster nodes. + + ```console + kubectl get nodes + ``` + + The following output example shows the single node created in the previous steps. Make sure the node status is *Ready*: + + ```output + NAME STATUS ROLES AGE VERSION + aks-agentpool-41324942-0 Ready agent 6m44s v1.12.6 + aks-agentpool-41324942-1 Ready agent 6m46s v1.12.6 + aks-agentpool-41324942-2 Ready agent 6m45s v1.12.6 + ``` + +### Deploy the application + +A [Kubernetes manifest file][kubernetes-deployment] defines a cluster's desired state, such as which container images to run. + +In this quickstart, you will use a manifest to create all objects needed to run the [Azure Vote application][azure-vote-app]. This manifest includes two [Kubernetes deployments][kubernetes-deployment]: + +* The sample Azure Vote Python applications. +* A Redis instance. + +Two [Kubernetes Services][kubernetes-service] are also created: + +* An internal service for the Redis instance. +* An external service to access the Azure Vote application from the internet. + +1. Create a file named `azure-vote.yaml`. + * If you use the Azure Cloud Shell, this file can be created using `vi` or `nano` as if working on a virtual or physical system +1. 
Copy in the following YAML definition: + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: azure-vote-back + spec: + replicas: 1 + selector: + matchLabels: + app: azure-vote-back + template: + metadata: + labels: + app: azure-vote-back + spec: + nodeSelector: + "kubernetes.io/os": linux + containers: + - name: azure-vote-back + image: mcr.microsoft.com/oss/bitnami/redis:6.0.8 + env: + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + ports: + - containerPort: 6379 + name: redis + --- + apiVersion: v1 + kind: Service + metadata: + name: azure-vote-back + spec: + ports: + - port: 6379 + selector: + app: azure-vote-back + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: azure-vote-front + spec: + replicas: 1 + selector: + matchLabels: + app: azure-vote-front + template: + metadata: + labels: + app: azure-vote-front + spec: + nodeSelector: + "kubernetes.io/os": linux + containers: + - name: azure-vote-front + image: mcr.microsoft.com/azuredocs/azure-vote-front:v1 + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + ports: + - containerPort: 80 + env: + - name: REDIS + value: "azure-vote-back" + --- + apiVersion: v1 + kind: Service + metadata: + name: azure-vote-front + spec: + type: LoadBalancer + ports: + - port: 80 + selector: + app: azure-vote-front + ``` + +1. 
Deploy the application using the [kubectl apply][kubectl-apply] command and specify the name of your YAML manifest: + + ```console + kubectl apply -f azure-vote.yaml + ``` + + The following example resembles output showing the successfully created deployments and services: + + ```output + deployment "azure-vote-back" created + service "azure-vote-back" created + deployment "azure-vote-front" created + service "azure-vote-front" created + ``` + +### Test the application + +When the application runs, a Kubernetes service exposes the application front end to the internet. This process can take a few minutes to complete. + +Monitor progress using the [kubectl get service][kubectl-get] command with the `--watch` argument. + +```console +kubectl get service azure-vote-front --watch +``` + +The **EXTERNAL-IP** output for the `azure-vote-front` service will initially show as *pending*. + +```output +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +azure-vote-front LoadBalancer 10.0.37.27 80:30572/TCP 6s +``` + +Once the **EXTERNAL-IP** address changes from *pending* to an actual public IP address, use `CTRL-C` to stop the `kubectl` watch process. The following example output shows a valid public IP address assigned to the service: + +```output +azure-vote-front LoadBalancer 10.0.37.27 52.179.23.131 80:30572/TCP 2m +``` + +To see the Azure Vote app in action, open a web browser to the external IP address of your service. + +:::image type="content" source="media/quick-kubernetes-deploy-portal/azure-voting-application.png" alt-text="Screenshot of browsing to Azure Vote sample application."::: + +## Clean up resources + +To avoid Azure charges, if you don't plan on going through the tutorials that follow, clean up your unnecessary resources. Use the [az group delete][az-group-delete] command to remove the resource group, container service, and all related resources. 
+ +```azurecli-interactive +az group delete --name myResourceGroup --yes --no-wait +``` + +> [!NOTE] +> The AKS cluster was created with system-assigned managed identity (default identity option used in this quickstart), the identity is managed by the platform and does not require removal. + +## Next steps + +In this quickstart, you deployed a Kubernetes cluster and then deployed a sample multi-container application to it. + +To learn more about AKS, and walk through a complete code to deployment example, continue to the Kubernetes cluster tutorial. + +> [!div class="nextstepaction"] +> [AKS tutorial][aks-tutorial] + + +[azure-vote-app]: https://github.com/Azure-Samples/azure-voting-app-redis.git +[kubectl]: https://kubernetes.io/docs/user-guide/kubectl/ +[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply +[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get +[azure-dev-spaces]: /previous-versions/azure/dev-spaces/ +[aks-quickstart-templates]: https://azure.microsoft.com/resources/templates/?term=Azure+Kubernetes+Service + + +[kubernetes-concepts]: ../concepts-clusters-workloads.md +[aks-monitor]: ../../azure-monitor/containers/container-insights-onboard.md +[aks-tutorial]: ../tutorial-kubernetes-prepare-app.md +[az-aks-browse]: /cli/azure/aks#az_aks_browse +[az-aks-create]: /cli/azure/aks#az_aks_create +[az-aks-get-credentials]: /cli/azure/aks#az_aks_get_credentials +[az-aks-install-cli]: /cli/azure/aks#az_aks_install_cli +[az-group-create]: /cli/azure/group#az_group_create +[az-group-delete]: /cli/azure/group#az_group_delete +[azure-cli-install]: /cli/azure/install-azure-cli +[sp-delete]: ../kubernetes-service-principal.md#additional-considerations +[azure-portal]: https://portal.azure.com +[kubernetes-deployment]: ../concepts-clusters-workloads.md#deployments-and-yaml-manifests +[kubernetes-service]: ../concepts-network.md#services +[ssh-keys]: 
../../virtual-machines/linux/create-ssh-keys-detailed.md +[az-ad-sp-create-for-rbac]: /cli/azure/ad/sp#az_ad_sp_create_for_rbac diff --git a/articles/aks/learn/quick-windows-container-deploy-cli.md b/articles/aks/learn/quick-windows-container-deploy-cli.md new file mode 100644 index 0000000000000..e4d3de9af0a4f --- /dev/null +++ b/articles/aks/learn/quick-windows-container-deploy-cli.md @@ -0,0 +1,376 @@ +--- +title: Create a Windows Server container on an AKS cluster by using Azure CLI +description: Learn how to quickly create a Kubernetes cluster, deploy an application in a Windows Server container in Azure Kubernetes Service (AKS) using the Azure CLI. +services: container-service +ms.topic: article +ms.date: 04/29/2022 + + +#Customer intent: As a developer or cluster operator, I want to quickly create an AKS cluster and deploy a Windows Server container so that I can see how to run applications running on a Windows Server container using the managed Kubernetes service in Azure. +--- + +# Create a Windows Server container on an Azure Kubernetes Service (AKS) cluster using the Azure CLI + +Azure Kubernetes Service (AKS) is a managed Kubernetes service that lets you quickly deploy and manage clusters. In this article, you deploy an AKS cluster that runs Windows Server 2019 containers using the Azure CLI. You also deploy an ASP.NET sample application in a Windows Server container to the cluster. + +:::image type="content" source="media/quick-windows-container-deploy-cli/asp-net-sample-app.png" alt-text="Screenshot of browsing to ASP.NET sample application."::: + +This article assumes a basic understanding of Kubernetes concepts. For more information, see [Kubernetes core concepts for Azure Kubernetes Service (AKS)](../concepts-clusters-workloads.md). 
+ +[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] + +[!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] + +- This article requires version 2.0.64 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. + +- The identity you are using to create your cluster has the appropriate minimum permissions. For more details on access and identity for AKS, see [Access and identity options for Azure Kubernetes Service (AKS)](../concepts-identity.md). + +- If you have multiple Azure subscriptions, select the appropriate subscription ID in which the resources should be billed using the +[Az account](/cli/azure/account) command. + +### Limitations + +The following limitations apply when you create and manage AKS clusters that support multiple node pools: + +* You can't delete the first node pool. + +The following additional limitations apply to Windows Server node pools: + +* The AKS cluster can have a maximum of 10 node pools. +* The AKS cluster can have a maximum of 100 nodes in each node pool. +* The Windows Server node pool name has a limit of 6 characters. + +## Create a resource group + +An Azure resource group is a logical group in which Azure resources are deployed and managed. When you create a resource group, you are asked to specify a location. This location is where resource group metadata is stored, it is also where your resources run in Azure if you don't specify another region during resource creation. Create a resource group using the [az group create][az-group-create] command. + +The following example creates a resource group named *myResourceGroup* in the *eastus* location. + +> [!NOTE] +> This article uses Bash syntax for the commands in this tutorial. +> If you are using Azure Cloud Shell, ensure that the dropdown in the upper-left of the Cloud Shell window is set to **Bash**. 
+
+```azurecli-interactive
+az group create --name myResourceGroup --location eastus
+```
+
+The following example output shows the resource group created successfully:
+
+```json
+{
+  "id": "/subscriptions/<guid>/resourceGroups/myResourceGroup",
+  "location": "eastus",
+  "managedBy": null,
+  "name": "myResourceGroup",
+  "properties": {
+    "provisioningState": "Succeeded"
+  },
+  "tags": null,
+  "type": null
+}
+```
+
+## Create an AKS cluster
+
+To run an AKS cluster that supports node pools for Windows Server containers, your cluster needs to use a network policy that uses [Azure CNI][azure-cni-about] (advanced) network plugin. For more detailed information to help plan out the required subnet ranges and network considerations, see [configure Azure CNI networking][use-advanced-networking]. Use the [az aks create][az-aks-create] command to create an AKS cluster named *myAKSCluster*. This command will create the necessary network resources if they don't exist.
+
+* The cluster is configured with two nodes.
+* The `--windows-admin-password` and `--windows-admin-username` parameters set the administrator credentials for any Windows Server nodes on the cluster and must meet [Windows Server password requirements][windows-server-password]. If you don't specify the *windows-admin-password* parameter, you will be prompted to provide a value.
+* The node pool uses `VirtualMachineScaleSets`.
+
+> [!NOTE]
+> To ensure that your cluster operates reliably, you should run at least 2 (two) nodes in the default node pool.
+
+Create a username to use as administrator credentials for the Windows Server nodes on your cluster. The following command prompts you for a username and sets it to *WINDOWS_USERNAME* for use in a later command (remember that the commands in this article are entered into a BASH shell).
+ +```azurecli-interactive +echo "Please enter the username to use as administrator credentials for Windows Server nodes on your cluster: " && read WINDOWS_USERNAME +``` + +Create your cluster ensuring you specify `--windows-admin-username` parameter. The following example command creates a cluster using the value from *WINDOWS_USERNAME* you set in the previous command. Alternatively you can provide a different username directly in the parameter instead of using *WINDOWS_USERNAME*. The following command will also prompt you to create a password for the administrator credentials for the Windows Server nodes on your cluster. Alternatively, you can use the *windows-admin-password* parameter and specify your own value there. + +```azurecli-interactive +az aks create \ + --resource-group myResourceGroup \ + --name myAKSCluster \ + --node-count 2 \ + --enable-addons monitoring \ + --generate-ssh-keys \ + --windows-admin-username $WINDOWS_USERNAME \ + --vm-set-type VirtualMachineScaleSets \ + --kubernetes-version 1.20.7 \ + --network-plugin azure +``` + +> [!NOTE] +> If you get a password validation error, verify the password you set meets the [Windows Server password requirements][windows-server-password]. If your password meets the requirements, try creating your resource group in another region. Then try creating the cluster with the new resource group. +> +> If you do not specify an administrator username and password when setting `--vm-set-type VirtualMachineScaleSets` and `--network-plugin azure`, the username is set to *azureuser* and the password is set to a random value. +> +> The administrator username can't be changed, but you can change the administrator password your AKS cluster uses for Windows Server nodes using `az aks update`. For more details, see [Windows Server node pools FAQ][win-faq-change-admin-creds]. + +After a few minutes, the command completes and returns JSON-formatted information about the cluster. 
Occasionally the cluster can take longer than a few minutes to provision. Allow up to 10 minutes in these cases.
+
+## Add a Windows Server node pool
+
+By default, an AKS cluster is created with a node pool that can run Linux containers. Use the `az aks nodepool add` command to add an additional node pool that can run Windows Server containers alongside the Linux node pool.
+
+```azurecli
+az aks nodepool add \
+    --resource-group myResourceGroup \
+    --cluster-name myAKSCluster \
+    --os-type Windows \
+    --name npwin \
+    --node-count 1
+```
+
+The above command creates a new node pool named *npwin* and adds it to the *myAKSCluster*. The above command also uses the default subnet in the default vnet created when running `az aks create`.
+
+## Optional: Using `containerd` with Windows Server node pools
+
+Beginning in Kubernetes version 1.20 and greater, you can specify `containerd` as the container runtime for Windows Server 2019 node pools. From Kubernetes 1.23, containerd will be the default container runtime for Windows.
+
+> [!IMPORTANT]
+> When using `containerd` with Windows Server 2019 node pools:
+> - Both the control plane and Windows Server 2019 node pools must use Kubernetes version 1.20 or greater.
+> - When creating or updating a node pool to run Windows Server containers, the default value for *node-vm-size* is *Standard_D2s_v3*, which was the minimum recommended size for Windows Server 2019 node pools prior to Kubernetes 1.20. The minimum recommended size for Windows Server 2019 node pools using `containerd` is *Standard_D4s_v3*. When setting the *node-vm-size* parameter, please check the list of [restricted VM sizes][restricted-vm-sizes].
+> - It is highly recommended that you use [taints or labels][aks-taints] with your Windows Server 2019 node pools running `containerd` and tolerations or node selectors with your deployments to guarantee your workloads are scheduled correctly.
+ +### Add a Windows Server node pool with `containerd` + +Use the `az aks nodepool add` command to add an additional node pool that can run Windows Server containers with the `containerd` runtime. + +> [!NOTE] +> If you do not specify the *WindowsContainerRuntime=containerd* custom header, the node pool will use Docker as the container runtime. + +```azurecli +az aks nodepool add \ + --resource-group myResourceGroup \ + --cluster-name myAKSCluster \ + --os-type Windows \ + --name npwcd \ + --node-vm-size Standard_D4s_v3 \ + --kubernetes-version 1.20.5 \ + --aks-custom-headers WindowsContainerRuntime=containerd \ + --node-count 1 +``` + +The above command creates a new Windows Server node pool using `containerd` as the runtime named *npwcd* and adds it to the *myAKSCluster*. The above command also uses the default subnet in the default vnet created when running `az aks create`. + +### Upgrade an existing Windows Server node pool to `containerd` + +Use the `az aks nodepool upgrade` command to upgrade a specific node pool from Docker to `containerd`. + +```azurecli +az aks nodepool upgrade \ + --resource-group myResourceGroup \ + --cluster-name myAKSCluster \ + --name npwd \ + --kubernetes-version 1.20.7 \ + --aks-custom-headers WindowsContainerRuntime=containerd +``` + +The above command upgrades a node pool named *npwd* to the `containerd` runtime. + +To upgrade all existing node pools in a cluster to use the `containerd` runtime for all Windows Server node pools: + +```azurecli +az aks upgrade \ + --resource-group myResourceGroup \ + --name myAKSCluster \ + --kubernetes-version 1.20.7 \ + --aks-custom-headers WindowsContainerRuntime=containerd +``` + +The above command upgrades all Windows Server node pools in the *myAKSCluster* to use the `containerd` runtime. + +> [!NOTE] +> After upgrading all existing Windows Server node pools to use the `containerd` runtime, Docker will still be the default runtime when adding new Windows Server node pools. 
+ +## Connect to the cluster + +To manage a Kubernetes cluster, you use [kubectl][kubectl], the Kubernetes command-line client. If you use Azure Cloud Shell, `kubectl` is already installed. To install `kubectl` locally, use the [az aks install-cli][az-aks-install-cli] command: + +```azurecli +az aks install-cli +``` + +To configure `kubectl` to connect to your Kubernetes cluster, use the [az aks get-credentials][az-aks-get-credentials] command. This command downloads credentials and configures the Kubernetes CLI to use them. + +```azurecli-interactive +az aks get-credentials --resource-group myResourceGroup --name myAKSCluster +``` + +To verify the connection to your cluster, use the [kubectl get][kubectl-get] command to return a list of the cluster nodes. + +```console +kubectl get nodes -o wide +``` + +The following example output shows the all the nodes in the cluster. Make sure that the status of all nodes is *Ready*: + +```output +NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME +aks-nodepool1-12345678-vmss000000 Ready agent 34m v1.20.7 10.240.0.4 Ubuntu 18.04.5 LTS 5.4.0-1046-azure containerd://1.4.4+azure +aks-nodepool1-12345678-vmss000001 Ready agent 34m v1.20.7 10.240.0.35 Ubuntu 18.04.5 LTS 5.4.0-1046-azure containerd://1.4.4+azure +aksnpwcd123456 Ready agent 9m6s v1.20.7 10.240.0.97 Windows Server 2019 Datacenter 10.0.17763.1879 containerd://1.4.4+unknown +aksnpwin987654 Ready agent 25m v1.20.7 10.240.0.66 Windows Server 2019 Datacenter 10.0.17763.1879 docker://19.3.14 +``` + +> [!NOTE] +> The container runtime for each node pool is shown under *CONTAINER-RUNTIME*. Notice *aksnpwin987654* begins with `docker://` which means it is using Docker for the container runtime. Notice *aksnpwcd123456* begins with `containerd://` which means it is using `containerd` for the container runtime. 
+ +## Deploy the application + +A Kubernetes manifest file defines a desired state for the cluster, such as what container images to run. In this article, a manifest is used to create all objects needed to run the ASP.NET sample application in a Windows Server container. This manifest includes a [Kubernetes deployment][kubernetes-deployment] for the ASP.NET sample application and an external [Kubernetes service][kubernetes-service] to access the application from the internet. + +The ASP.NET sample application is provided as part of the [.NET Framework Samples][dotnet-samples] and runs in a Windows Server container. AKS requires Windows Server containers to be based on images of *Windows Server 2019* or greater. The Kubernetes manifest file must also define a [node selector][node-selector] to tell your AKS cluster to run your ASP.NET sample application's pod on a node that can run Windows Server containers. + +Create a file named `sample.yaml` and copy in the following YAML definition. If you use the Azure Cloud Shell, this file can be created using `vi` or `nano` as if working on a virtual or physical system: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: sample + labels: + app: sample +spec: + replicas: 1 + template: + metadata: + name: sample + labels: + app: sample + spec: + nodeSelector: + "kubernetes.io/os": windows + containers: + - name: sample + image: mcr.microsoft.com/dotnet/framework/samples:aspnetapp + resources: + limits: + cpu: 1 + memory: 800M + requests: + cpu: .1 + memory: 300M + ports: + - containerPort: 80 + selector: + matchLabels: + app: sample +--- +apiVersion: v1 +kind: Service +metadata: + name: sample +spec: + type: LoadBalancer + ports: + - protocol: TCP + port: 80 + selector: + app: sample +``` + +Deploy the application using the [kubectl apply][kubectl-apply] command and specify the name of your YAML manifest: + +```console +kubectl apply -f sample.yaml +``` + +The following example output shows the Deployment and 
Service created successfully: + +```output +deployment.apps/sample created +service/sample created +``` + +## Test the application + +When the application runs, a Kubernetes service exposes the application front end to the internet. This process can take a few minutes to complete. Occasionally the service can take longer than a few minutes to provision. Allow up to 10 minutes in these cases. + +To monitor progress, use the [kubectl get service][kubectl-get] command with the `--watch` argument. + +```console +kubectl get service sample --watch +``` + +Initially the *EXTERNAL-IP* for the *sample* service is shown as *pending*. + +```output +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +sample LoadBalancer 10.0.37.27 80:30572/TCP 6s +``` + +When the *EXTERNAL-IP* address changes from *pending* to an actual public IP address, use `CTRL-C` to stop the `kubectl` watch process. The following example output shows a valid public IP address assigned to the service: + +```output +sample LoadBalancer 10.0.37.27 52.179.23.131 80:30572/TCP 2m +``` + +To see the sample app in action, open a web browser to the external IP address of your service. + +:::image type="content" source="media/quick-windows-container-deploy-cli/asp-net-sample-app.png" alt-text="Screenshot of browsing to ASP.NET sample application."::: + +> [!Note] +> If you receive a connection timeout when trying to load the page then you should verify the sample app is ready with the following command [kubectl get pods --watch]. Sometimes the Windows container will not be started by the time your external IP address is available. + +## Delete cluster + +To avoid Azure charges, if you don't plan on going through the tutorials that follow, use the [az group delete][az-group-delete] command to remove the resource group, container service, and all related resources. 
+ +```azurecli-interactive +az group delete --name myResourceGroup --yes --no-wait +``` + +> [!NOTE] +> The AKS cluster was created with system-assigned managed identity (default identity option used in this quickstart), the identity is managed by the platform and does not require removal. + +## Next steps + +In this article, you deployed a Kubernetes cluster and deployed an ASP.NET sample application in a Windows Server container to it. + +To learn more about AKS, and walk through a complete code to deployment example, continue to the Kubernetes cluster tutorial. + +> [!div class="nextstepaction"] +> [AKS tutorial][aks-tutorial] + + +[kubectl]: https://kubernetes.io/docs/user-guide/kubectl/ +[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply +[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get +[node-selector]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +[dotnet-samples]: https://hub.docker.com/_/microsoft-dotnet-framework-samples/ +[azure-cni]: https://github.com/Azure/azure-container-networking/blob/master/docs/cni.md + + +[kubernetes-concepts]: ../concepts-clusters-workloads.md +[aks-monitor]: ../../azure-monitor/containers/container-insights-onboard.md +[aks-tutorial]: ../tutorial-kubernetes-prepare-app.md +[aks-taints]: ../use-multiple-node-pools.md#specify-a-taint-label-or-tag-for-a-node-pool +[az-aks-browse]: /cli/azure/aks#az_aks_browse +[az-aks-create]: /cli/azure/aks#az_aks_create +[az-aks-get-credentials]: /cli/azure/aks#az_aks_get_credentials +[az-aks-install-cli]: /cli/azure/aks#az_aks_install_cli +[az-extension-add]: /cli/azure/extension#az_extension_add +[az-feature-list]: /cli/azure/feature#az_feature_list +[az-feature-register]: /cli/azure/feature#az_feature_register +[az-group-create]: /cli/azure/group#az_group_create +[az-group-delete]: /cli/azure/group#az_group_delete +[az-provider-register]: /cli/azure/provider#az_provider_register 
+[azure-cli-install]: /cli/azure/install-azure-cli +[azure-cni-about]: ../concepts-network.md#azure-cni-advanced-networking +[sp-delete]: ../kubernetes-service-principal.md#additional-considerations +[azure-portal]: https://portal.azure.com +[kubernetes-deployment]: ../concepts-clusters-workloads.md#deployments-and-yaml-manifests +[kubernetes-service]: ../concepts-network.md#services +[restricted-vm-sizes]: ../quotas-skus-regions.md#restricted-vm-sizes +[use-advanced-networking]: ../configure-azure-cni.md +[aks-support-policies]: ../support-policies.md +[aks-faq]: faq.md +[az-extension-add]: /cli/azure/extension#az-extension-add +[az-extension-update]: /cli/azure/extension#az-extension-update +[windows-server-password]: /windows/security/threat-protection/security-policy-settings/password-must-meet-complexity-requirements#reference +[win-faq-change-admin-creds]: ../windows-faq.md#how-do-i-change-the-administrator-password-for-windows-server-nodes-on-my-cluster diff --git a/articles/aks/learn/quick-windows-container-deploy-powershell.md b/articles/aks/learn/quick-windows-container-deploy-powershell.md new file mode 100644 index 0000000000000..e4fed7b2564f2 --- /dev/null +++ b/articles/aks/learn/quick-windows-container-deploy-powershell.md @@ -0,0 +1,329 @@ +--- +title: Create a Windows Server container on an AKS cluster by using PowerShell +description: Learn how to quickly create a Kubernetes cluster, deploy an application in a Windows Server container in Azure Kubernetes Service (AKS) using PowerShell. +services: container-service +ms.topic: article +ms.date: 04/29/2022 +ms.custom: devx-track-azurepowershell + + +#Customer intent: As a developer or cluster operator, I want to quickly create an AKS cluster and deploy a Windows Server container so that I can see how to run applications running on a Windows Server container using the managed Kubernetes service in Azure. 
+--- + +# Create a Windows Server container on an Azure Kubernetes Service (AKS) cluster using PowerShell + +Azure Kubernetes Service (AKS) is a managed Kubernetes service that lets you quickly deploy and +manage clusters. In this article, you deploy an AKS cluster running Windows Server 2019 containers using PowerShell. You also deploy an +`ASP.NET` sample application in a Windows Server container to the cluster. + +:::image type="content" source="media/quick-windows-container-deploy-powershell/asp-net-sample-app.png" alt-text="Screenshot of browsing to ASP.NET sample application."::: + +This article assumes a basic understanding of Kubernetes concepts. For more information, see +[Kubernetes core concepts for Azure Kubernetes Service (AKS)][kubernetes-concepts]. + +## Prerequisites + +If you don't have an Azure subscription, create a [free](https://azure.microsoft.com/free/) account +before you begin. + +* The identity you are using to create your cluster has the appropriate minimum permissions. For more details on access and identity for AKS, see [Access and identity options for Azure Kubernetes Service (AKS)](../concepts-identity.md). +* If you choose to use PowerShell locally, you need to install the [Az PowerShell](/powershell/azure/new-azureps-module-az) +module and connect to your Azure account using the +[Connect-AzAccount](/powershell/module/az.accounts/Connect-AzAccount) cmdlet. For more information +about installing the Az PowerShell module, see +[Install Azure PowerShell][install-azure-powershell]. +* You also must install the [Az.Aks](/powershell/module/az.aks) PowerShell module: + + ```azurepowershell-interactive + Install-Module Az.Aks + ``` + +[!INCLUDE [cloud-shell-try-it](../../../includes/cloud-shell-try-it.md)] + +If you have multiple Azure subscriptions, choose the appropriate subscription in which the resources +should be billed. Select a specific subscription ID using the +[Set-AzContext](/powershell/module/az.accounts/set-azcontext) cmdlet. 
+ +```azurepowershell-interactive +Set-AzContext -SubscriptionId 00000000-0000-0000-0000-000000000000 +``` + +## Limitations + +The following limitations apply when you create and manage AKS clusters that support multiple node pools: + +* You can't delete the first node pool. + +The following additional limitations apply to Windows Server node pools: + +* The AKS cluster can have a maximum of 10 node pools. +* The AKS cluster can have a maximum of 100 nodes in each node pool. +* The Windows Server node pool name has a limit of 6 characters. + +## Create a resource group + +An [Azure resource group](../../azure-resource-manager/management/overview.md) +is a logical group in which Azure resources are deployed and managed. When you create a resource +group, you are asked to specify a location. This location is where resource group metadata is +stored, it is also where your resources run in Azure if you don't specify another region during +resource creation. Create a resource group using the [New-AzResourceGroup][new-azresourcegroup] +cmdlet. + +The following example creates a resource group named **myResourceGroup** in the **eastus** location. + +> [!NOTE] +> This article uses PowerShell syntax for the commands in this tutorial. If you are using Azure Cloud +> Shell, ensure that the dropdown in the upper-left of the Cloud Shell window is set to **PowerShell**. + +```azurepowershell-interactive +New-AzResourceGroup -Name myResourceGroup -Location eastus +``` + +The following example output shows the resource group created successfully: + +```plaintext +ResourceGroupName : myResourceGroup +Location : eastus +ProvisioningState : Succeeded +Tags : +ResourceId : /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup +``` + +## Create an AKS cluster + +Use the `ssh-keygen` command-line utility to generate an SSH key pair. 
For more details, see +[Quick steps: Create and use an SSH public-private key pair for Linux VMs in Azure](../../virtual-machines/linux/mac-create-ssh-keys.md). + +To run an AKS cluster that supports node pools for Windows Server containers, your cluster needs to +use a network policy that uses [Azure CNI][azure-cni-about] (advanced) network plugin. For more +detailed information to help plan out the required subnet ranges and network considerations, see +[configure Azure CNI networking][use-advanced-networking]. Use the [New-AzAksCluster][new-azakscluster] cmdlet +below to create an AKS cluster named **myAKSCluster**. The following example creates the necessary +network resources if they don't exist. + +> [!NOTE] +> To ensure your cluster operates reliably, you should run at least 2 (two) nodes in the default +> node pool. + +```azurepowershell-interactive +$Username = Read-Host -Prompt 'Please create a username for the administrator credentials on your Windows Server containers: ' +$Password = Read-Host -Prompt 'Please create a password for the administrator credentials on your Windows Server containers: ' -AsSecureString +New-AzAksCluster -ResourceGroupName myResourceGroup -Name myAKSCluster -NodeCount 2 -NetworkPlugin azure -NodeVmSetType VirtualMachineScaleSets -WindowsProfileAdminUserName $Username -WindowsProfileAdminUserPassword $Password +``` + +> [!Note] +> If you are unable to create the AKS cluster because the version is not supported in this region +> then you can use the `Get-AzAksVersion -Location eastus` command to find the supported version +> list for this region. + +After a few minutes, the command completes and returns information about the cluster. Occasionally +the cluster can take longer than a few minutes to provision. Allow up to 10 minutes in these cases. + +## Add a Windows Server node pool + +By default, an AKS cluster is created with a node pool that can run Linux containers. 
Use +`New-AzAksNodePool` cmdlet to add a node pool that can run Windows Server containers alongside the +Linux node pool. + +```azurepowershell-interactive +New-AzAksNodePool -ResourceGroupName myResourceGroup -ClusterName myAKSCluster -VmSetType VirtualMachineScaleSets -OsType Windows -Name npwin +``` + +The above command creates a new node pool named **npwin** and adds it to the **myAKSCluster**. When +creating a node pool to run Windows Server containers, the default value for **VmSize** is +**Standard_D2s_v3**. If you choose to set the **VmSize** parameter, check the list of +[restricted VM sizes][restricted-vm-sizes]. The minimum recommended size is **Standard_D2s_v3**. The +previous command also uses the default subnet in the default vnet created when running `New-AzAksCluster`. + +## Connect to the cluster + +To manage a Kubernetes cluster, you use [kubectl][kubectl], the Kubernetes command-line client. If +you use Azure Cloud Shell, `kubectl` is already installed. To install `kubectl` locally, use the +`Install-AzAksKubectl` cmdlet: + +```azurepowershell-interactive +Install-AzAksKubectl +``` + +To configure `kubectl` to connect to your Kubernetes cluster, use the +[Import-AzAksCredential][import-azakscredential] cmdlet. This command +downloads credentials and configures the Kubernetes CLI to use them. + +```azurepowershell-interactive +Import-AzAksCredential -ResourceGroupName myResourceGroup -Name myAKSCluster +``` + +To verify the connection to your cluster, use the [kubectl get][kubectl-get] command to return a +list of the cluster nodes. + +```azurepowershell-interactive +kubectl get nodes +``` + +The following example output shows all the nodes in the cluster. 
Make sure that the status of all +nodes is **Ready**: + +```plaintext +NAME STATUS ROLES AGE VERSION +aks-nodepool1-12345678-vmssfedcba Ready agent 13m v1.16.7 +aksnpwin987654 Ready agent 108s v1.16.7 +``` + +## Deploy the application + +A Kubernetes manifest file defines a desired state for the cluster, such as what container images to +run. In this article, a manifest is used to create all objects needed to run the ASP.NET sample +application in a Windows Server container. This manifest includes a +[Kubernetes deployment][kubernetes-deployment] for the ASP.NET sample application and an external +[Kubernetes service][kubernetes-service] to access the application from the internet. + +The ASP.NET sample application is provided as part of the [.NET Framework Samples][dotnet-samples] +and runs in a Windows Server container. AKS requires Windows Server containers to be based on images +of **Windows Server 2019** or greater. The Kubernetes manifest file must also define a +[node selector][node-selector] to tell your AKS cluster to run your ASP.NET sample application's pod +on a node that can run Windows Server containers. + +Create a file named `sample.yaml` and copy in the following YAML definition. 
If you use the Azure +Cloud Shell, this file can be created using `vi` or `nano` as if working on a virtual or physical +system: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: sample + labels: + app: sample +spec: + replicas: 1 + template: + metadata: + name: sample + labels: + app: sample + spec: + nodeSelector: + "kubernetes.io/os": windows + containers: + - name: sample + image: mcr.microsoft.com/dotnet/framework/samples:aspnetapp + resources: + limits: + cpu: 1 + memory: 800M + requests: + cpu: .1 + memory: 300M + ports: + - containerPort: 80 + selector: + matchLabels: + app: sample +--- +apiVersion: v1 +kind: Service +metadata: + name: sample +spec: + type: LoadBalancer + ports: + - protocol: TCP + port: 80 + selector: + app: sample +``` + +Deploy the application using the [kubectl apply][kubectl-apply] command and specify the name of your +YAML manifest: + +```azurepowershell-interactive +kubectl apply -f sample.yaml +``` + +The following example output shows the Deployment and Service created successfully: + +```plaintext +deployment.apps/sample created +service/sample created +``` + +## Test the application + +When the application runs, a Kubernetes service exposes the application frontend to the internet. +This process can take a few minutes to complete. Occasionally the service can take longer than a few +minutes to provision. Allow up to 10 minutes in these cases. + +To monitor progress, use the [kubectl get service][kubectl-get] command with the `--watch` argument. + +```azurepowershell-interactive +kubectl get service sample --watch +``` + +Initially the **EXTERNAL-IP** for the **sample** service is shown as **pending**. + +```plaintext +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +sample LoadBalancer 10.0.37.27 80:30572/TCP 6s +``` + +When the **EXTERNAL-IP** address changes from **pending** to an actual public IP address, use `CTRL-C` +to stop the `kubectl` watch process. 
The following example output shows a valid public IP address +assigned to the service: + +```plaintext +sample LoadBalancer 10.0.37.27 52.179.23.131 80:30572/TCP 2m +``` + +To see the sample app in action, open a web browser to the external IP address of your service. + +:::image type="content" source="media/quick-windows-container-deploy-powershell/asp-net-sample-app.png" alt-text="Screenshot of browsing to ASP.NET sample application."::: + +> [!Note] +> If you receive a connection timeout when trying to load the page then you should verify the sample +> app is ready with the following command `kubectl get pods --watch`. Sometimes the Windows +> container will not be started by the time your external IP address is available. + +## Delete cluster + +To avoid Azure charges, if you don't plan on going through the tutorials that follow, use the +[Remove-AzResourceGroup][remove-azresourcegroup] cmdlet to remove the resource group, container service, and all related resources. + +```azurepowershell-interactive +Remove-AzResourceGroup -Name myResourceGroup +``` + +> [!NOTE] +> The AKS cluster was created with system-assigned managed identity (default identity option used in this quickstart), the identity is managed by the platform and does not require removal. + +## Next steps + +In this article, you deployed a Kubernetes cluster and deployed an `ASP.NET` sample application in a +Windows Server container to it. + +To learn more about AKS, and walk through a complete code to deployment example, continue to the +Kubernetes cluster tutorial. 
+ +> [!div class="nextstepaction"] +> [AKS tutorial][aks-tutorial] + + +[kubectl]: https://kubernetes.io/docs/user-guide/kubectl/ +[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get +[dotnet-samples]: https://hub.docker.com/_/microsoft-dotnet-framework-samples/ +[node-selector]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply + + +[kubernetes-concepts]: ../concepts-clusters-workloads.md +[install-azure-powershell]: /powershell/azure/install-az-ps +[new-azresourcegroup]: /powershell/module/az.resources/new-azresourcegroup +[azure-cni-about]: ../concepts-network.md#azure-cni-advanced-networking +[use-advanced-networking]: ../configure-azure-cni.md +[new-azakscluster]: /powershell/module/az.aks/new-azakscluster +[restricted-vm-sizes]: ../quotas-skus-regions.md#restricted-vm-sizes +[import-azakscredential]: /powershell/module/az.aks/import-azakscredential +[kubernetes-deployment]: ../concepts-clusters-workloads.md#deployments-and-yaml-manifests +[kubernetes-service]: ../concepts-network.md#services +[remove-azresourcegroup]: /powershell/module/az.resources/remove-azresourcegroup +[sp-delete]: ../kubernetes-service-principal.md#additional-considerations +[aks-tutorial]: ../tutorial-kubernetes-prepare-app.md diff --git a/articles/aks/load-balancer-standard.md b/articles/aks/load-balancer-standard.md index 3570e9fc1bbbd..d05184f253085 100644 --- a/articles/aks/load-balancer-standard.md +++ b/articles/aks/load-balancer-standard.md @@ -30,8 +30,7 @@ Azure Load Balancer is available in two SKUs - *Basic* and *Standard*. By defaul For more information on the *Basic* and *Standard* SKUs, see [Azure load balancer SKU comparison][azure-lb-comparison]. 
-This article assumes you have an AKS cluster with the *Standard* SKU Azure Load Balancer and walks through how to use and configure some of the capabilities and features of the load balancer. -If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes you have an AKS cluster with the *Standard* SKU Azure Load Balancer and walks through how to use and configure some of the capabilities and features of the load balancer. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. > [!IMPORTANT] > If you prefer not to leverage the Azure Load Balancer to provide outbound connection and instead have your own gateway, firewall or proxy for that purpose you can skip the creation of the load balancer outbound pool and respective frontend IP by using [**Outbound type as UserDefinedRouting (UDR)**](egress-outboundtype.md). The Outbound type defines the egress method for a cluster and it defaults to type: load balancer. @@ -247,7 +246,6 @@ az aks update \ When SNAT port resources are exhausted, outbound flows fail until existing flows release SNAT ports. Load Balancer reclaims SNAT ports when the flow closes and the AKS-configured load balancer uses a 30-minute idle timeout for reclaiming SNAT ports from idle flows. You can also use transport (for example, **`TCP keepalives`**) or **`application-layer keepalives`** to refresh an idle flow and reset this idle timeout if necessary. You can configure this timeout following the below example: - ```azurecli-interactive az aks update \ --resource-group myResourceGroup \ @@ -368,7 +366,6 @@ The following limitations apply when you create and manage AKS clusters that sup * You can only use one type of load balancer SKU (Basic or Standard) in a single cluster. 
* *Standard* SKU Load Balancers only support *Standard* SKU IP Addresses. - ## Next steps Learn more about Kubernetes services at the [Kubernetes services documentation][kubernetes-services]. @@ -387,8 +384,9 @@ Learn more about using Internal Load Balancer for Inbound traffic at the [AKS In [advanced-networking]: configure-azure-cni.md [aks-support-policies]: support-policies.md [aks-faq]: faq.md -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [aks-sp]: kubernetes-service-principal.md#delegate-access-to-other-azure-resources [az-aks-show]: /cli/azure/aks#az_aks_show [az-aks-create]: /cli/azure/aks#az_aks_create diff --git a/articles/aks/media/kubernetes-walkthrough-portal/aks-cloud-shell.png b/articles/aks/media/kubernetes-walkthrough-portal/aks-cloud-shell.png deleted file mode 100644 index 757e4f1b387a4..0000000000000 Binary files a/articles/aks/media/kubernetes-walkthrough-portal/aks-cloud-shell.png and /dev/null differ diff --git a/articles/aks/media/kubernetes-walkthrough-portal/aks-portal-dashboard.png b/articles/aks/media/kubernetes-walkthrough-portal/aks-portal-dashboard.png deleted file mode 100644 index efadcc79a95d3..0000000000000 Binary files a/articles/aks/media/kubernetes-walkthrough-portal/aks-portal-dashboard.png and /dev/null differ diff --git a/articles/aks/media/kubernetes-walkthrough-portal/create-cluster-basics.png b/articles/aks/media/kubernetes-walkthrough-portal/create-cluster-basics.png deleted file mode 100644 index 723153ac50fa8..0000000000000 Binary files a/articles/aks/media/kubernetes-walkthrough-portal/create-cluster-basics.png and /dev/null differ diff --git a/articles/aks/media/kubernetes-walkthrough-portal/monitor-container-logs.png 
b/articles/aks/media/kubernetes-walkthrough-portal/monitor-container-logs.png deleted file mode 100644 index 98ac72f315c05..0000000000000 Binary files a/articles/aks/media/kubernetes-walkthrough-portal/monitor-container-logs.png and /dev/null differ diff --git a/articles/aks/media/kubernetes-walkthrough-portal/monitor-containers.png b/articles/aks/media/kubernetes-walkthrough-portal/monitor-containers.png deleted file mode 100644 index 3f909a0c3835f..0000000000000 Binary files a/articles/aks/media/kubernetes-walkthrough-portal/monitor-containers.png and /dev/null differ diff --git a/articles/aks/node-access.md b/articles/aks/node-access.md index 2cb87e90eb684..220e48efc28dc 100644 --- a/articles/aks/node-access.md +++ b/articles/aks/node-access.md @@ -18,7 +18,7 @@ This article shows you how to create a connection to an AKS node. ## Before you begin -This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. This article also assumes you have an SSH key. You can create an SSH key using [macOS or Linux][ssh-nix] or [Windows][ssh-windows]. If you use PuTTY Gen to create the key pair, save the key pair in an OpenSSH format rather than the default PuTTy private key format (.ppk file). @@ -145,12 +145,12 @@ kubectl delete pod node-debugger-aks-nodepool1-12345678-vmss000000-bkmmx If you need more troubleshooting data, you can [view the kubelet logs][view-kubelet-logs] or [view the Kubernetes master node logs][view-master-logs]. 
- [view-kubelet-logs]: kubelet-logs.md [view-master-logs]: monitor-aks-reference.md#resource-logs -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [install-azure-cli]: /cli/azure/install-azure-cli [aks-windows-rdp]: rdp.md [ssh-nix]: ../virtual-machines/linux/mac-create-ssh-keys.md diff --git a/articles/aks/node-auto-repair.md b/articles/aks/node-auto-repair.md index 8a0b867a775b2..c0d47f242d35a 100644 --- a/articles/aks/node-auto-repair.md +++ b/articles/aks/node-auto-repair.md @@ -43,7 +43,7 @@ If AKS finds multiple unhealthy nodes during a health check, each node is repair ## Node Autodrain -[Scheduled Events][scheduled-events] can occur on the underlying virtual machines (VMs) in any of your node pools. For [spot node pools][spot-node-pools], scheduled events may cause a *preempt* node event for the node. Certain node events, such as *preempt*, cause AKS node autodrain to attempt a cordon and drain of the affected node, which allows for a graceful reschedule of any affected workloads on that node. +[Scheduled Events][scheduled-events] can occur on the underlying virtual machines (VMs) in any of your node pools. For [spot node pools][spot-node-pools], scheduled events may cause a *preempt* node event for the node. Certain node events, such as *preempt*, cause AKS node autodrain to attempt a cordon and drain of the affected node, which allows for a graceful reschedule of any affected workloads on that node. When this happens, you might notice the node to receive a taint with *"remediator.aks.microsoft.com/unschedulable"*, because of *"kubernetes.azure.com/scalesetpriority: spot"*. The following table shows the node events, and the actions they cause for AKS node autodrain. 
diff --git a/articles/aks/node-pool-snapshot.md b/articles/aks/node-pool-snapshot.md index 584ef81c9c3e4..6bda9d41cf3f1 100644 --- a/articles/aks/node-pool-snapshot.md +++ b/articles/aks/node-pool-snapshot.md @@ -19,7 +19,7 @@ The snapshot is an Azure resource that will contain the configuration informatio ## Before you begin -This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. ### Limitations @@ -100,8 +100,9 @@ az aks create --name myAKSCluster2 --resource-group myResourceGroup --snapshot-i - Learn more about multiple node pools and how to upgrade node pools with [Create and manage multiple node pools][use-multiple-node-pools]. -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [supported-versions]: supported-kubernetes-versions.md [upgrade-cluster]: upgrade-cluster.md [node-image-upgrade]: node-image-upgrade.md diff --git a/articles/aks/node-updates-kured.md b/articles/aks/node-updates-kured.md index 81d78d6e86d00..7059f35bd5774 100644 --- a/articles/aks/node-updates-kured.md +++ b/articles/aks/node-updates-kured.md @@ -23,9 +23,9 @@ This article shows you how to use the open-source [kured (KUbernetes REboot Daem ## Before you begin -This article assumes that you have an existing AKS cluster. 
If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. -You also need the Azure CLI version 2.0.59 or later installed and configured. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. +You also need the Azure CLI version 2.0.59 or later installed and configured. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. ## Understand the AKS node update experience @@ -115,8 +115,9 @@ For AKS clusters that use Windows Server nodes, see [Upgrade a node pool in AKS] [kubectl-get-nodes]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [install-azure-cli]: /cli/azure/install-azure-cli [DaemonSet]: concepts-clusters-workloads.md#statefulsets-and-daemonsets [aks-ssh]: ssh.md diff --git a/articles/aks/node-upgrade-github-actions.md b/articles/aks/node-upgrade-github-actions.md index 132daa763d788..f9d7c346cb474 100644 --- a/articles/aks/node-upgrade-github-actions.md +++ b/articles/aks/node-upgrade-github-actions.md @@ -29,7 +29,7 @@ This article shows you how you can automate the update process of AKS nodes. You ## Before you begin -This article assumes that you have an existing AKS cluster. 
If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. You also need the Azure CLI version 2.0.59 or later installed and configured. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. @@ -211,8 +211,9 @@ jobs: [cron-syntax]: https://pubs.opengroup.org/onlinepubs/9699919799/utilities/crontab.html#tag_20_25_07 -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [install-azure-cli]: /cli/azure/install-azure-cli [managed-node-upgrades-article]: node-image-upgrade.md [cluster-upgrades-article]: upgrade-cluster.md diff --git a/articles/aks/planned-maintenance.md b/articles/aks/planned-maintenance.md index dcc663062ea92..ac7d8da169c76 100644 --- a/articles/aks/planned-maintenance.md +++ b/articles/aks/planned-maintenance.md @@ -16,7 +16,7 @@ Your AKS cluster has regular maintenance performed on it automatically. By defau ## Before you begin -This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. 
[!INCLUDE [preview features callout](./includes/preview/preview-callout.md)] @@ -217,10 +217,10 @@ Planned Maintenance will detect if you are using Cluster Auto-Upgrade and schedu - To get started with upgrading your AKS cluster, see [Upgrade an AKS cluster][aks-upgrade] - -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [aks-support-policies]: support-policies.md [aks-faq]: faq.md [az-extension-add]: /cli/azure/extension#az_extension_add diff --git a/articles/aks/rdp.md b/articles/aks/rdp.md index 192bc9edc2433..9d8cec7708faf 100644 --- a/articles/aks/rdp.md +++ b/articles/aks/rdp.md @@ -18,7 +18,7 @@ This article shows you how to create an RDP connection with an AKS node using th ## Before you begin -This article assumes that you have an existing AKS cluster with a Windows Server node. If you need an AKS cluster, see the article on [creating an AKS cluster with a Windows container using the Azure CLI][aks-windows-cli]. You need the Windows administrator username and password for the Windows Server node you want to troubleshoot. You also need an RDP client such as [Microsoft Remote Desktop][rdp-mac]. +This article assumes that you have an existing AKS cluster with a Windows Server node. If you need an AKS cluster, see the article on [creating an AKS cluster with a Windows container using the Azure CLI][aks-quickstart-windows-cli]. You need the Windows administrator username and password for the Windows Server node you want to troubleshoot. You also need an RDP client such as [Microsoft Remote Desktop][rdp-mac]. If you need to reset the password you can use `az aks update` to change the password. 
@@ -164,7 +164,7 @@ If you need additional troubleshooting data, you can [view the Kubernetes master [rdp-mac]: https://aka.ms/rdmac -[aks-windows-cli]: windows-container-cli.md +[aks-quickstart-windows-cli]: ./learn/quick-windows-container-deploy-cli.md [az-aks-install-cli]: /cli/azure/aks#az_aks_install_cli [az-aks-get-credentials]: /cli/azure/aks#az_aks_get_credentials [az-vm-delete]: /cli/azure/vm#az_vm_delete diff --git a/articles/aks/scale-down-mode.md b/articles/aks/scale-down-mode.md index ccf1324f933d1..c510bcd6f48ae 100644 --- a/articles/aks/scale-down-mode.md +++ b/articles/aks/scale-down-mode.md @@ -20,7 +20,7 @@ When an Azure VM is in the `Stopped` (deallocated) state, you will not be charge > [!WARNING] > In order to preserve any deallocated VMs, you must set Scale-down Mode to Deallocate. That includes VMs that have been deallocated using IaaS APIs (Virtual Machine Scale Set APIs). Setting Scale-down Mode to Delete will remove any deallocate VMs. -This article assumes that you have an existing AKS cluster and the latest version of the Azure CLI installed. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. 
### Limitations @@ -72,8 +72,9 @@ az aks nodepool add --enable-cluster-autoscaler --min-count 1 --max-count 10 --m - To learn more about the cluster autoscaler, see [Automatically scale a cluster to meet application demands on AKS][cluster-autoscaler] -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [aks-support-policies]: support-policies.md [aks-faq]: faq.md [az-extension-add]: /cli/azure/extension#az_extension_add diff --git a/articles/aks/start-stop-cluster.md b/articles/aks/start-stop-cluster.md index c049b6f5b4f68..d759901888e92 100644 --- a/articles/aks/start-stop-cluster.md +++ b/articles/aks/start-stop-cluster.md @@ -15,7 +15,7 @@ To optimize your costs further during these periods, you can completely turn off ## Before you begin -This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][kubernetes-walkthrough-powershell], or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. 
### Limitations @@ -148,8 +148,9 @@ If the `ProvisioningState` shows `Starting` that means your cluster hasn't fully -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [install-azure-cli]: /cli/azure/install-azure-cli [az-extension-add]: /cli/azure/extension#az_extension_add [az-extension-update]: /cli/azure/extension#az_extension_update diff --git a/articles/aks/start-stop-nodepools.md b/articles/aks/start-stop-nodepools.md index 2e2ce8f80075b..ebc3ed84db400 100644 --- a/articles/aks/start-stop-nodepools.md +++ b/articles/aks/start-stop-nodepools.md @@ -14,7 +14,7 @@ Your AKS workloads may not need to run continuously, for example a development c ## Before you begin -This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][kubernetes-walkthrough-powershell], or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. 
### Install aks-preview CLI extension @@ -126,8 +126,9 @@ You can verify your node pool has started using [az aks show][az-aks-show] and c -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [install-azure-cli]: /cli/azure/install-azure-cli [az-extension-add]: /cli/azure/extension#az_extension_add [az-extension-update]: /cli/azure/extension#az_extension_update diff --git a/articles/aks/static-ip.md b/articles/aks/static-ip.md index afdf5bd15ce7d..c0d4191a66652 100644 --- a/articles/aks/static-ip.md +++ b/articles/aks/static-ip.md @@ -18,9 +18,9 @@ This article shows you how to create a static public IP address and assign it to ## Before you begin -This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. -You also need the Azure CLI version 2.0.59 or later installed and configured. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. +You also need the Azure CLI version 2.0.59 or later installed and configured. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. This article covers using a *Standard* SKU IP with a *Standard* SKU load balancer. For more information, see [IP address types and allocation methods in Azure][ip-sku]. 
@@ -169,7 +169,8 @@ For additional control over the network traffic to your applications, you may wa [az-aks-show]: /cli/azure/aks#az_aks_show [aks-ingress-basic]: ingress-basic.md [aks-static-ingress]: ingress-static-ip.md -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [install-azure-cli]: /cli/azure/install-azure-cli [ip-sku]: ../virtual-network/ip-services/public-ip-addresses.md#sku \ No newline at end of file diff --git a/articles/aks/use-azure-policy.md b/articles/aks/use-azure-policy.md index f9898514d75f6..7c19d3812fb7b 100644 --- a/articles/aks/use-azure-policy.md +++ b/articles/aks/use-azure-policy.md @@ -15,7 +15,7 @@ This article shows you how to apply policy definitions to your cluster and verif ## Prerequisites -- An existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +- This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. - The Azure Policy Add-on for AKS installed on an AKS cluster. Follow these [steps to install the Azure Policy Add-on][azure-policy-addon]. 
## Assign a built-in policy definition or initiative @@ -182,8 +182,9 @@ For more information about how Azure Policy works: [aks-policies]: policy-reference.md -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [azure-policy]: ../governance/policy/overview.md [azure-policy-addon]: ../governance/policy/concepts/policy-for-kubernetes.md#install-azure-policy-add-on-for-aks [azure-policy-addon-remove]: ../governance/policy/concepts/policy-for-kubernetes.md#remove-the-add-on-from-aks diff --git a/articles/aks/use-multiple-node-pools.md b/articles/aks/use-multiple-node-pools.md index 68304130b5c5a..03cf834e2a0a9 100644 --- a/articles/aks/use-multiple-node-pools.md +++ b/articles/aks/use-multiple-node-pools.md @@ -822,7 +822,7 @@ Learn more about [system node pools][use-system-pool]. In this article, you learned how to create and manage multiple node pools in an AKS cluster. For more information about how to control pods across node pools, see [Best practices for advanced scheduler features in AKS][operator-best-practices-advanced-scheduler]. -To create and use Windows Server container node pools, see [Create a Windows Server container in AKS][aks-windows]. +To create and use Windows Server container node pools, see [Create a Windows Server container in AKS][aks-quickstart-windows-cli]. Use [proximity placement groups][reduce-latency-ppg] to reduce latency for your AKS applications. 
@@ -836,7 +836,7 @@ Use [proximity placement groups][reduce-latency-ppg] to reduce latency for your [capacity-reservation-groups]:/azure/virtual-machines/capacity-reservation-associate-virtual-machine-scale-set -[aks-windows]: windows-container-cli.md +[aks-quickstart-windows-cli]: ./learn/quick-windows-container-deploy-cli.md [az-aks-get-credentials]: /cli/azure/aks#az_aks_get_credentials [az-aks-create]: /cli/azure/aks#az_aks_create [az-aks-get-upgrades]: /cli/azure/aks#az_aks_get_upgrades diff --git a/articles/aks/use-pod-security-policies.md b/articles/aks/use-pod-security-policies.md index 3b70baffbd7de..46d6d426b546e 100644 --- a/articles/aks/use-pod-security-policies.md +++ b/articles/aks/use-pod-security-policies.md @@ -19,9 +19,9 @@ To improve the security of your AKS cluster, you can limit what pods can be sche ## Before you begin -This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli] or [using the Azure portal][aks-quickstart-portal]. +This article assumes that you have an existing AKS cluster. If you need an AKS cluster, see the AKS quickstart [using the Azure CLI][aks-quickstart-cli], [using Azure PowerShell][aks-quickstart-powershell], or [using the Azure portal][aks-quickstart-portal]. -You need the Azure CLI version 2.0.61 or later installed and configured. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. +You need the Azure CLI version 2.0.61 or later installed and configured. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. 
### Install aks-preview CLI extension @@ -449,8 +449,9 @@ For more information about limiting pod network traffic, see [Secure traffic bet [terms-of-use]: https://azure.microsoft.com/support/legal/preview-supplemental-terms/ [kubernetes-policy-reference]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#policy-reference -[aks-quickstart-cli]: kubernetes-walkthrough.md -[aks-quickstart-portal]: kubernetes-walkthrough-portal.md +[aks-quickstart-cli]: ./learn/quick-kubernetes-deploy-cli.md +[aks-quickstart-portal]: ./learn/quick-kubernetes-deploy-portal.md +[aks-quickstart-powershell]: ./learn/quick-kubernetes-deploy-powershell.md [install-azure-cli]: /cli/azure/install-azure-cli [network-policies]: use-network-policies.md [az-feature-register]: /cli/azure/feature#az_feature_register diff --git a/articles/aks/use-tags.md b/articles/aks/use-tags.md index a572d7600f7d1..34d05c2009402 100644 --- a/articles/aks/use-tags.md +++ b/articles/aks/use-tags.md @@ -174,16 +174,16 @@ $ az aks show -g myResourceGroup -n myAKSCluster --query 'agentPoolProfiles[].{n You can apply Azure tags to public IPs, disks, and files by using a Kubernetes manifest. -For public IPs, use *service.beta.kubernetes.io/azure-pip-tags*. For example: +For public IPs, use *service.beta.kubernetes.io/azure-pip-tags* under *annotations*. For example: ```yml apiVersion: v1 kind: Service -... +metadata: + annotations: + service.beta.kubernetes.io/azure-pip-tags: costcenter=3333,team=beta spec: ... - service.beta.kubernetes.io/azure-pip-tags: costcenter=3333,team=beta - ... ``` For files and disks, use *tags* under *parameters*. 
For example: diff --git a/articles/aks/windows-container-cli.md b/articles/aks/windows-container-cli.md deleted file mode 100644 index 7be2723f367e2..0000000000000 --- a/articles/aks/windows-container-cli.md +++ /dev/null @@ -1,371 +0,0 @@ ---- -title: Create a Windows Server container on an AKS cluster by using Azure CLI -description: Learn how to quickly create a Kubernetes cluster, deploy an application in a Windows Server container in Azure Kubernetes Service (AKS) using the Azure CLI. -services: container-service -ms.topic: article -ms.date: 08/06/2021 - - -#Customer intent: As a developer or cluster operator, I want to quickly create an AKS cluster and deploy a Windows Server container so that I can see how to run applications running on a Windows Server container using the managed Kubernetes service in Azure. ---- - -# Create a Windows Server container on an Azure Kubernetes Service (AKS) cluster using the Azure CLI - -Azure Kubernetes Service (AKS) is a managed Kubernetes service that lets you quickly deploy and manage clusters. In this article, you deploy an AKS cluster using the Azure CLI. You also deploy an ASP.NET sample application in a Windows Server container to the cluster. - -![Image of browsing to ASP.NET sample application](media/windows-container/asp-net-sample-app.png) - -This article assumes a basic understanding of Kubernetes concepts. For more information, see [Kubernetes core concepts for Azure Kubernetes Service (AKS)][kubernetes-concepts]. - -[!INCLUDE [quickstarts-free-trial-note](../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../includes/azure-cli-prepare-your-environment.md)] - -### Limitations - -The following limitations apply when you create and manage AKS clusters that support multiple node pools: - -* You can't delete the first node pool. - -The following additional limitations apply to Windows Server node pools: - -* The AKS cluster can have a maximum of 10 node pools. 
-* The AKS cluster can have a maximum of 100 nodes in each node pool. -* The Windows Server node pool name has a limit of 6 characters. - -## Create a resource group - -An Azure resource group is a logical group in which Azure resources are deployed and managed. When you create a resource group, you are asked to specify a location. This location is where resource group metadata is stored, it is also where your resources run in Azure if you don't specify another region during resource creation. Create a resource group using the [az group create][az-group-create] command. - -The following example creates a resource group named *myResourceGroup* in the *eastus* location. - -> [!NOTE] -> This article uses Bash syntax for the commands in this tutorial. -> If you are using Azure Cloud Shell, ensure that the dropdown in the upper-left of the Cloud Shell window is set to **Bash**. - -```azurecli-interactive -az group create --name myResourceGroup --location eastus -``` - -The following example output shows the resource group created successfully: - -```json -{ - "id": "/subscriptions//resourceGroups/myResourceGroup", - "location": "eastus", - "managedBy": null, - "name": "myResourceGroup", - "properties": { - "provisioningState": "Succeeded" - }, - "tags": null, - "type": null -} -``` - -## Create an AKS cluster - -To run an AKS cluster that supports node pools for Windows Server containers, your cluster needs to use a network policy that uses [Azure CNI][azure-cni-about] (advanced) network plugin. For more detailed information to help plan out the required subnet ranges and network considerations, see [configure Azure CNI networking][use-advanced-networking]. Use the [az aks create][az-aks-create] command to create an AKS cluster named *myAKSCluster*. This command will create the necessary network resources if they don't exist. - -* The cluster is configured with two nodes. 
-* The `--windows-admin-password` and `--windows-admin-username` parameters set the administrator credentials for any Windows Server nodes on the cluster and must meet [Windows Server password requirements][windows-server-password]. If you don't specify the *windows-admin-password* parameter, you will be prompted to provide a value. -* The node pool uses `VirtualMachineScaleSets`. - -> [!NOTE] -> To ensure your cluster to operate reliably, you should run at least 2 (two) nodes in the default node pool. - -Create a username to use as administrator credentials for the Windows Server nodes on your cluster. The following commands prompt you for a username and set it WINDOWS_USERNAME for use in a later command (remember that the commands in this article are entered into a BASH shell). - -```azurecli-interactive -echo "Please enter the username to use as administrator credentials for Windows Server nodes on your cluster: " && read WINDOWS_USERNAME -``` - -Create your cluster ensuring you specify `--windows-admin-username` parameter. The following example command creates a cluster using the value from *WINDOWS_USERNAME* you set in the previous command. Alternatively you can provide a different username directly in the parameter instead of using *WINDOWS_USERNAME*. The following command will also prompt you to create a password for the administrator credentials for the Windows Server nodes on your cluster. Alternatively, you can use the *windows-admin-password* parameter and specify your own value there. 
- -```azurecli-interactive -az aks create \ - --resource-group myResourceGroup \ - --name myAKSCluster \ - --node-count 2 \ - --enable-addons monitoring \ - --generate-ssh-keys \ - --windows-admin-username $WINDOWS_USERNAME \ - --vm-set-type VirtualMachineScaleSets \ - --kubernetes-version 1.20.7 \ - --network-plugin azure -``` - -> [!NOTE] -> If you get a password validation error, verify the password you set meets the [Windows Server password requirements][windows-server-password]. If your password meets the requirements, try creating your resource group in another region. Then try creating the cluster with the new resource group. -> -> If you do not specify an administrator username and password when setting `--vm-set-type VirtualMachineScaleSets` and `--network-plugin azure`, the username is set to *azureuser* and the password is set to a random value. -> -> The administrator username can't be changed, but you can change the administrator password your AKS cluster uses for Windows Server nodes using `az aks update`. For more details, see [Windows Server node pools FAQ][win-faq-change-admin-creds]. - -After a few minutes, the command completes and returns JSON-formatted information about the cluster. Occasionally the cluster can take longer than a few minutes to provision. Allow up to 10 minutes in these cases. - -## Add a Windows Server node pool - -By default, an AKS cluster is created with a node pool that can run Linux containers. Use `az aks nodepool add` command to add an additional node pool that can run Windows Server containers alongside the Linux node pool. - -```azurecli -az aks nodepool add \ - --resource-group myResourceGroup \ - --cluster-name myAKSCluster \ - --os-type Windows \ - --name npwin \ - --node-count 1 -``` - -The above command creates a new node pool named *npwin* and adds it to the *myAKSCluster*. The above command also uses the default subnet in the default vnet created when running `az aks create`. 
- -## Optional: Using `containerd` with Windows Server node pools - -Beginning in Kubernetes version 1.20 and greater, you can specify `containerd` as the container runtime for Windows Server 2019 node pools. From Kubernetes 1.23, containerd will be the default container runtime for Windows. - - -> [!IMPORTANT] -> When using `containerd` with Windows Server 2019 node pools: -> - Both the control plane and Windows Server 2019 node pools must use Kubernetes version 1.20 or greater. -> - When creating or updating a node pool to run Windows Server containers, the default value for *node-vm-size* is *Standard_D2s_v3* which was minimum recommended size for Windows Server 2019 node pools prior to Kubernetes 1.20. The minimum recommended size for Windows Server 2019 node pools using `containerd` is *Standard_D4s_v3*. When setting the *node-vm-size* parameter, please check the list of [restricted VM sizes][restricted-vm-sizes]. -> - It is highly recommended that you use [taints or labels][aks-taints] with your Windows Server 2019 node pools running `containerd` and tolerations or node selectors with your deployments to guarantee your workloads are scheduled correctly. - - -### Add a Windows Server node pool with `containerd` - -Use the `az aks nodepool add` command to add an additional node pool that can run Windows Server containers with the `containerd` runtime. - -> [!NOTE] -> If you do not specify the *WindowsContainerRuntime=containerd* custom header, the node pool will use Docker as the container runtime. - -```azurecli -az aks nodepool add \ - --resource-group myResourceGroup \ - --cluster-name myAKSCluster \ - --os-type Windows \ - --name npwcd \ - --node-vm-size Standard_D4s_v3 \ - --kubernetes-version 1.20.5 \ - --aks-custom-headers WindowsContainerRuntime=containerd \ - --node-count 1 -``` - -The above command creates a new Windows Server node pool using `containerd` as the runtime named *npwcd* and adds it to the *myAKSCluster*. 
The above command also uses the default subnet in the default vnet created when running `az aks create`. - -### Upgrade an existing Windows Server node pool to `containerd` - -Use the `az aks nodepool upgrade` command to upgrade a specific node pool from Docker to `containerd`. - -```azurecli -az aks nodepool upgrade \ - --resource-group myResourceGroup \ - --cluster-name myAKSCluster \ - --name npwd \ - --kubernetes-version 1.20.7 \ - --aks-custom-headers WindowsContainerRuntime=containerd -``` - -The above command upgrades a node pool named *npwd* to the `containerd` runtime. - -To upgrade all existing node pools in a cluster to use the `containerd` runtime for all Windows Server node pools: - -```azurecli -az aks upgrade \ - --resource-group myResourceGroup \ - --name myAKSCluster \ - --kubernetes-version 1.20.7 \ - --aks-custom-headers WindowsContainerRuntime=containerd -``` - -The above command upgrades all Windows Server node pools in the *myAKSCluster* to use the `containerd` runtime. - -> [!NOTE] -> After upgrading all existing Windows Server node pools to use the `containerd` runtime, Docker will still be the default runtime when adding new Windows Server node pools. - -## Connect to the cluster - -To manage a Kubernetes cluster, you use [kubectl][kubectl], the Kubernetes command-line client. If you use Azure Cloud Shell, `kubectl` is already installed. To install `kubectl` locally, use the [az aks install-cli][az-aks-install-cli] command: - -```azurecli -az aks install-cli -``` - -To configure `kubectl` to connect to your Kubernetes cluster, use the [az aks get-credentials][az-aks-get-credentials] command. This command downloads credentials and configures the Kubernetes CLI to use them. - -```azurecli-interactive -az aks get-credentials --resource-group myResourceGroup --name myAKSCluster -``` - -To verify the connection to your cluster, use the [kubectl get][kubectl-get] command to return a list of the cluster nodes. 
- -```console -kubectl get nodes -o wide -``` - -The following example output shows the all the nodes in the cluster. Make sure that the status of all nodes is *Ready*: - -```output -NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME -aks-nodepool1-12345678-vmss000000 Ready agent 34m v1.20.7 10.240.0.4 Ubuntu 18.04.5 LTS 5.4.0-1046-azure containerd://1.4.4+azure -aks-nodepool1-12345678-vmss000001 Ready agent 34m v1.20.7 10.240.0.35 Ubuntu 18.04.5 LTS 5.4.0-1046-azure containerd://1.4.4+azure -aksnpwcd123456 Ready agent 9m6s v1.20.7 10.240.0.97 Windows Server 2019 Datacenter 10.0.17763.1879 containerd://1.4.4+unknown -aksnpwin987654 Ready agent 25m v1.20.7 10.240.0.66 Windows Server 2019 Datacenter 10.0.17763.1879 docker://19.3.14 -``` - -> [!NOTE] -> The container runtime for each node pool is shown under *CONTAINER-RUNTIME*. Notice *aksnpwin987654* begins with `docker://` which means it is using Docker for the container runtime. Notice *aksnpwcd123456* begins with `containerd://` which means it is using `containerd` for the container runtime. - -## Run the application - -A Kubernetes manifest file defines a desired state for the cluster, such as what container images to run. In this article, a manifest is used to create all objects needed to run the ASP.NET sample application in a Windows Server container. This manifest includes a [Kubernetes deployment][kubernetes-deployment] for the ASP.NET sample application and an external [Kubernetes service][kubernetes-service] to access the application from the internet. - -The ASP.NET sample application is provided as part of the [.NET Framework Samples][dotnet-samples] and runs in a Windows Server container. AKS requires Windows Server containers to be based on images of *Windows Server 2019* or greater. 
The Kubernetes manifest file must also define a [node selector][node-selector] to tell your AKS cluster to run your ASP.NET sample application's pod on a node that can run Windows Server containers. - -Create a file named `sample.yaml` and copy in the following YAML definition. If you use the Azure Cloud Shell, this file can be created using `vi` or `nano` as if working on a virtual or physical system: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: sample - labels: - app: sample -spec: - replicas: 1 - template: - metadata: - name: sample - labels: - app: sample - spec: - nodeSelector: - "kubernetes.io/os": windows - containers: - - name: sample - image: mcr.microsoft.com/dotnet/framework/samples:aspnetapp - resources: - limits: - cpu: 1 - memory: 800M - requests: - cpu: .1 - memory: 300M - ports: - - containerPort: 80 - selector: - matchLabels: - app: sample ---- -apiVersion: v1 -kind: Service -metadata: - name: sample -spec: - type: LoadBalancer - ports: - - protocol: TCP - port: 80 - selector: - app: sample -``` - -Deploy the application using the [kubectl apply][kubectl-apply] command and specify the name of your YAML manifest: - -```console -kubectl apply -f sample.yaml -``` - -The following example output shows the Deployment and Service created successfully: - -```output -deployment.apps/sample created -service/sample created -``` - -## Test the application - -When the application runs, a Kubernetes service exposes the application front end to the internet. This process can take a few minutes to complete. Occasionally the service can take longer than a few minutes to provision. Allow up to 10 minutes in these cases. - -To monitor progress, use the [kubectl get service][kubectl-get] command with the `--watch` argument. - -```console -kubectl get service sample --watch -``` - -Initially the *EXTERNAL-IP* for the *sample* service is shown as *pending*. 
- -```output -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -sample LoadBalancer 10.0.37.27 80:30572/TCP 6s -``` - -When the *EXTERNAL-IP* address changes from *pending* to an actual public IP address, use `CTRL-C` to stop the `kubectl` watch process. The following example output shows a valid public IP address assigned to the service: - -```output -sample LoadBalancer 10.0.37.27 52.179.23.131 80:30572/TCP 2m -``` - -To see the sample app in action, open a web browser to the external IP address of your service. - -![Image of browsing to ASP.NET sample application](media/windows-container/asp-net-sample-app.png) - -> [!Note] -> If you receive a connection timeout when trying to load the page then you should verify the sample app is ready with the following command [kubectl get pods --watch]. Sometimes the Windows container will not be started by the time your external IP address is available. - -## Delete cluster - -When the cluster is no longer needed, use the [az group delete][az-group-delete] command to remove the resource group, container service, and all related resources. - -```azurecli-interactive -az group delete --name myResourceGroup --yes --no-wait -``` - -> [!NOTE] -> When you delete the cluster, the Azure Active Directory service principal used by the AKS cluster is not removed. For steps on how to remove the service principal, see [AKS service principal considerations and deletion][sp-delete]. If you used a managed identity, the identity is managed by the platform and does not require removal. - -## Next steps - -In this article, you deployed a Kubernetes cluster and deployed an ASP.NET sample application in a Windows Server container to it. - -To learn more about AKS, and walk through a complete code to deployment example, continue to the Kubernetes cluster tutorial. 
- -> [!div class="nextstepaction"] -> [AKS tutorial][aks-tutorial] - - -[kubectl]: https://kubernetes.io/docs/user-guide/kubectl/ -[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply -[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get -[node-selector]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ -[dotnet-samples]: https://hub.docker.com/_/microsoft-dotnet-framework-samples/ -[azure-cni]: https://github.com/Azure/azure-container-networking/blob/master/docs/cni.md - - -[kubernetes-concepts]: concepts-clusters-workloads.md -[aks-monitor]: ../azure-monitor/containers/container-insights-onboard.md -[aks-tutorial]: ./tutorial-kubernetes-prepare-app.md -[aks-taints]: use-multiple-node-pools.md#specify-a-taint-label-or-tag-for-a-node-pool -[az-aks-browse]: /cli/azure/aks#az_aks_browse -[az-aks-create]: /cli/azure/aks#az_aks_create -[az-aks-get-credentials]: /cli/azure/aks#az_aks_get_credentials -[az-aks-install-cli]: /cli/azure/aks#az_aks_install_cli -[az-extension-add]: /cli/azure/extension#az_extension_add -[az-feature-list]: /cli/azure/feature#az_feature_list -[az-feature-register]: /cli/azure/feature#az_feature_register -[az-group-create]: /cli/azure/group#az_group_create -[az-group-delete]: /cli/azure/group#az_group_delete -[az-provider-register]: /cli/azure/provider#az_provider_register -[azure-cli-install]: /cli/azure/install-azure-cli -[azure-cni-about]: concepts-network.md#azure-cni-advanced-networking -[sp-delete]: kubernetes-service-principal.md#additional-considerations -[azure-portal]: https://portal.azure.com -[kubernetes-deployment]: concepts-clusters-workloads.md#deployments-and-yaml-manifests -[kubernetes-service]: concepts-network.md#services -[restricted-vm-sizes]: quotas-skus-regions.md#restricted-vm-sizes -[use-advanced-networking]: configure-azure-cni.md -[aks-support-policies]: support-policies.md -[aks-faq]: faq.md -[az-extension-add]: 
/cli/azure/extension#az-extension-add -[az-extension-update]: /cli/azure/extension#az-extension-update -[windows-server-password]: /windows/security/threat-protection/security-policy-settings/password-must-meet-complexity-requirements#reference -[win-faq-change-admin-creds]: windows-faq.md#how-do-i-change-the-administrator-password-for-windows-server-nodes-on-my-cluster diff --git a/articles/aks/windows-container-powershell.md b/articles/aks/windows-container-powershell.md deleted file mode 100644 index 0e20f0c34dc33..0000000000000 --- a/articles/aks/windows-container-powershell.md +++ /dev/null @@ -1,331 +0,0 @@ ---- -title: Create a Windows Server container on an AKS cluster by using PowerShell -description: Learn how to quickly create a Kubernetes cluster, deploy an application in a Windows Server container in Azure Kubernetes Service (AKS) using PowerShell. -services: container-service -ms.topic: article -ms.date: 03/12/2021 -ms.custom: devx-track-azurepowershell - - -#Customer intent: As a developer or cluster operator, I want to quickly create an AKS cluster and deploy a Windows Server container so that I can see how to run applications running on a Windows Server container using the managed Kubernetes service in Azure. ---- - -# Create a Windows Server container on an Azure Kubernetes Service (AKS) cluster using PowerShell - -Azure Kubernetes Service (AKS) is a managed Kubernetes service that lets you quickly deploy and -manage clusters. In this article, you deploy an AKS cluster using PowerShell. You also deploy an -`ASP.NET` sample application in a Windows Server container to the cluster. - -![Image of browsing to ASP.NET sample application](media/windows-container-powershell/asp-net-sample-app.png) - -This article assumes a basic understanding of Kubernetes concepts. For more information, see -[Kubernetes core concepts for Azure Kubernetes Service (AKS)][kubernetes-concepts]. 
- -## Prerequisites - -If you don't have an Azure subscription, create a [free](https://azure.microsoft.com/free/) account -before you begin. - -If you choose to use PowerShell locally, this article requires that you install the Az PowerShell -module and connect to your Azure account using the -[Connect-AzAccount](/powershell/module/az.accounts/Connect-AzAccount) cmdlet. For more information -about installing the Az PowerShell module, see -[Install Azure PowerShell][install-azure-powershell]. You also must install the Az.Aks PowerShell module: - -```azurepowershell-interactive -Install-Module Az.Aks -``` - -[!INCLUDE [cloud-shell-try-it](../../includes/cloud-shell-try-it.md)] - -If you have multiple Azure subscriptions, choose the appropriate subscription in which the resources -should be billed. Select a specific subscription ID using the -[Set-AzContext](/powershell/module/az.accounts/set-azcontext) cmdlet. - -```azurepowershell-interactive -Set-AzContext -SubscriptionId 00000000-0000-0000-0000-000000000000 -``` - -## Limitations - -The following limitations apply when you create and manage AKS clusters that support multiple node pools: - -* You can't delete the first node pool. - -The following additional limitations apply to Windows Server node pools: - -* The AKS cluster can have a maximum of 10 node pools. -* The AKS cluster can have a maximum of 100 nodes in each node pool. -* The Windows Server node pool name has a limit of 6 characters. - -## Create a resource group - -An [Azure resource group](../azure-resource-manager/management/overview.md) -is a logical group in which Azure resources are deployed and managed. When you create a resource -group, you are asked to specify a location. This location is where resource group metadata is -stored, it is also where your resources run in Azure if you don't specify another region during -resource creation. Create a resource group using the [New-AzResourceGroup][new-azresourcegroup] -cmdlet. 
- -The following example creates a resource group named **myResourceGroup** in the **eastus** location. - -> [!NOTE] -> This article uses PowerShell syntax for the commands in this tutorial. If you are using Azure Cloud -> Shell, ensure that the dropdown in the upper-left of the Cloud Shell window is set to **PowerShell**. - -```azurepowershell-interactive -New-AzResourceGroup -Name myResourceGroup -Location eastus -``` - -The following example output shows the resource group created successfully: - -```plaintext -ResourceGroupName : myResourceGroup -Location : eastus -ProvisioningState : Succeeded -Tags : -ResourceId : /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup -``` - -## Create an AKS cluster - -Use the `ssh-keygen` command-line utility to generate an SSH key pair. For more details, see -[Quick steps: Create and use an SSH public-private key pair for Linux VMs in Azure](../virtual-machines/linux/mac-create-ssh-keys.md). - -To run an AKS cluster that supports node pools for Windows Server containers, your cluster needs to -use a network policy that uses [Azure CNI][azure-cni-about] (advanced) network plugin. For more -detailed information to help plan out the required subnet ranges and network considerations, see -[configure Azure CNI networking][use-advanced-networking]. Use the [New-AzAksCluster][new-azakscluster] cmdlet -below to create an AKS cluster named **myAKSCluster**. The following example creates the necessary -network resources if they don't exist. - -> [!NOTE] -> To ensure your cluster operates reliably, you should run at least 2 (two) nodes in the default -> node pool. 
- -```azurepowershell-interactive -$Username = Read-Host -Prompt 'Please create a username for the administrator credentials on your Windows Server containers: ' -$Password = Read-Host -Prompt 'Please create a password for the administrator credentials on your Windows Server containers: ' -AsSecureString -New-AzAksCluster -ResourceGroupName myResourceGroup -Name myAKSCluster -NodeCount 2 -NetworkPlugin azure -NodeVmSetType VirtualMachineScaleSets -WindowsProfileAdminUserName $Username -WindowsProfileAdminUserPassword $Password -``` - -> [!Note] -> If you are unable to create the AKS cluster because the version is not supported in this region -> then you can use the `Get-AzAksVersion -Location eastus` command to find the supported version -> list for this region. - -After a few minutes, the command completes and returns information about the cluster. Occasionally -the cluster can take longer than a few minutes to provision. Allow up to 10 minutes in these cases. - -## Add a Windows Server node pool - -By default, an AKS cluster is created with a node pool that can run Linux containers. Use -`New-AzAksNodePool` cmdlet to add a node pool that can run Windows Server containers alongside the -Linux node pool. - -```azurepowershell-interactive -New-AzAksNodePool -ResourceGroupName myResourceGroup -ClusterName myAKSCluster -VmSetType VirtualMachineScaleSets -OsType Windows -Name npwin -``` - -The above command creates a new node pool named **npwin** and adds it to the **myAKSCluster**. When -creating a node pool to run Windows Server containers, the default value for **VmSize** is -**Standard_D2s_v3**. If you choose to set the **VmSize** parameter, check the list of -[restricted VM sizes][restricted-vm-sizes]. The minimum recommended size is **Standard_D2s_v3**. The -previous command also uses the default subnet in the default vnet created when running `New-AzAksCluster`. 
- -## Connect to the cluster - -To manage a Kubernetes cluster, you use [kubectl][kubectl], the Kubernetes command-line client. If -you use Azure Cloud Shell, `kubectl` is already installed. To install `kubectl` locally, use the -`Install-AzAksKubectl` cmdlet: - -```azurepowershell-interactive -Install-AzAksKubectl -``` - -To configure `kubectl` to connect to your Kubernetes cluster, use the -[Import-AzAksCredential][import-azakscredential] cmdlet. This command -downloads credentials and configures the Kubernetes CLI to use them. - -```azurepowershell-interactive -Import-AzAksCredential -ResourceGroupName myResourceGroup -Name myAKSCluster -``` - -To verify the connection to your cluster, use the [kubectl get][kubectl-get] command to return a -list of the cluster nodes. - -```azurepowershell-interactive -kubectl get nodes -``` - -The following example output shows all the nodes in the cluster. Make sure that the status of all -nodes is **Ready**: - -```plaintext -NAME STATUS ROLES AGE VERSION -aks-nodepool1-12345678-vmssfedcba Ready agent 13m v1.16.7 -aksnpwin987654 Ready agent 108s v1.16.7 -``` - -## Run the application - -A Kubernetes manifest file defines a desired state for the cluster, such as what container images to -run. In this article, a manifest is used to create all objects needed to run the ASP.NET sample -application in a Windows Server container. This manifest includes a -[Kubernetes deployment][kubernetes-deployment] for the ASP.NET sample application and an external -[Kubernetes service][kubernetes-service] to access the application from the internet. - -The ASP.NET sample application is provided as part of the [.NET Framework Samples][dotnet-samples] -and runs in a Windows Server container. AKS requires Windows Server containers to be based on images -of **Windows Server 2019** or greater. 
The Kubernetes manifest file must also define a -[node selector][node-selector] to tell your AKS cluster to run your ASP.NET sample application's pod -on a node that can run Windows Server containers. - -Create a file named `sample.yaml` and copy in the following YAML definition. If you use the Azure -Cloud Shell, this file can be created using `vi` or `nano` as if working on a virtual or physical -system: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: sample - labels: - app: sample -spec: - replicas: 1 - template: - metadata: - name: sample - labels: - app: sample - spec: - nodeSelector: - "kubernetes.io/os": windows - containers: - - name: sample - image: mcr.microsoft.com/dotnet/framework/samples:aspnetapp - resources: - limits: - cpu: 1 - memory: 800M - requests: - cpu: .1 - memory: 300M - ports: - - containerPort: 80 - selector: - matchLabels: - app: sample ---- -apiVersion: v1 -kind: Service -metadata: - name: sample -spec: - type: LoadBalancer - ports: - - protocol: TCP - port: 80 - selector: - app: sample -``` - -Deploy the application using the [kubectl apply][kubectl-apply] command and specify the name of your -YAML manifest: - -```azurepowershell-interactive -kubectl apply -f sample.yaml -``` - -The following example output shows the Deployment and Service created successfully: - -```plaintext -deployment.apps/sample created -service/sample created -``` - -## Test the application - -When the application runs, a Kubernetes service exposes the application frontend to the internet. -This process can take a few minutes to complete. Occasionally the service can take longer than a few -minutes to provision. Allow up to 10 minutes in these cases. - -To monitor progress, use the [kubectl get service][kubectl-get] command with the `--watch` argument. - -```azurepowershell-interactive -kubectl get service sample --watch -``` - -Initially the **EXTERNAL-IP** for the **sample** service is shown as **pending**. 
- -```plaintext -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -sample LoadBalancer 10.0.37.27 80:30572/TCP 6s -``` - -When the **EXTERNAL-IP** address changes from **pending** to an actual public IP address, use `CTRL-C` -to stop the `kubectl` watch process. The following example output shows a valid public IP address -assigned to the service: - -```plaintext -sample LoadBalancer 10.0.37.27 52.179.23.131 80:30572/TCP 2m -``` - -To see the sample app in action, open a web browser to the external IP address of your service. - -![Image of browsing to ASP.NET sample application](media/windows-container-powershell/asp-net-sample-app.png) - -> [!Note] -> If you receive a connection timeout when trying to load the page then you should verify the sample -> app is ready with the following command `kubectl get pods --watch`. Sometimes the Windows -> container will not be started by the time your external IP address is available. - -## Delete cluster - -When the cluster is no longer needed, use the -[Remove-AzResourceGroup][remove-azresourcegroup] cmdlet to remove -the resource group, container service, and all related resources. - -```azurepowershell-interactive -Remove-AzResourceGroup -Name myResourceGroup -``` - -> [!NOTE] -> When you delete the cluster, the Azure Active Directory service principal used by the AKS cluster -> is not removed. For steps on how to remove the service principal, see -> [AKS service principal considerations and deletion][sp-delete]. If you used a managed identity, -> the identity is managed by the platform and does not require removal. - -## Next steps - -In this article, you deployed a Kubernetes cluster and deployed an `ASP.NET` sample application in a -Windows Server container to it. - -To learn more about AKS, and walk through a complete code to deployment example, continue to the -Kubernetes cluster tutorial. 
- -> [!div class="nextstepaction"] -> [AKS tutorial][aks-tutorial] - - -[kubectl]: https://kubernetes.io/docs/user-guide/kubectl/ -[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get -[dotnet-samples]: https://hub.docker.com/_/microsoft-dotnet-framework-samples/ -[node-selector]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ -[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply - - -[kubernetes-concepts]: concepts-clusters-workloads.md -[install-azure-powershell]: /powershell/azure/install-az-ps -[new-azresourcegroup]: /powershell/module/az.resources/new-azresourcegroup -[azure-cni-about]: concepts-network.md#azure-cni-advanced-networking -[use-advanced-networking]: configure-azure-cni.md -[new-azakscluster]: /powershell/module/az.aks/new-azakscluster -[restricted-vm-sizes]: quotas-skus-regions.md#restricted-vm-sizes -[import-azakscredential]: /powershell/module/az.aks/import-azakscredential -[kubernetes-deployment]: concepts-clusters-workloads.md#deployments-and-yaml-manifests -[kubernetes-service]: concepts-network.md#services -[remove-azresourcegroup]: /powershell/module/az.resources/remove-azresourcegroup -[sp-delete]: kubernetes-service-principal.md#additional-considerations -[aks-tutorial]: ./tutorial-kubernetes-prepare-app.md diff --git a/articles/aks/windows-faq.md b/articles/aks/windows-faq.md index 09898dbd73236..d4c4a0cfb78b7 100644 --- a/articles/aks/windows-faq.md +++ b/articles/aks/windows-faq.md @@ -215,7 +215,7 @@ To get started with Windows Server containers in AKS, see [Create a node pool th [azure-network-models]: concepts-network.md#azure-virtual-networks [configure-azure-cni]: configure-azure-cni.md [nodepool-upgrade]: use-multiple-node-pools.md#upgrade-a-node-pool -[windows-node-cli]: windows-container-cli.md +[windows-node-cli]: ./learn/quick-windows-container-deploy-cli.md [aks-support-policies]: support-policies.md [aks-faq]: faq.md 
[upgrade-cluster]: upgrade-cluster.md diff --git a/articles/api-management/api-management-advanced-policies.md b/articles/api-management/api-management-advanced-policies.md index ba55d5aaeaf39..63f4995efbf27 100644 --- a/articles/api-management/api-management-advanced-policies.md +++ b/articles/api-management/api-management-advanced-policies.md @@ -536,6 +536,29 @@ In the following example, request forwarding is retried up to ten times using an ``` +### Example + +In the following example, sending a request to a URL other than the defined backend is retried up to three times if the connection is dropped/timed out, or the request results in a server-side error. Since `first-fast-retry` is set to true, the first retry is executed immediately upon the initial request failure. Note that `send-request` must set `ignore-error` to true in order for `response-variable-name` to be null in the event of an error. + +```xml +<retry + condition="@(context.Variables["response"] == null || ((IResponse)context.Variables["response"]).StatusCode >= 500)" + count="3" + interval="1" + first-fast-retry="true"> + <send-request mode="new" response-variable-name="response" timeout="10" ignore-error="true"> + <set-url>https://api.contoso.com/products/5</set-url> + <set-method>GET</set-method> + </send-request> +</retry> +``` + ### Elements | Element | Description | Required | diff --git a/articles/api-management/how-to-deploy-self-hosted-gateway-azure-kubernetes-service.md b/articles/api-management/how-to-deploy-self-hosted-gateway-azure-kubernetes-service.md index ef9949dedc999..33e347d9acf72 100644 --- a/articles/api-management/how-to-deploy-self-hosted-gateway-azure-kubernetes-service.md +++ b/articles/api-management/how-to-deploy-self-hosted-gateway-azure-kubernetes-service.md @@ -19,7 +19,7 @@ This article provides the steps for deploying self-hosted gateway component of A ## Prerequisites - [Create an Azure API Management instance](get-started-create-service-instance.md) -- [Create an Azure Kubernetes cluster](../aks/kubernetes-walkthrough-portal.md) +- Create an Azure Kubernetes cluster [using the Azure CLI](../aks/learn/quick-kubernetes-deploy-cli.md), [using Azure PowerShell](../aks/learn/quick-kubernetes-deploy-powershell.md), or [using the Azure
portal](../aks/learn/quick-kubernetes-deploy-portal.md). - [Provision a gateway resource in your API Management instance](api-management-howto-provision-self-hosted-gateway.md). ## Deploy the self-hosted gateway to AKS diff --git a/articles/api-management/how-to-deploy-self-hosted-gateway-kubernetes-opentelemetry.md b/articles/api-management/how-to-deploy-self-hosted-gateway-kubernetes-opentelemetry.md index 94f6b7bfcfc27..327399b2e51e0 100644 --- a/articles/api-management/how-to-deploy-self-hosted-gateway-kubernetes-opentelemetry.md +++ b/articles/api-management/how-to-deploy-self-hosted-gateway-kubernetes-opentelemetry.md @@ -27,10 +27,9 @@ You learn how to: ## Prerequisites - [Create an Azure API Management instance](get-started-create-service-instance.md) -- [Create an Azure Kubernetes cluster](../aks/kubernetes-walkthrough-portal.md) +- Create an Azure Kubernetes cluster [using the Azure CLI](../aks/learn/quick-kubernetes-deploy-cli.md), [using Azure PowerShell](../aks/learn/quick-kubernetes-deploy-powershell.md), or [using the Azure portal](../aks/learn/quick-kubernetes-deploy-portal.md). - [Provision a self-hosted gateway resource in your API Management instance](api-management-howto-provision-self-hosted-gateway.md). - ## Introduction to OpenTelemetry [OpenTelemetry](https://opentelemetry.io/) is a set of open-source tools and frameworks for logging, metrics, and tracing in a vendor-neutral way. diff --git a/articles/app-service/deploy-staging-slots.md b/articles/app-service/deploy-staging-slots.md index f9376d5dfb0f0..7391f3a618a4e 100644 --- a/articles/app-service/deploy-staging-slots.md +++ b/articles/app-service/deploy-staging-slots.md @@ -47,6 +47,10 @@ The app must be running in the **Standard**, **Premium**, or **Isolated** tier i ![Configuration source](./media/web-sites-staged-publishing/ConfigurationSource1.png) You can clone a configuration from any existing slot. 
Settings that can be cloned include app settings, connection strings, language framework versions, web sockets, HTTP version, and platform bitness. + + > [!NOTE] + > Currently, VNET and the Private Endpoint are not cloned across slots. + > 4. After the slot is added, select **Close** to close the dialog box. The new slot is now shown on the **Deployment slots** page. By default, **Traffic %** is set to 0 for the new slot, with all customer traffic routed to the production slot. diff --git a/articles/app-service/environment/how-to-migrate.md b/articles/app-service/environment/how-to-migrate.md index c1ec64fee59bf..ff0f6593184b6 100644 --- a/articles/app-service/environment/how-to-migrate.md +++ b/articles/app-service/environment/how-to-migrate.md @@ -76,8 +76,7 @@ App Service Environment v3 requires the subnet it's in to have a single delegati ```azurecli az network vnet subnet update -g $ASE_RG -n --vnet-name --delegations Microsoft.Web/hostingEnvironments ``` - -![subnet delegation sample](./media/migration/subnet-delegation.png) +:::image type="content" source="./media/migration/subnet-delegation.png" alt-text="Subnet delegation sample."::: ## 6. Migrate to App Service Environment v3 @@ -110,32 +109,33 @@ az appservice ase show --name $ASE_NAME --resource-group $ASE_RG From the [Azure portal](https://portal.azure.com), navigate to the **Migration** page for the App Service Environment you'll be migrating. You can do this by clicking on the banner at the top of the **Overview** page for your App Service Environment or by clicking the **Migration** item on the left-hand side. ![migration access points](./media/migration/portal-overview.png) +:::image type="content" source="./media/migration/portal-overview.png" alt-text="Migration access points."::: On the migration page, the platform will validate if migration is supported for your App Service Environment. 
If your environment isn't supported for migration, a banner will appear at the top of the page and include an error message with a reason. See the [troubleshooting](migrate.md#troubleshooting) section for descriptions of the error messages you may see if you aren't eligible for migration. If your App Service Environment isn't supported for migration at this time or your environment is in an unhealthy or suspended state, you won't be able to use the migration feature. If your environment [won't be supported for migration with the migration feature](migrate.md#supported-scenarios) or you want to migrate to App Service Environment v3 without using the migration feature, see the [manual migration options](migration-alternatives.md). -![migration not supported sample](./media/migration/migration-not-supported.png) +:::image type="content" source="./media/migration/migration-not-supported.png" alt-text="Migration not supported sample."::: If migration is supported for your App Service Environment, you'll be able to proceed to the next step in the process. The migration page will guide you through the series of steps to complete the migration. -![migration page sample](./media/migration/migration-ux-pre.png) +:::image type="content" source="./media/migration/migration-ux-pre.png" alt-text="Migration page sample."::: ## 2. Generate IP addresses for your new App Service Environment v3 Under **Get new IP addresses**, confirm you understand the implications and start the process. This step will take about 15 minutes to complete. You won't be able to scale or make changes to your existing App Service Environment during this time. If after 15 minutes you don't see your new IP addresses, select refresh as shown in the sample to allow your new IP addresses to appear. -![pre-migration request to refresh](./media/migration/pre-migration-refresh.png) +:::image type="content" source="./media/migration/pre-migration-refresh.png" alt-text="Pre-migration request to refresh."::: ## 3. 
Update dependent resources with new IPs When the previous step finishes, you'll be shown the IP addresses for your new App Service Environment v3. Using the new IPs, update any resources and networking components to ensure your new environment functions as intended once migration is complete. It's your responsibility to make any necessary updates. Don't move on to the next step until you confirm that you have made these updates. -![sample IPs](./media/migration/ip-sample.png) +:::image type="content" source="./media/migration/ip-sample.png" alt-text="Sample IPs generated during pre-migration."::: ## 4. Delegate your App Service Environment subnet App Service Environment v3 requires the subnet it's in to have a single delegation of `Microsoft.Web/hostingEnvironments`. Previous versions didn't require this delegation. You'll need to confirm your subnet is delegated properly and/or update the delegation if needed before migrating. A link to your subnet is given so that you can confirm and update as needed. -![ux subnet delegation sample](./media/migration/subnet-delegation-ux.png) +:::image type="content" source="./media/migration/subnet-delegation-ux.png" alt-text="Subnet delegation using the portal."::: ## 5. 
Migrate to App Service Environment v3 diff --git a/articles/app-service/environment/migrate.md b/articles/app-service/environment/migrate.md index 4657a4e058ebd..fe92571a22b84 100644 --- a/articles/app-service/environment/migrate.md +++ b/articles/app-service/environment/migrate.md @@ -3,7 +3,7 @@ title: Migrate to App Service Environment v3 by using the migration feature description: Overview of the migration feature for migration to App Service Environment v3 author: seligj95 ms.topic: article -ms.date: 4/27/2022 +ms.date: 4/29/2022 ms.author: jordanselig ms.custom: references_regions --- @@ -89,6 +89,7 @@ If your App Service Environment doesn't pass the validation checks or you try to |Migration to ASEv3 is not allowed for this ASE|You won't be able to migrate using the migration feature. |Migrate using one of the [manual migration options](migration-alternatives.md). | |Subscription has too many App Service Environments. Please remove some before trying to create more.|The App Service Environment [quota for your subscription](/azure/azure-resource-manager/management/azure-subscription-service-limits#app-service-limits) has been met. |Remove unneeded environments or contact support to review your options. | |`` is not available in this location|You'll see this error if you're trying to migrate an App Service Environment in a region that doesn't support one of your requested features. |Migrate using one of the [manual migration options](migration-alternatives.md) if you want to migrate immediately. Otherwise, wait for the migration feature to support this App Service Environment configuration. | +|Migrate cannot be called on this ASE until the active upgrade has finished. |App Service Environments can't be migrated during platform upgrades. You can set your [upgrade preference](using-an-ase.md#upgrade-preference) from the Azure portal. |Wait until the upgrade finishes and then migrate. 
| ## Overview of the migration process using the migration feature @@ -163,4 +164,4 @@ There's no cost to migrate your App Service Environment. You'll stop being charg > [App Service Environment v3 Networking](networking.md) > [!div class="nextstepaction"] -> [Using an App Service Environment v3](using.md) \ No newline at end of file +> [Using an App Service Environment v3](using.md) diff --git a/articles/app-service/overview-vnet-integration.md b/articles/app-service/overview-vnet-integration.md index 23db2219330c0..bb49cc80d87be 100644 --- a/articles/app-service/overview-vnet-integration.md +++ b/articles/app-service/overview-vnet-integration.md @@ -158,7 +158,7 @@ After your app integrates with your virtual network, it uses the same DNS server There are some limitations with using regional virtual network integration: -* The feature is available from all App Service deployments in Premium v2 and Premium v3. It's also available in Basic and Standard tier but only from newer App Service deployments. If you're on an older deployment, you can only use the feature from a Premium v2 App Service plan. If you want to make sure you can use the feature in a Standard App Service plan, create your app in a Premium v3 App Service plan. Those plans are only supported on our newest deployments. You can scale down if you want after the plan is created. +* The feature is available from all App Service deployments in Premium v2 and Premium v3. It's also available in Basic and Standard tier but only from newer App Service deployments. If you're on an older deployment, you can only use the feature from a Premium v2 App Service plan. If you want to make sure you can use the feature in a Basic or Standard App Service plan, create your app in a Premium v3 App Service plan. Those plans are only supported on our newest deployments. You can scale down if you want after the plan is created. * The feature can't be used by Isolated plan apps that are in an App Service Environment. 
* You can't reach resources across peering connections with classic virtual networks. * The feature requires an unused subnet that's an IPv4 `/28` block or larger in an Azure Resource Manager virtual network. diff --git a/articles/app-service/tutorial-connect-msi-azure-database.md b/articles/app-service/tutorial-connect-msi-azure-database.md index fe80a014ca83b..361aead405183 100644 --- a/articles/app-service/tutorial-connect-msi-azure-database.md +++ b/articles/app-service/tutorial-connect-msi-azure-database.md @@ -12,7 +12,7 @@ ms.custom: "mvc, devx-track-azurecli" [App Service](overview.md) provides a highly scalable, self-patching web hosting service in Azure. It also provides a [managed identity](overview-managed-identity.md) for your app, which is a turn-key solution for securing access to Azure databases, including: -- [Azure SQL Database](/azure/sql-database/) +- [Azure SQL Database](/azure/azure-sql/database/) - [Azure Database for MySQL](/azure/mysql/) - [Azure Database for PostgreSQL](/azure/postgresql/) @@ -50,7 +50,7 @@ Prepare your environment for the Azure CLI. First, enable Azure Active Directory authentication to the Azure database by assigning an Azure AD user as the administrator of the server. For the scenario in the tutorial, you'll use this user to connect to your Azure database from the local development environment. Later, you set up the managed identity for your App Service app to connect from within Azure. > [!NOTE] -> This user is different from the Microsoft account you used to sign up for your Azure subscription. It must be a user that you created, imported, synced, or invited into Azure AD. For more information on allowed Azure AD users, see [Azure AD features and limitations in SQL Database](../azure-sql/database/authentication-aad-overview.md#azure-ad-features-and-limitations). +> This user is different from the Microsoft account you used to sign up for your Azure subscription. 
It must be a user that you created, imported, synced, or invited into Azure AD. For more information on allowed Azure AD users, see [Azure AD features and limitations in SQL Database](/azure/azure-sql/database/authentication-aad-overview#azure-ad-features-and-limitations). 1. If your Azure AD tenant doesn't have a user yet, create one by following the steps at [Add or delete users using Azure Active Directory](../active-directory/fundamentals/add-users-azure-active-directory.md). @@ -68,7 +68,7 @@ First, enable Azure Active Directory authentication to the Azure database by ass az sql server ad-admin create --resource-group --server-name --display-name ADMIN --object-id $azureaduser ``` - For more information on adding an Active Directory administrator, see [Provision an Azure Active Directory administrator for your server](../azure-sql/database/authentication-aad-configure.md#provision-azure-ad-admin-sql-managed-instance) + For more information on adding an Active Directory administrator, see [Provision an Azure Active Directory administrator for your server](/azure/azure-sql/database/authentication-aad-configure#provision-azure-ad-admin-sql-managed-instance) # [Azure Database for MySQL](#tab/mysql) @@ -550,7 +550,7 @@ For Azure Database for MySQL and Azure Database for PostgreSQL, the database use connection.connect(); ``` - The [tedious](https://tediousjs.github.io/tedious/) library also has an authentication type `azure-active-directory-msi-app-service`, which doesn't require you to retrieve the token yourself, but the use of `DefaultAzureCredential` in this example works both in App Service and in your local development environment. 
For more information, see [Quickstart: Use Node.js to query a database in Azure SQL Database or Azure SQL Managed Instance](../azure-sql/database/connect-query-nodejs.md) + The [tedious](https://tediousjs.github.io/tedious/) library also has an authentication type `azure-active-directory-msi-app-service`, which doesn't require you to retrieve the token yourself, but the use of `DefaultAzureCredential` in this example works both in App Service and in your local development environment. For more information, see [Quickstart: Use Node.js to query a database in Azure SQL Database or Azure SQL Managed Instance](/azure/azure-sql/database/connect-query-nodejs) # [Azure Database for MySQL](#tab/mysql) diff --git a/articles/app-service/tutorial-networking-isolate-vnet.md b/articles/app-service/tutorial-networking-isolate-vnet.md index cbaf55c19b300..311ea87de5f19 100644 --- a/articles/app-service/tutorial-networking-isolate-vnet.md +++ b/articles/app-service/tutorial-networking-isolate-vnet.md @@ -9,25 +9,25 @@ ms.reviewer: madsd # Tutorial: Isolate back-end communication in Azure App Service with Virtual Network integration -In this article you will configure an App Service app with secure, network-isolated communication to backend services. The example scenario used is in [Tutorial: Secure Cognitive Service connection from App Service using Key Vault](tutorial-connect-msi-key-vault.md). When you're finished, you have an App Service app that accesses both Key Vault and Cognitive Services through an [Azure virtual network](../virtual-network/virtual-networks-overview.md) (VNet), and no other traffic is allowed to access those back-end resources. All traffic will be isolated within your VNet using [VNet integration](web-sites-integrate-with-vnet.md) and [private endpoints](../private-link/private-endpoint-overview.md). +In this article you will configure an App Service app with secure, network-isolated communication to backend services. 
The example scenario used is in [Tutorial: Secure Cognitive Service connection from App Service using Key Vault](tutorial-connect-msi-key-vault.md). When you're finished, you have an App Service app that accesses both Key Vault and Cognitive Services through an [Azure virtual network](../virtual-network/virtual-networks-overview.md), and no other traffic is allowed to access those back-end resources. All traffic will be isolated within your virtual network using [virtual network integration](web-sites-integrate-with-vnet.md) and [private endpoints](../private-link/private-endpoint-overview.md). -As a multi-tenanted service, outbound network traffic from your App Service app to other Azure services shares the same environment with other apps or even other subscriptions. While the traffic itself can be encrypted, certain scenarios may require an extra level of security by isolating back-end communication from other network traffic. These scenarios are typically accessible to large enterprises with a high level of expertise, but App Service puts it within reach with VNet integration. +As a multi-tenanted service, outbound network traffic from your App Service app to other Azure services shares the same environment with other apps or even other subscriptions. While the traffic itself can be encrypted, certain scenarios may require an extra level of security by isolating back-end communication from other network traffic. These scenarios are typically accessible to large enterprises with a high level of expertise, but App Service puts it within reach with virtual network integration. ![scenario architecture](./media/tutorial-networking-isolate-vnet/architecture.png) With this architecture: - Public traffic to the back-end services is blocked. -- Outbound traffic from App Service is routed to the VNet and can reach the back-end services. +- Outbound traffic from App Service is routed to the virtual network and can reach the back-end services. 
- App Service is able to perform DNS resolution to the back-end services through the private DNS zones. What you will learn: > [!div class="checklist"] -> * Create a VNet and subnets for App Service VNet integration +> * Create a virtual network and subnets for App Service virtual network integration > * Create private DNS zones > * Create private endpoints -> * Configure VNet integration in App Service +> * Configure virtual network integration in App Service ## Prerequisites @@ -43,9 +43,9 @@ The tutorial continues to use the following environment variables from the previ vaultName= ``` -## Create VNet and subnets +## Create virtual network and subnets -1. Create a VNet. Replace *\* with a unique name. +1. Create a virtual network. Replace *\* with a unique name. ```azurecli-interactive # Save vnet name as variable for convenience @@ -54,13 +54,13 @@ The tutorial continues to use the following environment variables from the previ az network vnet create --resource-group $groupName --location $region --name $vnetName --address-prefixes 10.0.0.0/16 ``` -1. Create a subnet for the App Service VNet integration. +1. Create a subnet for the App Service virtual network integration. ```azurecli-interactive az network vnet subnet create --resource-group $groupName --vnet-name $vnetName --name vnet-integration-subnet --address-prefixes 10.0.0.0/24 --delegations Microsoft.Web/serverfarms ``` - For App Service, the VNet integration subnet is recommended to have a CIDR block of `/26` at a minimum (see [VNet integration subnet requirements](overview-vnet-integration.md#subnet-requirements)). `/24` is more than sufficient. `--delegations Microsoft.Web/serverfarms` specifies that the subnet is [delegated for App Service VNet integration](../virtual-network/subnet-delegation-overview.md). 
+ For App Service, the virtual network integration subnet is recommended to have a CIDR block of `/26` at a minimum (see [Virtual network integration subnet requirements](overview-vnet-integration.md#subnet-requirements)). `/24` is more than sufficient. `--delegations Microsoft.Web/serverfarms` specifies that the subnet is [delegated for App Service virtual network integration](../virtual-network/subnet-delegation-overview.md). 1. Create another subnet for the private endpoints. @@ -83,7 +83,7 @@ Because your Key Vault and Cognitive Services resources will sit behind [private For more information on these settings, see [Azure Private Endpoint DNS configuration](../private-link/private-endpoint-dns.md#azure-services-dns-zone-configuration) -1. Link the private DNS zones to the VNet. +1. Link the private DNS zones to the virtual network. ```azurecli-interactive az network private-dns link vnet create --resource-group $groupName --name cognitiveservices-zonelink --zone-name privatelink.cognitiveservices.azure.com --virtual-network $vnetName --registration-enabled False @@ -92,7 +92,7 @@ Because your Key Vault and Cognitive Services resources will sit behind [private ## Create private endpoints -1. In the private endpoint subnet of your VNet, create a private endpoint for your key vault. +1. In the private endpoint subnet of your virtual network, create a private endpoint for your key vault. ```azurecli-interactive # Get Cognitive Services resource ID @@ -142,11 +142,11 @@ Because your Key Vault and Cognitive Services resources will sit behind [private > [!NOTE] > Again, you can observe the behavior change in the sample app. You can no longer load the app because it can no longer access the key vault references. The app has lost its connectivity to the key vault through the shared networking. -The two private endpoints are only accessible to clients inside the VNet you created. 
You can't even access the secrets in the key vault through **Secrets** page in the Azure portal, because the portal accesses them through the public internet (see [Manage the locked down resources](#manage-the-locked-down-resources)). +The two private endpoints are only accessible to clients inside the virtual network you created. You can't even access the secrets in the key vault through **Secrets** page in the Azure portal, because the portal accesses them through the public internet (see [Manage the locked down resources](#manage-the-locked-down-resources)). -## Configure VNet integration in your app +## Configure virtual network integration in your app -1. Scale the app up to **Standard** tier. VNet integration requires **Standard** tier or above (see [Integrate your app with an Azure virtual network](overview-vnet-integration.md)). +1. Scale the app up to a supported pricing tier (see [Integrate your app with an Azure virtual network](overview-vnet-integration.md)). ```azurecli-interactive az appservice plan update --name $appName --resource-group $groupName --sku S1 @@ -158,13 +158,13 @@ The two private endpoints are only accessible to clients inside the VNet you cre az webapp update --resource-group $groupName --name $appName --https-only ``` -1. Enable VNet integration on your app. +1. Enable virtual network integration on your app. ```azurecli-interactive az webapp vnet-integration add --resource-group $groupName --name $appName --vnet $vnetName --subnet vnet-integration-subnet ``` - VNet integration allows outbound traffic to flow directly into the VNet. By default, only local IP traffic defined in [RFC-1918](https://tools.ietf.org/html/rfc1918#section-3) is routed to the VNet, which is what you need for the private endpoints. To route all your traffic to the VNet, see [Manage virtual network integration routing](configure-vnet-integration-routing.md). 
Routing all traffic can also be used if you want to route internet traffic through your VNet, such as through an [Azure VNet NAT](../virtual-network/nat-gateway/nat-overview.md) or an [Azure Firewall](../firewall/overview.md). + Virtual network integration allows outbound traffic to flow directly into the virtual network. By default, only local IP traffic defined in [RFC-1918](https://tools.ietf.org/html/rfc1918#section-3) is routed to the virtual network, which is what you need for the private endpoints. To route all your traffic to the virtual network, see [Manage virtual network integration routing](configure-vnet-integration-routing.md). Routing all traffic can also be used if you want to route internet traffic through your virtual network, such as through an [Azure Virtual Network NAT](../virtual-network/nat-gateway/nat-overview.md) or an [Azure Firewall](../firewall/overview.md). 1. In the browser, navigate to `.azurewebsites.net` again and wait for the integration to take effect. If you get an HTTP 500 error, wait a few minutes and try again. If you can load the page and get detection results, then you're connecting to the Cognitive Services endpoint with key vault references. @@ -181,9 +181,9 @@ The two private endpoints are only accessible to clients inside the VNet you cre Depending on your scenarios, you may not be able to manage the private endpoint protected resources through the Azure portal, Azure CLI, or Azure PowerShell (for example, Key Vault). These tools all make REST API calls to access the resources through the public internet, and are blocked by your configuration. Here are a few options for accessing the locked down resources: - For Key Vault, add the public IP of your local machine to view or update the private endpoint protected secrets. 
-- If your on premises network is extended into the Azure VNet through a [VPN gateway](../vpn-gateway/vpn-gateway-about-vpngateways.md) or [ExpressRoute](../expressroute/expressroute-introduction.md), you can manage the private endpoint protected resources directly from your on premises network. -- Manage the private endpoint protected resources from a [jump server](https://wikipedia.org/wiki/Jump_server) in the VNet. -- [Deploy Cloud Shell into the VNet](../cloud-shell/private-vnet.md). +- If your on premises network is extended into the Azure virtual network through a [VPN gateway](../vpn-gateway/vpn-gateway-about-vpngateways.md) or [ExpressRoute](../expressroute/expressroute-introduction.md), you can manage the private endpoint protected resources directly from your on premises network. +- Manage the private endpoint protected resources from a [jump server](https://wikipedia.org/wiki/Jump_server) in the virtual network. +- [Deploy Cloud Shell into the virtual network](../cloud-shell/private-vnet.md). ## Clean up resources diff --git a/articles/app-service/webjobs-dotnet-deploy-vs.md b/articles/app-service/webjobs-dotnet-deploy-vs.md index 7c1157c9517d2..42044702fe23c 100644 --- a/articles/app-service/webjobs-dotnet-deploy-vs.md +++ b/articles/app-service/webjobs-dotnet-deploy-vs.md @@ -13,7 +13,7 @@ ms.reviewer: david.ebbo;suwatch;pbatum;naren.soni # Develop and deploy WebJobs using Visual Studio -This article explains how to use Visual Studio to deploy a console app project to a web app in [Azure App Service](overview.md) as an [Azure WebJob](https://go.microsoft.com/fwlink/?LinkId=390226). For information about how to deploy WebJobs by using the [Azure portal](https://portal.azure.com), see [Run background tasks with WebJobs in Azure App Service](webjobs-create.md). +This article explains how to use Visual Studio to deploy a console app project to a web app in [Azure App Service](overview.md) as an [Azure WebJob](/azure/app-service/webjobs-create). 
For information about how to deploy WebJobs by using the [Azure portal](https://portal.azure.com), see [Run background tasks with WebJobs in Azure App Service](webjobs-create.md). You can choose to develop a WebJob that runs as either a [.NET Core app](#webjobs-as-net-core-console-apps) or a [.NET Framework app](#webjobs-as-net-framework-console-apps). Version 3.x of the [Azure WebJobs SDK](webjobs-sdk-how-to.md) lets you develop WebJobs that run as either .NET Core apps or .NET Framework apps, while version 2.x supports only the .NET Framework. The way that you deploy a WebJobs project is different for .NET Core projects than for .NET Framework projects. @@ -98,7 +98,7 @@ To create a new WebJobs-enabled project, use the console app project template an Create a project that is configured to deploy automatically as a WebJob when you deploy a web project in the same solution. Use this option when you want to run your WebJob in the same web app in which you run the related web application. > [!NOTE] -> The WebJobs new-project template automatically installs NuGet packages and includes code in *Program.cs* for the [WebJobs SDK](https://www.asp.net/aspnet/overview/developing-apps-with-windows-azure/getting-started-with-windows-azure-webjobs). If you don't want to use the WebJobs SDK, remove or change the `host.RunAndBlock` statement in *Program.cs*. +> The WebJobs new-project template automatically installs NuGet packages and includes code in *Program.cs* for the [WebJobs SDK](/azure/app-service/webjobs-sdk-get-started). If you don't want to use the WebJobs SDK, remove or change the `host.RunAndBlock` statement in *Program.cs*. 
> > diff --git a/articles/application-gateway/http-response-codes.md b/articles/application-gateway/http-response-codes.md index 68bcc9f2d3e4a..9e4613fa0cf95 100644 --- a/articles/application-gateway/http-response-codes.md +++ b/articles/application-gateway/http-response-codes.md @@ -74,7 +74,7 @@ An HTTP 408 response can be observed when client requests to the frontend listen #### 499 – Client closed the connection -An HTTP 499 response is presented if a client request that is sent to application gateways using v2 sku is closed before the server finished responding. This error can be observed when a large response is returned to the client, but the client may have closed or refreshed their browser/application before the server had a chance to finish responding. +An HTTP 499 response is presented if a client request that is sent to application gateways using v2 sku is closed before the server finished responding. This error can be observed when a large response is returned to the client, but the client may have closed or refreshed their browser/application before the server had a chance to finish responding. In application gateways using v1 sku, an HTTP 0 response code may be raised for the client closing the connection before the server has finished responding as well. ## 5XX response codes (server error) diff --git a/articles/application-gateway/ingress-controller-install-new.md b/articles/application-gateway/ingress-controller-install-new.md index ba401c5c6195f..47ccef9d0411e 100644 --- a/articles/application-gateway/ingress-controller-install-new.md +++ b/articles/application-gateway/ingress-controller-install-new.md @@ -111,7 +111,8 @@ Kubernetes infrastructure. For the following steps, we need setup [kubectl](https://kubectl.docs.kubernetes.io/) command, which we will use to connect to our new Kubernetes cluster. [Cloud Shell](https://shell.azure.com/) has `kubectl` already installed. We will use `az` CLI to obtain credentials for Kubernetes. 
-Get credentials for your newly deployed AKS ([read more](../aks/kubernetes-walkthrough.md#connect-to-the-cluster)): +Get credentials for your newly deployed AKS ([read more](../aks/manage-azure-rbac.md#use-azure-rbac-for-kubernetes-authorization-with-kubectl)): + ```azurecli # use the deployment-outputs.json created after deployment to get the cluster name and resource group name aksClusterName=$(jq -r ".aksClusterName.value" deployment-outputs.json) @@ -129,7 +130,6 @@ az aks get-credentials --resource-group $resourceGroupName --name $aksClusterNam * [Managed Identity Controller (MIC)](https://github.com/Azure/aad-pod-identity#managed-identity-controllermic) component * [Node Managed Identity (NMI)](https://github.com/Azure/aad-pod-identity#node-managed-identitynmi) component - To install AAD Pod Identity to your cluster: - *Kubernetes RBAC enabled* AKS cluster diff --git a/articles/applied-ai-services/index.yml b/articles/applied-ai-services/index.yml index 9bcd29d571565..fc6a01176a4e2 100644 --- a/articles/applied-ai-services/index.yml +++ b/articles/applied-ai-services/index.yml @@ -57,10 +57,10 @@ productDirectory: summary: Proactively monitor metrics and diagnose issues url: ./metrics-advisor/index.yml # Card - - title: Azure Video Analyzer - imageSrc: ./media/video-analytics.svg + - title: Azure Video Indexer + imageSrc: ./media/video-indexer.svg summary: Extract actionable insights from your videos - url: ../azure-video-analyzer/index.yml + url: ../azure-video-indexer/index.yml # Card - title: Azure Immersive Reader imageSrc: ./media/immersive-reader.svg @@ -90,8 +90,8 @@ additionalContent: url: ./immersive-reader/quickstarts/client-libraries.md - text: Azure Cognitive Search url: ../search/search-get-started-portal.md - - text: Azure Video Analyzer - url: ../azure-video-analyzer/video-analyzer-docs/edge/detect-motion-emit-events-quickstart.md + - text: Azure Video Indexer + url: ../azure-video-indexer/video-indexer-get-started.md - text: Azure Bot Service 
url: /composer/quickstart-create-bot @@ -102,4 +102,4 @@ additionalContent: - text: Azure Cognitive Services url: ../cognitive-services/index.yml - text: Azure Machine Learning - url: ../machine-learning/index.yml \ No newline at end of file + url: ../machine-learning/index.yml diff --git a/articles/applied-ai-services/media/video-indexer.svg b/articles/applied-ai-services/media/video-indexer.svg new file mode 100644 index 0000000000000..16b38ec3903a0 --- /dev/null +++ b/articles/applied-ai-services/media/video-indexer.svg @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/articles/attestation/audit-logs.md b/articles/attestation/audit-logs.md index 2a9f347a5e6f4..8ef8823540a49 100644 --- a/articles/attestation/audit-logs.md +++ b/articles/attestation/audit-logs.md @@ -65,7 +65,7 @@ Individual blobs are stored as text, formatted as a JSON blob. Let’s look at a } ``` -Most of these fields are documented in the [Top-level common schema](/azure-monitor/essentials/resource-logs-schema#top-level-common-schema). The following table lists the field names and descriptions for the entries not included in the top-level common schema: +Most of these fields are documented in the [Top-level common schema](/azure/azure-monitor/essentials/resource-logs-schema#top-level-common-schema). 
The following table lists the field names and descriptions for the entries not included in the top-level common schema: | Field Name | Description | |------------------------------------------|-----------------------------------------------------------------------------------------------| diff --git a/articles/attestation/claim-sets.md b/articles/attestation/claim-sets.md index 9247c8fdf04f7..259db2bae3761 100644 --- a/articles/attestation/claim-sets.md +++ b/articles/attestation/claim-sets.md @@ -135,7 +135,7 @@ Azure Attestation includes the below claims in the attestation token for all att - **x-ms-policy-hash**: Hash of Azure Attestation evaluation policy computed as BASE64URL(SHA256(UTF8(BASE64URL(UTF8(policy text))))) - **x-ms-policy-signer**: JSON object with a "jwk” member representing the key a customer used to sign their policy. This is applicable when customer uploads a signed policy - **x-ms-runtime**: JSON object containing "claims" that are defined and generated within the attested environment. 
This is a specialization of the “enclave held data” concept, where the “enclave held data” is specifically formatted as a UTF-8 encoding of well formed JSON -- **x-ms-inittime**: JSON object containing “claims” that are defined and enforced at secure environment initialization time +- **x-ms-inittime**: JSON object containing “claims” that are defined and verified at initialization time of the attested environment Below claim names are used from [IETF JWT specification](https://tools.ietf.org/html/rfc7519) diff --git a/articles/automation/automation-hybrid-runbook-worker.md b/articles/automation/automation-hybrid-runbook-worker.md index 2ab646286bbb2..2ee01f537abc9 100644 --- a/articles/automation/automation-hybrid-runbook-worker.md +++ b/articles/automation/automation-hybrid-runbook-worker.md @@ -86,6 +86,9 @@ The process to install a user Hybrid Runbook Worker depends on the operating sys |Linux | [Manual](automation-linux-hrw-install.md#install-a-linux-hybrid-runbook-worker) | |Either | For user Hybrid Runbook Workers, see [Deploy an extension-based Windows or Linux user Hybrid Runbook Worker in Automation](./extension-based-hybrid-runbook-worker-install.md). This is the recommended method. | +>[!NOTE] +> Hybrid Runbook Worker is currently not supported on VM Scale Sets. + ## Network planning Check [Azure Automation Network Configuration](automation-network-configuration.md#network-planning-for-hybrid-runbook-worker) for detailed information on the ports, URLs, and other networking details required for the Hybrid Runbook Worker. 
diff --git a/articles/automation/learn/automation-tutorial-runbook-textual-python-3.md b/articles/automation/learn/automation-tutorial-runbook-textual-python-3.md index a84e9eb6ddcd1..d893717c2aa29 100644 --- a/articles/automation/learn/automation-tutorial-runbook-textual-python-3.md +++ b/articles/automation/learn/automation-tutorial-runbook-textual-python-3.md @@ -109,7 +109,7 @@ To do this, the script has to authenticate using the Run As account credential f > [!NOTE] > The Automation account must have been created with the Run As account for there to be a Run As certificate. > If your Automation account was not created with the Run As account, you can authenticate as described in -> [Authenticate with the Azure Management Libraries for Python](/azure/python/python-sdk-azure-authenticate) or [create a Run As account](../create-run-as-account.md). +> [Authenticate with the Azure Management Libraries for Python](/azure/developer/python/sdk/authentication-overview) or [create a Run As account](../create-run-as-account.md). 1. Open the textual editor by selecting **Edit** on the **MyFirstRunbook-Python3** pane. diff --git a/articles/azure-arc/data/create-complete-managed-instance-directly-connected.md b/articles/azure-arc/data/create-complete-managed-instance-directly-connected.md index f19655ea562b4..b1130d2385762 100644 --- a/articles/azure-arc/data/create-complete-managed-instance-directly-connected.md +++ b/articles/azure-arc/data/create-complete-managed-instance-directly-connected.md @@ -42,7 +42,7 @@ In addition, you need the following additional extensions to connect the cluster ## Access your Kubernetes cluster -After installing the client tools, you need access to a Kubernetes cluster. You can create Kubernetes cluster with [`az aks create`](/cli/azure/aks#az-aks-create), or you can follow the steps below to create the cluster in the Azure portal. +After installing the client tools, you need access to a Kubernetes cluster. 
You can create a Kubernetes cluster with [`az aks create`](/cli/azure/aks#az-aks-create), or you can follow the steps below to create the cluster in the Azure portal. ### Create a cluster @@ -116,7 +116,7 @@ After creating the cluster, connect to the cluster through the Azure CLI. ### Arc enable the Kubernetes cluster -Now that the cluster is running, connect the cluster to Azure. When you connect a cluster to Azure, you Arc enable it. Arc enabling your cluster allow you to view and manage the cluster, and deploy and manage additional services such as Arc-enabled data services on the cluster directly from Azure portal. +Now that the cluster is running, connect the cluster to Azure. When you connect a cluster to Azure, you Arc enable it. Arc enabling your cluster allows you to view and manage the cluster, and deploy and manage additional services such as Arc-enabled data services on the cluster directly from Azure portal. Use `az connectedk8s connect` to connect the cluster to Azure: @@ -181,7 +181,7 @@ NAME STATE Ready ``` -## Create Azure Arc-enabled SQL Managed Instance +## Create an Azure Arc-enabled SQL Managed Instance 1. In the portal, locate the resource group. 1. In the resource group, select **Create**. diff --git a/articles/azure-arc/data/create-complete-managed-instance-indirectly-connected.md b/articles/azure-arc/data/create-complete-managed-instance-indirectly-connected.md index 518ace3a1353b..4fdb6b2ab4215 100644 --- a/articles/azure-arc/data/create-complete-managed-instance-indirectly-connected.md +++ b/articles/azure-arc/data/create-complete-managed-instance-indirectly-connected.md @@ -99,7 +99,7 @@ Follow the steps below to deploy the cluster from the Azure CLI. For command details, see [az aks create](/cli/azure/aks#az-aks-create). - For a complete demonstration, including an application on a single-node Kubernetes cluster, go to [Quickstart: Deploy an Azure Kubernetes Service cluster using the Azure CLI](../../aks/kubernetes-walkthrough.md). 
+ For a complete demonstration, including an application on a single-node Kubernetes cluster, go to [Quickstart: Deploy an Azure Kubernetes Service cluster using the Azure CLI](../../aks/learn/quick-kubernetes-deploy-cli.md). 1. Get credentials diff --git a/articles/azure-arc/servers/deployment-options.md b/articles/azure-arc/servers/deployment-options.md index af7ba575a34c4..ad799f6a12ceb 100644 --- a/articles/azure-arc/servers/deployment-options.md +++ b/articles/azure-arc/servers/deployment-options.md @@ -22,6 +22,7 @@ Connecting machines in your hybrid environment directly with Azure can be accomp | At scale | [Connect machines using a service principal](onboard-service-principal.md) to install the agent at scale non-interactively.| | At scale | [Connect machines by running PowerShell scripts with Configuration Manager](onboard-configuration-manager-powershell.md) | At scale | [Connect machines with a Configuration Manager custom task sequence](onboard-configuration-manager-custom-task.md) +| At scale | [Connect Windows machines using Group Policy](onboard-group-policy.md) | At scale | [Connect machines from Automation Update Management](onboard-update-management-machines.md) to create a service principal that installs and configures the agent for multiple machines managed with Azure Automation Update Management to connect machines non-interactively. 
| > [!IMPORTANT] diff --git a/articles/azure-arc/servers/learn/media/tutorial-assign-policy-portal/all-services-page.png b/articles/azure-arc/servers/learn/media/tutorial-assign-policy-portal/all-services-page.png new file mode 100644 index 0000000000000..d43caebc0a2e1 Binary files /dev/null and b/articles/azure-arc/servers/learn/media/tutorial-assign-policy-portal/all-services-page.png differ diff --git a/articles/azure-arc/servers/learn/media/tutorial-assign-policy-portal/assign-policy-button.png b/articles/azure-arc/servers/learn/media/tutorial-assign-policy-portal/assign-policy-button.png new file mode 100644 index 0000000000000..6add55729d578 Binary files /dev/null and b/articles/azure-arc/servers/learn/media/tutorial-assign-policy-portal/assign-policy-button.png differ diff --git a/articles/azure-arc/servers/learn/media/tutorial-assign-policy-portal/assignments-tab.png b/articles/azure-arc/servers/learn/media/tutorial-assign-policy-portal/assignments-tab.png new file mode 100644 index 0000000000000..a8cfb622cd86c Binary files /dev/null and b/articles/azure-arc/servers/learn/media/tutorial-assign-policy-portal/assignments-tab.png differ diff --git a/articles/azure-arc/servers/learn/media/tutorial-assign-policy-portal/compliance-policy.png b/articles/azure-arc/servers/learn/media/tutorial-assign-policy-portal/compliance-policy.png new file mode 100644 index 0000000000000..7ce45c35f58e8 Binary files /dev/null and b/articles/azure-arc/servers/learn/media/tutorial-assign-policy-portal/compliance-policy.png differ diff --git a/articles/azure-arc/servers/learn/tutorial-assign-policy-portal.md b/articles/azure-arc/servers/learn/tutorial-assign-policy-portal.md index 2029ae2a8f1e6..6e96b6a9d87d7 100644 --- a/articles/azure-arc/servers/learn/tutorial-assign-policy-portal.md +++ b/articles/azure-arc/servers/learn/tutorial-assign-policy-portal.md @@ -2,14 +2,22 @@ title: Tutorial - New policy assignment with Azure portal description: In this tutorial, you use Azure 
portal to create an Azure Policy assignment to identify non-compliant resources. ms.topic: tutorial -ms.date: 04/21/2021 +ms.date: 04/20/2022 --- # Tutorial: Create a policy assignment to identify non-compliant resources -The first step in understanding compliance in Azure is to identify the status of your resources. Azure Policy supports auditing the state of your Azure Arc-enabled server with guest configuration policies. Azure Policy's guest configuration definitions can audit or apply settings inside the machine. This tutorial steps you through the process of creating and assigning a policy, identifying which of your Azure Arc-enabled servers don't have the Log Analytics agent installed. +The first step in understanding compliance in Azure is to identify the status of your resources. Azure Policy supports auditing the state of your Azure Arc-enabled server with guest configuration policies. Azure Policy's guest configuration definitions can audit or apply settings inside the machine. + +This tutorial steps you through the process of creating and assigning a policy in order to identify which of your Azure Arc-enabled servers don't have the Log Analytics agent for Windows or Linux installed. These machines are considered _non-compliant_ with the policy assignment. + +In this tutorial, you will learn how to: + +> [!div class="checklist"] +> * Create policy assignment and assign a definition to it +> * Identify resources that aren't compliant with the new policy +> * Remove the policy from non-compliant resources -At the end of this process, you'll successfully identify machines that don't have the Log Analytics agent for Windows or Linux installed. They're _non-compliant_ with the policy assignment. ## Prerequisites @@ -18,22 +26,20 @@ before you begin. ## Create a policy assignment -In this tutorial, you create a policy assignment and assign the _\[Preview]: Log Analytics extension should be installed on your Linux Azure Arc machines_ policy definition. 
+Follow the steps below to create a policy assignment and assign the policy definition _\[Preview]: Log Analytics extension should be installed on your Linux Azure Arc machines_: -1. Launch the Azure Policy service in the Azure portal by clicking **All services**, then searching +1. Launch the Azure Policy service in the Azure portal by selecting **All services**, then searching for and selecting **Policy**. - :::image type="content" source="./media/tutorial-assign-policy-portal/search-policy.png" alt-text="Search for Policy in All Services" border="false"::: + :::image type="content" source="./media/tutorial-assign-policy-portal/all-services-page.png" alt-text="Screenshot of All services window showing search for policy service." border="true"::: 1. Select **Assignments** on the left side of the Azure Policy page. An assignment is a policy that has been assigned to take place within a specific scope. - :::image type="content" source="./media/tutorial-assign-policy-portal/select-assignment.png" alt-text="Select Assignments page from Policy Overview page" border="false"::: + :::image type="content" source="./media/tutorial-assign-policy-portal/assignments-tab.png" alt-text="Screenshot of All services Policy window showing policy assignments." border="true"::: 1. Select **Assign Policy** from the top of the **Policy - Assignments** page. - :::image type="content" source="./media/tutorial-assign-policy-portal/select-assign-policy.png" alt-text="Assign a policy definition from Assignments page" border="false"::: - 1. On the **Assign Policy** page, select the **Scope** by clicking the ellipsis and selecting either a management group or subscription. Optionally, select a resource group. A scope determines what resources or grouping of resources the policy assignment gets enforced on. 
Then click **Select** @@ -54,21 +60,12 @@ In this tutorial, you create a policy assignment and assign the _\[Preview]: Log For a partial list of available built-in policies, see [Azure Policy samples](../../../governance/policy/samples/index.md). 1. Search through the policy definitions list to find the _\[Preview]: Log Analytics extension should be installed on your Windows Azure Arc machines_ - definition if you have enabled the Arc-enabled servers agent on a Windows-based machine. For a Linux-based machine, find the corresponding _\[Preview]: Log Analytics extension should be installed on your Linux Azure Arc machines_ policy definition. Click on that policy and click **Select**. + definition (if you have enabled the Arc-enabled servers agent on a Windows-based machine). For a Linux-based machine, find the corresponding _\[Preview]: Log Analytics extension should be installed on your Linux Azure Arc machines_ policy definition. Click on that policy and click **Select**. 1. The **Assignment name** is automatically populated with the policy name you selected, but you can - change it. For this example, leave _\[Preview]: Log Analytics extension should be installed on your Windows Azure Arc machines_ or _\[Preview]: Log Analytics extension should be installed on your Linux Azure Arc machines_ depending on which one you selected. You can also add an optional **Description**. The description provides details about this policy assignment. - **Assigned by** will automatically fill based on who is logged in. This field is optional, so - custom values can be entered. - -1. Leave **Create a Managed Identity** unchecked. This box _must_ be checked when the policy or - initiative includes a policy with the - [deployIfNotExists](../../../governance/policy/concepts/effects.md#deployifnotexists) effect. As the policy used for this - quickstart doesn't, leave it blank. 
For more information, see - [managed identities](../../../active-directory/managed-identities-azure-resources/overview.md) and - [how remediation security works](../../../governance/policy/how-to/remediate-resources.md#how-remediation-security-works). - -1. Click **Assign**. + change it. For this example, leave the policy name as is, and don't change any of the remaining options on the page. + +1. For this example, we don't need to change any settings on the other tabs. Select **Review + Create** to review your new policy assignment, then select **Create**. You're now ready to identify non-compliant resources to understand the compliance state of your environment. @@ -77,22 +74,22 @@ environment. Select **Compliance** in the left side of the page. Then locate the **\[Preview]: Log Analytics extension should be installed on your Windows Azure Arc machines** or **\[Preview]: Log Analytics extension should be installed on your Linux Azure Arc machines** policy assignment you created. -:::image type="content" source="./media/tutorial-assign-policy-portal/policy-compliance.png" alt-text="Compliance details on the Policy Compliance page" border="false"::: +:::image type="content" source="./media/tutorial-assign-policy-portal/compliance-policy.png" alt-text="Screenshot of Policy Compliance page showing policy compliance for the selected scope." border="true"::: If there are any existing resources that aren't compliant with this new assignment, they appear under **Non-compliant resources**. When a condition is evaluated against your existing resources and found true, then those resources -are marked as non-compliant with the policy. The following table shows how different policy effects +are marked as non-compligitant with the policy. The following table shows how different policy effects work with the condition evaluation for the resulting compliance state. Although you don't see the evaluation logic in the Azure portal, the compliance state results are shown. 
The compliance state result is either compliant or non-compliant. -| **Resource State** | **Effect** | **Policy Evaluation** | **Compliance State** | +| **Resource state** | **Effect** | **Policy evaluation** | **Compliance state** | | --- | --- | --- | --- | -| Exists | Deny, Audit, Append\*, DeployIfNotExist\*, AuditIfNotExist\* | True | Non-Compliant | +| Exists | Deny, Audit, Append\*, DeployIfNotExist\*, AuditIfNotExist\* | True | Non-compliant | | Exists | Deny, Audit, Append\*, DeployIfNotExist\*, AuditIfNotExist\* | False | Compliant | -| New | Audit, AuditIfNotExist\* | True | Non-Compliant | +| New | Audit, AuditIfNotExist\* | True | Non-compliant | | New | Audit, AuditIfNotExist\* | False | Compliant | \* The Append, DeployIfNotExist, and AuditIfNotExist effects require the IF statement to be TRUE. @@ -108,8 +105,6 @@ To remove the assignment created, follow these steps: 1. Right-click the policy assignment and select **Delete assignment**. - :::image type="content" source="./media/tutorial-assign-policy-portal/delete-assignment.png" alt-text="Delete an assignment from the Compliance page" border="false"::: - ## Next steps In this tutorial, you assigned a policy definition to a scope and evaluated its compliance report. The policy definition validates that all the resources in the scope are compliant and identifies which ones aren't. Now you are ready to monitor your Azure Arc-enabled servers machine by enabling [VM insights](../../../azure-monitor/vm/vminsights-overview.md). 
diff --git a/articles/azure-arc/servers/media/onboard-group-policy/general-properties.png b/articles/azure-arc/servers/media/onboard-group-policy/general-properties.png new file mode 100644 index 0000000000000..a785623ebf08b Binary files /dev/null and b/articles/azure-arc/servers/media/onboard-group-policy/general-properties.png differ diff --git a/articles/azure-arc/servers/media/onboard-group-policy/new-action.png b/articles/azure-arc/servers/media/onboard-group-policy/new-action.png new file mode 100644 index 0000000000000..8f8970094ae90 Binary files /dev/null and b/articles/azure-arc/servers/media/onboard-group-policy/new-action.png differ diff --git a/articles/azure-arc/servers/media/onboard-group-policy/new-trigger.png b/articles/azure-arc/servers/media/onboard-group-policy/new-trigger.png new file mode 100644 index 0000000000000..5615f6f64dd13 Binary files /dev/null and b/articles/azure-arc/servers/media/onboard-group-policy/new-trigger.png differ diff --git a/articles/azure-arc/servers/onboard-group-policy.md b/articles/azure-arc/servers/onboard-group-policy.md new file mode 100644 index 0000000000000..f55840121b214 --- /dev/null +++ b/articles/azure-arc/servers/onboard-group-policy.md @@ -0,0 +1,183 @@ +--- +title: Connect machines at scale using group policy +description: In this article, you learn how to connect machines to Azure using Azure Arc-enabled servers using group policy. +ms.date: 04/29/2022 +ms.topic: conceptual +ms.custom: template-how-to +--- + +# Connect machines at scale using Group Policy + +You can onboard Active Directory–joined Windows machines to Azure Arc-enabled servers at scale using Group Policy. + +You'll first need to set up a local remote share with the Connected Machine Agent and define a configuration file specifying the Arc-enabled server's landing zone within Azure. You will then define a Group Policy Object to run an onboarding script using a scheduled task. 
This Group Policy can be applied at the site, domain, or organizational unit level. Assignment can also use Access Control List (ACL) and other security filtering native to Group Policy. Machines in the scope of the Group Policy will be onboarded to Azure Arc-enabled servers. + +Before you get started, be sure to review the [prerequisites](prerequisites.md) and verify that your subscription and resources meet the requirements. For information about supported regions and other related considerations, see [supported Azure regions](overview.md#supported-regions). Also review our [at-scale planning guide](plan-at-scale-deployment.md) to understand the design and deployment criteria, as well as our management and monitoring recommendations. + +If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. + +## Prepare a remote share + +The Group Policy to onboard Azure Arc-enabled servers requires a remote share with the Connected Machine Agent. You will need to: + +1. Prepare a remote share to host the Azure Connected Machine agent package for Windows and the configuration file. You need to be able to add files to the distributed location. + +1. Download the latest version of the [Windows agent Windows Installer package](https://aka.ms/AzureConnectedMachineAgent) from the Microsoft Download Center and save it to the remote share. + +## Generate an onboarding script and configuration file from Azure Portal + +Before you can run the script to connect your machines, you'll need to do the following: + +1. Follow the steps to [create a service principal for onboarding at scale](onboard-service-principal.md#create-a-service-principal-for-onboarding-at-scale). + + * Assign the Azure Connected Machine Onboarding role to your service principal and limit the scope of the role to the target Azure landing zone. + * Make a note of the Service Principal Secret; you'll need this value later. + +1. 
Modify and save the following configuration file to the remote share as `ArcConfig.json`. Edit the file with your Azure subscription, resource group, and location details. Use the service principal details from step 1 for the last two fields: + +``` +{ + "tenant-id": "INSERT AZURE TENANTID", + "subscription-id": "INSERT AZURE SUBSCRIPTION ID", + "resource-group": "INSERT RESOURCE GROUP NAME", + "location": "INSERT REGION", + "service-principal-id": "INSERT SPN ID", + "service-principal-secret": "INSERT SPN Secret" + } +``` + +The group policy will project machines as Arc-enabled servers in the Azure subscription, resource group, and region specified in this configuration file. + +## Modify and save the onboarding script + +Before you can run the script to connect your machines, you'll need to modify and save the onboarding script: + +1. Edit the field for `remotePath` to reflect the distributed share location with the configuration file and Connected Machine Agent. + +1. Edit the `localPath` with the local path where the logs generated from the onboarding to Azure Arc-enabled servers will be saved per machine. + +1. Save the modified onboarding script locally and note its location. This will be referenced when creating the Group Policy Object. 
+ +``` +[string] $remotePath = "\\dc-01.contoso.lcl\Software\Arc" +[string] $localPath = "$env:HOMEDRIVE\ArcDeployment" + +[string] $RegKey = "HKLM\SOFTWARE\Microsoft\Azure Connected Machine Agent" +[string] $logFile = "installationlog.txt" +[string] $InstallationFolder = "ArcDeployment" +[string] $configFilename = "ArcConfig.json" + +if (!(Test-Path $localPath) ) { + $BitsDirectory = new-item -path C:\ -Name $InstallationFolder -ItemType Directory + $logpath = new-item -path $BitsDirectory -Name $logFile -ItemType File +} +else{ + $BitsDirectory = "C:\ArcDeployment" + } + +function Deploy-Agent { + [bool] $isDeployed = Test-Path $RegKey + if ($isDeployed) { + $logMessage = "Azure Arc Serverenabled agent is deployed , exit process" + $logMessage >> $logpath + exit + } + else { + Copy-Item -Path "$remotePath\*" -Destination $BitsDirectory -Recurse -Verbose + $exitCode = (Start-Process -FilePath msiexec.exe -ArgumentList @("/i", "$BitsDirectory\AzureConnectedMachineAgent.msi" , "/l*v", "$BitsDirectory\$logFile", "/qn") -Wait -Passthru).ExitCode + + if($exitCode -eq 0){ + Start-Sleep -Seconds 120 + $x= & "$env:ProgramW6432\AzureConnectedMachineAgent\azcmagent.exe" connect --config "$BitsDirectory\$configFilename" + $msg >> $logpath + } + else { + $message = (net helpmsg $exitCode) + $message >> $logpath + } + } +} + +Deploy-Agent +``` + +## Create a Group Policy Object + +Create a new Group Policy Object (GPO) to run the onboarding script using the configuration file details: + +1. Open the Group Policy Management Console (GPMC). + +1. Navigate to the Organization Unit (OU), Domain, or Security Group in your AD forest that contains the machines you want to onboard to Azure Arc-enabled servers. + +1. Right-click on this set of resources and select **Create a GPO in this domain, and Link it here.** + +1. Assign the name “Onboard servers to Azure Arc-enabled servers” to this new Group Policy Object (GPO). 
+ +## Create a scheduled task + +The newly created GPO needs to be modified to run the onboarding script at the appropriate cadence. Use Group Policy’s built-in Scheduled Task capabilities to do so: + +1. Select **Computer Configuration > Preferences > Control Panel Settings > Scheduled Tasks**. + +1. Right-click in the blank area and select **New > Scheduled Task**. + +Your workstation must be running Windows 7 or higher to be able to create a Scheduled Task from Group Policy Management Console. + +### Assign general parameters for the task + +In the **General** tab, set the following parameters under **Security Options**: + +1. In the field **When running the task, use the following user account:**, enter "NT AUTHORITY\System". + +1. Select **Run whether user is logged on or not**. + +1. Check the box for **Run with highest privileges**. + +1. In the field **Configure for**, select **Windows Vista or Windows 2008**. + +:::image type="content" source="media/onboard-group-policy/general-properties.png" alt-text="Screenshot of the Azure Arc agent Deployment and Configuration properties window." ::: + +### Assign trigger parameters for the task + +In the **Triggers** tab, select **New**, then enter the following parameters in the **New Trigger** window: + +1. In the field **Begin the task**, select **On a schedule**. + +1. Under **Settings**, select **One time** and enter the date and time for the task to run. + +1. Under **Advanced Settings**, check the box for **Enabled**. + +1. Once you've set the trigger parameters, select **OK**. + +:::image type="content" source="media/onboard-group-policy/new-trigger.png" alt-text="Screenshot of the New Trigger window." ::: + +### Assign action parameters for the task + +In the **Actions** tab, select **New**, then enter the following parameters in the **New Action** window: + +1. For **Action**, select **Start a program** from the dropdown. + +1. 
For **Program/script**, enter `C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe`. + +1. For **Add arguments (optional)**, enter `-ExecutionPolicy Bypass -command `. + + Note that you must enter the location of the deployment script, modified earlier with the `DeploymentPath` and `LocalPath`, instead of the placeholder "Path to Deployment Script". + +1. For **Start In (Optional)**, enter `C:\`. + +1. Once you've set the action parameters, select **OK**. + +:::image type="content" source="media/onboard-group-policy/new-action.png" alt-text="Screenshot of the New Action window." ::: + +## Apply the Group Policy Object + +On the Group Policy Management Console, right-click on the desired Organizational Unit and select the option to link an existing GPO. Choose the Group Policy Object defined in the Scheduled Task. After 10 or 20 minutes, the Group Policy Object will be replicated to the respective domain controllers. Learn more about [creating and managing group policy in Azure AD Domain Services](../../active-directory-domain-services/manage-group-policy.md). + +After you have successfully installed the agent and configured it to connect to Azure Arc-enabled servers, go to the Azure portal to verify that the servers in your Organizational Unit have successfully connected. View your machines in the [Azure portal](https://aka.ms/hybridmachineportal). + +## Next steps + +- Review the [Planning and deployment guide](plan-at-scale-deployment.md) to plan for deploying Azure Arc-enabled servers at any scale and implement centralized management and monitoring. +- Review connection troubleshooting information in the [Troubleshoot Connected Machine agent guide](troubleshoot-agent-onboard.md). 
+- Learn how to manage your machine using [Azure Policy](../../governance/policy/overview.md) for such things as VM [guest configuration](../../governance/policy/concepts/guest-configuration.md), verifying that the machine is reporting to the expected Log Analytics workspace, enabling monitoring with [VM insights](../../azure-monitor/vm/vminsights-enable-policy.md), and much more. +- Learn more about [Group Policy](/troubleshoot/windows-server/group-policy/group-policy-overview). diff --git a/articles/azure-arc/servers/toc.yml b/articles/azure-arc/servers/toc.yml index 314c50d49428d..0a4dedbe08abe 100644 --- a/articles/azure-arc/servers/toc.yml +++ b/articles/azure-arc/servers/toc.yml @@ -67,6 +67,8 @@ href: onboard-configuration-manager-powershell.md - name: Connect machines using Configuration Manager custom task sequence href: onboard-configuration-manager-custom-task.md + - name: Connect machines using group policy + href: onboard-group-policy.md - name: Connect machines from Automation Update Management href: onboard-update-management-machines.md - name: Migrate diff --git a/articles/azure-fluid-relay/how-tos/deploy-fluid-static-web-apps.md b/articles/azure-fluid-relay/how-tos/deploy-fluid-static-web-apps.md index d27be31a4d884..90f69ff0172ca 100644 --- a/articles/azure-fluid-relay/how-tos/deploy-fluid-static-web-apps.md +++ b/articles/azure-fluid-relay/how-tos/deploy-fluid-static-web-apps.md @@ -1,7 +1,7 @@ --- title: 'How to: Deploy Fluid applications using Azure Static Web Apps' description: Detailed explanation about how Fluid applications can be hosted on Azure Static Web Apps -author: sdeshpande3 +author: sonalivdeshpande ms.author: sdeshpande ms.date: 08/19/2021 ms.topic: article diff --git a/articles/azure-functions/functions-bindings-storage-table-input.md b/articles/azure-functions/functions-bindings-storage-table-input.md index 71fb0e447dc42..bf84d33b5cc21 100644 --- a/articles/azure-functions/functions-bindings-storage-table-input.md +++ 
b/articles/azure-functions/functions-bindings-storage-table-input.md @@ -696,7 +696,7 @@ def main(req: func.HttpRequest, messageJSON) -> func.HttpResponse: return func.HttpResponse(f"Table row: {messageJSON}") ``` -With this simple binding, you can't programmatically handle a case in which no row that has a row key ID is found. For more fine-grained data selection, use the [storage SDK](/azure/developer/python/azure-sdk-example-storage-use?tabs=cmd). +With this simple binding, you can't programmatically handle a case in which no row that has a row key ID is found. For more fine-grained data selection, use the [storage SDK](/azure/developer/python/sdk/examples/azure-sdk-example-storage-use?tabs=cmd). --- diff --git a/articles/azure-glossary-cloud-terminology.md b/articles/azure-glossary-cloud-terminology.md index 80b8c2775525b..44ff178b1a422 100644 --- a/articles/azure-glossary-cloud-terminology.md +++ b/articles/azure-glossary-cloud-terminology.md @@ -115,7 +115,7 @@ The agreement that describes Microsoft's commitments for uptime and connectivity See [Service Level Agreements](https://azure.microsoft.com/support/legal/sla/) ## shared access signature (SAS) -A signature that enables you to grant limited access to a resource, without exposing your account key. For example, [Azure Storage uses SAS](./storage/common/storage-sas-overview.md) to grant client access to objects such as blobs. [IoT Hub uses SAS](iot-hub/iot-hub-dev-guide-sas.md#security-tokens) to grant devices permission to send telemetry. +A signature that enables you to grant limited access to a resource, without exposing your account key. For example, [Azure Storage uses SAS](./storage/common/storage-sas-overview.md) to grant client access to objects such as blobs. [IoT Hub uses SAS](iot-hub/iot-hub-dev-guide-sas.md#sas-tokens) to grant devices permission to send telemetry. ## storage account An account that gives you access to the Azure Blob, Queue, Table, and File services in Azure Storage. 
The storage account name defines the unique namespace for Azure Storage data objects. diff --git a/articles/azure-government/compliance/azure-services-in-fedramp-auditscope.md b/articles/azure-government/compliance/azure-services-in-fedramp-auditscope.md index 002d5379a89c0..0e66ebf296367 100644 --- a/articles/azure-government/compliance/azure-services-in-fedramp-auditscope.md +++ b/articles/azure-government/compliance/azure-services-in-fedramp-auditscope.md @@ -227,7 +227,7 @@ This article provides a detailed list of Azure, Dynamics 365, Microsoft 365, and | [Synapse Analytics](../../synapse-analytics/index.yml) | ✅ | ✅ | | [Time Series Insights](../../time-series-insights/index.yml) | ✅ | ✅ | | [Traffic Manager](../../traffic-manager/index.yml) | ✅ | ✅ | -| [Video Analyzer for Media](../../azure-video-analyzer/video-analyzer-for-media-docs/index.yml) (formerly Video Indexer) | ✅ | ✅ | +| [Video Analyzer for Media](../../azure-video-indexer/index.yml) (formerly Video Indexer) | ✅ | ✅ | | [Virtual Machine Scale Sets](../../virtual-machine-scale-sets/index.yml) | ✅ | ✅ | | [Virtual Machines](../../virtual-machines/index.yml) (incl. 
[Reserved VM Instances](../../virtual-machines/prepay-reserved-vm-instances.md)) | ✅ | ✅ | | [Virtual Network](../../virtual-network/index.yml) | ✅ | ✅ | diff --git a/articles/azure-government/documentation-government-developer-guide.md b/articles/azure-government/documentation-government-developer-guide.md index 62f6e0b18d39d..f7a77f70a3b73 100644 --- a/articles/azure-government/documentation-government-developer-guide.md +++ b/articles/azure-government/documentation-government-developer-guide.md @@ -58,7 +58,7 @@ Navigate through the following links to get started using Azure Government: - [Connect with CLI](./documentation-government-get-started-connect-with-cli.md) - [Connect with Visual Studio](./documentation-government-connect-vs.md) - [Connect to Azure Storage](./documentation-government-get-started-connect-to-storage.md) -- [Connect with Azure SDK for Python](/azure/developer/python/azure-sdk-sovereign-domain) +- [Connect with Azure SDK for Python](/azure/developer/python/sdk/azure-sdk-sovereign-domain) ### Azure Government Video Library diff --git a/articles/azure-maps/zoom-levels-and-tile-grid.md b/articles/azure-maps/zoom-levels-and-tile-grid.md index a114d207374e1..67af9d528f346 100644 --- a/articles/azure-maps/zoom-levels-and-tile-grid.md +++ b/articles/azure-maps/zoom-levels-and-tile-grid.md @@ -52,7 +52,7 @@ The following table provides the full list of values for zoom levels where the t | 16 | 2.3887 | 611.496 | | 17 | 1.1943 | 305.748 | | 18 | 0.5972 | 152.874 | -| 19 | 0.14929 | 76.437 | +| 19 | 0.2986 | 76.437 | | 20 | 0.14929 | 38.2185 | | 21 | 0.074646 | 19.10926 | | 22 | 0.037323 | 9.55463 | diff --git a/articles/azure-monitor/app/data-model-pageview-telemetry.md b/articles/azure-monitor/app/data-model-pageview-telemetry.md new file mode 100644 index 0000000000000..41ccaf0185711 --- /dev/null +++ b/articles/azure-monitor/app/data-model-pageview-telemetry.md @@ -0,0 +1,37 @@ +--- +title: Azure Application Insights Data Model - PageView 
Telemetry +description: Application Insights data model for page view telemetry +ms.topic: conceptual +ms.date: 03/24/2022 +ms.reviewer: vgorbenko +--- + +# PageView telemetry: Application Insights data model + +PageView telemetry (in [Application Insights](./app-insights-overview.md)) is logged when an application user opens a new page of a monitored application. The `Page` in this context is a logical unit that is defined by the developer to be an application tab or a screen and is not necessarily correlated to a browser webpage load or refresh action. This distinction can be further understood in the context of single-page applications (SPA) where the switch between pages is not tied to browser page actions. [`pageViews.duration`](https://docs.microsoft.com/azure/azure-monitor/reference/tables/pageviews) is the time it takes for the application to present the page to the user. + +> [!NOTE] +> By default, Application Insights SDKs log single PageView events on each browser webpage load action, with [`pageViews.duration`](https://docs.microsoft.com/azure/azure-monitor/reference/tables/pageviews) populated by [browser timing](#measuring-browsertiming-in-application-insights). Developers can extend additional tracking of PageView events by using the [trackPageView API call](./api-custom-events-metrics.md#page-views). + +## Measuring browserTiming in Application Insights + +Modern browsers expose measurements for page load actions with the [Performance API](https://developer.mozilla.org/en-US/docs/Web/API/Performance_API). Application Insights simplifies these measurements by consolidating related timings into [standard browser metrics](../essentials/metrics-supported.md#microsoftinsightscomponents) as defined by these processing time definitions: + +1. Client <--> DNS : Client reaches out to DNS to resolve website hostname, DNS responds with IP address. +1. Client <--> Web Server : Client creates TCP then TLS handshakes with web server. +1. 
Client <--> Web Server : Client sends request payload, waits for server to execute request, and receives first response packet. +1. Client <-- Web Server : Client receives the rest of the response payload bytes from the web server. +1. Client : Client now has full response payload and has to render contents into browser and load the DOM. + +* `browserTimings/networkDuration` = #1 + #2 +* `browserTimings/sendDuration` = #3 +* `browserTimings/receiveDuration` = #4 +* `browserTimings/processingDuration` = #5 +* `browsertimings/totalDuration` = #1 + #2 + #3 + #4 + #5 +* `pageViews/duration` + * The PageView duration is from the browser’s performance timing interface, [`PerformanceNavigationTiming.duration`](https://developer.mozilla.org/en-US/docs/Web/API/PerformanceEntry/duration). + * If `PerformanceNavigationTiming` is available that duration is used. + * If it’s not, then the *deprecated* [`PerformanceTiming`](https://developer.mozilla.org/en-US/docs/Web/API/PerformanceTiming) interface is used and the delta between [`NavigationStart`](https://developer.mozilla.org/en-US/docs/Web/API/PerformanceTiming/navigationStart) and [`LoadEventEnd`](https://developer.mozilla.org/en-US/docs/Web/API/PerformanceTiming/loadEventEnd) is calculated. + * The developer specifies a duration value when logging custom PageView events using the [trackPageView API call](./api-custom-events-metrics.md#page-views). 
+ +![Screenshot of the Metrics page in Application Insights showing graphic displays of metrics data for a web application.](./media/javascript/page-view-load-time.png) diff --git a/articles/azure-monitor/containers/container-insights-enable-new-cluster.md b/articles/azure-monitor/containers/container-insights-enable-new-cluster.md index c5ad552c5914c..18cad95f835b3 100644 --- a/articles/azure-monitor/containers/container-insights-enable-new-cluster.md +++ b/articles/azure-monitor/containers/container-insights-enable-new-cluster.md @@ -18,7 +18,7 @@ You can enable monitoring of an AKS cluster using one of the supported methods: ## Enable using Azure CLI -To enable monitoring of a new AKS cluster created with Azure CLI, follow the step in the quickstart article under the section [Create AKS cluster](../../aks/kubernetes-walkthrough.md#create-aks-cluster). +To enable monitoring of a new AKS cluster created with Azure CLI, follow the step in the quickstart article under the section [Create AKS cluster](../../aks/learn/quick-kubernetes-deploy-cli.md). >[!NOTE] >If you choose to use the Azure CLI, you first need to install and use the CLI locally. You must be running the Azure CLI version 2.0.74 or later. To identify your version, run `az --version`. If you need to install or upgrade the Azure CLI, see [Install the Azure CLI](/cli/azure/install-azure-cli). 
diff --git a/articles/azure-monitor/containers/container-insights-onboard.md b/articles/azure-monitor/containers/container-insights-onboard.md index c21a807203897..893e92e10cb9b 100644 --- a/articles/azure-monitor/containers/container-insights-onboard.md +++ b/articles/azure-monitor/containers/container-insights-onboard.md @@ -18,7 +18,6 @@ This article provides an overview of the options that are available for setting - [Azure Red Hat OpenShift](../../openshift/intro-openshift.md) version 4.x - [Red Hat OpenShift](https://docs.openshift.com/container-platform/4.3/welcome/index.html) version 4.x - You can enable Container insights for a new deployment or for one or more existing deployments of Kubernetes by using any of the following supported methods: - The Azure portal @@ -124,7 +123,7 @@ To enable Container insights, use one of the methods that's described in the fol | Deployment state | Method | Description | |------------------|--------|-------------| -| New Kubernetes cluster | [Create an AKS cluster by using the Azure CLI](../../aks/kubernetes-walkthrough.md#create-aks-cluster)| You can enable monitoring for a new AKS cluster that you create by using the Azure CLI. | +| New Kubernetes cluster | [Create an AKS cluster by using the Azure CLI](../../aks/learn/quick-kubernetes-deploy-cli.md)| You can enable monitoring for a new AKS cluster that you create by using the Azure CLI. | | | [Create an AKS cluster by using Terraform](container-insights-enable-new-cluster.md#enable-using-terraform)| You can enable monitoring for a new AKS cluster that you create by using the open-source tool Terraform. | | | [Create an OpenShift cluster by using an Azure Resource Manager template](container-insights-azure-redhat-setup.md#enable-for-a-new-cluster-using-an-azure-resource-manager-template) | You can enable monitoring for a new OpenShift cluster that you create by using a preconfigured Azure Resource Manager template. 
| | | [Create an OpenShift cluster by using the Azure CLI](/cli/azure/openshift#az-openshift-create) | You can enable monitoring when you deploy a new OpenShift cluster by using the Azure CLI. | diff --git a/articles/azure-monitor/faq.yml b/articles/azure-monitor/faq.yml index 183d66e272ad7..3359dd3611f39 100644 --- a/articles/azure-monitor/faq.yml +++ b/articles/azure-monitor/faq.yml @@ -516,6 +516,7 @@ sections: * [Page view counts](app/usage-overview.md) * [AJAX calls](app/asp-net-dependencies.md) Requests made from a running script. * Page view load data + * Configurable Page [visit time](./app/data-model-pageview-telemetry.md) * User and session counts * [Authenticated user IDs](app/api-custom-events-metrics.md#authenticated-users) diff --git a/articles/azure-monitor/toc.yml b/articles/azure-monitor/toc.yml index 36402ce3ffc5e..d545df1c414bd 100644 --- a/articles/azure-monitor/toc.yml +++ b/articles/azure-monitor/toc.yml @@ -54,7 +54,7 @@ items: href: logs/tutorial-custom-logs.md - name: Resource manager templates displayName: Custom logs - href: logs/tutorial-custom-logs.md + href: logs/tutorial-custom-logs-api.md - name: Ingestion-time transformations items: - name: Azure portal @@ -1562,6 +1562,8 @@ items: href: app/data-model-event-telemetry.md - name: Metric href: app/data-model-metric-telemetry.md + - name: PageView + href: app/data-model-pageview-telemetry.md - name: Context href: app/data-model-context.md - name: Integration with Azure Functions diff --git a/articles/azure-netapp-files/azure-netapp-files-resource-limits.md b/articles/azure-netapp-files/azure-netapp-files-resource-limits.md index 68f4da110bf5c..9d5ff4d5b23ba 100644 --- a/articles/azure-netapp-files/azure-netapp-files-resource-limits.md +++ b/articles/azure-netapp-files/azure-netapp-files-resource-limits.md @@ -12,7 +12,7 @@ ms.service: azure-netapp-files ms.workload: storage ms.tgt_pltfrm: na ms.topic: conceptual -ms.date: 03/02/2022 +ms.date: 04/28/2022 ms.author: anfdocs --- # 
Resource limits for Azure NetApp Files @@ -51,6 +51,8 @@ The following table describes resource limits for Azure NetApp Files: For more information, see [Capacity management FAQs](faq-capacity-management.md). +For limits and constraints related to Azure NetApp Files network features, see [Guidelines for Azure NetApp Files network planning](azure-netapp-files-network-topologies.md#considerations). + ## Determine if a directory is approaching the limit size You can use the `stat` command from a client to see whether a directory is approaching the maximum size limit for directory metadata (320 MB). diff --git a/articles/azure-resource-manager/bicep/bicep-functions-resource.md b/articles/azure-resource-manager/bicep/bicep-functions-resource.md index 3ed829f4a51a9..b6d3170984eab 100644 --- a/articles/azure-resource-manager/bicep/bicep-functions-resource.md +++ b/articles/azure-resource-manager/bicep/bicep-functions-resource.md @@ -4,7 +4,7 @@ description: Describes the functions to use in a Bicep file to retrieve values a author: mumian ms.author: jgao ms.topic: conceptual -ms.date: 03/02/2022 +ms.date: 04/28/2022 --- # Resource functions for Bicep @@ -631,6 +631,78 @@ resource roleAssignment 'Microsoft.Authorization/roleAssignments@2018-09-01-prev } ``` +## managementGroupResourceId + +`managementGroupResourceId(resourceType, resourceName1, [resourceName2], ...)` + +Returns the unique identifier for a resource deployed at the management group level. + +Namespace: [az](bicep-functions.md#namespaces-for-functions). + +The `managementGroupResourceId` function is available in Bicep files, but typically you don't need it. Instead, use the symbolic name for the resource and access the `id` property. 
+ +The identifier is returned in the following format: + +```json +/providers/Microsoft.Management/managementGroups/{managementGroupName}/providers/{resourceType}/{resourceName} +``` + +### Remarks + +You use this function to get the resource ID for resources that are [deployed to the management group](deploy-to-management-group.md) rather than a resource group. The returned ID differs from the value returned by the [resourceId](#resourceid) function by not including a subscription ID and a resource group value. + +### managementGroupResourceId example + +The following template creates a policy definition, and assigns the policy definition. It uses the `managementGroupResourceId` function to get the resource ID for the policy definition. + +```bicep +targetScope = 'managementGroup' + +@description('Target Management Group') +param targetMG string + +@description('An array of the allowed locations, all other locations will be denied by the created policy.') +param allowedLocations array = [ + 'australiaeast' + 'australiasoutheast' + 'australiacentral' +] + +var mgScope = tenantResourceId('Microsoft.Management/managementGroups', targetMG) +var policyDefinitionName = 'LocationRestriction' + +resource policyDefinition 'Microsoft.Authorization/policyDefinitions@2020-03-01' = { + name: policyDefinitionName + properties: { + policyType: 'Custom' + mode: 'All' + parameters: {} + policyRule: { + if: { + not: { + field: 'location' + in: allowedLocations + } + } + then: { + effect: 'deny' + } + } + } +} + +resource location_lock 'Microsoft.Authorization/policyAssignments@2020-03-01' = { + name: 'location-lock' + properties: { + scope: mgScope + policyDefinitionId: managementGroupResourceId('Microsoft.Authorization/policyDefinitions', policyDefinitionName) + } + dependsOn: [ + policyDefinition + ] +} +``` + ## tenantResourceId `tenantResourceId(resourceType, resourceName1, [resourceName2], ...)` diff --git a/articles/azure-resource-manager/management/resource-name-rules.md 
b/articles/azure-resource-manager/management/resource-name-rules.md index a876ab8239501..4be43d4784e4e 100644 --- a/articles/azure-resource-manager/management/resource-name-rules.md +++ b/articles/azure-resource-manager/management/resource-name-rules.md @@ -2,7 +2,7 @@ title: Resource naming restrictions description: Shows the rules and restrictions for naming Azure resources. ms.topic: conceptual -ms.date: 04/26/2022 +ms.date: 04/28/2022 --- # Naming rules and restrictions for Azure resources @@ -531,10 +531,14 @@ In the following tables, the term alphanumeric refers to: > [!div class="mx-tableFixed"] > | Entity | Scope | Length | Valid Characters | > | --- | --- | --- | --- | -> | netAppAccounts | resource group | 1-128 | Alphanumerics, underscores, periods, and hyphens. | -> | netAppAccounts / capacityPools | NetApp account | 1-64 | Alphanumerics, underscores, periods, and hyphens.

Start with alphanumeric. | -> | netAppAccounts / snapshotPolicies | NetApp account | 1-64 | Alphanumerics, underscores, periods, and hyphens.

Start with alphanumeric. | -> | netAppAccounts / volumeGroups | NetApp account | 1-64 | Alphanumerics, underscores, periods, and hyphens.

Start with alphanumeric. | +> | netAppAccounts | resource group | 1-128 | Alphanumerics, underscores, and hyphens.

Start with alphanumeric. | +> | netAppAccounts / backups | NetApp account | 3-225 | Alphanumerics, underscores, periods, and hyphens.

Start with alphanumeric. | +> | netAppAccounts / backupPolicies | NetApp account | 1-64 | Alphanumerics, underscores, and hyphens.

Start with alphanumeric. | +> | netAppAccounts / capacityPools | NetApp account | 1-64 | Alphanumerics, underscores, and hyphens.

Start with alphanumeric. | +> | netAppAccounts / snapshots | NetApp account | 1-255 | Alphanumerics, underscores, and hyphens.

Start with alphanumeric. | +> | netAppAccounts / snapshotPolicies | NetApp account | 1-64 | Alphanumerics, underscores, and hyphens.

Start with alphanumeric. | +> | netAppAccounts / volumes | NetApp account | 1-64 | Alphanumerics, underscores, and hyphens.

Start with alphanumeric. | +> | netAppAccounts / volumeGroups | NetApp account | 3-64 | Alphanumerics, underscores, and hyphens.

Start with alphanumeric. | ## Microsoft.Network diff --git a/articles/azure-resource-manager/templates/template-functions-resource.md b/articles/azure-resource-manager/templates/template-functions-resource.md index 6db637c555cc5..a6e4edeb558b1 100644 --- a/articles/azure-resource-manager/templates/template-functions-resource.md +++ b/articles/azure-resource-manager/templates/template-functions-resource.md @@ -2,7 +2,7 @@ title: Template functions - resources description: Describes the functions to use in an Azure Resource Manager template (ARM template) to retrieve values about resources. ms.topic: conceptual -ms.date: 03/24/2022 +ms.date: 03/31/2022 ms.custom: devx-track-azurepowershell --- @@ -17,6 +17,7 @@ Resource Manager provides the following functions for getting resource values in * [reference](#reference) * [resourceId](#resourceid) * [subscriptionResourceId](#subscriptionresourceid) +* [managementGroupResourceId](#managementgroupresourceid) * [tenantResourceId](#tenantresourceid) To get values from parameters, variables, or the current deployment, see [Deployment value functions](template-functions-deployment.md). @@ -632,33 +633,31 @@ Continue adding resource names as parameters when the resource type includes mor ### Return value -When the template is deployed at the scope of a resource group, the resource ID is returned in the following format: +The resource ID is returned in different formats at different scopes: -```json -/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} -``` +* Resource group scope: -You can use the `resourceId` function for other deployment scopes, but the format of the ID changes. 
+ ```json + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ``` -If you use `resourceId` while deploying to a subscription, the resource ID is returned in the following format: +* Subscription scope: -```json -/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} -``` + ```json + /subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ``` -If you use `resourceId` while deploying to a management group or tenant, the resource ID is returned in the following format: +* Management group or tenant scope: -```json -/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} -``` + ```json + /providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ``` To avoid confusion, we recommend that you don't use `resourceId` when working with resources deployed to the subscription, management group, or tenant. Instead, use the ID function that is designed for the scope. -For [subscription-level resources](deploy-to-subscription.md), use the [subscriptionResourceId](#subscriptionresourceid) function. - -For [management group-level resources](deploy-to-management-group.md), use the [extensionResourceId](#extensionresourceid) function to reference a resource that is implemented as an extension of a management group. For example, custom policy definitions that are deployed to a management group are extensions of the management group. Use the [tenantResourceId](#tenantresourceid) function to reference resources that are deployed to the tenant but available in your management group. For example, built-in policy definitions are implemented as tenant level resources. - -For [tenant-level resources](deploy-to-tenant.md), use the [tenantResourceId](#tenantresourceid) function. Use `tenantResourceId` for built-in policy definitions because they're implemented at the tenant level. 
+* For [subscription-level resources](deploy-to-subscription.md), use the [subscriptionResourceId](#subscriptionresourceid) function. +* For [management group-level resources](deploy-to-management-group.md), use the [managementGroupResourceId](#managementgroupresourceid) function. Use the [extensionResourceId](#extensionresourceid) function to reference a resource that is implemented as an extension of a management group. For example, custom policy definitions that are deployed to a management group are extensions of the management group. Use the [tenantResourceId](#tenantresourceid) function to reference resources that are deployed to the tenant but available in your management group. For example, built-in policy definitions are implemented as tenant level resources. +* For [tenant-level resources](deploy-to-tenant.md), use the [tenantResourceId](#tenantresourceid) function. Use `tenantResourceId` for built-in policy definitions because they're implemented at the tenant level. ### Remarks @@ -750,6 +749,107 @@ The following template assigns a built-in role. You can deploy it to either a re :::code language="json" source="~/resourcemanager-templates/azure-resource-manager/functions/resource/subscriptionresourceid.json"::: +## managementGroupResourceId + +`managementGroupResourceId([managementGroupResourceId],resourceType, resourceName1, [resourceName2], ...)` + +Returns the unique identifier for a resource deployed at the management group level. + +In Bicep, use the [managementGroupResourceId](../bicep/bicep-functions-resource.md#managementgroupresourceid) function. + +### Parameters + +| Parameter | Required | Type | Description | +|:--- |:--- |:--- |:--- | +| managementGroupResourceId |No |string (in GUID format) |Default value is the current management group. Specify this value when you need to retrieve a resource in another management group. | +| resourceType |Yes |string |Type of resource including resource provider namespace. 
| resourceName1 |Yes |string |Name of resource. |
+| resourceName2 |No |string |Next resource name segment, if needed. |
+
+Continue adding resource names as parameters when the resource type includes more segments.
+
+### Return value
+
+The identifier is returned in the following format:
+
+```json
+/providers/Microsoft.Management/managementGroups/{managementGroupName}/providers/{resourceType}/{resourceName}
+```
+
+### Remarks
+
+You use this function to get the resource ID for resources that are [deployed to the management group](deploy-to-management-group.md) rather than a resource group. The returned ID differs from the value returned by the [resourceId](#resourceid) function by not including a subscription ID and a resource group value.
+
+### managementGroupResourceId example
+
+The following template creates a policy definition, and assigns the policy definition. It uses the `managementGroupResourceId` function to get the resource ID for the policy definition.
+
+```json
+{
+  "$schema": "https://schema.management.azure.com/schemas/2019-08-01/managementGroupDeploymentTemplate.json#",
+  "contentVersion": "1.0.0.0",
+  "parameters": {
+    "targetMG": {
+      "type": "string",
+      "metadata": {
+        "description": "Target Management Group"
+      }
+    },
+    "allowedLocations": {
+      "type": "array",
+      "defaultValue": [
+        "australiaeast",
+        "australiasoutheast",
+        "australiacentral"
+      ],
+      "metadata": {
+        "description": "An array of the allowed locations, all other locations will be denied by the created policy."
+ } + } + }, + "functions": [], + "variables": { + "mgScope": "[tenantResourceId('Microsoft.Management/managementGroups', parameters('targetMG'))]", + "policyDefinitionName": "LocationRestriction" + }, + "resources": [ + { + "type": "Microsoft.Authorization/policyDefinitions", + "apiVersion": "2020-03-01", + "name": "[variables('policyDefinitionName')]", + "properties": { + "policyType": "Custom", + "mode": "All", + "parameters": {}, + "policyRule": { + "if": { + "not": { + "field": "location", + "in": "[parameters('allowedLocations')]" + } + }, + "then": { + "effect": "deny" + } + } + } + }, + { + "type": "Microsoft.Authorization/policyAssignments", + "apiVersion": "2020-03-01", + "name": "location-lock", + "properties": { + "scope": "[variables('mgScope')]", + "policyDefinitionId": "[managementGroupResourceId('Microsoft.Authorization/policyDefinitions', variables('policyDefinitionName'))]" + }, + "dependsOn": [ + "[format('Microsoft.Authorization/policyDefinitions/{0}', variables('policyDefinitionName'))]" + ] + } + ] +} +``` + ## tenantResourceId `tenantResourceId(resourceType, resourceName1, [resourceName2], ...)` diff --git a/articles/azure-resource-manager/templates/toc.yml b/articles/azure-resource-manager/templates/toc.yml index 6f28006009395..8dc46a1bcfd9a 100644 --- a/articles/azure-resource-manager/templates/toc.yml +++ b/articles/azure-resource-manager/templates/toc.yml @@ -140,7 +140,7 @@ - name: Containers items: - name: AKS - href: ../../aks/kubernetes-walkthrough-rm-template.md?toc=/azure/azure-resource-manager/templates/toc.json + href: ../../aks/learn/quick-kubernetes-deploy-rm-template.md?toc=/azure/azure-resource-manager/templates/toc.json - name: Container Instances href: ../../container-instances/container-instances-quickstart-template.md?toc=/azure/azure-resource-manager/templates/toc.json - name: Container Registry diff --git a/articles/azure-sql/accelerated-database-recovery.md b/articles/azure-sql/accelerated-database-recovery.md deleted 
file mode 100644 index dc7aafe70beda..0000000000000 --- a/articles/azure-sql/accelerated-database-recovery.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -title: Accelerated database recovery -titleSuffix: Azure SQL -description: Accelerated database recovery provides fast and consistent database recovery, instantaneous transaction rollback, and aggressive log truncation for databases in the Azure SQL portfolio. -ms.service: sql-database -ms.subservice: backup-restore -ms.custom: sqldbrb=4 -ms.devlang: -ms.topic: conceptual -author: kfarlee -ms.author: kfarlee -ms.reviewer: mathoma, kendralittle, nvraparl, wiassaf -ms.date: 02/18/2022 ---- -# Accelerated Database Recovery in Azure SQL -[!INCLUDE[appliesto-sqldb-sqlmi](includes/appliesto-sqldb-sqlmi.md)] - -**Accelerated Database Recovery (ADR)** is a SQL Server database engine feature that greatly improves database availability, especially in the presence of long running transactions, by redesigning the SQL Server database engine recovery process. - -ADR is currently available for Azure SQL Database, Azure SQL Managed Instance, databases in Azure Synapse Analytics, and SQL Server on Azure VMs starting with SQL Server 2019. For information on ADR in SQL Server, see [Manage accelerated database recovery](/sql/relational-databases/accelerated-database-recovery-management). - -> [!NOTE] -> ADR is enabled by default in Azure SQL Database and Azure SQL Managed Instance. Disabling ADR in Azure SQL Database and Azure SQL Managed Instance is not supported. - -## Overview - -The primary benefits of ADR are: - -- **Fast and consistent database recovery** - - With ADR, long running transactions do not impact the overall recovery time, enabling fast and consistent database recovery irrespective of the number of active transactions in the system or their sizes. 
- -- **Instantaneous transaction rollback** - - With ADR, transaction rollback is instantaneous, irrespective of the time that the transaction has been active or the number of updates that has performed. - -- **Aggressive log truncation** - - With ADR, the transaction log is aggressively truncated, even in the presence of active long-running transactions, which prevents it from growing out of control. - -## Standard database recovery process - -Database recovery follows the [ARIES](https://people.eecs.berkeley.edu/~brewer/cs262/Aries.pdf) recovery model and consists of three phases, which are illustrated in the following diagram and explained in more detail following the diagram. - -![current recovery process](./media/accelerated-database-recovery/current-recovery-process.png) - -- **Analysis phase** - - Forward scan of the transaction log from the beginning of the last successful checkpoint (or the oldest dirty page LSN) until the end, to determine the state of each transaction at the time the database stopped. - -- **Redo phase** - - Forward scan of the transaction log from the oldest uncommitted transaction until the end, to bring the database to the state it was at the time of the crash by redoing all committed operations. - -- **Undo phase** - - For each transaction that was active as of the time of the crash, traverses the log backwards, undoing the operations that this transaction performed. - -Based on this design, the time it takes the SQL Server database engine to recover from an unexpected restart is (roughly) proportional to the size of the longest active transaction in the system at the time of the crash. Recovery requires a rollback of all incomplete transactions. The length of time required is proportional to the work that the transaction has performed and the time it has been active. 
Therefore, the recovery process can take a long time in the presence of long-running transactions (such as large bulk insert operations or index build operations against a large table). - -Also, cancelling/rolling back a large transaction based on this design can also take a long time as it is using the same Undo recovery phase as described above. - -In addition, the SQL Server database engine cannot truncate the transaction log when there are long-running transactions because their corresponding log records are needed for the recovery and rollback processes. As a result of this design of the SQL Server database engine, some customers used to face the problem that the size of the transaction log grows very large and consumes huge amounts of drive space. - -## The Accelerated Database Recovery process - -ADR addresses the above issues by completely redesigning the SQL Server database engine recovery process to: - -- Make it constant time/instant by avoiding having to scan the log from/to the beginning of the oldest active transaction. With ADR, the transaction log is only processed from the last successful checkpoint (or oldest dirty page Log Sequence Number (LSN)). As a result, recovery time is not impacted by long running transactions. -- Minimize the required transaction log space since there is no longer a need to process the log for the whole transaction. As a result, the transaction log can be truncated aggressively as checkpoints and backups occur. - -At a high level, ADR achieves fast database recovery by versioning all physical database modifications and only undoing logical operations, which are limited and can be undone almost instantly. Any transaction that was active as of the time of a crash are marked as aborted and, therefore, any versions generated by these transactions can be ignored by concurrent user queries. - -The ADR recovery process has the same three phases as the current recovery process. 
How these phases operate with ADR is illustrated in the following diagram and explained in more detail following the diagram. - -![ADR recovery process](./media/accelerated-database-recovery/adr-recovery-process.png) - -- **Analysis phase** - - The process remains the same as before with the addition of reconstructing SLOG and copying log records for non-versioned operations. - -- **Redo** phase - - Broken into two phases (P) - - Phase 1 - - Redo from SLOG (oldest uncommitted transaction up to last checkpoint). Redo is a fast operation as it only needs to process a few records from the SLOG. - - - Phase 2 - - Redo from Transaction Log starts from last checkpoint (instead of oldest uncommitted transaction) - -- **Undo phase** - - The Undo phase with ADR completes almost instantaneously by using SLOG to undo non-versioned operations and Persisted Version Store (PVS) with Logical Revert to perform row level version-based Undo. - -## ADR recovery components - -The four key components of ADR are: - -- **Persisted version store (PVS)** - - The persisted version store is a new SQL Server database engine mechanism for persisting the row versions generated in the database itself instead of the traditional `tempdb` version store. PVS enables resource isolation as well as improves availability of readable secondaries. - -- **Logical revert** - - Logical revert is the asynchronous process responsible for performing row-level version-based Undo - providing instant transaction rollback and undo for all versioned operations. Logical revert is accomplished by: - - - Keeping track of all aborted transactions and marking them invisible to other transactions. - - Performing rollback by using PVS for all user transactions, rather than physically scanning the transaction log and undoing changes one at a time. - - Releasing all locks immediately after transaction abort. 
Since abort involves simply marking changes in memory, the process is very efficient and therefore locks do not have to be held for a long time. - -- **SLOG** - - SLOG is a secondary in-memory log stream that stores log records for non-versioned operations (such as metadata cache invalidation, lock acquisitions, and so on). The SLOG is: - - - Low volume and in-memory - - Persisted on disk by being serialized during the checkpoint process - - Periodically truncated as transactions commit - - Accelerates redo and undo by processing only the non-versioned operations - - Enables aggressive transaction log truncation by preserving only the required log records - -- **Cleaner** - - The cleaner is the asynchronous process that wakes up periodically and cleans page versions that are not needed. - -## Accelerated Database Recovery (ADR) patterns - -The following types of workloads benefit most from ADR: - -- ADR is recommended for workloads with long running transactions. -- ADR is recommended for workloads that have seen cases where active transactions are causing the transaction log to grow significantly. -- ADR is recommended for workloads that have experienced long periods of database unavailability due to long running recovery (such as unexpected service restart or manual transaction rollback). - -## Best practices for Accelerated Database Recovery - -- Avoid long-running transactions in the database. Though one objective of ADR is to speed up database recovery due to redo long active transactions, long-running transactions can delay version cleanup and increase the size of the PVS. - -- Avoid large transactions with data definition changes or DDL operations. ADR uses a SLOG (system log stream) mechanism to track DDL operations used in recovery. The SLOG is only used while the transaction active. SLOG is checkpointed, so avoiding large transactions that use SLOG can help overall performance. 
These scenarios can cause the SLOG to take up more space: - - - Many DDLs are executed in one transaction. For example, in one transaction, rapidly creating and dropping temp tables. - - - A table has very large number of partitions/indexes that are modified. For example, a DROP TABLE operation on such table would require a large reservation of SLOG memory, which would delay truncation of the transaction log and delay undo/redo operations. The workaround can be drop the indexes individually and gradually, then drop the table. For more information on the SLOG, see [ADR recovery components](/sql/relational-databases/accelerated-database-recovery-concepts). - -- Prevent or reduce unnecessary aborted situations. A high abort rate will put pressure on the PVS cleaner and lower ADR performance. The aborts may come from a high rate of deadlocks, duplicate keys, or other constraint violations. - - - The `sys.dm_tran_aborted_transactions` DMV shows all aborted transactions on the SQL Server instance. The `nested_abort` column indicates that the transaction committed but there are portions that aborted (savepoints or nested transactions) which can block the PVS cleanup process. For more information, see [sys.dm_tran_aborted_transactions (Transact-SQL)](/sql/relational-databases/system-dynamic-management-views/sys-dm-tran-aborted-transactions). - - - To activate the PVS cleanup process manually between workloads or during maintenance windows, use `sys.sp_persistent_version_cleanup`. For more information, see [sys.sp_persistent_version_cleanup](/sql/relational-databases/system-stored-procedures/sys-sp-persistent-version-cleanup-transact-sql). - -- If you observe issues either with storage usage, high abort transaction and other factors, see [Troubleshooting Accelerated Database Recovery (ADR) on SQL Server](/sql/relational-databases/accelerated-database-recovery-troubleshoot). 
- -## Next steps - -- [Accelerated database recovery](/sql/relational-databases/accelerated-database-recovery-concepts) -- [Troubleshooting Accelerated Database Recovery (ADR) on SQL Server](/sql/relational-databases/accelerated-database-recovery-troubleshoot). diff --git a/articles/azure-sql/azure-hybrid-benefit.md b/articles/azure-sql/azure-hybrid-benefit.md deleted file mode 100644 index 45a277da64203..0000000000000 --- a/articles/azure-sql/azure-hybrid-benefit.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: Azure Hybrid Benefit -titleSuffix: Azure SQL Database & SQL Managed Instance -description: Use existing SQL Server licenses for Azure SQL Database and SQL Managed Instance discounts. -services: sql-database -ms.service: sql-db-mi -ms.subservice: service-overview -ms.custom: sqldbrb=4 -ms.topic: conceptual -author: LitKnd -ms.author: kendralittle -ms.reviewer: sashan, moslake, mathoma -ms.date: 11/09/2021 ---- -# Azure Hybrid Benefit - Azure SQL Database & SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](includes/appliesto-sqldb-sqlmi.md)] - -[Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/) allows you to exchange your existing licenses for discounted rates on Azure SQL Database and Azure SQL Managed Instance. You can save up to 30 percent or more on SQL Database and SQL Managed Instance by using your Software Assurance-enabled SQL Server licenses on Azure. The [Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/) page has a calculator to help determine savings. - -Changing to Azure Hybrid Benefit does not require any downtime. - -## Overview - -![vcore pricing structure](./media/azure-hybrid-benefit/pricing.png) - -With Azure Hybrid Benefit, you pay only for the underlying Azure infrastructure by using your existing SQL Server license for the SQL Server database engine itself (Base Compute pricing). 
If you do not use Azure Hybrid Benefit, you pay for both the underlying infrastructure and the SQL Server license (License-Included pricing). - -For Azure SQL Database, Azure Hybrid Benefit is only available when using the provisioned compute tier of the [vCore-based purchasing model](database/service-tiers-vcore.md). Azure Hybrid Benefit doesn't apply to [DTU-based purchasing models](database/service-tiers-dtu.md) or the [serverless compute tier](database/serverless-tier-overview.md). - -## Enable Azure Hybrid Benefit - -### Azure SQL Database - -You can choose or change your licensing model for Azure SQL Database using the Azure portal or the API of your choice. - -You can only apply the Azure Hybrid licensing model when you choose a vCore-based purchasing model and the provisioned compute tier for your Azure SQL Database. Azure Hybrid Benefit isn't available for service tiers under the DTU-based purchasing model or for the serverless compute tier. - -#### [Portal](#tab/azure-portal) - -To set or update the license type using the Azure portal: - -- For new databases, during creation, select **Configure database** on the **Basics** tab and select the option to **Save money**. -- For existing databases, select **Compute + storage** in the **Settings** menu and select the option to **Save money**. - -If you don't see the **Save money** option in the Azure portal, verify that you selected a service tier using the vCore-based purchasing model and the provisioned compute tier. 
-#### [PowerShell](#tab/azure-powershell) - -To set or update the license type using PowerShell: - -- [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase) with the -LicenseType parameter -- [Set-AzSqlDatabase](/powershell/module/az.sql/set-azsqldatabase) with the -LicenseType parameter - -#### [Azure CLI](#tab/azure-cli) - -To set or update the license type using the Azure CLI: - -- [az sql db create](/cli/azure/sql/db#az-sql-db-create) with the --license-type parameter - -#### [REST API](#tab/rest) - -To set or update the license type using the REST API: - -- [Create or update](/rest/api/sql/databases/createorupdate) with the properties.licenseType parameter -- [Update](/rest/api/sql/databases/update) with the properties.licenseType parameter - ---- - -### Azure SQL Managed Instance - -You can choose or change your licensing model for Azure SQL Managed Instance using the Azure portal or the API of your choice. -#### [Portal](#tab/azure-portal) - -To set or update the license type using the Azure portal: - -- For new managed instances, during creation, select **Configure Managed Instance** on the **Basics** tab and select the option for **Azure Hybrid Benefit**. -- For existing managed instances, select **Compute + storage** in the **Settings** menu and select the option for **Azure Hybrid Benefit**. 
- -#### [PowerShell](#tab/azure-powershell) - -To set or update the license type using PowerShell: - -- [New-AzSqlInstance](/powershell/module/az.sql/new-azsqlinstance) with the -LicenseType parameter -- [Set-AzSqlInstance](/powershell/module/az.sql/set-azsqlinstance) with the -LicenseType parameter - -#### [Azure CLI](#tab/azure-cli) - -To set or update the license type using the Azure CLI: - -- [az sql mi create](/cli/azure/sql/mi#az-sql-mi-create) with the --license-type parameter -- [az sql mi update](/cli/azure/sql/mi#az-sql-mi-update) with the --license-type parameter - -#### [REST API](#tab/rest) - -To set or update the license type using the REST API: - -- [Create or update](/rest/api/sql/managedinstances/createorupdate) with the properties.licenseType parameter -- [Update](/rest/api/sql/managedinstances/update) with the properties.licenseType parameter - ---- -## Frequently asked questions - -### Are there dual-use rights with Azure Hybrid Benefit for SQL Server? - -You have 180 days of dual use rights of the license to ensure migrations are running seamlessly. After that 180-day period, you can only use the SQL Server license on Azure. You no longer have dual use rights on-premises and on Azure. - -### How does Azure Hybrid Benefit for SQL Server differ from license mobility? - -We offer license mobility benefits to SQL Server customers with Software Assurance. License mobility allows reassignment of their licenses to a partner's shared servers. You can use this benefit on Azure IaaS and AWS EC2. - -Azure Hybrid Benefit for SQL Server differs from license mobility in two key areas: - -- It provides economic benefits for moving highly virtualized workloads to Azure. SQL Server Enterprise Edition customers can get four cores in Azure in the General Purpose SKU for every core they own on-premises for highly virtualized applications. License mobility doesn't allow any special cost benefits for moving virtualized workloads to the cloud. 
-- It provides for a PaaS destination on Azure (SQL Managed Instance) that's highly compatible with SQL Server. - -### What are the specific rights of the Azure Hybrid Benefit for SQL Server? - -SQL Database and SQL Managed Instance customers have the following rights associated with Azure Hybrid Benefit for SQL Server: - -|License footprint|What does Azure Hybrid Benefit for SQL Server get you?| -|---|---| -|SQL Server Enterprise Edition core customers with SA|
  • Can pay base rate on Hyperscale, General Purpose, or Business Critical SKU

  • One core on-premises = Four vCores in Hyperscale SKU

  • One core on-premises = Four vCores in General Purpose SKU

  • One core on-premises = One vCore in Business Critical SKU
  • | -|SQL Server Standard Edition core customers with SA|
  • Can pay base rate on Hyperscale, General Purpose, or Business Critical SKU

  • One core on-premises = One vCore in Hyperscale SKU

  • One core on-premises = One vCore in General Purpose SKU

  • Four cores on-premises = One vCore in Business Critical SKU
  • | - -## Next steps - -- For help with choosing an Azure SQL deployment option, see [Service comparison](azure-sql-iaas-vs-paas-what-is-overview.md). -- For a comparison of SQL Database and SQL Managed Instance features, see [Features of SQL Database and SQL Managed Instance](database/features-comparison.md). \ No newline at end of file diff --git a/articles/azure-sql/azure-sql-iaas-vs-paas-what-is-overview.md b/articles/azure-sql/azure-sql-iaas-vs-paas-what-is-overview.md deleted file mode 100644 index 308f59788f55e..0000000000000 --- a/articles/azure-sql/azure-sql-iaas-vs-paas-what-is-overview.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -title: "What is Azure SQL?" -description: "Learn about the different options within the Azure SQL family of services: Azure SQL Database, Azure SQL Managed Instance, and SQL Server on Azure VM." -services: sql-database -ms.service: sql-database -ms.subservice: service-overview -ms.custom: sqldbrb=4 -ms.devlang: -ms.topic: overview -keywords: SQL Server cloud, SQL Server in the cloud, PaaS database, cloud SQL Server, DBaaS, IaaS -author: MashaMSFT -ms.author: mathoma -ms.reviewer: kendralittle -ms.date: 03/18/2022 ---- -# What is Azure SQL? -[!INCLUDE[appliesto-asf](includes/appliesto-asf.md)] - -Azure SQL is a family of managed, secure, and intelligent products that use the SQL Server database engine in the Azure cloud. - -- **Azure SQL Database**: Support modern cloud applications on an intelligent, managed database service, that includes serverless compute. -- **Azure SQL Managed Instance**: Modernize your existing SQL Server applications at scale with an intelligent fully managed instance as a service, with almost 100% feature parity with the SQL Server database engine. Best for most migrations to the cloud. -- **SQL Server on Azure VMs**: Lift-and-shift your SQL Server workloads with ease and maintain 100% SQL Server compatibility and operating system-level access. 
- -Azure SQL is built upon the familiar SQL Server engine, so you can migrate applications with ease and continue to use the tools, languages, and resources you're familiar with. Your skills and experience transfer to the cloud, so you can do even more with what you already have. - -Learn how each product fits into Microsoft's Azure SQL data platform to match the right option for your business requirements. Whether you prioritize cost savings or minimal administration, this article can help you decide which approach delivers against the business requirements you care about most. - -If you're new to Azure SQL, check out the *What is Azure SQL* video from our in-depth [Azure SQL video series](/shows/Azure-SQL-for-Beginners/?WT.mc_id=azuresql4beg_azuresql-ch9-niner): -> [!VIDEO https://docs.microsoft.com/shows/Azure-SQL-for-Beginners/What-is-Azure-SQL-3-of-61/player] - - - -## Overview - -In today's data-driven world, driving digital transformation increasingly depends on our ability to manage massive amounts of data and harness its potential. But today's data estates are increasingly complex, with data hosted on-premises, in the cloud, or at the edge of the network. Developers who are building intelligent and immersive applications can find themselves constrained by limitations that can ultimately impact their experience. Limitations arising from incompatible platforms, inadequate data security, insufficient resources and price-performance barriers create complexity that can inhibit app modernization and development. - -One of the first things to understand in any discussion of Azure versus on-premises SQL Server databases is that you can use it all. Microsoft's data platform leverages SQL Server technology and makes it available across physical on-premises machines, private cloud environments, third-party hosted private cloud environments, and the public cloud. 
- - -### Fully managed and always up to date - -Spend more time innovating and less time patching, updating, and backing up your databases. Azure is the only cloud with evergreen SQL that automatically applies the latest updates and patches so that your databases are always up to date—eliminating end-of-support hassle. Even complex tasks like performance tuning, high availability, disaster recovery, and backups are automated, freeing you to focus on applications. - -### Protect your data with built-in intelligent security - -Azure constantly monitors your data for threats. With Azure SQL, you can: - -- Remediate potential threats in real time with intelligent [advanced threat detection](../security/fundamentals/threat-detection.md#threat-protection-features-other-azure-services) and proactive vulnerability assessment alerts. -- Get industry-leading, multi-layered protection with [built-in security controls](https://azure.microsoft.com/overview/security/) including T-SQL, authentication, networking, and key management. -- Take advantage of the most comprehensive [compliance](https://azure.microsoft.com/overview/trusted-cloud/compliance/) coverage of any cloud database service. - - -### Business motivations - -There are several factors that can influence your decision to choose between the different data offerings: - -- [Cost](#cost): Both platform as a service (PaaS) and infrastructure as a service (IaaS) options include base price that covers underlying infrastructure and licensing. However, with the IaaS option you need to invest additional time and resources to manage your database, while in PaaS you get these administration features included in the price. IaaS enables you to shut down resources while you are not using them to decrease the cost, while PaaS is always running unless you drop and re-create your resources when they are needed. 
-- [Administration](#administration): PaaS options reduce the amount of time that you need to invest to administer the database. However, it also limits the range of custom administration tasks and scripts that you can perform or run. For example, the CLR is not supported with SQL Database, but is supported for an instance of SQL Managed Instance. Also, no deployment options in PaaS support the use of trace flags. -- [Service-level agreement](#service-level-agreement-sla): Both IaaS and PaaS provide high, industry standard SLA. PaaS option guarantees 99.99% SLA, while IaaS guarantees 99.95% SLA for infrastructure, meaning that you need to implement additional mechanisms to ensure availability of your databases. You can attain 99.99% SLA by creating an additional SQL virtual machine, and implementing the SQL Server Always On availability group high availability solution. -- [Time to move to Azure](#market): SQL Server on Azure VM is the exact match of your environment, so migration from on-premises to the Azure VM is no different than moving the databases from one on-premises server to another. SQL Managed Instance also enables easy migration; however, there might be some changes that you need to apply before your migration. - - -## Service comparison - - ![Cloud SQL Server options: SQL Server on IaaS, or SaaS SQL Database in the cloud.](./media/azure-sql-iaas-vs-paas-what-is-overview/SQLIAAS_SQL_Server_Cloud_Continuum.png) - -As seen in the diagram, each service offering can be characterized by the level of administration you have over the infrastructure, and by the degree of cost efficiency. - -In Azure, you can have your SQL Server workloads running as a hosted service ([PaaS](https://azure.microsoft.com/overview/what-is-paas/)), or a hosted infrastructure ([IaaS](https://azure.microsoft.com/overview/what-is-iaas/)). Within PaaS, you have multiple product options, and service tiers within each option. 
The key question that you need to ask when deciding between PaaS or IaaS is do you want to manage your database, apply patches, and take backups, or do you want to delegate these operations to Azure? - -### Azure SQL Database - -[Azure SQL Database](database/sql-database-paas-overview.md) is a relational database-as-a-service (DBaaS) hosted in Azure that falls into the industry category of *Platform-as-a-Service (PaaS)*. -- Best for modern cloud applications that want to use the latest stable SQL Server features and have time constraints in development and marketing. -- A fully managed SQL Server database engine, based on the latest stable Enterprise Edition of SQL Server. SQL Database has two deployment options built on standardized hardware and software that is owned, hosted, and maintained by Microsoft. - -With SQL Server, you can use built-in features and functionality that requires extensive configuration (either on-premises or in an Azure virtual machine). When using SQL Database, you pay-as-you-go with options to scale up or out for greater power with no interruption. SQL Database has some additional features that are not available in SQL Server, such as built-in high availability, intelligence, and management. - - -Azure SQL Database offers the following deployment options: - - As a [*single database*](database/single-database-overview.md) with its own set of resources managed via a [logical SQL server](database/logical-servers.md). A single database is similar to a [contained database](/sql/relational-databases/databases/contained-databases) in SQL Server. This option is optimized for modern application development of new cloud-born applications. [Hyperscale](database/service-tier-hyperscale.md) and [serverless](database/serverless-tier-overview.md) options are available. - - An [*elastic pool*](database/elastic-pool-overview.md), which is a collection of databases with a shared set of resources managed via a [logical server](database/logical-servers.md). 
Single databases can be moved into and out of an elastic pool. This option is optimized for modern application development of new cloud-born applications using the multi-tenant SaaS application pattern. Elastic pools provide a cost-effective solution for managing the performance of multiple databases that have variable usage patterns. - -### Azure SQL Managed Instance - -[Azure SQL Managed Instance](managed-instance/sql-managed-instance-paas-overview.md) falls into the industry category of *Platform-as-a-Service (PaaS)*, and is best for most migrations to the cloud. SQL Managed Instance is a collection of system and user databases with a shared set of resources that is lift-and-shift ready. -- Best for new applications or existing on-premises applications that want to use the latest stable SQL Server features and that are migrated to the cloud with minimal changes. An instance of SQL Managed Instance is similar to an instance of the [Microsoft SQL Server database engine](/sql/database-engine/sql-server-database-engine-overview) offering shared resources for databases and additional instance-scoped features. -- SQL Managed Instance supports database migration from on-premises with minimal to no database change. This option provides all of the PaaS benefits of Azure SQL Database but adds capabilities that were previously only available in SQL Server VMs. This includes a native virtual network and near 100% compatibility with on-premises SQL Server. Instances of SQL Managed Instance provide full SQL Server access and feature compatibility for migrating SQL Servers to Azure. - -### SQL Server on Azure VM - -[SQL Server on Azure VM](virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md) falls into the industry category *Infrastructure-as-a-Service (IaaS)* and allows you to run SQL Server inside a fully managed virtual machine (VM) in Azure. 
-- SQL Server installed and hosted in the cloud runs on Windows Server or Linux virtual machines running on Azure, also known as an infrastructure as a service (IaaS). SQL virtual machines are a good option for migrating on-premises SQL Server databases and applications without any database change. All recent versions and editions of SQL Server are available for installation in an IaaS virtual machine. -- Best for migrations and applications requiring OS-level access. SQL virtual machines in Azure are lift-and-shift ready for existing applications that require fast migration to the cloud with minimal changes or no changes. SQL virtual machines offer full administrative control over the SQL Server instance and underlying OS for migration to Azure. -- The most significant difference from SQL Database and SQL Managed Instance is that SQL Server on Azure Virtual Machines allows full control over the database engine. You can choose when to start maintenance/patching, change the recovery model to simple or bulk-logged, pause or start the service when needed, and you can fully customize the SQL Server database engine. With this additional control comes the added responsibility to manage the virtual machine. -- Rapid development and test scenarios when you do not want to buy on-premises non-production SQL Server hardware. SQL virtual machines also run on standardized hardware that is owned, hosted, and maintained by Microsoft. When using SQL virtual machines, you can either pay-as-you-go for a SQL Server license already included in a SQL Server image or easily use an existing license. You can also stop or resume the VM as needed. -- Optimized for migrating existing applications to Azure or extending existing on-premises applications to the cloud in hybrid deployments. In addition, you can use SQL Server in a virtual machine to develop and test traditional SQL Server applications. 
With SQL virtual machines, you have the full administrative rights over a dedicated SQL Server instance and a cloud-based VM. It is a perfect choice when an organization already has IT resources available to maintain the virtual machines. These capabilities allow you to build a highly customized system to address your application’s specific performance and availability requirements. - - -### Comparison table - -Additional differences are listed in the following table, but *both SQL Database and SQL Managed Instance are optimized to reduce overall management costs to a minimum for provisioning and managing many databases.* Ongoing administration costs are reduced since you do not have to manage any virtual machines, operating system, or database software. You do not have to manage upgrades, high availability, or [backups](database/automated-backups-overview.md). - -In general, SQL Database and SQL Managed Instance can dramatically increase the number of databases managed by a single IT or development resource. [Elastic pools](database/elastic-pool-overview.md) also support SaaS multi-tenant application architectures with features including tenant isolation and the ability to scale to reduce costs by sharing resources across databases. [SQL Managed Instance](managed-instance/sql-managed-instance-paas-overview.md) provides support for instance-scoped features enabling easy migration of existing applications, as well as sharing resources among databases. Whereas, [SQL Server on Azure VMs](virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md) provide DBAs with an experience most similar to the on-premises environment they're familiar with. - - -| Azure SQL Database | Azure SQL Managed Instance | SQL Server on Azure VM | -| :--- | :--- | :--- | -|Supports most on-premises database-level capabilities. The most commonly used SQL Server features are available.
    99.995% availability guaranteed.
    Built-in backups, patching, recovery.
    Latest stable Database Engine version.
    Ability to assign necessary resources (CPU/storage) to individual databases.
    Built-in advanced intelligence and security.
    Online change of resources (CPU/storage).| Supports almost all on-premises instance-level and database-level capabilities. High compatibility with SQL Server.
    99.99% availability guaranteed.
    Built-in backups, patching, recovery.
    Latest stable Database Engine version.
    Easy migration from SQL Server.
    Private IP address within Azure Virtual Network.
    Built-in advanced intelligence and security.
    Online change of resources (CPU/storage).| You have full control over the SQL Server engine. Supports all on-premises capabilities.
    Up to 99.99% availability.
    Full parity with the matching version of on-premises SQL Server.
    Fixed, well-known Database Engine version.
    Easy migration from SQL Server.
    Private IP address within Azure Virtual Network.
    You have the ability to deploy application or services on the host where SQL Server is placed.| -|Migration from SQL Server might be challenging.
    Some SQL Server features are not available.
    Configurable [maintenance windows](database/maintenance-window.md).
    Compatibility with the SQL Server version can be achieved only using database compatibility levels.
    Private IP address support with [Azure Private Link](database/private-endpoint-overview.md).|There is still some minimal number of SQL Server features that are not available.
    Configurable [maintenance windows](database/maintenance-window.md).
    Compatibility with the SQL Server version can be achieved only using database compatibility levels.|You may use [manual or automated backups](virtual-machines/windows/backup-restore.md).
    You need to implement your own high-availability solution.
    There is a downtime while changing the resources(CPU/storage)| -| Databases of up to 100 TB. | Up to 16 TB. | SQL Server instances with up to 256 TB of storage. The instance can support as many databases as needed. | -| On-premises application can access data in Azure SQL Database. | [Native virtual network implementation](managed-instance/vnet-existing-add-subnet.md) and connectivity to your on-premises environment using Azure Express Route or VPN Gateway. | With SQL virtual machines, you can have applications that run partly in the cloud and partly on-premises. For example, you can extend your on-premises network and Active Directory Domain to the cloud via [Azure Virtual Network](../virtual-network/virtual-networks-overview.md). For more information on hybrid cloud solutions, see [Extending on-premises data solutions to the cloud](/azure/architecture/data-guide/scenarios/hybrid-on-premises-and-cloud). | - - -## Cost - -Whether you're a startup that is strapped for cash, or a team in an established company that operates under tight budget constraints, limited funding is often the primary driver when deciding how to host your databases. In this section, you learn about the billing and licensing basics in Azure associated with the Azure SQL family of services. You also learn about calculating the total application cost. - -### Billing and licensing basics - -Currently, both **SQL Database** and **SQL Managed Instance** are sold as a service and are available with several options and in several service tiers with different prices for resources, all of which are billed hourly at a fixed rate based on the service tier and compute size you choose. For the latest information on the current supported service tiers, compute sizes, and storage amounts, see [DTU-based purchasing model for SQL Database](database/service-tiers-dtu.md) and [vCore-based purchasing model for both SQL Database and SQL Managed Instance](database/service-tiers-vcore.md). 
- -- With SQL Database, you can choose a service tier that fits your needs from a wide range of prices starting from $5/month for the basic tier and you can create [elastic pools](database/elastic-pool-overview.md) to share resources among databases to reduce costs and accommodate usage spikes. -- With SQL Managed Instance, you can also bring your own license. For more information on bring-your-own licensing, see [License Mobility through Software Assurance on Azure](https://azure.microsoft.com/pricing/license-mobility/) or use the [Azure Hybrid Benefit calculator](https://azure.microsoft.com/pricing/hybrid-benefit/#sql-database) to see how to **save up to 40%**. - -In addition, you are billed for outgoing Internet traffic at regular [data transfer rates](https://azure.microsoft.com/pricing/details/data-transfers/). You can dynamically adjust service tiers and compute sizes to match your application’s varied throughput needs. - -With **SQL Database** and **SQL Managed Instance**, the database software is automatically configured, patched, and upgraded by Azure, which reduces your administration costs. In addition, its [built-in backup](database/automated-backups-overview.md) capabilities help you achieve significant cost savings, especially when you have a large number of databases. - -With **SQL on Azure VMs**, you can use any of the platform-provided SQL Server images (which includes a license) or bring your SQL Server license. All the supported SQL Server versions (2008R2, 2012, 2014, 2016, 2017, 2019) and editions (Developer, Express, Web, Standard, Enterprise) are available. In addition, Bring-Your-Own-License versions (BYOL) of the images are available. When using the Azure provided images, the operational cost depends on the VM size and the edition of SQL Server you choose. Regardless of VM size or SQL Server edition, you pay per-minute licensing cost of SQL Server and the Windows or Linux Server, along with the Azure Storage cost for the VM disks. 
The per-minute billing option allows you to use SQL Server for as long as you need without buying additional SQL Server licenses. If you bring your own SQL Server license to Azure, you are charged for server and storage costs only. For more information on bring-your-own licensing, see [License Mobility through Software Assurance on Azure](https://azure.microsoft.com/pricing/license-mobility/). In addition, you are billed for outgoing Internet traffic at regular [data transfer rates](https://azure.microsoft.com/pricing/details/data-transfers/). - -#### Calculating the total application cost - -When you start using a cloud platform, the cost of running your application includes the cost for new development and ongoing administration costs, plus the public cloud platform service costs. - -For more information on pricing, see the following resources: - -- [SQL Database & SQL Managed Instance pricing](https://azure.microsoft.com/pricing/details/sql-database/) -- [Virtual machine pricing](https://azure.microsoft.com/pricing/details/virtual-machines/) for [SQL](https://azure.microsoft.com/pricing/details/virtual-machines/#sql) and for [Windows](https://azure.microsoft.com/pricing/details/virtual-machines/#windows) -- [Azure Pricing Calculator](https://azure.microsoft.com/pricing/calculator/) - -## Administration - -For many businesses, the decision to transition to a cloud service is as much about offloading complexity of administration as it is cost. With IaaS and PaaS, Azure administers the underlying infrastructure and automatically replicates all data to provide disaster recovery, configures and upgrades the database software, manages load balancing, and does transparent failover if there is a server failure within a data center. - -- With **SQL Database** and **SQL Managed Instance**, you can continue to administer your database, but you no longer need to manage the database engine, the operating system, or the hardware. 
Examples of items you can continue to administer include databases and logins, index and query tuning, and auditing and security. Additionally, configuring high availability to another data center requires minimal configuration and administration. -- With **SQL on Azure VM**, you have full control over the operating system and SQL Server instance configuration. With a VM, it's up to you to decide when to update/upgrade the operating system and database software and when to install any additional software such as anti-virus. Some automated features are provided to dramatically simplify patching, backup, and high availability. In addition, you can control the size of the VM, the number of disks, and their storage configurations. Azure allows you to change the size of a VM as needed. For information, see [Virtual Machine and Cloud Service Sizes for Azure](../virtual-machines/sizes.md). - -## Service-level agreement (SLA) - -For many IT departments, meeting up-time obligations of a service-level agreement (SLA) is a top priority. In this section, we look at what SLA applies to each database hosting option. - -For both **Azure SQL Database** and **Azure SQL Managed Instance**, Microsoft provides an availability SLA of 99.99%. For the latest information, see [Service-level agreement](https://azure.microsoft.com/support/legal/sla/azure-sql-database). - -For **SQL on Azure VM**, Microsoft provides an availability SLA of 99.95% that covers just the virtual machine. This SLA does not cover the processes (such as SQL Server) running on the VM and requires that you host at least two VM instances in an availability set. For the latest information, see the [VM SLA](https://azure.microsoft.com/support/legal/sla/virtual-machines/). 
For database high availability (HA) within VMs, you should configure one of the supported high availability options in SQL Server, such as [Always On availability groups](/sql/database-engine/availability-groups/windows/always-on-availability-groups-sql-server). Using a supported high availability option doesn't provide an additional SLA, but allows you to achieve >99.99% database availability. - -## Time to move to Azure - -**Azure SQL Database** is the right solution for cloud-designed applications when developer productivity and fast time-to-market for new solutions are critical. With programmatic DBA-like functionality, it is perfect for cloud architects and developers as it lowers the need for managing the underlying operating system and database. - -**Azure SQL Managed Instance** greatly simplifies the migration of existing applications to Azure, enabling you to bring migrated database applications to market in Azure quickly. - -**SQL on Azure VM** is perfect if your existing or new applications require large databases or access to all features in SQL Server or Windows/Linux, and you want to avoid the time and expense of acquiring new on-premises hardware. It is also a good fit when you want to migrate existing on-premises applications and databases to Azure as-is - in cases where SQL Database or SQL Managed Instance is not a good fit. Since you do not need to change the presentation, application, and data layers, you save time and budget on re-architecting your existing solution. Instead, you can focus on migrating all your solutions to Azure and in doing some performance optimizations that may be required by the Azure platform. For more information, see [Performance Best Practices for SQL Server on Azure Virtual Machines](./virtual-machines/windows/performance-guidelines-best-practices-checklist.md). 
- -[!INCLUDE [sql-database-create-manage-portal](includes/sql-database-create-manage-portal.md)] - -## Next steps - -- See [Your first Azure SQL Database](database/single-database-create-quickstart.md) to get started with SQL Database. -- See [Your first Azure SQL Managed Instance](managed-instance/instance-create-quickstart.md) to get started with SQL Managed Instance. -- See [SQL Database pricing](https://azure.microsoft.com/pricing/details/sql-database/). -- See [Azure SQL Managed Instance pricing](https://azure.microsoft.com/pricing/details/azure-sql-managed-instance/single/). -- See [Provision a SQL Server virtual machine in Azure](virtual-machines/windows/create-sql-vm-portal.md) to get started with SQL Server on Azure VMs. -- [Identify the right SQL Database or SQL Managed Instance SKU for your on-premises database](/sql/dma/dma-sku-recommend-sql-db/). diff --git a/articles/azure-sql/capacity-errors-troubleshoot.md b/articles/azure-sql/capacity-errors-troubleshoot.md deleted file mode 100644 index 1381eac6a4b7f..0000000000000 --- a/articles/azure-sql/capacity-errors-troubleshoot.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Resolve capacity errors with Azure SQL resources -description: Learn how to resolve possible capacity errors when attempting to deploy or scale Azure SQL Database or Azure SQL Managed Instance resources. -services: sql-database -ms.service: sql-db-mi -ms.subservice: deployment-configuration -ms.topic: how-to -author: sachinpMSFT -ms.author: sachinp -ms.reviewer: mathoma, kendralittle -ms.date: 09/03/2021 -ms.custom: references_regions ---- - -# Resolve capacity errors with Azure SQL Database or Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](includes/appliesto-sqldb-sqlmi.md)] - -In this article, learn how to resolve capacity errors when deploying Azure SQL Database or Azure SQL Managed Instance resources. 
- -## Exceeded quota - -If you encounter any of the following errors when attempting to deploy your Azure SQL resource, please [request to increase your quota](database/quota-increase-request.md): - -- `Server quota limit has been reached for this location. Please select a different location with lower server count.` -- `Could not perform the operation because server would exceed the allowed Database Throughput Unit quota of xx.` -- During a scale operation, you may see the following error: - `Could not perform the operation because server would exceed the allowed Database Throughput Unit quota of xx. `. - -## Subscription access - -Your subscription may not have access to create a server in the selected region if your subscription has not been registered with the SQL resource provider (RP). - -If you see the following errors, please [register your subscription with the SQL RP](#register-with-sql-rp): -- `Your subscription does not have access to create a server in the selected region.` -- `Provisioning is restricted in this region. Please choose a different region. For exceptions to this rule please open a support request with issue type of 'Service and subscription limits' ` -- `Location 'region name' is not accepting creation of new Windows Azure SQL Database servers for the subscription 'subscription id' at this time` - - -## Enable region - -Your subscription may not have access to create a server in the selected region if that region has not been enabled. To resolve this, file a [support request to enable a specific region](database/quota-increase-request.md#region) for your subscription. - -If you see the following errors, file a support ticket to enable a specific region: -- `Your subscription does not have access to create a server in the selected region.` -- `Provisioning is restricted in this region. Please choose a different region. 
For exceptions to this rule please open a support request with issue type of 'Service and subscription limits' ` -- `Location 'region name' is not accepting creation of new Windows Azure SQL Database servers for the subscription 'subscription id' at this time` - - - -## Register with SQL RP - -To deploy Azure SQL resources, register your subscription with the SQL resource provider (RP). - -You can register your subscription using the Azure portal, [the Azure CLI](/cli/azure/install-azure-cli), or [Azure PowerShell](/powershell/azure/install-az-ps). - -# [Azure portal](#tab/portal) - -To register your subscription in the Azure portal, follow these steps: - -1. Open the Azure portal and go to **All Services**. -1. Go to **Subscriptions** and select the subscription of interest. -1. On the **Subscriptions** page, select **Resource providers** under **Settings**. -1. Enter **sql** in the filter to bring up the SQL-related extensions. -1. Select **Register**, **Re-register**, or **Unregister** for the **Microsoft.Sql** provider, depending on your desired action. 
- - ![Modify the provider](./media/capacity-errors-troubleshoot/register-with-sql-rp.png) - -# [Azure CLI](#tab/bash) - -To register your subscription using [the Azure CLI](/cli/azure/install-azure-cli), run this cmdlet: - -```azurecli-interactive -# Register the SQL resource provider to your subscription -az provider register --namespace Microsoft.Sql -``` - -# [Azure PowerShell](#tab/powershell) - -To register your subscription using [Azure PowerShell](/powershell/azure/install-az-ps), run this cmdlet: - -```powershell-interactive -# Register the SQL resource provider to your subscription -Register-AzResourceProvider -ProviderNamespace Microsoft.Sql - -``` - ---- - -## Additional provisioning issues - -If you're still experiencing provisioning issues, please open a **Region** access request under the support topic of SQL Database and specify the DTU or vCores you want to consume on Azure SQL Database or Azure SQL Managed Instance. - -## Azure Program regions - -Azure Program offerings (Azure Pass, Imagine, Azure for Students, MPN, BizSpark, BizSpark Plus, Microsoft for Startups / Sponsorship Offers, Visual Studio Subscriptions / MSDN) have access to a limited set of regions. - -If your subscription is part of an Azure Program offering, and you would like to request access to any of the following regions, please consider using an alternate region instead: - -_Australia Central, Australia Central 2, Australia SouthEast, Brazil SouthEast, Canada East, China East, China North, China North 2, France South, Germany North, Japan West, JIO India Central, JIO India West, Korea South, Norway West, South Africa West, South India, Switzerland West, UAE Central, UK West, US DoD Central, US DoD East, US Gov Arizona, US Gov Texas, West Central US, West India._ - -## Next steps - -After you submit your request, it will be reviewed. You will be contacted with an answer based on the information you provided in the form. 
- -For more information about other Azure limits, see [Azure subscription and service limits, quotas, and constraints](../azure-resource-manager/management/azure-subscription-service-limits.md). diff --git a/articles/azure-sql/database/active-directory-interactive-connect-azure-sql-db.md b/articles/azure-sql/database/active-directory-interactive-connect-azure-sql-db.md deleted file mode 100644 index 1e471aeab1d04..0000000000000 --- a/articles/azure-sql/database/active-directory-interactive-connect-azure-sql-db.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -title: ActiveDirectoryInteractive connects to SQL -description: "C# Code example, with explanations, for connecting to Azure SQL Database by using SqlAuthenticationMethod.ActiveDirectoryInteractive mode." -services: sql-database -ms.service: sql-database -ms.subservice: security -ms.custom: active directory, has-adal-ref, sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: GithubMirek -ms.author: MirekS -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 04/06/2022 ---- -# Connect to Azure SQL Database with Azure AD Multi-Factor Authentication -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article provides a C# program that connects to Azure SQL Database. The program uses interactive mode authentication, which supports [Azure AD Multi-Factor Authentication](../../active-directory/authentication/concept-mfa-howitworks.md). - -For more information about Multi-Factor Authentication support for SQL tools, see [Using multi-factor Azure Active Directory authentication](./authentication-mfa-ssms-overview.md). - -## Multi-Factor Authentication for Azure SQL Database - -`Active Directory Interactive` authentication supports multi-factor authentication using [Microsoft.Data.SqlClient](/sql/connect/ado-net/introduction-microsoft-data-sqlclient-namespace) to connect to Azure SQL data sources. 
In a client C# program, the enum value directs the system to use the Azure Active Directory (Azure AD) interactive mode that supports Multi-Factor Authentication to connect to Azure SQL Database. The user who runs the program sees the following dialog boxes: - -* A dialog box that displays an Azure AD user name and asks for the user's password. - - If the user's domain is federated with Azure AD, the dialog box doesn't appear, because no password is needed. - - If the Azure AD policy imposes Multi-Factor Authentication on the user, a dialog box to sign in to your account will display. - -* The first time a user goes through Multi-Factor Authentication, the system displays a dialog box that asks for a mobile phone number to send text messages to. Each message provides the *verification code* that the user must enter in the next dialog box. - -* A dialog box that asks for a Multi-Factor Authentication verification code, which the system has sent to a mobile phone. - -For information about how to configure Azure AD to require Multi-Factor Authentication, see [Getting started with Azure AD Multi-Factor Authentication in the cloud](../../active-directory/authentication/howto-mfa-getstarted.md). - -For screenshots of these dialog boxes, see [Configure multi-factor authentication for SQL Server Management Studio and Azure AD](authentication-mfa-ssms-configure.md). - -> [!TIP] -> You can search .NET Framework APIs with the [.NET API Browser tool page](/dotnet/api/). -> -> You can also search directly with the [optional ?term=<search value> parameter](/dotnet/api/?term=SqlAuthenticationMethod). - -## Prerequisite - -Before you begin, you should have a [logical SQL server](logical-servers.md) created and available. - -### Set an Azure AD admin for your server - -For the C# example to run, a [logical SQL server](logical-servers.md) admin needs to assign an Azure AD admin for your server. - -On the **SQL server** page, select **Active Directory admin** > **Set admin**. 
- -For more information about Azure AD admins and users for Azure SQL Database, see the screenshots in [Configure and manage Azure Active Directory authentication with SQL Database](authentication-aad-configure.md#provision-azure-ad-admin-sql-database). - -## Microsoft.Data.SqlClient - -The C# example relies on the [Microsoft.Data.SqlClient](/sql/connect/ado-net/introduction-microsoft-data-sqlclient-namespace) namespace. For more information, see [Using Azure Active Directory authentication with SqlClient](/sql/connect/ado-net/sql/azure-active-directory-authentication). - -> [!NOTE] -> [System.Data.SqlClient](/dotnet/api/system.data.sqlclient) uses the Azure Active Directory Authentication Library (ADAL), which will be deprecated. If you're using the [System.Data.SqlClient](/dotnet/api/system.data.sqlclient) namespace for Azure Active Directory authentication, migrate applications to [Microsoft.Data.SqlClient](/sql/connect/ado-net/introduction-microsoft-data-sqlclient-namespace) and the [Microsoft Authentication Library (MSAL)](../../active-directory/develop/msal-migration.md). For more information about using Azure AD authentication with SqlClient, see [Using Azure Active Directory authentication with SqlClient](/sql/connect/ado-net/sql/azure-active-directory-authentication). - -## Verify with SQL Server Management Studio - -Before you run the C# example, it's a good idea to check that your setup and configurations are correct in [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms). Any C# program failure can then be narrowed to source code. - -### Verify server-level firewall IP addresses - -Run SSMS from the same computer, in the same building, where you plan to run the C# example. For this test, any **Authentication** mode is OK. If there's any indication that the server isn't accepting your IP address, see [server-level and database-level firewall rules](firewall-configure.md) for help. 
- -### Verify Azure Active Directory Multi-Factor Authentication - -Run SSMS again, this time with **Authentication** set to **Azure Active Directory - Universal with MFA**. This option requires SSMS version 17.5 or later. - -For more information, see [Configure Multi-Factor Authentication for SSMS and Azure AD](authentication-mfa-ssms-configure.md). - -> [!NOTE] -> If you are a guest user in the database, you also need to provide the Azure AD domain name for the database: Select **Options** > **AD domain name or tenant ID**. If you are running SSMS 18.x or later, the AD domain name or tenant ID is no longer needed for guest users because 18.x or later automatically recognizes it. -> ->To find the domain name in the Azure portal, select **Azure Active Directory** > **Custom domain names**. In the C# example program, providing a domain name is not necessary. - -## C# code example - -> [!NOTE] -> If you are using .NET Core, you will want to use the [Microsoft.Data.SqlClient](/dotnet/api/microsoft.data.sqlclient) namespace. For more information, see the following [blog](https://devblogs.microsoft.com/dotnet/introducing-the-new-microsoftdatasqlclient/). - -This is an example of C# source code. - -```csharp - -using System; -using Microsoft.Data.SqlClient; - -public class Program -{ - public static void Main(string[] args) - { - // Use your own server, database, and user ID. - // Connetion string - user ID is not provided and is asked interactively. 
- string ConnectionString = @"Server=.database.windows.net; Authentication=Active Directory Interactive; Database="; - - - using (SqlConnection conn = new SqlConnection(ConnectionString)) - - { - conn.Open(); - Console.WriteLine("ConnectionString2 succeeded."); - using (var cmd = new SqlCommand("SELECT @@Version", conn)) - { - Console.WriteLine("select @@version"); - var result = cmd.ExecuteScalar(); - Console.WriteLine(result.ToString()); - } - - } - Console.ReadKey(); - - } -} - -``` - -  - -This is an example of the C# test output. - -```C# -ConnectionString2 succeeded. -select @@version -Microsoft SQL Azure (RTM) - 12.0.2000.8 - ... -``` - -## Next steps - -- [Azure Active Directory server principals](authentication-azure-ad-logins.md) -- [Azure AD-only authentication with Azure SQL](authentication-azure-ad-only-authentication.md) -- [Using multi-factor Azure Active Directory authentication](authentication-mfa-ssms-overview.md) \ No newline at end of file diff --git a/articles/azure-sql/database/active-geo-replication-configure-portal.md b/articles/azure-sql/database/active-geo-replication-configure-portal.md deleted file mode 100644 index 02a7a4860f712..0000000000000 --- a/articles/azure-sql/database/active-geo-replication-configure-portal.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -title: "Tutorial: Geo-replication & failover in portal" -description: Learn how to configure geo-replication for an SQL database using the Azure portal or Azure CLI, and initiate failover. 
-services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: sqldbrb=1, devx-track-azurecli -ms.topic: tutorial -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 08/20/2021 ---- -# Tutorial: Configure active geo-replication and failover (Azure SQL Database) - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article shows you how to configure [active geo-replication for Azure SQL Database](active-geo-replication-overview.md#active-geo-replication-terminology-and-capabilities) using the [Azure portal](https://portal.azure.com) or Azure CLI and to initiate failover. - -For best practices using auto-failover groups, see [Auto-failover groups with Azure SQL Database](auto-failover-group-sql-db.md) and [Auto-failover groups with Azure SQL Managed Instance](../managed-instance/auto-failover-group-sql-mi.md). - - - -## Prerequisites - -# [Portal](#tab/portal) - -To configure active geo-replication by using the Azure portal, you need the following resource: - -* A database in Azure SQL Database: The primary database that you want to replicate to a different geographical region. - -> [!Note] -> When using Azure portal, you can only create a secondary database within the same subscription as the primary. If a secondary database is required to be in a different subscription, use [Create Database REST API](/rest/api/sql/databases/createorupdate) or [ALTER DATABASE Transact-SQL API](/sql/t-sql/statements/alter-database-transact-sql). - -# [Azure CLI](#tab/azure-cli) - -To configure active geo-replication, you need a database in Azure SQL Database. It's the primary database that you want to replicate to a different geographical region. - -Prepare your environment for the Azure CLI. 
- -[!INCLUDE [azure-cli-prepare-your-environment-no-header](../../../includes/azure-cli-prepare-your-environment-no-header.md)] - ---- - -## Add a secondary database - -The following steps create a new secondary database in a geo-replication partnership. - -To add a secondary database, you must be the subscription owner or co-owner. - -The secondary database has the same name as the primary database and has, by default, the same service tier and compute size. The secondary database can be a single database or a pooled database. For more information, see [DTU-based purchasing model](service-tiers-dtu.md) and [vCore-based purchasing model](service-tiers-vcore.md). -After the secondary is created and seeded, data begins replicating from the primary database to the new secondary database. - -> [!NOTE] -> If the partner database already exists, (for example, as a result of terminating a previous geo-replication relationship) the command fails. - -# [Portal](#tab/portal) - -1. In the [Azure portal](https://portal.azure.com), browse to the database that you want to set up for geo-replication. -2. On the SQL Database page, select your database, scroll to **Data management**, select **Replicas**, and then select **Create replica**. - - :::image type="content" source="./media/active-geo-replication-configure-portal/azure-cli-create-geo-replica.png" alt-text="Configure geo-replication"::: - -3. Select or create the server for the secondary database, and configure the **Compute + storage** options if necessary. You can select any region for your secondary server, but we recommend the [paired region](../../availability-zones/cross-region-replication-azure.md). - - :::image type="content" source="./media/active-geo-replication-configure-portal/azure-portal-create-and-configure-replica.png" alt-text="{alt-text}"::: - - Optionally, you can add a secondary database to an elastic pool. 
To create the secondary database in a pool, select **Yes** next to **Want to use SQL elastic pool?** and select a pool on the target server. A pool must already exist on the target server. This workflow doesn't create a pool. - -4. Click **Review + create**, review the information, and then click **Create**. -5. The secondary database is created and the deployment process begins. - - :::image type="content" source="./media/active-geo-replication-configure-portal/azure-portal-geo-replica-deployment.png" alt-text="Screenshot that shows the deployment status of the secondary database."::: - -6. When the deployment is complete, the secondary database displays its status. - - :::image type="content" source="./media/active-geo-replication-configure-portal/azure-portal-sql-database-secondary-status.png" alt-text="Screenshot that shows the secondary database status after deployment."::: - -7. Return to the primary database page, and then select **Replicas**. Your secondary database is listed under **Geo replicas**. - - :::image type="content" source="./media/active-geo-replication-configure-portal/azure-sql-db-geo-replica-list.png" alt-text="Screenshot that shows the SQL database primary and geo replicas."::: - -# [Azure CLI](#tab/azure-cli) - -Select the database you want to set up for geo-replication. You'll need the following information: -- Your original Azure SQL database name. -- The Azure SQL server name. -- Your resource group name. -- The name of the server to create the new replica in. - -> [!NOTE] -> The secondary database must have the same service tier as the primary. - -You can select any region for your secondary server, but we recommend the [paired region](../../availability-zones/cross-region-replication-azure.md). - -Run the [az sql db replica create](/cli/azure/sql/db/replica#az-sql-db-replica-create) command. 
- -```azurecli -az sql db replica create --resource-group ContosoHotel --server contosoeast --name guestlist --partner-server contosowest --family Gen5 --capacity 2 --secondary-type Geo -``` - -Optionally, you can add a secondary database to an elastic pool. To create the secondary database in a pool, use the `--elastic-pool` parameter. A pool must already exist on the target server. This workflow doesn't create a pool. - -The secondary database is created and the deployment process begins. - -When the deployment is complete, you can check the status of the secondary database by running the [az sql db replica list-links](/cli/azure/sql/db/replica#az-sql-db-replica-list-links) command: - -```azurecli -az sql db replica list-links --name guestlist --resource-group ContosoHotel --server contosowest -``` - ---- - -## Initiate a failover - -The secondary database can be switched to become the primary. - -# [Portal](#tab/portal) - -1. In the [Azure portal](https://portal.azure.com), browse to the primary database in the geo-replication partnership. -2. Scroll to **Data management**, and then select **Replicas**. -3. In the **Geo replicas** list, select the database you want to become the new primary, select the ellipsis, and then select **Forced failover**. - - :::image type="content" source="./media/active-geo-replication-configure-portal/azure-portal-select-forced-failover.png" alt-text="Screenshot that shows selecting forced failover from the drop-down."::: -4. Select **Yes** to begin the failover. - -# [Azure CLI](#tab/azure-cli) - -Run the [az sql db replica set-primary](/cli/azure/sql/db/replica#az-sql-db-replica-set-primary) command. - -```azurecli -az sql db replica set-primary --name guestlist --resource-group ContosoHotel --server contosowest -``` - ---- - -The command immediately switches the secondary database into the primary role. This process normally should complete within 30 seconds or less. 
- -There's a short period during which both databases are unavailable, on the order of 0 to 25 seconds, while the roles are switched. If the primary database has multiple secondary databases, the command automatically reconfigures the other secondaries to connect to the new primary. The entire operation should take less than a minute to complete under normal circumstances. - -> [!NOTE] -> This command is designed for quick recovery of the database in case of an outage. It triggers failover without data synchronization, or forced failover. If the primary is online and committing transactions when the command is issued some data loss may occur. - -## Remove secondary database - -This operation permanently stops the replication to the secondary database, and changes the role of the secondary to a regular read-write database. If the connectivity to the secondary database is broken, the command succeeds but the secondary doesn't become read-write until after connectivity is restored. - -# [Portal](#tab/portal) - -1. In the [Azure portal](https://portal.azure.com), browse to the primary database in the geo-replication partnership. -2. Select **Replicas**. -3. In the **Geo replicas** list, select the database you want to remove from the geo-replication partnership, select the ellipsis, and then select **Stop replication**. - - :::image type="content" source="./media/active-geo-replication-configure-portal/azure-portal-select-stop-replication.png" alt-text="Screenshot that shows selecting stop replication from the drop-down."::: -5. A confirmation window opens. Click **Yes** to remove the database from the geo-replication partnership. (Set it to a read-write database not part of any replication.) - -# [Azure CLI](#tab/azure-cli) - -Run the [az sql db replica delete-link](/cli/azure/sql/db/replica#az-sql-db-replica-delete-link) command. 
- -```azurecli -az sql db replica delete-link --name guestlist --resource-group ContosoHotel --server contosoeast --partner-server contosowest -``` - -Confirm that you want to perform the operation. - ---- - -## Next steps - -* To learn more about active geo-replication, see [active geo-replication](active-geo-replication-overview.md). -* To learn about auto-failover groups, see [Auto-failover groups](auto-failover-group-overview.md) -* For a business continuity overview and scenarios, see [Business continuity overview](business-continuity-high-availability-disaster-recover-hadr-overview.md). \ No newline at end of file diff --git a/articles/azure-sql/database/active-geo-replication-overview.md b/articles/azure-sql/database/active-geo-replication-overview.md deleted file mode 100644 index 9c51d1f2e6494..0000000000000 --- a/articles/azure-sql/database/active-geo-replication-overview.md +++ /dev/null @@ -1,299 +0,0 @@ ---- -title: Active geo-replication -description: Use active geo-replication to create readable secondary databases of individual databases in Azure SQL Database in the same or different regions. -ms.service: sql-database -ms.subservice: high-availability -ms.custom: sqldbrb=1 -ms.topic: conceptual -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 4/14/2022 ---- - -# Active geo-replication -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Active geo-replication is a feature that lets you to create a continuously synchronized readable secondary database for a primary database. The readable secondary database may be in the same Azure region as the primary, or, more commonly, in a different region. This kind of readable secondary databases are also known as geo-secondaries, or geo-replicas. - -Active geo-replication is designed as a business continuity solution that lets you perform quick disaster recovery of individual databases in case of a regional disaster or a large scale outage. 
Once geo-replication is set up, you can initiate a geo-failover to a geo-secondary in a different Azure region. The geo-failover is initiated programmatically by the application or manually by the user. - -> [!NOTE] -> Active geo-replication for Azure SQL Hyperscale is [now in public preview](service-tier-hyperscale-replicas.md#geo-replica-in-preview). Current limitations include: -> - Primary can have only one geo-secondary replica. -> - Restore or database copy from geo-secondary is not supported. -> - Can't use geo-secondary as a source for geo-replication to another database. - - -> [!NOTE] -> Active geo-replication is not supported by Azure SQL Managed Instance. For geographic failover of instances of SQL Managed Instance, use [Auto-failover groups](auto-failover-group-overview.md). - -> [!NOTE] -> To migrate SQL databases from Azure Germany using active geo-replication, see [Migrate SQL Database using active geo-replication](../../germany/germany-migration-databases.md#migrate-sql-database-using-active-geo-replication). - -If your application requires a stable connection endpoint and automatic geo-failover support in addition to geo-replication, use [Auto-failover groups](auto-failover-group-overview.md). - -The following diagram illustrates a typical configuration of a geo-redundant cloud application using Active geo-replication. - -![active geo-replication](./media/active-geo-replication-overview/geo-replication.png) - -If for any reason your primary database fails, you can initiate a geo-failover to any of your secondary databases. When a secondary is promoted to the primary role, all other secondaries are automatically linked to the new primary. 
- -You can manage geo-replication and initiate a geo-failover using the following: - -- The [Azure portal](active-geo-replication-configure-portal.md) -- [PowerShell: Single database](scripts/setup-geodr-and-failover-database-powershell.md) -- [PowerShell: Elastic pool](scripts/setup-geodr-and-failover-elastic-pool-powershell.md) -- [Transact-SQL: Single database or elastic pool](/sql/t-sql/statements/alter-database-azure-sql-database) -- [REST API: Single database](/rest/api/sql/replicationlinks) - -Active geo-replication leverages the [Always On availability group](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server) technology to asynchronously replicate transaction log generated on the primary replica to all geo-replicas. While at any given point, a secondary database might be slightly behind the primary database, the data on a secondary is guaranteed to be transactionally consistent. In other words, changes made by uncommitted transactions are not visible. - -> [!NOTE] -> Active geo-replication replicates changes by streaming database transaction log from the primary replica to secondary replicas. It is unrelated to [transactional replication](/sql/relational-databases/replication/transactional/transactional-replication), which replicates changes by executing DML (INSERT, UPDATE, DELETE) commands on subscribers. - -Regional redundancy provided by geo-replication enables applications to quickly recover from a permanent loss of an entire Azure region, or parts of a region, caused by natural disasters, catastrophic human errors, or malicious acts. Geo-replication RPO can be found in [Overview of Business Continuity](business-continuity-high-availability-disaster-recover-hadr-overview.md). - -The following figure shows an example of active geo-replication configured with a primary in the North Central US region and a geo-secondary in the South Central US region. 
- -![geo-replication relationship](./media/active-geo-replication-overview/geo-replication-relationship.png) - -In addition to disaster recovery, active geo-replication can be used in the following scenarios: - -- **Database migration**: You can use active geo-replication to migrate a database from one server to another with minimum downtime. -- **Application upgrades**: You can create an extra secondary as a fail back copy during application upgrades. - -To achieve full business continuity, adding database regional redundancy is only a part of the solution. Recovering an application (service) end-to-end after a catastrophic failure requires recovery of all components that constitute the service and any dependent services. Examples of these components include the client software (for example, a browser with a custom JavaScript), web front ends, storage, and DNS. It is critical that all components are resilient to the same failures and become available within the recovery time objective (RTO) of your application. Therefore, you need to identify all dependent services and understand the guarantees and capabilities they provide. Then, you must take adequate steps to ensure that your service functions during the failover of the services on which it depends. For more information about designing solutions for disaster recovery, see [Designing Cloud Solutions for Disaster Recovery Using active geo-replication](designing-cloud-solutions-for-disaster-recovery.md). - -## Active geo-replication terminology and capabilities - -- **Automatic asynchronous replication** - - You can only create a geo-secondary for an existing database. The geo-secondary can be created on any logical server, other than the server with the primary database. Once created, the geo-secondary replica is populated with the data of the primary database. This process is known as seeding. 
After a geo-secondary has been created and seeded, updates to the primary database are automatically and asynchronously replicated to the geo-secondary replica. Asynchronous replication means that transactions are committed on the primary database before they are replicated. - -- **Readable geo-secondary replicas** - - An application can access a geo-secondary replica to execute read-only queries using the same or different security principals used for accessing the primary database. For more information, see [Use read-only replicas to offload read-only query workloads](read-scale-out.md). - - > [!IMPORTANT] - > You can use geo-replication to create secondary replicas in the same region as the primary. You can use these secondaries to satisfy read scale-out scenarios in the same region. However, a secondary replica in the same region does not provide additional resilience to catastrophic failures or large scale outages, and therefore is not a suitable failover target for disaster recovery purposes. It also does not guarantee availability zone isolation. Use Business Critical or Premium service tiers [zone redundant configuration](high-availability-sla.md#premium-and-business-critical-service-tier-zone-redundant-availability) or General Purpose service tier [zone redundant configuration](high-availability-sla.md#general-purpose-service-tier-zone-redundant-availability) to achieve availability zone isolation. - > - -- **Planned geo-failover** - - Planned geo-failover switches the roles of primary and geo-secondary databases after completing full data synchronization. A planned failover does not result in data loss. The duration of planned geo-failover depends on the size of transaction log on the primary that needs to be synchronized to the geo-secondary. 
Planned geo-failover is designed for the following scenarios: - - - Perform DR drills in production when the data loss is not acceptable; - - Relocate the database to a different region; - - Return the database to the primary region after the outage has been mitigated (known as failback). - -- **Unplanned geo-failover** - - Unplanned, or forced, geo-failover immediately switches the geo-secondary to the primary role without any synchronization with the primary. Any transactions committed on the primary but not yet replicated to the secondary are lost. This operation is designed as a recovery method during outages when the primary is not accessible, but database availability must be quickly restored. When the original primary is back online, it will be automatically re-connected, reseeded using the current primary data, and become a new geo-secondary. - - > [!IMPORTANT] - > After either planned or unplanned geo-failover, the connection endpoint for the new primary changes because the new primary is now located on a different logical server. - -- **Multiple readable geo-secondaries** - - Up to four geo-secondaries can be created for a primary. If there is only one secondary, and it fails, the application is exposed to higher risk until a new secondary is created. If multiple secondaries exist, the application remains protected even if one of the secondaries fails. Additional secondaries can also be used to scale out read-only workloads. - - > [!TIP] - > If you are using active geo-replication to build a globally distributed application and need to provide read-only access to data in more than four regions, you can create a secondary of a secondary (a process known as chaining) to create additional geo-replicas. Replication lag on chained geo-replicas may be higher than on geo-replicas connected directly to the primary. Setting up chained geo-replication topologies is only supported programmatically, and not from Azure portal. 
- -- **Geo-replication of databases in an elastic pool** - - Each geo-secondary can be a single database or a database in an elastic pool. The elastic pool choice for each geo-secondary database is separate and does not depend on the configuration of any other replica in the topology (either primary or secondary). Each elastic pool is contained within a single logical server. Because database names on a logical server must be unique, multiple geo-secondaries of the same primary can never share an elastic pool. - -- **User-controlled geo-failover and failback** - - A geo-secondary that has finished initial seeding can be explicitly switched to the primary role (failed over) at any time by the application or the user. During an outage where the primary is inaccessible, only an unplanned geo-failover can be used. That immediately promotes a geo-secondary to be the new primary. When the outage is mitigated, the system automatically makes the recovered primary a geo-secondary, and brings it up-to-date with the new primary. Due to the asynchronous nature of geo-replication, recent transactions may be lost during unplanned geo-failovers if the primary fails before these transactions are replicated to a geo-secondary. When a primary with multiple geo-secondaries fails over, the system automatically reconfigures replication relationships and links the remaining geo-secondaries to the newly promoted primary, without requiring any user intervention. After the outage that caused the geo-failover is mitigated, it may be desirable to return the primary to its original region. To do that, invoke a planned geo-failover. - -## Prepare for geo-failover - -To ensure that your application can immediately access the new primary after geo-failover, validate that authentication and network access for your secondary server are properly configured. For details, see [SQL Database security after disaster recovery](active-geo-replication-security-configure.md). 
Also validate that backup retention policy on the secondary database matches that of the primary. This setting is not a part of the database and is not replicated from the primary. By default, the geo-secondary is configured with a default PITR retention period of seven days. For details, see [SQL Database automated backups](automated-backups-overview.md). - -> [!IMPORTANT] -> If your database is a member of a failover group, you cannot initiate its failover using the geo-replication failover command. Use the failover command for the group. If you need to failover an individual database, you must remove it from the failover group first. See [Auto-failover groups](auto-failover-group-overview.md) for details. - -## Configure geo-secondary - -Both primary and geo-secondary are required to have the same service tier. It is also strongly recommended that the geo-secondary is configured with the same backup storage redundancy and compute size (DTUs or vCores) as the primary. If the primary is experiencing a heavy write workload, a geo-secondary with a lower compute size may not be able to keep up. That will cause replication lag on the geo-secondary, and may eventually cause unavailability of the geo-secondary. To mitigate these risks, active geo-replication will reduce (throttle) the primary's transaction log rate if necessary to allow its secondaries to catch up. - -Another consequence of an imbalanced geo-secondary configuration is that after failover, application performance may suffer due to insufficient compute capacity of the new primary. In that case, it will be necessary to scale up the database to have sufficient resources, which may take significant time, and will require a [high availability](high-availability-sla.md) failover at the end of the scale up process, which may interrupt application workloads. - -If you decide to create the geo-secondary with a lower compute size, you should monitor log IO rate on the primary over time. 
This lets you estimate the minimal compute size of the geo-secondary required to sustain the replication load. For example, if your primary database is P6 (1000 DTU) and its log IO is sustained at 50%, the geo-secondary needs to be at least P4 (500 DTU). To retrieve historical log IO data, use the [sys.resource_stats](/sql/relational-databases/system-catalog-views/sys-resource-stats-azure-sql-database) view. To retrieve recent log IO data with higher granularity that better reflects short-term spikes, use the [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database) view. - -> [!TIP] -> Transaction log IO throttling on the primary due to lower compute size on a geo-secondary is reported using the HADR_THROTTLE_LOG_RATE_MISMATCHED_SLO wait type, visible in the [sys.dm_exec_requests](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-requests-transact-sql) and [sys.dm_os_wait_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-os-wait-stats-transact-sql) database views. -> -> Transaction log IO on the primary may be throttled for reasons unrelated to lower compute size on a geo-secondary. This kind of throttling may occur even if the geo-secondary has the same or higher compute size than the primary. For details, including wait types for different kinds of log IO throttling, see [Transaction log rate governance](resource-limits-logical-server.md#transaction-log-rate-governance). - -By default, backup storage redundancy of the geo-secondary is same as for the primary database. You can choose to configure a geo-secondary with a different backup storage redundancy. Backups are always taken on the primary database. 
If the secondary is configured with a different backup storage redundancy, then after a geo-failover, when the geo-secondary is promoted to the primary, new backups will be stored and billed according to the type of storage (RA-GRS, ZRS, LRS) selected on the new primary (previous secondary). - -## Cross-subscription geo-replication - -To create a geo-secondary in a subscription different from the subscription of the primary (whether under the same Azure Active Directory tenant or not), follow the steps in this section. - -1. Add the IP address of the client machine executing the T-SQL commands below to the server firewalls of **both** the primary and secondary servers. You can confirm that IP address by executing the following query while connected to the primary server from the same client machine. - - ```sql - select client_net_address from sys.dm_exec_connections where session_id = @@SPID; - ``` - - For more information see, [Configure firewall](firewall-configure.md). - -2. In the master database on the **primary** server, create a SQL authentication login dedicated to active geo-replication setup. Adjust login name and password as needed. - - ```sql - create login geodrsetup with password = 'ComplexPassword01'; - ``` - -3. In the same database, create a user for the login, and add it to the `dbmanager` role: - - ```sql - create user geodrsetup for login geodrsetup; - alter role dbmanager add member geodrsetup; - ``` - -4. Take note of the SID value of the new login. Obtain the SID value using the following query. - - ```sql - select sid from sys.sql_logins where name = 'geodrsetup'; - ``` - -5. Connect to the **primary** database (not the master database), and create a user for the same login. - - ```sql - create user geodrsetup for login geodrsetup; - ``` - -6. In the same database, add the user to the `db_owner` role. - - ```sql - alter role db_owner add member geodrsetup; - ``` - -7. 
In the master database on the **secondary** server, create the same login as on the primary server, using the same name, password, and SID. Replace the hexadecimal SID value in the sample command below with the one obtained in Step 4. - - ```sql - create login geodrsetup with password = 'ComplexPassword01', sid=0x010600000000006400000000000000001C98F52B95D9C84BBBA8578FACE37C3E; - ``` - -8. In the same database, create a user for the login, and add it to the `dbmanager` role. - - ```sql - create user geodrsetup for login geodrsetup; - alter role dbmanager add member geodrsetup; - ``` - -9. Connect to the master database on the **primary** server using the new `geodrsetup` login, and initiate geo-secondary creation on the secondary server. Adjust database name and secondary server name as needed. Once the command is executed, you can monitor geo-secondary creation by querying the [sys.dm_geo_replication_link_status](/sql/relational-databases/system-dynamic-management-views/sys-dm-geo-replication-link-status-azure-sql-database) view in the **primary** database, and the [sys.dm_operation_status](/sql/relational-databases/system-dynamic-management-views/sys-dm-operation-status-azure-sql-database) view in the master database on the **primary** server. The time needed to create a geo-secondary depends on the primary database size. - - ```sql - alter database [dbrep] add secondary on server [servername]; - ``` - -10. After the geo-secondary is successfully created, the users, logins, and firewall rules created by this procedure can be removed. - -> [!NOTE] -> Cross-subscription geo-replication operations including setup and geo-failover are only supported using T-SQL commands. -> -> Adding a geo-secondary using T-SQL is not supported when connecting to the primary server over a [private endpoint](private-endpoint-overview.md). 
If a private endpoint is configured but public network access is allowed, adding a geo-secondary is supported when connected to the primary server from a public IP address. Once a geo-secondary is added, public access can be [denied](connectivity-settings.md#deny-public-network-access). -> -> Creating a geo-secondary on a logical server in a different Azure tenant is not supported when [Azure Active Directory only](https://techcommunity.microsoft.com/t5/azure-sql/azure-active-directory-only-authentication-for-azure-sql/ba-p/2417673) authentication for Azure SQL is active (enabled) on either primary or secondary logical server. - -## Keep credentials and firewall rules in sync - -When using public network access for connecting to the database, we recommend using [database-level IP firewall rules](firewall-configure.md) for geo-replicated databases. These rules are replicated with the database, which ensures that all geo-secondaries have the same IP firewall rules as the primary. This approach eliminates the need for customers to manually configure and maintain firewall rules on servers hosting the primary and secondary databases. Similarly, using [contained database users](logins-create-manage.md) for data access ensures both primary and secondary databases always have the same authentication credentials. This way, after a geo-failover, there are no disruptions due to authentication credential mismatches. If you are using logins and users (rather than contained users), you must take extra steps to ensure that the same logins exist for your secondary database. For configuration details, see [How to configure logins and users](active-geo-replication-security-configure.md). - -## Scale primary database - -You can scale up or scale down the primary database to a different compute size (within the same service tier) without disconnecting any geo-secondaries. When scaling up, we recommend that you scale up the geo-secondary first, and then scale up the primary. 
When scaling down, reverse the order: scale down the primary first, and then scale down the secondary. - -> [!NOTE] -> If you created a geo-secondary as part of failover group configuration, it is not recommended to scale it down. This is to ensure your data tier has sufficient capacity to process your regular workload after a geo-failover. - -> [!IMPORTANT] -> The primary database in a failover group can't scale to a higher service tier (edition) unless the secondary database is first scaled to the higher tier. For example, if you want to scale up the primary from General Purpose to Business Critical, you have to first scale the geo-secondary to Business Critical. If you try to scale the primary or geo-secondary in a way that violates this rule, you will receive the following error: -> -> `The source database 'Primaryserver.DBName' cannot have higher edition than the target database 'Secondaryserver.DBName'. Upgrade the edition on the target before upgrading the source.` -> - -## Prevent loss of critical data - -Due to the high latency of wide area networks, geo-replication uses an asynchronous replication mechanism. Asynchronous replication makes the possibility of data loss unavoidable if the primary fails. To protect critical transactions from data loss, an application developer can call the [sp_wait_for_database_copy_sync](/sql/relational-databases/system-stored-procedures/active-geo-replication-sp-wait-for-database-copy-sync) stored procedure immediately after committing the transaction. Calling `sp_wait_for_database_copy_sync` blocks the calling thread until the last committed transaction has been transmitted and hardened in the transaction log of the secondary database. However, it does not wait for the transmitted transactions to be replayed (redone) on the secondary. `sp_wait_for_database_copy_sync` is scoped to a specific geo-replication link. Any user with the connection rights to the primary database can call this procedure. 
- -> [!NOTE] -> `sp_wait_for_database_copy_sync` prevents data loss after geo-failover for specific transactions, but does not guarantee full synchronization for read access. The delay caused by a `sp_wait_for_database_copy_sync` procedure call can be significant and depends on the size of the not yet transmitted transaction log on the primary at the time of the call. - -## Monitor geo-replication lag - -To monitor lag with respect to RPO, use *replication_lag_sec* column of [sys.dm_geo_replication_link_status](/sql/relational-databases/system-dynamic-management-views/sys-dm-geo-replication-link-status-azure-sql-database) on the primary database. It shows lag in seconds between the transactions committed on the primary, and hardened to the transaction log on the secondary. For example, if the lag is one second, it means that if the primary is impacted by an outage at this moment and a geo-failover is initiated, transactions committed in the last second will be lost. - -To measure lag with respect to changes on the primary database that have been hardened on the geo-secondary, compare *last_commit* time on the geo-secondary with the same value on the primary. - -> [!TIP] -> If *replication_lag_sec* on the primary is NULL, it means that the primary does not currently know how far behind a geo-secondary is. This typically happens after process restarts and should be a transient condition. Consider sending an alert if *replication_lag_sec* returns NULL for an extended period of time. It may indicate that the geo-secondary cannot communicate with the primary due to a connectivity failure. -> -> There are also conditions that could cause the difference between *last_commit* time on the geo-secondary and on the primary to become large. For example, if a commit is made on the primary after a long period of no changes, the difference will jump up to a large value before quickly returning to zero. 
Consider sending an alert if the difference between these two values remains large for a long time. - -## Programmatically manage active geo-replication - -As discussed previously, active geo-replication can also be managed programmatically using T-SQL, Azure PowerShell, and REST API. The following tables describe the set of commands available. Active geo-replication includes a set of Azure Resource Manager APIs for management, including the [Azure SQL Database REST API](/rest/api/sql/) and [Azure PowerShell cmdlets](/powershell/azure/). These APIs support Azure role-based access control (Azure RBAC). For more information on how to implement access roles, see [Azure role-based access control (Azure RBAC)](../../role-based-access-control/overview.md). - -### T-SQL: Manage geo-failover of single and pooled databases - -> [!IMPORTANT] -> These T-SQL commands only apply to active geo-replication and do not apply to failover groups. As such, they also do not apply to SQL Managed Instance, which only supports failover groups. - -| Command | Description | -| --- | --- | -| [ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql?preserve-view=true&view=azuresqldb-current) |Use **ADD SECONDARY ON SERVER** argument to create a secondary database for an existing database and starts data replication | -| [ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql?preserve-view=true&view=azuresqldb-current) |Use **FAILOVER** or **FORCE_FAILOVER_ALLOW_DATA_LOSS** to switch a secondary database to be primary to initiate failover | -| [ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql?preserve-view=true&view=azuresqldb-current) |Use **REMOVE SECONDARY ON SERVER** to terminate a data replication between a SQL Database and the specified secondary database. 
| -| [sys.geo_replication_links](/sql/relational-databases/system-dynamic-management-views/sys-geo-replication-links-azure-sql-database) |Returns information about all existing replication links for each database on a server. | -| [sys.dm_geo_replication_link_status](/sql/relational-databases/system-dynamic-management-views/sys-dm-geo-replication-link-status-azure-sql-database) |Gets the last replication time, last replication lag, and other information about the replication link for a given database. | -| [sys.dm_operation_status](/sql/relational-databases/system-dynamic-management-views/sys-dm-operation-status-azure-sql-database) |Shows the status for all database operations including changes to replication links. | -| [sys.sp_wait_for_database_copy_sync](/sql/relational-databases/system-stored-procedures/active-geo-replication-sp-wait-for-database-copy-sync) |Causes the application to wait until all committed transactions are hardened to the transaction log of a geo-secondary. | - - -### PowerShell: Manage geo-failover of single and pooled databases - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported by Azure SQL Database, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. - -| Cmdlet | Description | -| --- | --- | -| [Get-AzSqlDatabase](/powershell/module/az.sql/get-azsqldatabase) |Gets one or more databases. | -| [New-AzSqlDatabaseSecondary](/powershell/module/az.sql/new-azsqldatabasesecondary) |Creates a secondary database for an existing database and starts data replication. | -| [Set-AzSqlDatabaseSecondary](/powershell/module/az.sql/set-azsqldatabasesecondary) |Switches a secondary database to be primary to initiate failover. 
| -| [Remove-AzSqlDatabaseSecondary](/powershell/module/az.sql/remove-azsqldatabasesecondary) |Terminates data replication between a SQL Database and the specified secondary database. | -| [Get-AzSqlDatabaseReplicationLink](/powershell/module/az.sql/get-azsqldatabasereplicationlink) |Gets the geo-replication links for a database. | - -> [!TIP] -> For sample scripts, see [Configure and failover a single database using active geo-replication](scripts/setup-geodr-and-failover-database-powershell.md) and [Configure and failover a pooled database using active geo-replication](scripts/setup-geodr-and-failover-elastic-pool-powershell.md). - -### REST API: Manage geo-failover of single and pooled databases - -| API | Description | -| --- | --- | -| [Create or Update Database (createMode=Restore)](/rest/api/sql/databases/createorupdate) |Creates, updates, or restores a primary or a secondary database. | -| [Get Create or Update Database Status](/rest/api/sql/databases/createorupdate) |Returns the status during a create operation. | -| [Set Secondary Database as Primary (Planned Failover)](/rest/api/sql/replicationlinks/failover) |Sets which secondary database is primary by failing over from the current primary database. **This option is not supported for SQL Managed Instance.**| -| [Set Secondary Database as Primary (Unplanned Failover)](/rest/api/sql/replicationlinks/failoverallowdataloss) |Sets which secondary database is primary by failing over from the current primary database. This operation might result in data loss. **This option is not supported for SQL Managed Instance.**| -| [Get Replication Link](/rest/api/sql/replicationlinks/get) |Gets a specific replication link for a given database in a geo-replication partnership. It retrieves the information visible in the sys.geo_replication_links catalog view. 
**This option is not supported for SQL Managed Instance.**| -| [Replication Links - List By Database](/rest/api/sql/replicationlinks/listbydatabase) | Gets all replication links for a given database in a geo-replication partnership. It retrieves the information visible in the sys.geo_replication_links catalog view. | -| [Delete Replication Link](/rest/api/sql/replicationlinks/delete) | Deletes a database replication link. Cannot be done during failover. | - - -## Next steps - -- For sample scripts, see: - - [Configure and failover a single database using active geo-replication](scripts/setup-geodr-and-failover-database-powershell.md). - - [Configure and failover a pooled database using active geo-replication](scripts/setup-geodr-and-failover-elastic-pool-powershell.md). -- SQL Database also supports auto-failover groups. For more information, see using [auto-failover groups](auto-failover-group-overview.md). -- For a business continuity overview and scenarios, see [Business continuity overview](business-continuity-high-availability-disaster-recover-hadr-overview.md). -- To learn about Azure SQL Database automated backups, see [SQL Database automated backups](automated-backups-overview.md). -- To learn about using automated backups for recovery, see [Restore a database from the service-initiated backups](recovery-using-backups.md). -- To learn about authentication requirements for a new primary server and database, see [SQL Database security after disaster recovery](active-geo-replication-security-configure.md). 
diff --git a/articles/azure-sql/database/active-geo-replication-security-configure.md b/articles/azure-sql/database/active-geo-replication-security-configure.md deleted file mode 100644 index 8c6edd1c7972e..0000000000000 --- a/articles/azure-sql/database/active-geo-replication-security-configure.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: Configure security for disaster recovery -description: Learn the security considerations for configuring and managing security after a database restore or a failover to a secondary server. -services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma, vanto -ms.date: 12/18/2018 ---- -# Configure and manage Azure SQL Database security for geo-restore or failover -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article describes the authentication requirements to configure and control [active geo-replication](active-geo-replication-overview.md) and [auto-failover groups](auto-failover-group-overview.md). It also provides the steps required to set up user access to the secondary database. Finally, it also describes how to enable access to the recovered database after using [geo-restore](recovery-using-backups.md#geo-restore). For more information on recovery options, see [Business Continuity Overview](business-continuity-high-availability-disaster-recover-hadr-overview.md). - -## Disaster recovery with contained users - -Unlike traditional users, which must be mapped to logins in the master database, a contained user is managed completely by the database itself. This has two benefits. In the disaster recovery scenario, the users can continue to connect to the new primary database or the database recovered using geo-restore without any additional configuration, because the database manages the users. 
There are also potential scalability and performance benefits from this configuration from a login perspective. For more information, see [Contained Database Users - Making Your Database Portable](/sql/relational-databases/security/contained-database-users-making-your-database-portable). - -The main trade-off is that managing the disaster recovery process at scale is more challenging. When you have multiple databases that use the same login, maintaining the credentials using contained users in multiple databases may negate the benefits of contained users. For example, the password rotation policy requires that changes be made consistently in multiple databases rather than changing the password for the login once in the master database. For this reason, if you have multiple databases that use the same user name and password, using contained users is not recommended. - -## How to configure logins and users - -If you are using logins and users (rather than contained users), you must take extra steps to ensure that the same logins exist in the master database. The following sections outline the steps involved and additional considerations. - - >[!NOTE] - > It is also possible to use Azure Active Directory (AAD) logins to manage your databases. For more information, see [Azure SQL logins and users](./logins-create-manage.md). - -### Set up user access to a secondary or recovered database - -In order for the secondary database to be usable as a read-only secondary database, and to ensure proper access to the new primary database or the database recovered using geo-restore, the master database of the target server must have the appropriate security configuration in place before the recovery. - -The specific permissions for each step are described later in this topic. - -Preparing user access to a geo-replication secondary should be performed as part of configuring geo-replication. 
Preparing user access to the geo-restored databases should be performed at any time when the original server is online (e.g. as part of the DR drill). - -> [!NOTE] -> If you fail over or geo-restore to a server that does not have properly configured logins, access to it will be limited to the server admin account. - -Setting up logins on the target server involves three steps outlined below: - -#### 1. Determine logins with access to the primary database - -The first step of the process is to determine which logins must be duplicated on the target server. This is accomplished with a pair of SELECT statements, one in the logical master database on the source server and one in the primary database itself. - -Only the server admin or a member of the **LoginManager** server role can determine the logins on the source server with the following SELECT statement. - -```sql -SELECT [name], [sid] -FROM [sys].[sql_logins] -WHERE [type_desc] = 'SQL_Login' -``` - -Only a member of the db_owner database role, the dbo user, or server admin, can determine all of the database user principals in the primary database. - -```sql -SELECT [name], [sid] -FROM [sys].[database_principals] -WHERE [type_desc] = 'SQL_USER' -``` - -#### 2. Find the SID for the logins identified in step 1 - -By comparing the output of the queries from the previous section and matching the SIDs, you can map the server login to database user. Logins that have a database user with a matching SID have user access to that database as that database user principal. - -The following query can be used to see all of the user principals and their SIDs in a database. Only a member of the db_owner database role or server admin can run this query. - -```sql -SELECT [name], [sid] -FROM [sys].[database_principals] -WHERE [type_desc] = 'SQL_USER' -``` - -> [!NOTE] -> The **INFORMATION_SCHEMA** and **sys** users have *NULL* SIDs, and the **guest** SID is **0x00**. 
The **dbo** SID may start with *0x01060000000001648000000000048454*, if the database creator was the server admin instead of a member of **DbManager**. - -#### 3. Create the logins on the target server - -The last step is to go to the target server, or servers, and generate the logins with the appropriate SIDs. The basic syntax is as follows. - -```sql -CREATE LOGIN [] -WITH PASSWORD = '', -SID = 0x1234 /*replace 0x1234 with the desired login SID*/ -``` - -> [!NOTE] -> If you want to grant user access to the secondary, but not to the primary, you can do that by altering the user login on the primary server by using the following syntax. -> -> ```sql -> ALTER LOGIN [] DISABLE -> ``` -> -> DISABLE doesn’t change the password, so you can always enable it if needed. - -## Next steps - -* For more information on managing database access and logins, see [SQL Database security: Manage database access and login security](logins-create-manage.md). -* For more information on contained database users, see [Contained Database Users - Making Your Database Portable](/sql/relational-databases/security/contained-database-users-making-your-database-portable). -* To learn about active geo-replication, see [Active geo-replication](active-geo-replication-overview.md). -* To learn about auto-failover groups, see [Auto-failover groups](auto-failover-group-overview.md). 
-* For information about using geo-restore, see [geo-restore](recovery-using-backups.md#geo-restore) \ No newline at end of file diff --git a/articles/azure-sql/database/adonet-v12-develop-direct-route-ports.md b/articles/azure-sql/database/adonet-v12-develop-direct-route-ports.md deleted file mode 100644 index e8f03c65a5cf4..0000000000000 --- a/articles/azure-sql/database/adonet-v12-develop-direct-route-ports.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: Ports beyond 1433 -description: Client connections from ADO.NET to Azure SQL Database can bypass the proxy and interact directly with the database using ports other than 1433. -services: sql-database -ms.service: sql-database -ms.subservice: development -ms.custom: "sqldbrb=1, devx-track-dotnet" -ms.devlang: -ms.topic: reference -author: VanMSFT -ms.author: vanto -ms.reviewer: kendralittle, mathoma -ms.date: 06/11/2020 ---- -# Ports beyond 1433 for ADO.NET 4.5 -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This topic describes the Azure SQL Database connection behavior for clients that use ADO.NET 4.5 or a later version. - -> [!IMPORTANT] -> For information about connectivity architecture, see [Azure SQL Database connectivity architecture](connectivity-architecture.md). -> - -## Outside vs inside - -For connections to Azure SQL Database, we must first ask whether your client program runs *outside* or *inside* the Azure cloud boundary. The subsections discuss two common scenarios. - -### *Outside:* Client runs on your desktop computer - -Port 1433 is the only port that must be open on your desktop computer that hosts your SQL Database client application. - -### *Inside:* Client runs on Azure - -When your client runs inside the Azure cloud boundary, it uses what we can call a *direct route* to interact with SQL Database. After a connection is established, further interactions between the client and database involve no Azure SQL Database Gateway. - -The sequence is as follows: - -1. 
ADO.NET 4.5 (or later) initiates a brief interaction with the Azure cloud, and receives a dynamically identified port number. - - * The dynamically identified port number is in the range of 11000-11999. -2. ADO.NET then connects to SQL Database directly, with no middleware in between. -3. Queries are sent directly to the database, and results are returned directly to the client. - -Ensure that the port ranges of 11000-11999 on your Azure client machine are left available for ADO.NET 4.5 client interactions with SQL Database. - -* In particular, ports in the range must be free of any other outbound blockers. -* On your Azure VM, the **Windows Firewall with Advanced Security** controls the port settings. - - * You can use the [firewall's user interface](/sql/sql-server/install/configure-the-windows-firewall-to-allow-sql-server-access) to add a rule for which you specify the **TCP** protocol along with a port range with the syntax like **11000-11999**. - -## Version clarifications - -This section clarifies the monikers that refer to product versions. It also lists some pairings of versions between products. - -### ADO.NET - -* ADO.NET 4.0 supports the TDS 7.3 protocol, but not 7.4. -* ADO.NET 4.5 and later supports the TDS 7.4 protocol. - -### ODBC - -* Microsoft SQL Server ODBC 11 or above - -### JDBC - -* Microsoft SQL Server JDBC 4.2 or above (JDBC 4.0 actually supports TDS 7.4 but does not implement “redirection”) - -## Related links - -* ADO.NET 4.6 was released on July 20, 2015. A blog announcement from the .NET team is available [here](https://devblogs.microsoft.com/dotnet/announcing-net-framework-4-6/). -* ADO.NET 4.5 was released on August 15, 2012. A blog announcement from the .NET team is available [here](https://devblogs.microsoft.com/dotnet/announcing-the-release-of-net-framework-4-5-rtm-product-and-source-code/). - * A blog post about ADO.NET 4.5.1 is available [here](https://devblogs.microsoft.com/dotnet/announcing-the-net-framework-4-5-1-preview/). 
- -* Microsoft ODBC Driver 17 for SQL Server -https://aka.ms/downloadmsodbcsql - -* Connect to Azure SQL Database V12 via Redirection -https://techcommunity.microsoft.com/t5/DataCAT/Connect-to-Azure-SQL-Database-V12-via-Redirection/ba-p/305362 - -* [TDS protocol version list](https://www.freetds.org/) -* [SQL Database Development Overview](develop-overview.md) -* [Azure SQL Database firewall](firewall-configure.md) diff --git a/articles/azure-sql/database/advance-notifications.md b/articles/azure-sql/database/advance-notifications.md deleted file mode 100644 index 8b89e5dde87b8..0000000000000 --- a/articles/azure-sql/database/advance-notifications.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Advance notifications (Preview) for planned maintenance events -description: Get notification before planned maintenance for Azure SQL Database. -services: sql-database -ms.service: sql-db-mi -ms.subservice: service-overview -ms.custom: -ms.devlang: -ms.topic: how-to -author: scott-kim-sql -ms.author: scottkim -ms.reviewer: kendralittle, mathoma, wiassaf, urosmil -ms.date: 04/04/2022 ---- -# Advance notifications for planned maintenance events (Preview) -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Advance notifications (Preview) are available for databases configured to use a non-default [maintenance window](maintenance-window.md) and managed instances with any configuration (including the default one). Advance notifications enable customers to configure notifications to be sent up to 24 hours in advance of any planned event. - -Notifications can be configured so you can get texts, emails, Azure push notifications, and voicemails when planned maintenance is due to begin in the next 24 hours. Additional notifications are sent when maintenance begins and when maintenance ends. - -> [!IMPORTANT] -> For Azure SQL Database, advance notifications cannot be configured for the **System default** maintenance window option. 
Choose a maintenance window other than the **System default** to configure and enable Advance notifications. - -> [!NOTE] -> While [maintenance windows](maintenance-window.md) are generally available, advance notifications for maintenance windows are in public preview for Azure SQL Database and Azure SQL Managed Instance. - -## Create an advance notification - -Advance notifications are available for Azure SQL databases that have their maintenance window configured. - -Complete the following steps to enable a notification. - -1. Go to the [Planned maintenance](https://portal.azure.com/#blade/Microsoft_Azure_Health/AzureHealthBrowseBlade/plannedMaintenance) page, select **Health alerts**, then **Add service health alert**. - - :::image type="content" source="media/advance-notifications/health-alerts.png" alt-text="create a new health alert menu option"::: - -2. In the **Actions** section, select **Add action groups**. - - :::image type="content" source="media/advance-notifications/add-action-group.png" alt-text="add an action group menu option"::: - -3. Complete the **Create action group** form, then select **Next: Notifications**. - - :::image type="content" source="media/advance-notifications/create-action-group.png" alt-text="create action group form"::: - -1. On the **Notifications** tab, select the **Notification type**. The **Email/SMS message/Push/Voice** option offers the most flexibility and is the recommended option. Select the pen to configure the notification. - - :::image type="content" source="media/advance-notifications/notifications.png" alt-text="configure notifications"::: - - 1. Complete the *Add or edit notification* form that opens and select **OK**: - - 2. Actions and Tags are optional. Here you can configure additional actions to be triggered or use tags to categorize and organize your Azure resources. - - 4. Check the details on the **Review + create** tab and select **Create**. - -7. 
After selecting create, the alert rule configuration screen opens and the action group will be selected. Give a name to your new alert rule, then choose the resource group for it, and select **Create alert rule**. - -8. Click the **Health alerts** menu item again, and the list of alerts now contains your new alert. - - -You're all set. Next time there's a planned Azure SQL maintenance event, you'll receive an advance notification. - -## Receiving notifications - -The following table shows the general-information notifications you may receive: - -|Status|Description| -|:---|:---| -|**Planned Deployment**| Received 24 hours prior to the maintenance event. Maintenance is planned on DATE between 5pm - 8am (local time) for DB xyz.| -|**In-Progress** | Maintenance for database *xyz* is starting.| -|**Complete** | Maintenance of database *xyz* is complete. | - -The following table shows additional notifications that may be sent while maintenance is ongoing: - -|Status|Description| -|:---|:---| -|**Extended** | Maintenance is in progress but didn't complete for database *xyz*. Maintenance will continue at the next maintenance window.| -|**Canceled**| Maintenance for database *xyz* is canceled and will be rescheduled later. | -|**Blocked**|There was a problem during maintenance for database *xyz*. We'll notify you when we resume.| -|**Resumed**|The problem has been resolved and maintenance will continue at the next maintenance window.| - -## Permissions - -While Advance Notifications can be sent to any email address, Azure subscription RBAC (role-based access control) policy determines who can access the links in the email. Querying resource graph is covered by [Azure RBAC](../../role-based-access-control/overview.md) access management. To enable read access, each recipient should have resource group level read access. For more information, see [Steps to assign an Azure role](../../role-based-access-control/role-assignments-steps.md). 
- -## Retrieve the list of impacted resources - -[Azure Resource Graph](../../governance/resource-graph/overview.md) is an Azure service designed to extend Azure Resource Management. The Azure Resource Graph Explorer provides efficient and performant resource exploration with the ability to query at scale across a given set of subscriptions so that you can effectively govern your environment. - -You can use the Azure Resource Graph Explorer to query for maintenance events. For an introduction on how to run these queries, see [Quickstart: Run your first Resource Graph query using Azure Resource Graph Explorer](../../governance/resource-graph/first-query-portal.md). - -When the advanced notification for planned maintenance is received, you will get a link that opens Azure Resource Graph and executes the query for the exact event, similar to the following. Note that the `notificationId` value is unique per maintenance event. - -```kusto -resources -| project resource = tolower(id) -| join kind=inner ( - maintenanceresources - | where type == "microsoft.maintenance/updates" - | extend p = parse_json(properties) - | mvexpand d = p.value - | where d has 'notificationId' and d.notificationId == 'LNPN-R9Z' - | project resource = tolower(name), status = d.status, resourceGroup, location, startTimeUtc = d.startTimeUtc, endTimeUtc = d.endTimeUtc, impactType = d.impactType -) on resource -| project resource, status, resourceGroup, location, startTimeUtc, endTimeUtc, impactType -``` - -For the full reference of the sample queries and how to use them across tools like PowerShell or Azure CLI, visit [Azure Resource Graph sample queries for Azure Service Health](../../service-health/resource-graph-samples.md). 
- - -## Next steps - -- [Maintenance window](maintenance-window.md) -- [Maintenance window FAQ](maintenance-window-faq.yml) -- [Overview of alerts in Microsoft Azure](../../azure-monitor/alerts/alerts-overview.md) -- [Email Azure Resource Manager Role](../../azure-monitor/alerts/action-groups.md#email-azure-resource-manager-role) diff --git a/articles/azure-sql/database/alerts-insights-configure-portal.md b/articles/azure-sql/database/alerts-insights-configure-portal.md deleted file mode 100644 index ae9a36b8c4839..0000000000000 --- a/articles/azure-sql/database/alerts-insights-configure-portal.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Set up alerts and notifications in the Azure portal -description: Use the Azure portal to create alerts, which can trigger notifications or automation when the conditions you specify are met. -services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: sravanisaluru -ms.author: srsaluru -ms.date: "03/23/2022" -ms.reviewer: kendralittle, mathoma, wiassaf ---- -# Create alerts for Azure SQL Database and Azure Synapse Analytics using the Azure portal -[!INCLUDE[appliesto-sqldb-asa](../includes/appliesto-sqldb-asa.md)] - - -## Overview - -This article shows you how to set up alerts for databases in Azure SQL Database and Azure Synapse Analytics using the Azure portal. Alerts can send you an email or call a web hook when some metric (for example database size or CPU usage) reaches the threshold. - -> [!NOTE] -> For Azure SQL Managed Instance specific instructions, see [Create alerts for Azure SQL Managed Instance](../managed-instance/alerts-create.md). - -You can receive an alert based on monitoring metrics for, or events on, your Azure services. - -* **Metric values** - The alert triggers when the value of a specified metric crosses a threshold you assign in either direction.
That is, it triggers both when the condition is first met and then afterwards when that condition is no longer being met. -* **Activity log events** - An alert can trigger on *every* event, or, only when a certain number of events occur. - -You can configure an alert to do the following when it triggers: - -* Send email notifications to the service administrator and co-administrators -* Send email to additional emails that you specify. -* Call a webhook - -You can configure and get information about alert rules using - -* [The Azure portal](../../azure-monitor/alerts/alerts-classic-portal.md) -* [PowerShell](../../azure-monitor/alerts/alerts-classic-portal.md) -* [A command-line interface (CLI)](../../azure-monitor/alerts/alerts-classic-portal.md) -* [Azure Monitor REST API](/rest/api/monitor/alertrules) - -## Create an alert rule on a metric with the Azure portal - -1. In the [portal](https://portal.azure.com/), locate the resource you are interested in monitoring and select it. -2. Select **Alerts** in the Monitoring section. The text and icon may vary slightly for different resources. - - ![Monitoring](./media/alerts-insights-configure-portal/Alerts.png) - -3. Select the **New alert rule** button to open the **Create rule** page. - ![Create rule](./media/alerts-insights-configure-portal/create-rule.png) - -4. In the **Condition** section, click **Add**. - ![Define condition](./media/alerts-insights-configure-portal/create-rule.png) -5. In the **Configure signal logic** page, select a signal. - ![Select signal](./media/alerts-insights-configure-portal/select-signal.png) -6. After selecting a signal, such as **CPU percentage**, the **Configure signal logic** page appears. - ![Configure signal logic](./media/alerts-insights-configure-portal/configure-signal-logic.png) -7. On this page, configure that threshold type, operator, aggregation type, threshold value, aggregation granularity, and frequency of evaluation. Then click **Done**. -8. 
On the **Create rule**, select an existing **Action group** or create a new group. An action group enables you to define the action to be taken when an alert condition occurs. - ![Define action group](./media/alerts-insights-configure-portal/action-group.png) - -9. Define a name for the rule, provide an optional description, choose a severity level for the rule, choose whether to enable the rule upon rule creation, and then click **Create rule alert** to create the metric rule alert. - -Within 10 minutes, the alert is active and triggers as previously described. - -## Next steps - -* Learn more about [configuring webhooks in alerts](../../azure-monitor/alerts/alerts-webhooks.md). \ No newline at end of file diff --git a/articles/azure-sql/database/always-encrypted-azure-key-vault-configure.md b/articles/azure-sql/database/always-encrypted-azure-key-vault-configure.md deleted file mode 100644 index 3749e44f6c53d..0000000000000 --- a/articles/azure-sql/database/always-encrypted-azure-key-vault-configure.md +++ /dev/null @@ -1,605 +0,0 @@ ---- -title: "Configure Always Encrypted by using Azure Key Vault" -description: This tutorial shows you how to secure sensitive data in a database in Azure SQL Database with data encryption by using the Always Encrypted wizard in SQL Server Management Studio. 
-keywords: data encryption, encryption key, cloud encryption -services: sql-database -ms.service: sql-database -ms.subservice: security -ms.custom: sqldbrb=1, devx-track-azurecli, devx-track-azurepowershell -ms.topic: how-to -author: VanMSFT -ms.author: vanto -ms.reviewer: kendralittle, mathoma -ms.date: 11/02/2020 ---- -# Configure Always Encrypted by using Azure Key Vault - -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -This article shows you how to secure sensitive data in a database in Azure SQL Database with data encryption by using the [Always Encrypted wizard](/sql/relational-databases/security/encryption/always-encrypted-wizard) in [SQL Server Management Studio (SSMS)](/sql/ssms/sql-server-management-studio-ssms). It also includes instructions that will show you how to store each encryption key in Azure Key Vault. - -Always Encrypted is a data encryption technology that helps protect sensitive data at rest on the server, during movement between client and server, and while the data is in use. Always Encrypted ensures that sensitive data never appears as plaintext inside the database system. After you configure data encryption, only client applications or app servers that have access to the keys can access plaintext data. For detailed information, see [Always Encrypted (Database Engine)](/sql/relational-databases/security/encryption/always-encrypted-database-engine). - -After you configure the database to use Always Encrypted, you will create a client application in C# with Visual Studio to work with the encrypted data. - -Follow the steps in this article and learn how to set up Always Encrypted for your database in Azure SQL Database or SQL Managed Instance. In this article you will learn how to perform the following tasks: - -- Use the Always Encrypted wizard in SSMS to create [Always Encrypted keys](/sql/relational-databases/security/encryption/always-encrypted-database-engine#Anchor_3).
- - Create a [column master key (CMK)](/sql/t-sql/statements/create-column-master-key-transact-sql). - - Create a [column encryption key (CEK)](/sql/t-sql/statements/create-column-encryption-key-transact-sql). -- Create a database table and encrypt columns. -- Create an application that inserts, selects, and displays data from the encrypted columns. - -## Prerequisites - - -- An Azure account and subscription. If you don't have one, sign up for a [free trial](https://azure.microsoft.com/pricing/free-trial/). -- A database in [Azure SQL Database](single-database-create-quickstart.md) or [Azure SQL Managed Instance](../managed-instance/instance-create-quickstart.md). -- [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) version 13.0.700.242 or later. -- [.NET Framework 4.6](/dotnet/framework/) or later (on the client computer). -- [Visual Studio](https://www.visualstudio.com/downloads/download-visual-studio-vs.aspx). -- [Azure PowerShell](/powershell/azure/) or [Azure CLI](/cli/azure/install-azure-cli) - -## Enable client application access - -You must enable your client application to access your database in SQL Database by setting up an Azure Active Directory (Azure AD) application and copying the *Application ID* and *key* that you will need to authenticate your application. - -To get the *Application ID* and *key*, follow the steps in [create an Azure Active Directory application and service principal that can access resources](../../active-directory/develop/howto-create-service-principal-portal.md). - -## Create a key vault to store your keys - -Now that your client app is configured and you have your application ID, it's time to create a key vault and configure its access policy so you and your application can access the vault's secrets (the Always Encrypted keys). 
The *create*, *get*, *list*, *sign*, *verify*, *wrapKey*, and *unwrapKey* permissions are required for creating a new column master key and for setting up encryption with SQL Server Management Studio. - -You can quickly create a key vault by running the following script. For a detailed explanation of these commands and more information about creating and configuring a key vault, see [What is Azure Key Vault?](../../key-vault/general/overview.md). - -# [PowerShell](#tab/azure-powershell) - -> [!IMPORTANT] -> The PowerShell Azure Resource Manager (RM) module is still supported by Azure SQL Database, but all future development is for the Az.Sql module. The AzureRM module will continue to receive bug fixes until at least December 2020. The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. For more about their compatibility, see [Introducing the new Azure PowerShell Az module](/powershell/azure/new-azureps-module-az). - -```powershell -$subscriptionName = '' -$userPrincipalName = '' -$applicationId = '' -$resourceGroupName = '' # use the same resource group name when creating your SQL Database below -$location = '' -$vaultName = '' - -Connect-AzAccount -$subscriptionId = (Get-AzSubscription -SubscriptionName $subscriptionName).Id -Set-AzContext -SubscriptionId $subscriptionId - -New-AzResourceGroup -Name $resourceGroupName -Location $location -New-AzKeyVault -VaultName $vaultName -ResourceGroupName $resourceGroupName -Location $location - -Set-AzKeyVaultAccessPolicy -VaultName $vaultName -ResourceGroupName $resourceGroupName -PermissionsToKeys create,get,wrapKey,unwrapKey,sign,verify,list -UserPrincipalName $userPrincipalName -Set-AzKeyVaultAccessPolicy -VaultName $vaultName -ResourceGroupName $resourceGroupName -ServicePrincipalName $applicationId -PermissionsToKeys get,wrapKey,unwrapKey,sign,verify,list -``` - -# [Azure CLI](#tab/azure-cli) - -```azurecli -$subscriptionName = '' -$userPrincipalName = '' -$applicationId 
= '' -$resourceGroupName = '' # use the same resource group name when creating your database in Azure SQL Database below -$location = '' -$vaultName = '' - -az login -az account set --subscription $subscriptionName - -az group create --location $location --name $resourceGroupName - -az keyvault create --name $vaultName --resource-group $resourceGroupName --location $location - -az keyvault set-policy --name $vaultName --key-permissions create get list sign unwrapKey verify wrapKey --resource-group $resourceGroupName --upn $userPrincipalName -az keyvault set-policy --name $vaultName --key-permissions get list sign unwrapKey verify wrapKey --resource-group $resourceGroupName --spn $applicationId -``` - ---- - -## Connect with SSMS - -Open SQL Server Management Studio (SSMS) and connect to the server or managed instance hosting your database. - -1. Open SSMS. (Go to **Connect** > **Database Engine** to open the **Connect to Server** window if it isn't open.) - -2. Enter your server name or instance name and credentials. - - ![Copy the connection string](./media/always-encrypted-azure-key-vault-configure/ssms-connect.png) - -If the **New Firewall Rule** window opens, sign in to Azure and let SSMS create a new firewall rule for you. - -## Create a table - -In this section, you will create a table to hold patient data. It's not initially encrypted--you will configure encryption in the next section. - -1. Expand **Databases**. -2. Right-click the database and click **New Query**. -3. Paste the following Transact-SQL (T-SQL) into the new query window and **Execute** it.
- -```sql -CREATE TABLE [dbo].[Patients]( - [PatientId] [int] IDENTITY(1,1), - [SSN] [char](11) NOT NULL, - [FirstName] [nvarchar](50) NULL, - [LastName] [nvarchar](50) NULL, - [MiddleName] [nvarchar](50) NULL, - [StreetAddress] [nvarchar](50) NULL, - [City] [nvarchar](50) NULL, - [ZipCode] [char](5) NULL, - [State] [char](2) NULL, - [BirthDate] [date] NOT NULL - PRIMARY KEY CLUSTERED ([PatientId] ASC) ON [PRIMARY] ); -GO -``` - -## Encrypt columns (configure Always Encrypted) - -SSMS provides a wizard that helps you easily configure Always Encrypted by setting up the column master key, column encryption key, and encrypted columns for you. - -1. Expand **Databases** > **Clinic** > **Tables**. -2. Right-click the **Patients** table and select **Encrypt Columns** to open the Always Encrypted wizard: - - ![Screenshot that highlights the Encrypt Columns... menu option.](./media/always-encrypted-azure-key-vault-configure/encrypt-columns.png) - -The Always Encrypted wizard includes the following sections: **Column Selection**, **Master Key Configuration**, **Validation**, and **Summary**. - -### Column Selection - -Click **Next** on the **Introduction** page to open the **Column Selection** page. On this page, you will select which columns you want to encrypt, [the type of encryption, and what column encryption key (CEK)](/sql/relational-databases/security/encryption/always-encrypted-wizard#Anchor_2) to use. - -Encrypt **SSN** and **BirthDate** information for each patient. The SSN column will use deterministic encryption, which supports equality lookups, joins, and group by. The BirthDate column will use randomized encryption, which does not support operations. - -Set the **Encryption Type** for the SSN column to **Deterministic** and the BirthDate column to **Randomized**. Click **Next**. 
- -![Encrypt columns](./media/always-encrypted-azure-key-vault-configure/column-selection.png) - -### Master Key Configuration - -The **Master Key Configuration** page is where you set up your CMK and select the key store provider where the CMK will be stored. Currently, you can store a CMK in the Windows certificate store, Azure Key Vault, or a hardware security module (HSM). - -This tutorial shows how to store your keys in Azure Key Vault. - -1. Select **Azure Key Vault**. -2. Select the desired key vault from the drop-down list. -3. Click **Next**. - -![Master key configuration](./media/always-encrypted-azure-key-vault-configure/master-key-configuration.png) - -### Validation - -You can encrypt the columns now or save a PowerShell script to run later. For this tutorial, select **Proceed to finish now** and click **Next**. - -### Summary - -Verify that the settings are all correct and click **Finish** to complete the setup for Always Encrypted. - -![Screenshot shows the results page with tasks marked as passed.](./media/always-encrypted-azure-key-vault-configure/summary.png) - -### Verify the wizard's actions - -After the wizard is finished, your database is set up for Always Encrypted. The wizard performed the following actions: - -- Created a column master key and stored it in Azure Key Vault. -- Created a column encryption key and stored it in Azure Key Vault. -- Configured the selected columns for encryption. The Patients table currently has no data, but any existing data in the selected columns is now encrypted. - -You can verify the creation of the keys in SSMS by expanding **Clinic** > **Security** > **Always Encrypted Keys**. - -## Create a client application that works with the encrypted data - -Now that Always Encrypted is set up, you can build an application that performs *inserts* and *selects* on the encrypted columns. 
- -> [!IMPORTANT] -> Your application must use [SqlParameter](/dotnet/api/system.data.sqlclient.sqlparameter) objects when passing plaintext data to the server with Always Encrypted columns. Passing literal values without using SqlParameter objects will result in an exception. - -1. Open Visual Studio and create a new C# **Console Application** (Visual Studio 2015 and earlier) or **Console App (.NET Framework)** (Visual Studio 2017 and later). Make sure your project is set to **.NET Framework 4.6** or later. -2. Name the project **AlwaysEncryptedConsoleAKVApp** and click **OK**. -3. Install the following NuGet packages by going to **Tools** > **NuGet Package Manager** > **Package Manager Console**. - -Run these two lines of code in the Package Manager Console: - - ```powershell - Install-Package Microsoft.SqlServer.Management.AlwaysEncrypted.AzureKeyVaultProvider - Install-Package Microsoft.IdentityModel.Clients.ActiveDirectory - ``` - -## Modify your connection string to enable Always Encrypted - -This section explains how to enable Always Encrypted in your database connection string. - -To enable Always Encrypted, you need to add the **Column Encryption Setting** keyword to your connection string and set it to **Enabled**. - -You can set this directly in the connection string, or you can set it by using [SqlConnectionStringBuilder](/dotnet/api/system.data.sqlclient.sqlconnectionstringbuilder). The sample application in the next section shows how to use **SqlConnectionStringBuilder**. - -### Enable Always Encrypted in the connection string - -Add the following keyword to your connection string. 
- - `Column Encryption Setting=Enabled` - -### Enable Always Encrypted with SqlConnectionStringBuilder - -The following code shows how to enable Always Encrypted by setting [SqlConnectionStringBuilder.ColumnEncryptionSetting](/dotnet/api/system.data.sqlclient.sqlconnectionstringbuilder.columnencryptionsetting) to [Enabled](/dotnet/api/system.data.sqlclient.sqlconnectioncolumnencryptionsetting). - -```csharp -// Instantiate a SqlConnectionStringBuilder. -SqlConnectionStringBuilder connStringBuilder = new SqlConnectionStringBuilder("replace with your connection string"); - -// Enable Always Encrypted. -connStringBuilder.ColumnEncryptionSetting = SqlConnectionColumnEncryptionSetting.Enabled; -``` - -## Register the Azure Key Vault provider -The following code shows how to register the Azure Key Vault provider with the ADO.NET driver. - -```csharp -private static ClientCredential _clientCredential; - -static void InitializeAzureKeyVaultProvider() { - _clientCredential = new ClientCredential(applicationId, clientKey); - - SqlColumnEncryptionAzureKeyVaultProvider azureKeyVaultProvider = new SqlColumnEncryptionAzureKeyVaultProvider(GetToken); - - Dictionary providers = new Dictionary(); - - providers.Add(SqlColumnEncryptionAzureKeyVaultProvider.ProviderName, azureKeyVaultProvider); - SqlConnection.RegisterColumnEncryptionKeyStoreProviders(providers); -} -``` - -## Always Encrypted sample console application - -This sample demonstrates how to: - -- Modify your connection string to enable Always Encrypted. -- Register Azure Key Vault as the application's key store provider. -- Insert data into the encrypted columns. -- Select a record by filtering for a specific value in an encrypted column. - -Replace the contents of *Program.cs* with the following code. Replace the connection string for the global connectionString variable in the line that directly precedes the Main method with your valid connection string from the Azure portal. 
This is the only change you need to make to this code. - -Run the app to see Always Encrypted in action. - -```csharp -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading.Tasks; -using System.Data; -using System.Data.SqlClient; -using Microsoft.IdentityModel.Clients.ActiveDirectory; -using Microsoft.SqlServer.Management.AlwaysEncrypted.AzureKeyVaultProvider; - -namespace AlwaysEncryptedConsoleAKVApp { - class Program { - // Update this line with your Clinic database connection string from the Azure portal. - static string connectionString = @""; - static string applicationId = @""; - static string clientKey = ""; - - static void Main(string[] args) { - InitializeAzureKeyVaultProvider(); - - Console.WriteLine("Signed in as: " + _clientCredential.ClientId); - - Console.WriteLine("Original connection string copied from the Azure portal:"); - Console.WriteLine(connectionString); - - // Create a SqlConnectionStringBuilder. - SqlConnectionStringBuilder connStringBuilder = - new SqlConnectionStringBuilder(connectionString); - - // Enable Always Encrypted for the connection. - // This is the only change specific to Always Encrypted - connStringBuilder.ColumnEncryptionSetting = - SqlConnectionColumnEncryptionSetting.Enabled; - - Console.WriteLine(Environment.NewLine + "Updated connection string with Always Encrypted enabled:"); - Console.WriteLine(connStringBuilder.ConnectionString); - - // Update the connection string with a password supplied at runtime. - Console.WriteLine(Environment.NewLine + "Enter server password:"); - connStringBuilder.Password = Console.ReadLine(); - - // Assign the updated connection string to our global variable. - connectionString = connStringBuilder.ConnectionString; - - // Delete all records to restart this demo app. - ResetPatientsTable(); - - // Add sample data to the Patients table. 
- Console.Write(Environment.NewLine + "Adding sample patient data to the database..."); - - InsertPatient(new Patient() { - SSN = "999-99-0001", - FirstName = "Orlando", - LastName = "Gee", - BirthDate = DateTime.Parse("01/04/1964") - }); - InsertPatient(new Patient() { - SSN = "999-99-0002", - FirstName = "Keith", - LastName = "Harris", - BirthDate = DateTime.Parse("06/20/1977") - }); - InsertPatient(new Patient() { - SSN = "999-99-0003", - FirstName = "Donna", - LastName = "Carreras", - BirthDate = DateTime.Parse("02/09/1973") - }); - InsertPatient(new Patient() { - SSN = "999-99-0004", - FirstName = "Janet", - LastName = "Gates", - BirthDate = DateTime.Parse("08/31/1985") - }); - InsertPatient(new Patient() { - SSN = "999-99-0005", - FirstName = "Lucy", - LastName = "Harrington", - BirthDate = DateTime.Parse("05/06/1993") - }); - - // Fetch and display all patients. - Console.WriteLine(Environment.NewLine + "All the records currently in the Patients table:"); - - foreach (Patient patient in SelectAllPatients()) { - Console.WriteLine(patient.FirstName + " " + patient.LastName + "\tSSN: " + patient.SSN + "\tBirthdate: " + patient.BirthDate); - } - - // Get patients by SSN. - Console.WriteLine(Environment.NewLine + "Now lets locate records by searching the encrypted SSN column."); - - string ssn; - - // This very simple validation only checks that the user entered 11 characters. - // In production be sure to check all user input and use the best validation for your specific application. - do { - Console.WriteLine("Please enter a valid SSN (ex. 999-99-0003):"); - ssn = Console.ReadLine(); - } while (ssn.Length != 11); - - // The example allows duplicate SSN entries so we will return all records - // that match the provided value and store the results in selectedPatients. - Patient selectedPatient = SelectPatientBySSN(ssn); - - // Check if any records were returned and display our query results. 
- if (selectedPatient != null) { - Console.WriteLine("Patient found with SSN = " + ssn); - Console.WriteLine(selectedPatient.FirstName + " " + selectedPatient.LastName + "\tSSN: " - + selectedPatient.SSN + "\tBirthdate: " + selectedPatient.BirthDate); - } - else { - Console.WriteLine("No patients found with SSN = " + ssn); - } - - Console.WriteLine("Press Enter to exit..."); - Console.ReadLine(); - } - - private static ClientCredential _clientCredential; - - static void InitializeAzureKeyVaultProvider() { - _clientCredential = new ClientCredential(applicationId, clientKey); - - SqlColumnEncryptionAzureKeyVaultProvider azureKeyVaultProvider = - new SqlColumnEncryptionAzureKeyVaultProvider(GetToken); - - Dictionary providers = - new Dictionary(); - - providers.Add(SqlColumnEncryptionAzureKeyVaultProvider.ProviderName, azureKeyVaultProvider); - SqlConnection.RegisterColumnEncryptionKeyStoreProviders(providers); - } - - public async static Task GetToken(string authority, string resource, string scope) { - var authContext = new AuthenticationContext(authority); - AuthenticationResult result = await authContext.AcquireTokenAsync(resource, _clientCredential); - - if (result == null) - throw new InvalidOperationException("Failed to obtain the access token"); - return result.AccessToken; - } - - static int InsertPatient(Patient newPatient) { - int returnValue = 0; - - string sqlCmdText = @"INSERT INTO [dbo].[Patients] ([SSN], [FirstName], [LastName], [BirthDate]) - VALUES (@SSN, @FirstName, @LastName, @BirthDate);"; - - SqlCommand sqlCmd = new SqlCommand(sqlCmdText); - - SqlParameter paramSSN = new SqlParameter(@"@SSN", newPatient.SSN); - paramSSN.DbType = DbType.AnsiStringFixedLength; - paramSSN.Direction = ParameterDirection.Input; - paramSSN.Size = 11; - - SqlParameter paramFirstName = new SqlParameter(@"@FirstName", newPatient.FirstName); - paramFirstName.DbType = DbType.String; - paramFirstName.Direction = ParameterDirection.Input; - - SqlParameter paramLastName = new 
SqlParameter(@"@LastName", newPatient.LastName); - paramLastName.DbType = DbType.String; - paramLastName.Direction = ParameterDirection.Input; - - SqlParameter paramBirthDate = new SqlParameter(@"@BirthDate", newPatient.BirthDate); - paramBirthDate.SqlDbType = SqlDbType.Date; - paramBirthDate.Direction = ParameterDirection.Input; - - sqlCmd.Parameters.Add(paramSSN); - sqlCmd.Parameters.Add(paramFirstName); - sqlCmd.Parameters.Add(paramLastName); - sqlCmd.Parameters.Add(paramBirthDate); - - using (sqlCmd.Connection = new SqlConnection(connectionString)) { - try { - sqlCmd.Connection.Open(); - sqlCmd.ExecuteNonQuery(); - } - catch (Exception ex) { - returnValue = 1; - Console.WriteLine("The following error was encountered: "); - Console.WriteLine(ex.Message); - Console.WriteLine(Environment.NewLine + "Press Enter key to exit"); - Console.ReadLine(); - Environment.Exit(0); - } - } - return returnValue; - } - - - static List SelectAllPatients() { - List patients = new List(); - - SqlCommand sqlCmd = new SqlCommand( - "SELECT [SSN], [FirstName], [LastName], [BirthDate] FROM [dbo].[Patients]", - new SqlConnection(connectionString)); - - using (sqlCmd.Connection = new SqlConnection(connectionString)) - - using (sqlCmd.Connection = new SqlConnection(connectionString)) { - try { - sqlCmd.Connection.Open(); - SqlDataReader reader = sqlCmd.ExecuteReader(); - - if (reader.HasRows) { - while (reader.Read()) { - patients.Add(new Patient() { - SSN = reader[0].ToString(), - FirstName = reader[1].ToString(), - LastName = reader["LastName"].ToString(), - BirthDate = (DateTime)reader["BirthDate"] - }); - } - } - } - catch (Exception ex) { - throw; - } - } - - return patients; - } - - static Patient SelectPatientBySSN(string ssn) { - Patient patient = new Patient(); - - SqlCommand sqlCmd = new SqlCommand( - "SELECT [SSN], [FirstName], [LastName], [BirthDate] FROM [dbo].[Patients] WHERE [SSN]=@SSN", - new SqlConnection(connectionString)); - - SqlParameter paramSSN = new 
SqlParameter(@"@SSN", ssn); - paramSSN.DbType = DbType.AnsiStringFixedLength; - paramSSN.Direction = ParameterDirection.Input; - paramSSN.Size = 11; - - sqlCmd.Parameters.Add(paramSSN); - - using (sqlCmd.Connection = new SqlConnection(connectionString)) { - try { - sqlCmd.Connection.Open(); - SqlDataReader reader = sqlCmd.ExecuteReader(); - - if (reader.HasRows) { - while (reader.Read()) { - patient = new Patient() { - SSN = reader[0].ToString(), - FirstName = reader[1].ToString(), - LastName = reader["LastName"].ToString(), - BirthDate = (DateTime)reader["BirthDate"] - }; - } - } - else { - patient = null; - } - } - catch (Exception ex) { - throw; - } - } - return patient; - } - - // This method simply deletes all records in the Patients table to reset our demo. - static int ResetPatientsTable() { - int returnValue = 0; - - SqlCommand sqlCmd = new SqlCommand("DELETE FROM Patients"); - using (sqlCmd.Connection = new SqlConnection(connectionString)) { - try { - sqlCmd.Connection.Open(); - sqlCmd.ExecuteNonQuery(); - - } - catch (Exception ex) { - returnValue = 1; - } - } - return returnValue; - } - } - - class Patient { - public string SSN { get; set; } - public string FirstName { get; set; } - public string LastName { get; set; } - public DateTime BirthDate { get; set; } - } -} -``` - -## Verify that the data is encrypted - -You can quickly check that the actual data on the server is encrypted by querying the Patients data with SSMS (using your current connection where **Column Encryption Setting** is not yet enabled). - -Run the following query on the Clinic database. - -```sql -SELECT FirstName, LastName, SSN, BirthDate FROM Patients; -``` - -You can see that the encrypted columns do not contain any plaintext data. 
- - ![Screenshot that shows that the encrypted columns do not contain any plaintext data.](./media/always-encrypted-azure-key-vault-configure/ssms-encrypted.png) - -To use SSMS to access the plaintext data, you first need to ensure that the user has proper permissions to the Azure Key Vault: *get*, *unwrapKey*, and *verify*. For detailed information, see [Create and Store Column Master Keys (Always Encrypted)](/sql/relational-databases/security/encryption/create-and-store-column-master-keys-always-encrypted). - -Then add the *Column Encryption Setting=enabled* parameter during your connection. - -1. In SSMS, right-click your server in **Object Explorer** and choose **Disconnect**. -2. Click **Connect** > **Database Engine** to open the **Connect to Server** window and click **Options**. -3. Click **Additional Connection Parameters** and type **Column Encryption Setting=enabled**. - - ![Screenshot that shows the Additional Correction Parameters tab.](./media/always-encrypted-azure-key-vault-configure/ssms-connection-parameter.png) - -4. Run the following query on the Clinic database. - - ```sql - SELECT FirstName, LastName, SSN, BirthDate FROM Patients; - ``` - - You can now see the plaintext data in the encrypted columns. - - ![New console application](./media/always-encrypted-azure-key-vault-configure/ssms-plaintext.png) - -## Next steps - -After your database is configured to use Always Encrypted, you may want to do the following: - -- [Rotate and clean up your keys](/sql/relational-databases/security/encryption/configure-always-encrypted-using-sql-server-management-studio). -- [Migrate data that is already encrypted with Always Encrypted](/sql/relational-databases/security/encryption/migrate-sensitive-data-protected-by-always-encrypted). 
- -## Related information - -- [Always Encrypted (client development)](/sql/relational-databases/security/encryption/always-encrypted-client-development) -- [Transparent data encryption](/sql/relational-databases/security/encryption/transparent-data-encryption) -- [SQL Server encryption](/sql/relational-databases/security/encryption/sql-server-encryption) -- [Always Encrypted wizard](/sql/relational-databases/security/encryption/always-encrypted-wizard) -- [Always Encrypted blog](/archive/blogs/sqlsecurity/always-encrypted-key-metadata) diff --git a/articles/azure-sql/database/always-encrypted-certificate-store-configure.md b/articles/azure-sql/database/always-encrypted-certificate-store-configure.md deleted file mode 100644 index ddc29b264ccbf..0000000000000 --- a/articles/azure-sql/database/always-encrypted-certificate-store-configure.md +++ /dev/null @@ -1,545 +0,0 @@ ---- -title: Configure Always Encrypted by using the Windows certificate store -description: This article shows you how to secure sensitive data in Azure SQL Database with database encryption by using the Always Encrypted wizard in SQL Server Management Studio (SSMS). It also shows you how to store your encryption keys in the Windows certificate store. -keywords: encrypt data, sql encryption, database encryption, sensitive data, Always Encrypted -services: sql-database -ms.service: sql-database -ms.subservice: security -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: VanMSFT -ms.author: vanto -ms.reviewer: -ms.date: 04/23/2020 ---- - -# Configure Always Encrypted by using the Windows certificate store - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article shows you how to secure sensitive data in Azure SQL Database or Azure SQL Managed Instance with database encryption by using the [Always Encrypted wizard](/sql/relational-databases/security/encryption/always-encrypted-wizard) in [SQL Server Management Studio (SSMS)](/sql/ssms/sql-server-management-studio-ssms).
It also shows you how to store your encryption keys in the Windows certificate store. - -Always Encrypted is a data encryption technology that helps protect sensitive data at rest on the server, during movement between client and server, and while the data is in use, ensuring that sensitive data never appears as plaintext inside the database system. After you encrypt data, only client applications or app servers that have access to the keys can access plaintext data. For detailed information, see [Always Encrypted (Database Engine)](/sql/relational-databases/security/encryption/always-encrypted-database-engine). - -After configuring the database to use Always Encrypted, you will create a client application in C# with Visual Studio to work with the encrypted data. - -Follow the steps in this article to learn how to set up Always Encrypted for SQL Database or SQL Managed Instance. In this article, you will learn how to perform the following tasks: - -* Use the Always Encrypted wizard in SSMS to create [Always Encrypted Keys](/sql/relational-databases/security/encryption/always-encrypted-database-engine#Anchor_3). - * Create a [Column Master Key (CMK)](/sql/t-sql/statements/create-column-master-key-transact-sql). - * Create a [Column Encryption Key (CEK)](/sql/t-sql/statements/create-column-encryption-key-transact-sql). -* Create a database table and encrypt columns. -* Create an application that inserts, selects, and displays data from the encrypted columns. - -## Prerequisites - -For this tutorial, you'll need: - -* An Azure account and subscription. If you don't have one, sign up for a [free trial](https://azure.microsoft.com/pricing/free-trial/). -- A database in [Azure SQL Database](single-database-create-quickstart.md) or [Azure SQL Managed Instance](../managed-instance/instance-create-quickstart.md). -* [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) version 13.0.700.242 or later. 
-* [.NET Framework 4.6](/dotnet/framework/) or later (on the client computer). -* [Visual Studio](https://www.visualstudio.com/downloads/download-visual-studio-vs.aspx). - -## Enable client application access - -You must enable your client application to access SQL Database or SQL Managed Instance by setting up an Azure Active Directory (AAD) application and copying the *Application ID* and *key* that you will need to authenticate your application. - -To get the *Application ID* and *key*, follow the steps in [create an Azure Active Directory application and service principal that can access resources](../../active-directory/develop/howto-create-service-principal-portal.md). - - - -## Connect with SSMS - -Open SQL Server Management Studio (SSMS) and connect to the server or managed with your database. - -1. Open SSMS. (Click **Connect** > **Database Engine** to open the **Connect to Server** window if it is not open). -2. Enter your server name and credentials. - - ![Copy the connection string](./media/always-encrypted-certificate-store-configure/ssms-connect.png) - -If the **New Firewall Rule** window opens, sign in to Azure and let SSMS create a new firewall rule for you. - -## Create a table - -In this section, you will create a table to hold patient data. This will be a normal table initially--you will configure encryption in the next section. - -1. Expand **Databases**. -2. Right-click the **Clinic** database and click **New Query**. -3. Paste the following Transact-SQL (T-SQL) into the new query window and **Execute** it. 
- - ```tsql - CREATE TABLE [dbo].[Patients]( - [PatientId] [int] IDENTITY(1,1), - [SSN] [char](11) NOT NULL, - [FirstName] [nvarchar](50) NULL, - [LastName] [nvarchar](50) NULL, - [MiddleName] [nvarchar](50) NULL, - [StreetAddress] [nvarchar](50) NULL, - [City] [nvarchar](50) NULL, - [ZipCode] [char](5) NULL, - [State] [char](2) NULL, - [BirthDate] [date] NOT NULL - PRIMARY KEY CLUSTERED ([PatientId] ASC) ON [PRIMARY] ); - GO - ``` - -## Encrypt columns (configure Always Encrypted) - -SSMS provides a wizard to easily configure Always Encrypted by setting up the CMK, CEK, and encrypted columns for you. - -1. Expand **Databases** > **Clinic** > **Tables**. -2. Right-click the **Patients** table and select **Encrypt Columns** to open the Always Encrypted wizard: - - ![Screenshot that shows the Encrypt Colunns... menu option in the Patients table.](./media/always-encrypted-certificate-store-configure/encrypt-columns.png) - -The Always Encrypted wizard includes the following sections: **Column Selection**, **Master Key Configuration** (CMK), **Validation**, and **Summary**. - -### Column Selection - -Click **Next** on the **Introduction** page to open the **Column Selection** page. On this page, you will select which columns you want to encrypt, [the type of encryption, and what column encryption key (CEK)](/sql/relational-databases/security/encryption/always-encrypted-wizard#Anchor_2) to use. - -Encrypt **SSN** and **BirthDate** information for each patient. The **SSN** column will use deterministic encryption, which supports equality lookups, joins, and group by. The **BirthDate** column will use randomized encryption, which does not support operations. - -Set the **Encryption Type** for the **SSN** column to **Deterministic** and the **BirthDate** column to **Randomized**. Click **Next**. 
- -![Encrypt columns](./media/always-encrypted-certificate-store-configure/column-selection.png) - -### Master Key Configuration - -The **Master Key Configuration** page is where you set up your CMK and select the key store provider where the CMK will be stored. Currently, you can store a CMK in the Windows certificate store, Azure Key Vault, or a hardware security module (HSM). This tutorial shows how to store your keys in the Windows certificate store. - -Verify that **Windows certificate store** is selected and click **Next**. - -![Master key configuration](./media/always-encrypted-certificate-store-configure/master-key-configuration.png) - -### Validation - -You can encrypt the columns now or save a PowerShell script to run later. For this tutorial, select **Proceed to finish now** and click **Next**. - -### Summary - -Verify that the settings are all correct and click **Finish** to complete the setup for Always Encrypted. - -![Screenshot shows the results page with tasks marked as passed.](./media/always-encrypted-certificate-store-configure/summary.png) - -### Verify the wizard's actions - -After the wizard is finished, your database is set up for Always Encrypted. The wizard performed the following actions: - -* Created a CMK. -* Created a CEK. -* Configured the selected columns for encryption. Your **Patients** table currently has no data, but any existing data in the selected columns is now encrypted. - -You can verify the creation of the keys in SSMS by going to **Clinic** > **Security** > **Always Encrypted Keys**. You can now see the new keys that the wizard generated for you. - -## Create a client application that works with the encrypted data - -Now that Always Encrypted is set up, you can build an application that performs *inserts* and *selects* on the encrypted columns. To successfully run the sample application, you must run it on the same computer where you ran the Always Encrypted wizard. 
To run the application on another computer, you must deploy your Always Encrypted certificates to the computer running the client app. - -> [!IMPORTANT] -> Your application must use [SqlParameter](/dotnet/api/system.data.sqlclient.sqlparameter) objects when passing plaintext data to the server with Always Encrypted columns. Passing literal values without using SqlParameter objects will result in an exception. - -1. Open Visual Studio and create a new C# console application. Make sure your project is set to **.NET Framework 4.6** or later. -2. Name the project **AlwaysEncryptedConsoleApp** and click **OK**. - -![Screenshot that shows the newly named AlwaysEncryptedConsoleApp project.](./media/always-encrypted-certificate-store-configure/console-app.png) - -## Modify your connection string to enable Always Encrypted - -This section explains how to enable Always Encrypted in your database connection string. You will modify the console app you just created in the next section, "Always Encrypted sample console application." - -To enable Always Encrypted, you need to add the **Column Encryption Setting** keyword to your connection string and set it to **Enabled**. - -You can set this directly in the connection string, or you can set it by using a [SqlConnectionStringBuilder](/dotnet/api/system.data.sqlclient.sqlconnectionstringbuilder). The sample application in the next section shows how to use **SqlConnectionStringBuilder**. - -> [!NOTE] -> This is the only change required in a client application specific to Always Encrypted. If you have an existing application that stores its connection string externally (that is, in a config file), you might be able to enable Always Encrypted without changing any code. 
- -### Enable Always Encrypted in the connection string - -Add the following keyword to your connection string: - -`Column Encryption Setting=Enabled` - -### Enable Always Encrypted with a SqlConnectionStringBuilder - -The following code shows how to enable Always Encrypted by setting the [SqlConnectionStringBuilder.ColumnEncryptionSetting](/dotnet/api/system.data.sqlclient.sqlconnectionstringbuilder.columnencryptionsetting) to [Enabled](/dotnet/api/system.data.sqlclient.sqlconnectioncolumnencryptionsetting). - -```csharp -// Instantiate a SqlConnectionStringBuilder. -SqlConnectionStringBuilder connStringBuilder = - new SqlConnectionStringBuilder("replace with your connection string"); - -// Enable Always Encrypted. -connStringBuilder.ColumnEncryptionSetting = - SqlConnectionColumnEncryptionSetting.Enabled; -``` - -## Always Encrypted sample console application - -This sample demonstrates how to: - -* Modify your connection string to enable Always Encrypted. -* Insert data into the encrypted columns. -* Select a record by filtering for a specific value in an encrypted column. - -Replace the contents of **Program.cs** with the following code. Replace the connection string for the global connectionString variable in the line directly above the Main method with your valid connection string from the Azure portal. This is the only change you need to make to this code. - -Run the app to see Always Encrypted in action. - -```cs -using System; -using System.Collections.Generic; -using System.Data; -using System.Data.SqlClient; -using System.Globalization; - -namespace AlwaysEncryptedConsoleApp -{ - class Program - { - // Update this line with your Clinic database connection string from the Azure portal. 
- static string connectionString = @"Data Source = SPE-T640-01.sys-sqlsvr.local; Initial Catalog = Clinic; Integrated Security = true"; - - static void Main(string[] args) - { - Console.WriteLine("Original connection string copied from the Azure portal:"); - Console.WriteLine(connectionString); - - // Create a SqlConnectionStringBuilder. - SqlConnectionStringBuilder connStringBuilder = - new SqlConnectionStringBuilder(connectionString); - - // Enable Always Encrypted for the connection. - // This is the only change specific to Always Encrypted - connStringBuilder.ColumnEncryptionSetting = - SqlConnectionColumnEncryptionSetting.Enabled; - - Console.WriteLine(Environment.NewLine + "Updated connection string with Always Encrypted enabled:"); - Console.WriteLine(connStringBuilder.ConnectionString); - - // Update the connection string with a password supplied at runtime. - Console.WriteLine(Environment.NewLine + "Enter server password:"); - connStringBuilder.Password = Console.ReadLine(); - - // Assign the updated connection string to our global variable. - connectionString = connStringBuilder.ConnectionString; - - - // Delete all records to restart this demo app. - ResetPatientsTable(); - - // Add sample data to the Patients table. 
- Console.Write(Environment.NewLine + "Adding sample patient data to the database..."); - - CultureInfo culture = CultureInfo.CreateSpecificCulture("en-US"); - InsertPatient(new Patient() - { - SSN = "999-99-0001", - FirstName = "Orlando", - LastName = "Gee", - BirthDate = DateTime.Parse("01/04/1964", culture) - }); - InsertPatient(new Patient() - { - SSN = "999-99-0002", - FirstName = "Keith", - LastName = "Harris", - BirthDate = DateTime.Parse("06/20/1977", culture) - }); - InsertPatient(new Patient() - { - SSN = "999-99-0003", - FirstName = "Donna", - LastName = "Carreras", - BirthDate = DateTime.Parse("02/09/1973", culture) - }); - InsertPatient(new Patient() - { - SSN = "999-99-0004", - FirstName = "Janet", - LastName = "Gates", - BirthDate = DateTime.Parse("08/31/1985", culture) - }); - InsertPatient(new Patient() - { - SSN = "999-99-0005", - FirstName = "Lucy", - LastName = "Harrington", - BirthDate = DateTime.Parse("05/06/1993", culture) - }); - - - // Fetch and display all patients. - Console.WriteLine(Environment.NewLine + "All the records currently in the Patients table:"); - - foreach (Patient patient in SelectAllPatients()) - { - Console.WriteLine(patient.FirstName + " " + patient.LastName + "\tSSN: " + patient.SSN + "\tBirthdate: " + patient.BirthDate); - } - - // Get patients by SSN. - Console.WriteLine(Environment.NewLine + "Now let's locate records by searching the encrypted SSN column."); - - string ssn; - - // This very simple validation only checks that the user entered 11 characters. - // In production be sure to check all user input and use the best validation for your specific application. - do - { - Console.WriteLine("Please enter a valid SSN (ex. 123-45-6789):"); - ssn = Console.ReadLine(); - } while (ssn.Length != 11); - - // The example allows duplicate SSN entries so we will return all records - // that match the provided value and store the results in selectedPatients. 
- Patient selectedPatient = SelectPatientBySSN(ssn); - - // Check if any records were returned and display our query results. - if (selectedPatient != null) - { - Console.WriteLine("Patient found with SSN = " + ssn); - Console.WriteLine(selectedPatient.FirstName + " " + selectedPatient.LastName + "\tSSN: " - + selectedPatient.SSN + "\tBirthdate: " + selectedPatient.BirthDate); - } - else - { - Console.WriteLine("No patients found with SSN = " + ssn); - } - - Console.WriteLine("Press Enter to exit..."); - Console.ReadLine(); - } - - - static int InsertPatient(Patient newPatient) - { - int returnValue = 0; - - string sqlCmdText = @"INSERT INTO [dbo].[Patients] ([SSN], [FirstName], [LastName], [BirthDate]) - VALUES (@SSN, @FirstName, @LastName, @BirthDate);"; - - SqlCommand sqlCmd = new SqlCommand(sqlCmdText); - - - SqlParameter paramSSN = new SqlParameter(@"@SSN", newPatient.SSN); - paramSSN.DbType = DbType.AnsiStringFixedLength; - paramSSN.Direction = ParameterDirection.Input; - paramSSN.Size = 11; - - SqlParameter paramFirstName = new SqlParameter(@"@FirstName", newPatient.FirstName); - paramFirstName.DbType = DbType.String; - paramFirstName.Direction = ParameterDirection.Input; - - SqlParameter paramLastName = new SqlParameter(@"@LastName", newPatient.LastName); - paramLastName.DbType = DbType.String; - paramLastName.Direction = ParameterDirection.Input; - - SqlParameter paramBirthDate = new SqlParameter(@"@BirthDate", newPatient.BirthDate); - paramBirthDate.SqlDbType = SqlDbType.Date; - paramBirthDate.Direction = ParameterDirection.Input; - - sqlCmd.Parameters.Add(paramSSN); - sqlCmd.Parameters.Add(paramFirstName); - sqlCmd.Parameters.Add(paramLastName); - sqlCmd.Parameters.Add(paramBirthDate); - - using (sqlCmd.Connection = new SqlConnection(connectionString)) - { - try - { - sqlCmd.Connection.Open(); - sqlCmd.ExecuteNonQuery(); - } - catch (Exception ex) - { - returnValue = 1; - Console.WriteLine("The following error was encountered: "); - 
Console.WriteLine(ex.Message); - Console.WriteLine(Environment.NewLine + "Press Enter key to exit"); - Console.ReadLine(); - Environment.Exit(0); - } - } - return returnValue; - } - - - static List SelectAllPatients() - { - List patients = new List(); - - - SqlCommand sqlCmd = new SqlCommand( - "SELECT [SSN], [FirstName], [LastName], [BirthDate] FROM [dbo].[Patients]", - new SqlConnection(connectionString)); - - - using (sqlCmd.Connection = new SqlConnection(connectionString)) - - using (sqlCmd.Connection = new SqlConnection(connectionString)) - { - try - { - sqlCmd.Connection.Open(); - SqlDataReader reader = sqlCmd.ExecuteReader(); - - if (reader.HasRows) - { - while (reader.Read()) - { - patients.Add(new Patient() - { - SSN = reader[0].ToString(), - FirstName = reader[1].ToString(), - LastName = reader["LastName"].ToString(), - BirthDate = (DateTime)reader["BirthDate"] - }); - } - } - } - catch (Exception ex) - { - throw; - } - } - - return patients; - } - - - static Patient SelectPatientBySSN(string ssn) - { - Patient patient = new Patient(); - - SqlCommand sqlCmd = new SqlCommand( - "SELECT [SSN], [FirstName], [LastName], [BirthDate] FROM [dbo].[Patients] WHERE [SSN]=@SSN", - new SqlConnection(connectionString)); - - SqlParameter paramSSN = new SqlParameter(@"@SSN", ssn); - paramSSN.DbType = DbType.AnsiStringFixedLength; - paramSSN.Direction = ParameterDirection.Input; - paramSSN.Size = 11; - - sqlCmd.Parameters.Add(paramSSN); - - - using (sqlCmd.Connection = new SqlConnection(connectionString)) - { - try - { - sqlCmd.Connection.Open(); - SqlDataReader reader = sqlCmd.ExecuteReader(); - - if (reader.HasRows) - { - while (reader.Read()) - { - patient = new Patient() - { - SSN = reader[0].ToString(), - FirstName = reader[1].ToString(), - LastName = reader["LastName"].ToString(), - BirthDate = (DateTime)reader["BirthDate"] - }; - } - } - else - { - patient = null; - } - } - catch (Exception ex) - { - throw; - } - } - return patient; - } - - - // This method simply 
deletes all records in the Patients table to reset our demo. - static int ResetPatientsTable() - { - int returnValue = 0; - - SqlCommand sqlCmd = new SqlCommand("DELETE FROM Patients"); - using (sqlCmd.Connection = new SqlConnection(connectionString)) - { - try - { - sqlCmd.Connection.Open(); - sqlCmd.ExecuteNonQuery(); - - } - catch (Exception ex) - { - returnValue = 1; - } - } - return returnValue; - } - } - - class Patient - { - public string SSN { get; set; } - public string FirstName { get; set; } - public string LastName { get; set; } - public DateTime BirthDate { get; set; } - } -} -``` - -## Verify that the data is encrypted - -You can quickly check that the actual data on the server is encrypted by querying the **Patients** data with SSMS. (Use your current connection where the column encryption setting is not yet enabled.) - -Run the following query on the Clinic database. - -```tsql -SELECT FirstName, LastName, SSN, BirthDate FROM Patients; -``` - -You can see that the encrypted columns do not contain any plaintext data. - - ![Screenshot that shows encrypted data in the encrypted columns.](./media/always-encrypted-certificate-store-configure/ssms-encrypted.png) - -To use SSMS to access the plaintext data, you can add the **Column Encryption Setting=enabled** parameter to the connection. - -1. In SSMS, right-click your server in **Object Explorer**, and then click **Disconnect**. -2. Click **Connect** > **Database Engine** to open the **Connect to Server** window, and then click **Options**. -3. Click **Additional Connection Parameters** and type **Column Encryption Setting=enabled**. - - ![Screenshot that shows the Additional Connection Parameters tab with Column Encryption Setting=enabled typed in the box.](./media/always-encrypted-certificate-store-configure/ssms-connection-parameter.png) -4. Run the following query on the **Clinic** database. 
- - ```tsql - SELECT FirstName, LastName, SSN, BirthDate FROM Patients; - ``` - - You can now see the plaintext data in the encrypted columns. - - ![New console application](./media/always-encrypted-certificate-store-configure/ssms-plaintext.png) - -> [!NOTE] -> If you connect with SSMS (or any client) from a different computer, it will not have access to the encryption keys and will not be able to decrypt the data. - -## Next steps - -After you create a database that uses Always Encrypted, you may want to do the following: - -* Run this sample from a different computer. It won't have access to the encryption keys, so it will not have access to the plaintext data and will not run successfully. -* [Rotate and clean up your keys](/sql/relational-databases/security/encryption/configure-always-encrypted-using-sql-server-management-studio). -* [Migrate data that is already encrypted with Always Encrypted](/sql/relational-databases/security/encryption/migrate-sensitive-data-protected-by-always-encrypted). -* [Deploy Always Encrypted certificates to other client machines](/sql/relational-databases/security/encryption/create-and-store-column-master-keys-always-encrypted#Anchor_1) (see the "Making Certificates Available to Applications and Users" section). 
- -## Related information - -* [Always Encrypted (client development)](/sql/relational-databases/security/encryption/always-encrypted-client-development) -* [Transparent Data Encryption](/sql/relational-databases/security/encryption/transparent-data-encryption) -* [SQL Server Encryption](/sql/relational-databases/security/encryption/sql-server-encryption) -* [Always Encrypted Wizard](/sql/relational-databases/security/encryption/always-encrypted-wizard) -* [Always Encrypted Blog](/archive/blogs/sqlsecurity/always-encrypted-key-metadata) \ No newline at end of file diff --git a/articles/azure-sql/database/always-encrypted-enclaves-configure-attestation.md b/articles/azure-sql/database/always-encrypted-enclaves-configure-attestation.md deleted file mode 100644 index 0a0a9b1ee7010..0000000000000 --- a/articles/azure-sql/database/always-encrypted-enclaves-configure-attestation.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: "Configure attestation for Always Encrypted using Azure Attestation" -description: "Configure Azure Attestation for Always Encrypted with secure enclaves in Azure SQL Database." -keywords: encrypt data, sql encryption, database encryption, sensitive data, Always Encrypted, secure enclaves, SGX, attestation -services: sql-database -ms.service: sql-database -ms.subservice: security -ms.devlang: -ms.topic: how-to -author: jaszymas -ms.author: jaszymas -ms.reviwer: vanto -ms.date: 07/14/2021 -ms.custom: devx-track-azurepowershell ---- - -# Configure attestation for Always Encrypted using Azure Attestation - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -[Microsoft Azure Attestation](../../attestation/overview.md) is a solution for attesting Trusted Execution Environments (TEEs), including Intel Software Guard Extensions (Intel SGX) enclaves. 
- -To use Azure Attestation for attesting Intel SGX enclaves used for [Always Encrypted with secure enclaves](/sql/relational-databases/security/encryption/always-encrypted-enclaves) in Azure SQL Database, you need to: - -1. Create an [attestation provider](../../attestation/basic-concepts.md#attestation-provider) and configure it with the recommended attestation policy. - -2. Determine the attestation URL and share it with application administrators. - -> [!NOTE] -> Configuring attestation is the responsibility of the attestation administrator. See [Roles and responsibilities when configuring SGX enclaves and attestation](always-encrypted-enclaves-plan.md#roles-and-responsibilities-when-configuring-sgx-enclaves-and-attestation). - -## Create and configure an attestation provider - -An [attestation provider](../../attestation/basic-concepts.md#attestation-provider) is a resource in Azure Attestation that evaluates [attestation requests](../../attestation/basic-concepts.md#attestation-request) against [attestation policies](../../attestation/basic-concepts.md#attestation-request) and issues [attestation tokens](../../attestation/basic-concepts.md#attestation-token). - -Attestation policies are specified using the [claim rule grammar](../../attestation/claim-rule-grammar.md). - -> [!IMPORTANT] -> An attestation provider gets created with the default policy for Intel SGX enclaves, which does not validate the code running inside the enclave. Microsoft strongly advises you set the below recommended policy, and not use the default policy, for Always Encrypted with secure enclaves. 
- -Microsoft recommends the following policy for attesting Intel SGX enclaves used for Always Encrypted in Azure SQL Database: - -```output -version= 1.0; -authorizationrules -{ - [ type=="x-ms-sgx-is-debuggable", value==false ] - && [ type=="x-ms-sgx-product-id", value==4639 ] - && [ type=="x-ms-sgx-svn", value>= 0 ] - && [ type=="x-ms-sgx-mrsigner", value=="e31c9e505f37a58de09335075fc8591254313eb20bb1a27e5443cc450b6e33e5"] - => permit(); -}; -``` - -The above policy verifies: - -- The enclave inside Azure SQL Database doesn't support debugging. - > Enclaves can be loaded with debugging disabled or enabled. Debugging support is designed to allow developers to troubleshoot the code running in an enclave. In a production system, debugging could enable an administrator to examine the content of the enclave, which would reduce the level of protection the enclave provides. The recommended policy disables debugging to ensure that if a malicious admin tries to turn on debugging support by taking over the enclave machine, attestation will fail. -- The product ID of the enclave matches the product ID assigned to Always Encrypted with secure enclaves. - > Each enclave has a unique product ID that differentiates the enclave from other enclaves. The product ID assigned to the Always Encrypted enclave is 4639. -- The security version number (SVN) of the library is greater than 0. - > The SVN allows Microsoft to respond to potential security bugs identified in the enclave code. In case a security issue is dicovered and fixed, Microsoft will deploy a new version of the enclave with a new (incremented) SVN. The above recommended policy will be updated to reflect the new SVN. By updating your policy to match the recommended policy you can ensure that if a malicious administrator tries to load an older and insecure enclave, attestation will fail. 
-- The library in the enclave has been signed using the Microsoft signing key (the value of the x-ms-sgx-mrsigner claim is the hash of the signing key). - > One of the main goals of attestation is to convince clients that the binary running in the enclave is the binary that is supposed to run. Attestation policies provide two mechanisms for this purpose. One is the **mrenclave** claim which is the hash of the binary that is supposed to run in an enclave. The problem with the **mrenclave** is that the binary hash changes even with trivial changes to the code, which makes it hard to rev the code running in the enclave. Hence, we recommend the use of the **mrsigner**, which is a hash of a key that is used to sign the enclave binary. When Microsoft revs the enclave, the **mrsigner** stays the same as long as the signing key does not change. In this way, it becomes feasible to deploy updated binaries without breaking customers' applications. - -> [!IMPORTANT] -> Microsoft may need to rotate the key used to sign the Always Encrypted enclave binary, which is expected to be a rare event. Before a new version of the enclave binary, signed with a new key, is deployed to Azure SQL Database, this article will be updated to provide a new recommended attestation policy and instructions on how you should update the policy in your attestation providers to ensure your applications continue to work uninterrupted. - -For instructions for how to create an attestation provider and configure with an attestation policy using: - -- [Quickstart: Set up Azure Attestation with Azure portal](../../attestation/quickstart-portal.md) - > [!IMPORTANT] - > When you configure your attestation policy with Azure portal, set Attestation Type to `SGX-IntelSDK`. -- [Quickstart: Set up Azure Attestation with Azure PowerShell](../../attestation/quickstart-powershell.md) - > [!IMPORTANT] - > When you configure your attestation policy with Azure PowerShell, set the `Tee` parameter to `SgxEnclave`. 
-- [Quickstart: Set up Azure Attestation with Azure CLI](../../attestation/quickstart-azure-cli.md) - > [!IMPORTANT] - > When you configure your attestation policy with Azure CLI, set the `attestation-type` parameter to `SGX-IntelSDK`. - - -## Determine the attestation URL for your attestation policy - -After you've configured an attestation policy, you need to share the attestation URL with administrators of applications that use Always Encrypted with secure enclaves in Azure SQL Database. The attestation URL is the `Attest URI` of the attestation provider containing the attestation policy, which looks like this: `https://MyAttestationProvider.wus.attest.azure.net`. - -### Use Azure portal to determine the attestation URL - -In the Overview pane for your attestation provider, copy the value of the `Attest URI` property to clipboard. - -### Use PowerShell to determine the attestation URL - -Use the `Get-AzAttestation` cmdlet to retrieve the attestation provider properties, including AttestURI. - -```powershell -Get-AzAttestation -Name $attestationProviderName -ResourceGroupName $attestationResourceGroupName -``` - -For more information, see [Create and manage an attestation provider](../../attestation/quickstart-powershell.md#create-and-manage-an-attestation-provider). 
- -## Next Steps - -- [Manage keys for Always Encrypted with secure enclaves](/sql/relational-databases/security/encryption/always-encrypted-enclaves-manage-keys) - -## See also - -- [Tutorial: Getting started with Always Encrypted with secure enclaves in Azure SQL Database](always-encrypted-enclaves-getting-started.md) diff --git a/articles/azure-sql/database/always-encrypted-enclaves-enable-sgx.md b/articles/azure-sql/database/always-encrypted-enclaves-enable-sgx.md deleted file mode 100644 index 25cc57422125b..0000000000000 --- a/articles/azure-sql/database/always-encrypted-enclaves-enable-sgx.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: "Enable Intel SGX for Always Encrypted" -description: "Learn how to enable Intel SGX for Always Encrypted with secure enclaves in Azure SQL Database by selecting SGX-enabled hardware." -services: sql-database -ms.service: sql-database -ms.subservice: security -ms.devlang: -ms.topic: conceptual -author: jaszymas -ms.author: jaszymas -ms.reviwer: vanto -ms.date: 04/06/2022 ---- -# Enable Intel SGX for Always Encrypted for your Azure SQL Database - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - - -[Always Encrypted with secure enclaves](/sql/relational-databases/security/encryption/always-encrypted-enclaves) in Azure SQL Database uses [Intel Software Guard Extensions (Intel SGX)](https://itpeernetwork.intel.com/microsoft-azure-confidential-computing/) enclaves. For Intel SGX to be available, the database must use the [vCore model](service-tiers-vcore.md) and [DC-series](service-tiers-sql-database-vcore.md#dc-series) hardware. - -Configuring the DC-series hardware to enable Intel SGX enclaves is the responsibility of the Azure SQL Database administrator. See [Roles and responsibilities when configuring SGX enclaves and attestation](always-encrypted-enclaves-plan.md#roles-and-responsibilities-when-configuring-sgx-enclaves-and-attestation). 
- -> [!NOTE] -> Intel SGX is not available in hardware configurations other than DC-series. For example, Intel SGX is not available for Gen5 hardware, and it is not available for databases using the [DTU model](service-tiers-dtu.md). - -> [!IMPORTANT] -> Before you configure the DC-series hardware for your database, check the regional availability of DC-series and make sure you understand its performance limitations. For more information, see [DC-series](service-tiers-sql-database-vcore.md#dc-series). - -For detailed instructions for how to configure a new or existing database to use a specific hardware configuration, see [Hardware configuration](service-tiers-sql-database-vcore.md#hardware-configuration). - -## Next steps - -- [Configure Azure Attestation for your Azure SQL database server](always-encrypted-enclaves-configure-attestation.md) - -## See also - -- [Tutorial: Getting started with Always Encrypted with secure enclaves in Azure SQL Database](always-encrypted-enclaves-getting-started.md) \ No newline at end of file diff --git a/articles/azure-sql/database/always-encrypted-enclaves-getting-started.md b/articles/azure-sql/database/always-encrypted-enclaves-getting-started.md deleted file mode 100644 index 6ab57af4550f3..0000000000000 --- a/articles/azure-sql/database/always-encrypted-enclaves-getting-started.md +++ /dev/null @@ -1,432 +0,0 @@ ---- -title: "Tutorial: Getting started with Always Encrypted with secure enclaves" -description: This tutorial teaches you how to create a basic environment for Always Encrypted with secure enclaves in Azure SQL Database and how to encrypt data in-place, and issue rich confidential queries against encrypted columns using SQL Server Management Studio (SSMS). 
-services: sql-database -ms.service: sql-database -ms.subservice: security -ms.devlang: -ms.topic: tutorial -author: jaszymas -ms.author: jaszymas -ms.reviwer: vanto -ms.date: 04/06/2022 ---- -# Tutorial: Getting started with Always Encrypted with secure enclaves in Azure SQL Database - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This tutorial teaches you how to get started with [Always Encrypted with secure enclaves](/sql/relational-databases/security/encryption/always-encrypted-enclaves) in Azure SQL Database. It will show you: - -> [!div class="checklist"] -> - How to create an environment for testing and evaluating Always Encrypted with secure enclaves. -> - How to encrypt data in-place and issue rich confidential queries against encrypted columns using SQL Server Management Studio (SSMS). - -## Prerequisites - -- An active Azure subscription. If you don't have one, [create a free account](https://azure.microsoft.com/free/). You need to be a member of the Contributor role or the Owner role for the subscription to be able to create resources and configure an attestation policy. - -- SQL Server Management Studio (SSMS), version 18.9.1 or later. See [Download SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms) for information on how to download SSMS. - -### PowerShell requirements - -> [!NOTE] -> The prerequisites listed in this section apply only if you choose to use PowerShell for some of the steps in this tutorial. If you plan to use Azure portal instead, you can skip this section. - -Make sure the following PowerShell modules are installed on your machine. - -1. Az version 6.5.0 or later. For details on how to install the Az PowerShell module, see [Install the Azure Az PowerShell module](/powershell/azure/install-az-ps). To determine the version the Az module installed on your machine, run the following command from a PowerShell session. 
- - ```powershell - Get-InstalledModule -Name Az - ``` - -The PowerShell Gallery has deprecated Transport Layer Security (TLS) versions 1.0 and 1.1. TLS 1.2 or a later version is recommended. You may receive the following errors if you are using a TLS version lower than 1.2: - -- `WARNING: Unable to resolve package source 'https://www.powershellgallery.com/api/v2'` -- `PackageManagement\Install-Package: No match was found for the specified search criteria and module name.` - -To continue to interact with the PowerShell Gallery, run the following command before the Install-Module commands - -```powershell -[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 -``` - -## Step 1: Create and configure a server and a DC-series database - -In this step, you will create a new Azure SQL Database logical server and a new database using DC-series hardware, required for Always Encrypted with secure enclaves. For more information see [DC-series](service-tiers-sql-database-vcore.md#dc-series). - -# [Portal](#tab/azure-portal) - -1. Browse to the [Select SQL deployment option](https://portal.azure.com/#create/Microsoft.AzureSQL) page. -1. If you are not already signed in to Azure portal, sign in when prompted. -1. Under **SQL databases**, leave **Resource type** set to **Single database**, and select **Create**. - - :::image type="content" source="./media/single-database-create-quickstart/select-deployment.png" alt-text="Add to Azure SQL"::: - -1. On the **Basics** tab of the **Create SQL Database** form, under **Project details**, select the desired Azure **Subscription**. -1. For **Resource group**, select **Create new**, enter a name for your resource group, and select **OK**. -1. For **Database name** enter *ContosoHR*. -1. For **Server**, select **Create new**, and fill out the **New server** form with the following values: - - **Server name**: Enter *mysqlserver*, and add some characters for uniqueness. 
We can't provide an exact server name to use because server names must be globally unique for all servers in Azure, not just unique within a subscription. So enter something like mysqlserver135, and the portal lets you know if it is available or not. - - **Server admin login**: Enter an admin login name, for example: *azureuser*. - - **Password**: Enter a password that meets requirements, and enter it again in the **Confirm password** field. - - **Location**: Select a location from the dropdown list. - > [!IMPORTANT] - > You need to select a location (an Azure region) that supports both the DC-series hardware and Microsoft Azure Attestation. For the list of regions supporting DC-series, see [DC-series availability](service-tiers-sql-database-vcore.md#dc-series). [Here](https://azure.microsoft.com/global-infrastructure/services/?products=azure-attestation) is the regional availability of Microsoft Azure Attestation. - - Select **OK**. -1. Leave **Want to use SQL elastic pool** set to **No**. -1. Under **Compute + storage**, select **Configure database**, and click **Change configuration**. - - :::image type="content" source="./media/always-encrypted-enclaves/portal-configure-database.png" alt-text="Configure database" lightbox="./media/always-encrypted-enclaves/portal-configure-database.png"::: - -1. Select the **DC-series** hardware configuration, and then select **OK**. - - :::image type="content" source="./media/always-encrypted-enclaves/portal-configure-dc-series-database.png" alt-text="Configure DC-series database"::: - -1. Select **Apply**. -1. Back on the **Basics** tab, verify **Compute + storage** is set to **General Purpose**, **DC, 2 vCores, 32 GB storage**. -1. Select **Next: Networking** at the bottom of the page. - - :::image type="content" source="./media/always-encrypted-enclaves/portal-configure-dc-series-database-basics.png" alt-text="Configure DC-series database - basics"::: - -1. 
On the **Networking** tab, for **Connectivity method**, select **Public endpoint**. -1. For **Firewall rules**, set **Add current client IP address** to **Yes**. Leave **Allow Azure services and resources to access this server** set to **No**. -1. Select **Review + create** at the bottom of the page. - - :::image type="content" source="./media/always-encrypted-enclaves/portal-configure-database-networking.png" alt-text="New SQL database - networking"::: - -1. On the **Review + create** page, after reviewing, select **Create**. - -# [PowerShell](#tab/azure-powershell) - -1. Open a PowerShell console and import the required version of Az. - - ```PowerShell - Import-Module "Az" -MinimumVersion "5.6.0" - ``` - -1. Sign into Azure. If needed, [switch to the subscription](/powershell/azure/manage-subscriptions-azureps) you are using for this tutorial. - - ```PowerShell - Connect-AzAccount - $subscriptionId = "" - $context = Set-AzContext -Subscription $subscriptionId - ``` - -1. Create a new resource group. - - > [!IMPORTANT] - > You need to create your resource group in a region (location) that supports both the DC-series hardware and Microsoft Azure Attestation. For the list of regions supporting DC-series, see [DC-series availability](service-tiers-sql-database-vcore.md#dc-series). [Here](https://azure.microsoft.com/global-infrastructure/services/?products=azure-attestation) is the regional availability of Microsoft Azure Attestation. - - ```powershell - $resourceGroupName = "" - $location = "" - New-AzResourceGroup -Name $resourceGroupName -Location $location - ``` - -1. Create an Azure SQL logical server. When prompted, enter the server administrator name and a password. Make sure you remember the admin name and the password - you will need them later to connect to the server. - - ```powershell - $serverName = "" - New-AzSqlServer -ServerName $serverName -ResourceGroupName $resourceGroupName -Location $location - ``` - -1. 
Create a server firewall rule that allows access from the specified IP range. - - ```powershell - $startIp = "" - $endIp = "" - $serverFirewallRule = New-AzSqlServerFirewallRule -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -FirewallRuleName "AllowedIPs" -StartIpAddress $startIp -EndIpAddress $endIp - ``` - -1. Create a DC-series database. - - ```powershell - $databaseName = "ContosoHR" - $edition = "GeneralPurpose" - $vCore = 2 - $generation = "DC" - New-AzSqlDatabase -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -DatabaseName $databaseName ` - -Edition $edition ` - -Vcore $vCore ` - -ComputeGeneration $generation - ``` - ---- - -## Step 2: Configure an attestation provider - -In this step, you'll create and configure an attestation provider in Microsoft Azure Attestation. This is needed to attest the secure enclave your database uses. - -# [Portal](#tab/azure-portal) - -1. Browse to the [Create attestation provider](https://portal.azure.com/#create/Microsoft.Attestation) page. -1. On the **Create attestation provider** page, provide the following inputs: - - - **Subscription**: Choose the same subscription you created the Azure SQL logical server in. - - **Resource Group**: Choose the same resource group you created the Azure SQL logical server in. - - **Name**: Enter *myattestprovider*, and add some characters for uniqueness. We can't provide an exact attestation provider name to use because names must be globally unique. So enter something like myattestprovider12345, and the portal lets you know if it is available or not. - - **Location**: Choose the location, in which you created the Azure SQL logical server in. - - **Policy signer certificates file**: Leave this field empty, as you will configure an unsigned policy. - -1. After you provide the required inputs, select **Review + create**. 
- - :::image type="content" source="./media/always-encrypted-enclaves/portal-create-attestation-provider-basics.png" alt-text="Create attestation provider"::: - -1. Select **Create**. -1. Once the attestation provider is created, click **Go to resource**. -1. On the **Overview** tab for the attestation provider, copy the value of the **Attest URI** property to clipboard and save it in a file. This is the attestation URL, you will need in later steps. - - :::image type="content" source="./media/always-encrypted-enclaves/portal-attest-uri.png" alt-text="Attestation URL"::: - -1. Select **Policy** on the resource menu on the left side of the window or on the lower pane. -1. Set **Attestation Type** to **SGX-IntelSDK**. -1. Select **Configure** on the upper menu. - - :::image type="content" source="./media/always-encrypted-enclaves/portal-configure-attestation-policy.png" alt-text="Configure attestation policy"::: - -1. Set **Policy Format** to **Text**. Leave **Policy options** set to **Enter policy**. -1. In the **Policy text** field, replace the default policy with the below policy. For information about the below policy, see [Create and configure an attestation provider](always-encrypted-enclaves-configure-attestation.md#create-and-configure-an-attestation-provider). - - ```output - version= 1.0; - authorizationrules - { - [ type=="x-ms-sgx-is-debuggable", value==false ] - && [ type=="x-ms-sgx-product-id", value==4639 ] - && [ type=="x-ms-sgx-svn", value>= 0 ] - && [ type=="x-ms-sgx-mrsigner", value=="e31c9e505f37a58de09335075fc8591254313eb20bb1a27e5443cc450b6e33e5"] - => permit(); - }; - ``` - -1. Click **Save**. - - :::image type="content" source="./media/always-encrypted-enclaves/portal-edit-attestation-policy.png" alt-text="Edit attestation policy"::: - -1. Click **Refresh** on the upper menu to view the configured policy. - -# [PowerShell](#tab/azure-powershell) - -1. Copy the below attestation policy and save the policy in a text file (txt). 
For information about the below policy, see [Create and configure an attestation provider](always-encrypted-enclaves-configure-attestation.md#create-and-configure-an-attestation-provider). - - ```output - version= 1.0; - authorizationrules - { - [ type=="x-ms-sgx-is-debuggable", value==false ] - && [ type=="x-ms-sgx-product-id", value==4639 ] - && [ type=="x-ms-sgx-svn", value>= 0 ] - && [ type=="x-ms-sgx-mrsigner", value=="e31c9e505f37a58de09335075fc8591254313eb20bb1a27e5443cc450b6e33e5"] - => permit(); - }; - ``` - -1. Import the required version of `Az.Attestation`. - - ```powershell - Import-Module "Az.Attestation" -MinimumVersion "0.1.8" - ``` - -1. Create an attestation provider. - - ```powershell - $attestationProviderName = "" - New-AzAttestation -Name $attestationProviderName -ResourceGroupName $resourceGroupName -Location $location - ``` -1. Assign yourself to the Attestation Contributor role for the attestation provider, to ensure you have permissions to configure an attestation policy. - - ```powershell - New-AzRoleAssignment -SignInName $context.Account.Id ` - -RoleDefinitionName "Attestation Contributor" ` - -ResourceName $attestationProviderName ` - -ResourceType "Microsoft.Attestation/attestationProviders" ` - -ResourceGroupName $resourceGroupName - ``` - -3. Configure your attestation policy. - - ```powershell - $policyFile = "" - $teeType = "SgxEnclave" - $policyFormat = "Text" - $policy=Get-Content -path $policyFile -Raw - Set-AzAttestationPolicy -Name $attestationProviderName ` - -ResourceGroupName $resourceGroupName ` - -Tee $teeType ` - -Policy $policy ` - -PolicyFormat $policyFormat - ``` - -1. Retrieve the attestation URL (the Attest URI of your attestation provider). 
- - ```powershell - $attestationUrl = (Get-AzAttestation -Name $attestationProviderName -ResourceGroupName $resourceGroupName).AttestUri - Write-Host "Your attestation URL is: $attestationUrl" - ``` - - The attestation URL should look like this: `https://myattestprovider12345.eus.attest.azure.net` - ---- - - -## Step 3: Populate your database - -In this step, you'll create a table and populate it with some data that you'll later encrypt and query. - -1. Open SSMS and connect to the **ContosoHR** database in the Azure SQL logical server you created **without** Always Encrypted enabled in the database connection. - 1. In the **Connect to Server** dialog, specify the fully qualified name of your server (for example, *myserver135.database.windows.net*), and enter the administrator user name and the password you specified when you created the server. - 2. Click **Options >>** and select the **Connection Properties** tab. Make sure to select the **ContosoHR** database (not the default, master database). - 3. Select the **Always Encrypted** tab. - 4. Make sure the **Enable Always Encrypted (column encryption)** checkbox is **not** selected. - - :::image type="content" source="./media/always-encrypted-enclaves/connect-without-always-encrypted-ssms.png" alt-text="Connect without Always Encrypted"::: - - 5. Click **Connect**. - -2. Create a new table, named **Employees**. - - ```sql - CREATE SCHEMA [HR]; - GO - - CREATE TABLE [HR].[Employees] - ( - [EmployeeID] [int] IDENTITY(1,1) NOT NULL, - [SSN] [char](11) NOT NULL, - [FirstName] [nvarchar](50) NOT NULL, - [LastName] [nvarchar](50) NOT NULL, - [Salary] [money] NOT NULL - ) ON [PRIMARY]; - GO - ``` - -3. Add a few employee records to the **Employees** table. 
- - ```sql - INSERT INTO [HR].[Employees] - ([SSN] - ,[FirstName] - ,[LastName] - ,[Salary]) - VALUES - ('795-73-9838' - , N'Catherine' - , N'Abel' - , $31692); - - INSERT INTO [HR].[Employees] - ([SSN] - ,[FirstName] - ,[LastName] - ,[Salary]) - VALUES - ('990-00-6818' - , N'Kim' - , N'Abercrombie' - , $55415); - ``` - -## Step 4: Provision enclave-enabled keys - -In this step, you'll create a column master key and a column encryption key that allow enclave computations. - -1. Using the SSMS instance from the previous step, in **Object Explorer**, expand your database and navigate to **Security** > **Always Encrypted Keys**. -1. Provision a new enclave-enabled column master key: - 1. Right-click **Always Encrypted Keys** and select **New Column Master Key...**. - 2. Select your column master key name: **CMK1**. - 3. Make sure you select either **Windows Certificate Store (Current User or Local Machine)** or **Azure Key Vault**. - 4. Select **Allow enclave computations**. - 5. If you selected Azure Key Vault, sign into Azure and select your key vault. For more information on how to create a key vault for Always Encrypted, see [Manage your key vaults from Azure portal](/archive/blogs/kv/manage-your-key-vaults-from-new-azure-portal). - 6. Select your certificate or Azure Key Value key if it already exists, or click the **Generate Certificate** button to create a new one. - 7. Select **OK**. - - :::image type="content" source="./media/always-encrypted-enclaves/allow-enclave-computations.png" alt-text="Allow enclave computations"::: - -1. Create a new enclave-enabled column encryption key: - - 1. Right-click **Always Encrypted Keys** and select **New Column Encryption Key**. - 2. Enter a name for the new column encryption key: **CEK1**. - 3. In the **Column master key** dropdown, select the column master key you created in the previous steps. - 4. Select **OK**. 
- -## Step 5: Encrypt some columns in place - -In this step, you'll encrypt the data stored in the **SSN** and **Salary** columns inside the server-side enclave, and then test a SELECT query on the data. - -1. Open a new SSMS instance and connect to your database **with** Always Encrypted enabled for the database connection. - 1. Start a new instance of SSMS. - 2. In the **Connect to Server** dialog, specify the fully qualified name of your server (for example, *myserver135.database.windows.net*), and enter the administrator user name and the password you specified when you created the server. - 3. Click **Options >>** and select the **Connection Properties** tab. Make sure to select the **ContosoHR** database (not the default, master database). - 4. Select the **Always Encrypted** tab. - 5. Make sure the **Enable Always Encrypted (column encryption)** checkbox **is** selected. - 6. Specify your enclave attestation URL that you've obtained by following the steps in [Step 2: Configure an attestation provider](#step-2-configure-an-attestation-provider). See the below screenshot. - - :::image type="content" source="./media/always-encrypted-enclaves/connect-to-server-configure-attestation.png" alt-text="Connect with attestation"::: - - 7. Select **Connect**. - 8. If you're prompted to enable Parameterization for Always Encrypted queries, select **Enable**. - -1. Using the same SSMS instance (with Always Encrypted enabled), open a new query window and encrypt the **SSN** and **Salary** columns by running the below statements. 
- - ```sql - ALTER TABLE [HR].[Employees] - ALTER COLUMN [SSN] [char] (11) COLLATE Latin1_General_BIN2 - ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = [CEK1], ENCRYPTION_TYPE = Randomized, ALGORITHM = 'AEAD_AES_256_CBC_HMAC_SHA_256') NOT NULL - WITH - (ONLINE = ON); - - ALTER TABLE [HR].[Employees] - ALTER COLUMN [Salary] [money] - ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = [CEK1], ENCRYPTION_TYPE = Randomized, ALGORITHM = 'AEAD_AES_256_CBC_HMAC_SHA_256') NOT NULL - WITH - (ONLINE = ON); - - ALTER DATABASE SCOPED CONFIGURATION CLEAR PROCEDURE_CACHE; - ``` - - > [!NOTE] - > Notice the ALTER DATABASE SCOPED CONFIGURATION CLEAR PROCEDURE_CACHE statement to clear the query plan cache for the database in the above script. After you have altered the table, you need to clear the plans for all batches and stored procedures that access the table to refresh parameters encryption information. - -1. To verify the **SSN** and **Salary** columns are now encrypted, open a new query window in the SSMS instance **without** Always Encrypted enabled for the database connection and execute the below statement. The query window should return encrypted values in the **SSN** and **Salary** columns. If you execute the same query using the SSMS instance with Always Encrypted enabled, you should see the data decrypted. - - ```sql - SELECT * FROM [HR].[Employees]; - ``` - -## Step 6: Run rich queries against encrypted columns - -You can run rich queries against the encrypted columns. Some query processing will be performed inside your server-side enclave. - -1. In the SSMS instance **with** Always Encrypted enabled, make sure Parameterization for Always Encrypted is also enabled. - 1. Select **Tools** from the main menu of SSMS. - 2. Select **Options...**. - 3. Navigate to **Query Execution** > **SQL Server** > **Advanced**. - 4. Ensure that **Enable Parameterization for Always Encrypted** is checked. - 5. Select **OK**. -2. Open a new query window, paste in the below query, and execute. 
The query should return plaintext values and rows meeting the specified search criteria. - - ```sql - DECLARE @SSNPattern [char](11) = '%6818'; - DECLARE @MinSalary [money] = $1000; - SELECT * FROM [HR].[Employees] - WHERE SSN LIKE @SSNPattern AND [Salary] >= @MinSalary; - ``` - -3. Try the same query again in the SSMS instance that doesn't have Always Encrypted enabled. A failure should occur. - -## Next steps - -After completing this tutorial, you can go to one of the following tutorials: -- [Tutorial: Develop a .NET application using Always Encrypted with secure enclaves](/sql/connect/ado-net/sql/tutorial-always-encrypted-enclaves-develop-net-apps) -- [Tutorial: Develop a .NET Framework application using Always Encrypted with secure enclaves](/sql/relational-databases/security/tutorial-always-encrypted-enclaves-develop-net-framework-apps) -- [Tutorial: Creating and using indexes on enclave-enabled columns using randomized encryption](/sql/relational-databases/security/tutorial-creating-using-indexes-on-enclave-enabled-columns-using-randomized-encryption) - -## See also - -- [Configure and use Always Encrypted with secure enclaves](/sql/relational-databases/security/encryption/configure-always-encrypted-enclaves) diff --git a/articles/azure-sql/database/always-encrypted-enclaves-plan.md b/articles/azure-sql/database/always-encrypted-enclaves-plan.md deleted file mode 100644 index d275ec5570b5f..0000000000000 --- a/articles/azure-sql/database/always-encrypted-enclaves-plan.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: "Plan for Intel SGX enclaves and attestation in Azure SQL Database" -description: "Plan the deployment of Always Encrypted with secure enclaves in Azure SQL Database." 
-services: sql-database -ms.service: sql-database -ms.subservice: security -ms.devlang: -ms.topic: conceptual -author: jaszymas -ms.author: jaszymas -ms.reviwer: vanto -ms.date: 04/06/2022 ---- - -# Plan for Intel SGX enclaves and attestation in Azure SQL Database - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -[Always Encrypted with secure enclaves](/sql/relational-databases/security/encryption/always-encrypted-enclaves) in Azure SQL Database uses [Intel Software Guard Extensions (Intel SGX)](https://itpeernetwork.intel.com/microsoft-azure-confidential-computing/) enclaves and requires [Microsoft Azure Attestation](/sql/relational-databases/security/encryption/always-encrypted-enclaves#secure-enclave-attestation). - -## Plan for Intel SGX in Azure SQL Database - -Intel SGX is a hardware-based trusted execution environment technology. Intel SGX is available for databases that use the [vCore model](service-tiers-sql-database-vcore.md) and [DC-series](service-tiers-sql-database-vcore.md?#dc-series) hardware. Therefore, to ensure you can use Always Encrypted with secure enclaves in your database, you need to either select the DC-series hardware when you create the database, or you can update your existing database to use the DC-series hardware. - -> [!NOTE] -> Intel SGX is not available in hardware other than DC-series. For example, Intel SGX is not available for Gen5 hardware, and it is not available for databases using the [DTU model](service-tiers-dtu.md). - -> [!IMPORTANT] -> Before you configure the DC-series hardware for your database, check the regional availability of DC-series and make sure you understand its performance limitations. For details, see [DC-series](service-tiers-sql-database-vcore.md#dc-series). 
- -## Plan for attestation in Azure SQL Database - -[Microsoft Azure Attestation](../../attestation/overview.md) is a solution for attesting Trusted Execution Environments (TEEs), including Intel SGX enclaves in Azure SQL databases using DC-series hardware. - -To use Azure Attestation for attesting Intel SGX enclaves in Azure SQL Database, you need to create an [attestation provider](../../attestation/basic-concepts.md#attestation-provider) and configure it with the Microsoft-provided attestation policy. See [Configure attestation for Always Encrypted using Azure Attestation](always-encrypted-enclaves-configure-attestation.md) - -## Roles and responsibilities when configuring SGX enclaves and attestation - -Configuring your environment to support Intel SGX enclaves and attestation for Always Encrypted in Azure SQL Database involves setting up components of different types: Microsoft Azure Attestation, Azure SQL Database, and applications that trigger enclave attestation. Configuring components of each type is performed by users assuming one of the below distinct roles: - -- Attestation administrator - creates an attestation provider in Microsoft Azure Attestation, authors the attestation policy, grants Azure SQL logical server access to the attestation provider, and shares the attestation URL that points to the policy to application administrators. -- Azure SQL Database administrator - enables SGX enclaves in databases by selecting the DC-series hardware, and provides the attestation administrator with the identity of the Azure SQL logical server that needs to access the attestation provider. -- Application administrator - configures applications with the attestation URL obtained from the attestation administrator. - -In production environments (handling real sensitive data), it is important your organization adheres to role separation when configuring attestation, where each distinct role is assumed by different people. 
In particular, if the goal of deploying Always Encrypted in your organization is to reduce the attack surface area by ensuring Azure SQL Database administrators cannot access sensitive data, Azure SQL Database administrators should not control attestation policies. - -## Next steps - -- [Enable Intel SGX for your Azure SQL database](always-encrypted-enclaves-enable-sgx.md) - -## See also - -- [Tutorial: Getting started with Always Encrypted with secure enclaves in Azure SQL Database](always-encrypted-enclaves-getting-started.md) diff --git a/articles/azure-sql/database/always-encrypted-landing.yml b/articles/azure-sql/database/always-encrypted-landing.yml deleted file mode 100644 index 52be2a69d14a5..0000000000000 --- a/articles/azure-sql/database/always-encrypted-landing.yml +++ /dev/null @@ -1,129 +0,0 @@ -### YamlMime:Landing - -title: Always Encrypted documentation -summary: "Find documentation about Always Encrypted" - -metadata: - title: Always Encrypted documentation - description: "Find Always Encrypted documentation for SQL Server, Azure SQL Database, and Azure SQL Managed Instance" - ms.service: sql-db-mi - ms.subservice: security - ms.tgt_pltfrm: na - ms.devlang: - ms.topic: landing-page - author: VanMSFT - ms.author: vanto - ms.reviewer: kendralittle, mathoma - ms.date: 01/15/2021 - -# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new - -landingContent: -# Cards and links should be based on top customer tasks or top subjects -# Start card title with a verb - - # Card - - title: Always Encrypted overview - linkLists: - - linkListType: overview - links: - - text: What is Always Encrypted? 
- url: /sql/relational-databases/security/encryption/always-encrypted-database-engine - - - # Card - - title: Always Encrypted concepts - linkLists: - - linkListType: concept - links: - - text: Key management for Always Encrypted - url: /sql/relational-databases/security/encryption/overview-of-key-management-for-always-encrypted - - text: Always Encrypted cryptography - url: /sql/relational-databases/security/encryption/always-encrypted-cryptography - - - # Card - - title: Always Encrypted Tutorials - linkLists: - - linkListType: tutorial - links: - - text: Configure Always Encrypted by using Azure Key Vault - url: always-encrypted-azure-key-vault-configure.md - - text: Configure Always Encrypted by using the Windows certificate store - url: always-encrypted-certificate-store-configure.md - - - # Card - - title: Configure Always Encrypted - linkLists: - - linkListType: how-to-guide - links: - - text: Configure Always Encrypted using PowerShell - url: /sql/relational-databases/security/encryption/configure-always-encrypted-using-powershell - - text: Configure Always Encrypted using SQL Server Management Studio - url: /sql/relational-databases/security/encryption/configure-always-encrypted-using-sql-server-management-studio - - - # Card - - title: Manage Always Encrypted keys - linkLists: - - linkListType: how-to-guide - links: - - text: Create and store column master keys for Always Encrypted - url: /sql/relational-databases/security/encryption/create-and-store-column-master-keys-always-encrypted - - text: Provision Always Encrypted keys using SQL Server Management Studio - url: /sql/relational-databases/security/encryption/configure-always-encrypted-keys-using-ssms - - text: Rotate Always Encrypted keys using SQL Server Management Studio - url: /sql/relational-databases/security/encryption/rotate-always-encrypted-keys-using-ssms - - text: Provision Always Encrypted keys using PowerShell - url: 
/sql/relational-databases/security/encryption/configure-always-encrypted-keys-using-powershell - - text: Rotate Always Encrypted keys using PowerShell - url: /sql/relational-databases/security/encryption/rotate-always-encrypted-keys-using-powershell - - - # Card - - title: Configure Always Encrypted columns - linkLists: - - linkListType: how-to-guide - links: - - text: Configure column encryption using Always Encrypted Wizard - url: /sql/relational-databases/security/encryption/always-encrypted-wizard - - text: Configure column encryption using Always Encrypted with a DAC package - url: /sql/relational-databases/security/encryption/configure-always-encrypted-using-dacpac - - - # Card - - title: Query Always Encrypted columns - linkLists: - - linkListType: how-to-guide - links: - - text: Query columns using Always Encrypted with SQL Server Management Studio - url: /sql/relational-databases/security/encryption/always-encrypted-query-columns-ssms - - text: Query columns using Always Encrypted with Azure Data Studio - url: /sql/relational-databases/security/encryption/always-encrypted-query-columns-ads - - - # Card - - title: Migrate data using Always Encrypted - linkLists: - - linkListType: how-to-guide - links: - - text: Export and import databases using Always Encrypted - url: /sql/relational-databases/security/encryption/always-encrypted-migrate-using-bacpac - - text: Backup and restore databases using Always Encrypted - url: /sql/relational-databases/security/encryption/always-encrypted-migrate-using-backup-restore - - text: Migrate data to or from columns using Always Encrypted with SQL Server Import and Export Wizard - url: /sql/relational-databases/security/encryption/always-encrypted-migrate-using-import-export-wizard - - text: Bulk load encrypted data to columns using Always Encrypted - url: /sql/relational-databases/security/encryption/migrate-sensitive-data-protected-by-always-encrypted - - text: Using Always Encrypted with Azure Data Factory - url: 
../../data-factory/connector-azure-sql-database.md#using-always-encrypted - - - # Card - - title: Develop applications using Always Encrypted - linkLists: - - linkListType: how-to-guide - links: - - text: Develop applications using Always Encrypted - url: /sql/relational-databases/security/encryption/always-encrypted-client-development \ No newline at end of file diff --git a/articles/azure-sql/database/always-encrypted-with-secure-enclaves-landing.yml b/articles/azure-sql/database/always-encrypted-with-secure-enclaves-landing.yml deleted file mode 100644 index 8886c4e4366b7..0000000000000 --- a/articles/azure-sql/database/always-encrypted-with-secure-enclaves-landing.yml +++ /dev/null @@ -1,135 +0,0 @@ -### YamlMime:Landing - -title: Always Encrypted with secure enclaves documentation -summary: "Find documentation about Always Encrypted with secure enclaves" - -metadata: - title: Always Encrypted with secure enclaves documentation - description: "Find Always Encrypted with secure enclaves documentation for SQL Server and Azure SQL Database" - ms.service: sql-database - ms.subservice: security - ms.tgt_pltfrm: na - ms.devlang: - ms.topic: landing-page - author: VanMSFT - ms.author: vanto - ms.reviewer: kendralittle, mathoma - ms.date: 07/14/2021 - -# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new - -landingContent: -# Cards and links should be based on top customer tasks or top subjects -# Start card title with a verb - - # Card - - title: Overview - linkLists: - - linkListType: overview - links: - - text: What is Always Encrypted with secure enclaves? 
- url: /sql/relational-databases/security/encryption/always-encrypted-enclaves - - text: Configure and use Always Encrypted with secure enclaves - url: /sql/relational-databases/security/encryption/configure-always-encrypted-enclaves - - - # Card - - title: Set up in Azure SQL Database - linkLists: - - linkListType: concept - links: - - text: Plan for Intel SGX enclaves and attestation in Azure SQL Database - url: always-encrypted-enclaves-plan.md - - text: Enable Intel SGX for your Azure SQL Database - url: always-encrypted-enclaves-enable-sgx.md - - text: Configure attestation for Always Encrypted using Azure Attestation - url: always-encrypted-enclaves-configure-attestation.md - - # Card - - title: Set up in a SQL Server on Azure VM - linkLists: - - linkListType: concept - links: - - text: Plan for Host Guardian Service attestation - url: /sql/relational-databases/security/encryption/always-encrypted-enclaves-host-guardian-service-plan - - text: Deploy the Host Guardian Service for SQL Server - url: /sql/relational-databases/security/encryption/always-encrypted-enclaves-host-guardian-service-deploy - - text: Register computer with Host Guardian Service - url: /sql/relational-databases/security/encryption/always-encrypted-enclaves-host-guardian-service-register - - text: Configure the secure enclave in SQL Server - url: /sql/relational-databases/security/encryption/always-encrypted-enclaves-configure-enclave-type - - - # Card - - title: Samples and tutorials - linkLists: - - linkListType: sample - links: - - text: Contoso HR web app sample - url: https://github.com/microsoft/sql-server-samples/tree/master/samples/features/security/always-encrypted-with-secure-enclaves - - linkListType: tutorial - links: - - text: Getting started with Always Encrypted with secure enclaves in SQL Server - url: /sql/relational-databases/security/tutorial-getting-started-with-always-encrypted-enclaves - - text: Getting started with Always Encrypted with secure enclaves in Azure SQL 
Database - url: always-encrypted-enclaves-getting-started.md - - text: Create and use indexes on enclave-enabled columns using randomized encryption - url: /sql/relational-databases/security/tutorial-creating-using-indexes-on-enclave-enabled-columns-using-randomized-encryption - - text: Develop a .NET application using Always Encrypted with secure enclaves - url: /sql/connect/ado-net/sql/tutorial-always-encrypted-enclaves-develop-net-apps - - text: Develop a .NET Framework application using Always Encrypted with secure enclaves - url: /sql/relational-databases/security/tutorial-always-encrypted-enclaves-develop-net-framework-apps - - # Card - - title: Manage keys - linkLists: - - linkListType: how-to-guide - links: - - text: Manage keys for Always Encrypted with secure enclaves - url: /sql/relational-databases/security/encryption/always-encrypted-enclaves-manage-keys - - text: Provision enclave-enabled keys - url: /sql/relational-databases/security/encryption/always-encrypted-enclaves-provision-keys - - text: Rotate enclave-enabled keys - url: /sql/relational-databases/security/encryption/always-encrypted-enclaves-rotate-keys - - # Card - - title: Configure columns - linkLists: - - linkListType: how-to-guide - links: - - text: Configure column encryption in-place using Always Encrypted with secure enclaves - url: /sql/relational-databases/security/encryption/always-encrypted-enclaves-configure-encryption - - text: Configure column encryption in-place with Transact-SQL - url: /sql/relational-databases/security/encryption/always-encrypted-enclaves-configure-encryption-tsql - - text: Enable Always Encrypted with secure enclaves for existing encrypted columns - url: /sql/relational-databases/security/encryption/always-encrypted-enclaves-enable-for-encrypted-columns - - # Card - - title: Videos - linkLists: - - linkListType: video - links: - - text: Inside Azure Datacenter Architecture with Mark Russinovich - url: https://youtu.be/69PrhWQorEM?t=4523 - - text: A webinar 
including a section on secure enclaves - url: https://info.microsoft.com/ww-ondemand-azure-webinar-series-four-ways-to-take-your-data-security-to-the-next-level.html - - # Card - - title: Query columns - linkLists: - - linkListType: how-to-guide - links: - - text: Run Transact-SQL statements using secure enclaves - url: /sql/relational-databases/security/encryption/always-encrypted-enclaves-query-columns - - text: Troubleshoot common issues for Always Encrypted with secure enclaves - url: /sql/relational-databases/security/encryption/always-encrypted-enclaves-troubleshooting - - text: Create and use indexes on columns using Always Encrypted with secure enclaves - url: /sql/relational-databases/security/encryption/always-encrypted-enclaves-create-use-indexes - - # Card - - title: Develop applications - linkLists: - - linkListType: how-to-guide - links: - - text: Develop applications using Always Encrypted with secure enclaves - url: /sql/relational-databases/security/encryption/always-encrypted-enclaves-client-development diff --git a/articles/azure-sql/database/analyze-prevent-deadlocks.md b/articles/azure-sql/database/analyze-prevent-deadlocks.md deleted file mode 100644 index 64568e000daa0..0000000000000 --- a/articles/azure-sql/database/analyze-prevent-deadlocks.md +++ /dev/null @@ -1,820 +0,0 @@ ---- -title: Analyze and prevent deadlocks -titleSuffix: Azure SQL Database -description: Learn how to analyze deadlocks and prevent them from reoccurring in Azure SQL Database -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma, dfurman -services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.topic: conceptual -ms.date: 4/8/2022 ---- - -# Analyze and prevent deadlocks in Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article teaches you how to identify deadlocks in Azure SQL Database, use deadlock graphs and Query Store to identify the queries in the deadlock, and plan and test changes to 
prevent deadlocks from reoccurring. - -This article focuses on identifying and analyzing deadlocks due to lock contention. Learn more about other types of deadlocks in [resources that can deadlock](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#deadlock_resources). - -## How deadlocks occur in Azure SQL Database - -Each new database in Azure SQL Database has the [read committed snapshot](/sql/t-sql/statements/alter-database-transact-sql-set-options?view=azuresqldb-current&preserve-view=true#read_committed_snapshot--on--off--1) (RCSI) database setting enabled by default. [Blocking](understand-resolve-blocking.md) between sessions reading data and sessions writing data is minimized under RCSI, which uses row versioning to increase concurrency. However, blocking and deadlocks may still occur in databases in Azure SQL Database because: - -- Queries that modify data may block one another. -- Queries may run under isolation levels that increase blocking. Isolation levels may be specified via client library methods, [query hints](/sql/t-sql/queries/hints-transact-sql-query), or [SET statements](/sql/t-sql/statements/set-transaction-isolation-level-transact-sql) in Transact-SQL. -- [RCSI may be disabled](/sql/t-sql/statements/alter-database-transact-sql-set-options?view=azuresqldb-current&preserve-view=true#read_committed_snapshot--on--off--1), causing the database to use shared (S) locks to protect SELECT statements run under the read committed isolation level. This may increase blocking and deadlocks. - -### An example deadlock - -A deadlock occurs when two or more tasks permanently block one another because each task has a lock on a resource the other task is trying to lock. A deadlock is also called a cyclic dependency: in the case of a two-task deadlock, transaction A has a dependency on transaction B, and transaction B closes the circle by having a dependency on transaction A. - -For example: - -1. 
**Session A** begins an explicit transaction and runs an update statement that acquires an update (U) lock on one row on table `SalesLT.Product` that is [converted to an exclusive (X) lock](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#behavior-when-modifying-data). -1. **Session B** runs an update statement that modifies the `SalesLT.ProductDescription` table. The update statement joins to the `SalesLT.Product` table to find the correct rows to update. - - **Session B** acquires an update (U) lock on 72 rows on the `SalesLT.ProductDescription` table. - - **Session B** needs a shared lock on rows on the table `SalesLT.Product`, including the row that is locked by **Session A**. **Session B** is blocked on `SalesLT.Product`. -1. **Session A** continues its transaction, and now runs an update against the `SalesLT.ProductDescription` table. **Session A** is blocked by Session B on `SalesLT.ProductDescription`. - -:::image type="content" source="media/analyze-prevent-deadlocks/deadlock-overview.png" alt-text="A diagram showing two sessions in a deadlock. Each session owns a resource that the other process needs in order to continue."::: - -All transactions in a deadlock will wait indefinitely unless one of the participating transactions is rolled back, for example, because its session was terminated. - -The database engine deadlock monitor periodically checks for tasks that are in a deadlock. If the deadlock monitor detects a cyclic dependency, it chooses one of the tasks as a victim and terminates its transaction with error 1205, "Transaction (Process ID *N*) was deadlocked on lock resources with another process and has been chosen as the deadlock victim. Rerun the transaction." Breaking the deadlock in this way allows the other task or tasks in the deadlock to complete their transactions. 
- ->[!NOTE] -> Learn more about the criteria for choosing a deadlock victim in the [Deadlock process list](#deadlock-process-list) section of this article. - -:::image type="content" source="media/analyze-prevent-deadlocks/deadlock-overview-with-deadlock-victim.png" alt-text="Overview of a deadlock between two sessions. One session has been chosen as the deadlock victim."::: - -The application with the transaction chosen as the deadlock victim should retry the transaction, which usually completes after the other transaction or transactions involved in the deadlock have finished. - -It is a best practice to introduce a short, randomized delay before retry to avoid encountering the same deadlock again. Learn more about how to design [retry logic for transient errors](troubleshoot-common-connectivity-issues.md#retry-logic-for-transient-errors). - -### Default isolation level in Azure SQL Database - -New databases in Azure SQL Database enable read committed snapshot (RCSI) by default. RCSI changes the behavior of the [read committed isolation level](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#database-engine-isolation-levels) to use [row-versioning](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#Row_versioning) to provide statement-level consistency without the use of shared (S) locks for SELECT statements. - -With RCSI enabled: - -- Statements reading data do not block statements modifying data. -- Statements modifying data do not block statements reading data. - -[Snapshot isolation level](/sql/t-sql/statements/alter-database-transact-sql-set-options?view=azuresqldb-current&preserve-view=true#b-enable-snapshot-isolation-on-a-database) is also enabled by default for new databases in Azure SQL Database. Snapshot isolation is an additional row-based isolation level that provides transaction-level consistency for data and which uses row versions to select rows to update. 
To use snapshot isolation, queries or connections must explicitly set their transaction isolation level to `SNAPSHOT`. This may only be done when snapshot isolation is enabled for the database. - -You can identify if RCSI and/or snapshot isolation are enabled with Transact-SQL. Connect to your database in Azure SQL Database and run the following query: - -```sql -SELECT name, is_read_committed_snapshot_on, snapshot_isolation_state_desc -FROM sys.databases -WHERE name = DB_NAME(); -GO -``` - -If RCSI is enabled, the `is_read_committed_snapshot_on` column will return the value **1**. If snapshot isolation is enabled, the `snapshot_isolation_state_desc` column will return the value **ON**. - -If [RCSI has been disabled](/sql/t-sql/statements/alter-database-transact-sql-set-options?view=azuresqldb-current&preserve-view=true#read_committed_snapshot--on--off--1) for a database in Azure SQL Database, investigate why RCSI was disabled before re-enabling it. Application code may have been written expecting that queries reading data will be blocked by queries writing data, resulting in incorrect results from race conditions when RCSI is enabled. - -### Interpreting deadlock events - -A deadlock event is emitted after the deadlock manager in Azure SQL Database detects a deadlock and selects a transaction as the victim. In other words, if you set up alerts for deadlocks, the notification fires after an individual deadlock has been resolved. There is no user action that needs to be taken for that deadlock. Applications should be written to include [retry logic](troubleshoot-common-connectivity-issues.md#retry-logic-for-transient-errors) so that they automatically continue after receiving error 1205, "Transaction (Process ID *N*) was deadlocked on lock resources with another process and has been chosen as the deadlock victim. Rerun the transaction." - -It's useful to set up alerts, however, as deadlocks may reoccur. 
Deadlock alerts enable you to investigate if a pattern of repeat deadlocks is happening in your database, in which case you may choose to take action to prevent deadlocks from reoccurring. Learn more about alerting in the [Monitor and alert on deadlocks](#monitor-and-alert-on-deadlocks) section of this article. - -### Top methods to prevent deadlocks - -The lowest risk approach to preventing deadlocks from reoccurring is generally to [tune nonclustered indexes](#prevent-a-deadlock-from-reoccurring) to optimize queries involved in the deadlock. - -- Risk is low for this approach because tuning nonclustered indexes doesn't require changes to the query code itself, reducing the risk of a user error when rewriting Transact-SQL that causes incorrect data to be returned to the user. -- Effective nonclustered index tuning helps queries find the data to read and modify more efficiently. By reducing the amount of data that a query needs to access, the likelihood of blocking is reduced and deadlocks can often be prevented. - -In some cases, creating or tuning a clustered index can reduce blocking and deadlocks. Because the clustered index is included in all nonclustered index definitions, creating or modifying a clustered index can be an IO intensive and time consuming operation on larger tables with existing nonclustered indexes. Learn more about [Clustered index design guidelines](/sql/relational-databases/sql-server-index-design-guide#Clustered). - -When index tuning isn't successful at preventing deadlocks, other methods are available: - -- If the deadlock occurs only when a particular plan is chosen for one of the queries involved in the deadlock, [forcing a query plan](/sql/relational-databases/system-stored-procedures/sp-query-store-force-plan-transact-sql) with Query Store may prevent deadlocks from reoccurring. -- Rewriting Transact-SQL for one or more transactions involved in the deadlock can also help prevent deadlocks. 
Breaking apart explicit transactions into smaller transactions requires careful coding and testing to ensure data validity when concurrent modifications occur. - -Learn more about each of these approaches in the [Prevent a deadlock from reoccurring](#prevent-a-deadlock-from-reoccurring) section of this article. - -## Monitor and alert on deadlocks - -In this article, we will use the `AdventureWorksLT` sample database to set up alerts for deadlocks, cause an example deadlock, analyze the deadlock graph for the example deadlock, and test changes to prevent the deadlock from reoccurring. - -We'll use the [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) (SSMS) client in this article, as it contains functionality to display deadlock graphs in an interactive visual mode. You can use other clients such as [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio) to follow along with the examples, but you may only be able to view deadlock graphs as XML. - - -### Create the AdventureWorksLT database - -To follow along with the examples, create a new database in Azure SQL Database and select **Sample** data as the **Data source**. - -For detailed instructions on how to create `AdventureWorksLT` with the Azure portal, Azure CLI, or PowerShell, select the approach of your choice in [Quickstart: Create an Azure SQL Database single database](single-database-create-quickstart.md). - -### Set up deadlock alerts in the Azure portal - -To set up alerts for deadlock events, follow the steps in the article [Create alerts for Azure SQL Database and Azure Synapse Analytics using the Azure portal](alerts-insights-configure-portal.md). - -Select **Deadlocks** as the signal name for the alert. Configure the **Action group** to notify you using the method of your choice, such as the **Email/SMS/Push/Voice** action type. 
- -## Collect deadlock graphs in Azure SQL Database with Extended Events - -Deadlock graphs are a rich source of information regarding the processes and locks involved in a deadlock. To collect deadlock graphs with Extended Events (XEvents) in Azure SQL Database, capture the `sqlserver.database_xml_deadlock_report` event. - -You can collect deadlock graphs with XEvents using either the [ring buffer target](xevent-code-ring-buffer.md) or an [event file target](xevent-code-event-file.md). Considerations for selecting the appropriate target type are summarized in the following table: - - -|Approach |Benefits |Considerations |Usage scenarios | -|---------|---------|---------|---------| -|Ring buffer target |
    • Simple setup with Transact-SQL only.
    |
    • Event data is cleared when the XEvents session is stopped for any reason, such as taking the database offline or a database failover.
    • Database resources are used to maintain data in the ring buffer and to query session data.
    |
    • Collect sample trace data for testing and learning.
    • Create for short term needs if you cannot set up a session using an event file target immediately.
    • Use as a "landing pad" for trace data, when you have set up an automated process to persist trace data into a table.
    | -Event file target |
    • Persists event data to a blob in Azure Storage so data is available even after the session is stopped.
    • Event files may be downloaded from the Azure portal or [Azure Storage Explorer](#use-azure-storage-explorer) and analyzed locally, which does not require using database resources to query session data.
    |
    • Setup is more complex and requires configuration of an Azure Storage container and database scoped credential.
    |
    • General use when you want event data to persist even after the event session stops.
    • You want to run a trace that generates larger amounts of event data than you would like to persist in memory.
    | - -Select the target type you would like to use: - -# [Ring buffer target](#tab/ring-buffer) - -The ring buffer target is convenient and easy to set up, but has a limited capacity, which can cause older events to be lost. The ring buffer does not persist events to storage and the ring buffer target is cleared when the XEvents session is stopped. This means that any XEvents collected will not be available when the database engine restarts for any reason, such as a failover. The ring buffer target is best suited to learning and short-term needs if you do not have the ability to set up an XEvents session to an event file target immediately. - -This sample code creates an XEvents session that captures deadlock graphs in memory using the [ring buffer target](/sql/relational-databases/extended-events/targets-for-extended-events-in-sql-server#ring_buffer-target). The maximum memory allowed for the ring buffer target is 4 MB, and the session will automatically run when the database comes online, such as after a failover. - -To create and then start a XEvents session for the `sqlserver.database_xml_deadlock_report` event that writes to the ring buffer target, connect to your database and run the following Transact-SQL: - -```sql -CREATE EVENT SESSION [deadlocks] ON DATABASE -ADD EVENT sqlserver.database_xml_deadlock_report -ADD TARGET package0.ring_buffer -WITH (STARTUP_STATE=ON, MAX_MEMORY=4 MB) -GO - -ALTER EVENT SESSION [deadlocks] ON DATABASE - STATE = START; -GO -``` - -# [Event file target](#tab/event-file) - -The event file target persists deadlock graphs to files so they are available even after the XEvents session is stopped. The event file target also allows you to capture more deadlock graphs without allocating additional memory for a ring buffer. The event file target is suitable for long term use and for collecting larger amounts of trace data. - -To create an XEvents session that writes to an event file target, we will: - -1. 
Configure an Azure Storage container to hold the trace files using the Azure portal. -1. Create a database scoped credential with Transact-SQL. -1. Create the XEvents session with Transact-SQL. - -### Configure an Azure Storage container - -To configure an Azure Storage container, first create or select an existing Azure Storage account, then create the container. Generate a Shared Access Signature (SAS) token for the container. This section describes completing this process in the Azure portal. - -> [!NOTE] -> If you wish to create and configure the Azure Storage blob container with PowerShell, see [Event File target code for extended events in Azure SQL Database](xevent-code-event-file.md). Alternately, you may find it convenient to [Use Azure Storage Explorer](#use-azure-storage-explorer) to create and configure the Azure Storage blob container instead of using the Azure portal. - -#### Create or select an Azure Storage account - -You can use an existing Azure Storage account or create a new Azure Storage account to host a container for trace files. - -To use an existing Azure Storage account: -1. Navigate to the resource group you want to work with in the Azure portal. -1. On the **Overview** pane, Under **Resources**, set the **Type** dropdown to *Storage account*. -1. Select the storage account you want to use. - -To create a new Azure Storage account, follow the steps in [Create an Azure storage account](/azure/media-services/latest/storage-create-how-to). Complete the process by selecting **Go to resource** in the final step. - -#### Create a container - -From the storage account page in the Azure portal: - -1. Under **Data storage**, select **Containers**. -1. Select **+ Container** to create a new container. The New container pane will appear. -1. Enter a name for the container under **Name**. -1. Select **Create**. -1. Select the container from the list after it has been created. 
- -#### Create a shared access token - -From the container page in the Azure portal: - -1. Under **Settings**, select **Shared access tokens**. -1. Leave the **Signing method** radio button set to the default selection, **Account key**. -1. Under the **Permissions** dropdown, select the **Read**, **Write**, and **List** permissions. -1. Set **Start** to the date and time you would like to be able to write trace files. Optionally, configure the time zone in the dropdown below **Start**. -1. Set **Expiry** to the date and time you would like these permissions to expire. Optionally, configure the time zone in the dropdown below **Expiry**. You are able to set this to a date far in the future, such as ten years, if you wish. -1. Select **Generate SAS token and URL**. The Blob SAS token and Blob SAS URL will be displayed on the screen. -1. Copy and preserve the *Blob SAS token* and *Blob SAS URL* values for use in further steps. - -### Create a database scoped credential - -Connect to your database in Azure SQL Database with SSMS to run the following steps. - -To create a database scoped credential, you must first create a [master key](/sql/t-sql/statements/create-master-key-transact-sql) in the database if one does not exist. - -Run the following Transact-SQL to create a master key if one does not exist: - -```sql -IF 0 = (SELECT COUNT(*) - FROM sys.symmetric_keys - WHERE symmetric_key_id = 101 and name=N'##MS_DatabaseMasterKey##') -BEGIN - PRINT N'Creating master key'; - CREATE MASTER KEY; -END -ELSE -BEGIN - PRINT N'Master key already exists, no action taken'; -END -GO -``` - -Next, create a database scoped credential with the following Transact-SQL. Before running the code: -- Modify the URL to reflect your storage account name and your container name. This URL will be present at the beginning of the *Blob SAS URL* you copied when you created the shared access token. You only need the text prior to the first `?` in the string. 
-- Modify the `SECRET` to contain the *Blob SAS token* value you copied when you created the shared access token. - -```sql -CREATE DATABASE SCOPED CREDENTIAL - [https://yourstorageaccountname.blob.core.windows.net/yourcontainername] - WITH - IDENTITY = 'SHARED ACCESS SIGNATURE', - SECRET = 'sp=r&st=2022-04-08T14:34:21Z&se=2032-04-08T22:34:21Z&sv=2020-08-04&sr=c&sig=pUNbbsmDiMzXr1vuNGZh84zyOMBFaBjgWv53IhOzYWQ%3D' - ; -GO -``` - -### Create the XEvents session - -Create and start the XEvents session with the following Transact-SQL. Before running the statement: -- Replace the `filename` value to reflect your storage account name and your container name. This URL will be present at the beginning of the *Blob SAS URL* you copied when you created the shared access token. You only need the text prior to the first `?` in the string. -- Optionally change the filename stored. The filename you specify here will be part of the actual filename(s) used for the blob(s) storing event data: additional values will be appended so that all event files have a unique name. -- Optionally add additional events to the session. - -```sql -CREATE EVENT SESSION [deadlocks_eventfile] ON DATABASE -ADD EVENT sqlserver.database_xml_deadlock_report -ADD TARGET package0.event_file - (SET filename = - 'https://yourstorageaccountname.blob.core.windows.net/yourcontainername/deadlocks.xel' - ) -WITH (STARTUP_STATE=ON, MAX_MEMORY=4 MB) -GO - -ALTER EVENT SESSION [deadlocks_eventfile] ON DATABASE - STATE = START; -GO -``` - ---- - -## Cause a deadlock in AdventureWorksLT - -> [!NOTE] -> This example works in the AdventureWorksLT database with the default schema and data when RCSI has been enabled. See [Create the AdventureWorksLT database](#create-the-adventureworkslt-database) for instructions to create the database. - -To cause a deadlock, you will need to connect two sessions to the `AdventureWorksLT` database. We'll refer to these sessions as **Session A** and **Session B**. 
- -In **Session A**, run the following Transact-SQL. This code begins an [explicit transaction](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#starting-transactions) and runs a single statement that updates the `SalesLT.Product` table. To do this, the transaction acquires an [update (U) lock](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#behavior-when-modifying-data) on one row on table `SalesLT.Product` which is converted to an exclusive (X) lock. We leave the transaction open. - -```sql -BEGIN TRAN - - UPDATE SalesLT.Product SET SellEndDate = SellEndDate + 1 - WHERE Color = 'Red'; - -``` - -Now, in **Session B**, run the following Transact-SQL. This code doesn't explicitly begin a transaction. Instead, it operates in [autocommit transaction mode](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#starting-transactions). This statement updates the `SalesLT.ProductDescription` table. The update will take out an update (U) lock on 72 rows on the `SalesLT.ProductDescription` table. The query joins to other tables, including the `SalesLT.Product` table. - -```sql -UPDATE SalesLT.ProductDescription SET Description = Description - FROM SalesLT.ProductDescription as pd - JOIN SalesLT.ProductModelProductDescription as pmpd on - pd.ProductDescriptionID = pmpd.ProductDescriptionID - JOIN SalesLT.ProductModel as pm on - pmpd.ProductModelID = pm.ProductModelID - JOIN SalesLT.Product as p on - pm.ProductModelID=p.ProductModelID - WHERE p.Color = 'Silver'; -``` - -To complete this update, **Session B** needs a shared (S) lock on rows on the table `SalesLT.Product`, including the row that is locked by **Session A**. **Session B** will be blocked on `SalesLT.Product`. - -Return to **Session A**. Run the following Transact-SQL statement. This runs a second UPDATE statement as part of the open transaction. 
- -```sql - UPDATE SalesLT.ProductDescription SET Description = Description - FROM SalesLT.ProductDescription as pd - JOIN SalesLT.ProductModelProductDescription as pmpd on - pd.ProductDescriptionID = pmpd.ProductDescriptionID - JOIN SalesLT.ProductModel as pm on - pmpd.ProductModelID = pm.ProductModelID - JOIN SalesLT.Product as p on - pm.ProductModelID=p.ProductModelID - WHERE p.Color = 'Red'; -``` - -The second update statement in **Session A** will be blocked by **Session B** on the `SalesLT.ProductDescription`. - -**Session A** and **Session B** are now mutually blocking one another. Neither transaction can proceed, as they each need a resource that is locked by the other. - -After a few seconds, the deadlock monitor will identify that the transactions in **Session A** and **Session B** are mutually blocking one another, and that neither can make progress. You should see a deadlock occur, with **Session A** chosen as the deadlock victim. An error message will appear in **Session A** with text similar to the following: - -> Msg 1205, Level 13, State 51, Line 7 -> Transaction (Process ID 91) was deadlocked on lock resources with another process and has been chosen as the deadlock victim. Rerun the transaction. - -**Session B** will complete successfully. - -If you [set up deadlock alerts in the Azure portal](#set-up-deadlock-alerts-in-the-azure-portal), you should receive a notification shortly after the deadlock occurs. - -## View deadlock graphs from an XEvents session - -If you have [set up an XEvents session to collect deadlocks](#collect-deadlock-graphs-in-azure-sql-database-with-extended-events) and a deadlock has occurred after the session was started, you can view an interactive graphic display of the deadlock graph as well as the XML for the deadlock graph. - -Different methods are available to obtain deadlock information for the ring buffer target and event file targets. 
Select the target you used for your XEvents session: - -# [Ring buffer target](#tab/ring-buffer) - -If you set up an XEvents session writing to the ring buffer, you can query deadlock information with the following Transact-SQL. Before running the query, replace the value of `@tracename` with the name of your xEvents session. - -```sql -DECLARE @tracename sysname = N'deadlocks'; - -WITH ring_buffer AS ( - SELECT CAST(target_data AS XML) as rb - FROM sys.dm_xe_database_sessions AS s - JOIN sys.dm_xe_database_session_targets AS t - ON CAST(t.event_session_address AS BINARY(8)) = CAST(s.address AS BINARY(8)) - WHERE s.name = @tracename and - t.target_name = N'ring_buffer' -), dx AS ( - SELECT - dxdr.evtdata.query('.') as deadlock_xml_deadlock_report - FROM ring_buffer - CROSS APPLY rb.nodes('/RingBufferTarget/event[@name=''database_xml_deadlock_report'']') AS dxdr(evtdata) -) -SELECT - d.query('/event/data[@name=''deadlock_cycle_id'']/value').value('(/value)[1]', 'int') AS [deadlock_cycle_id], - d.value('(/event/@timestamp)[1]', 'DateTime2') AS [deadlock_timestamp], - d.query('/event/data[@name=''database_name'']/value').value('(/value)[1]', 'nvarchar(256)') AS [database_name], - d.query('/event/data[@name=''xml_report'']/value/deadlock') AS deadlock_xml, - LTRIM(RTRIM(REPLACE(REPLACE(d.value('.', 'nvarchar(2000)'),CHAR(10),' '),CHAR(13),' '))) as query_text -FROM dx -CROSS APPLY deadlock_xml_deadlock_report.nodes('(/event/data/value/deadlock/process-list/process/inputbuf)') AS ib(d) -ORDER BY [deadlock_timestamp] DESC; -GO -``` - -# [Event file target](#tab/event-file) - -If you set up an XEvents session writing to an event file, you can download files from the Azure portal and view them locally, or you can query event files with Transact-SQL. - -Downloading files from the Azure portal is recommended because this method does not require using database resources to query session data. 
- -### Optionally restart the XEvents session - -If an Extended Events session is currently running and writing to an event file target, the blob container being written to will have a **Lease state** of *Leased* in the Azure portal. The size will be the maximum size of the file. To download a smaller file, you may wish to stop and restart the Extended Events session before downloading files. This will cause the file to change its **Lease state** to *Available*, and the file size will be the space used by events in the file. - -To stop and restart an XEvents session, connect to your database and run the following Transact-SQL. Before running the code, replace the name of the xEvents session with the appropriate value. - -```sql -ALTER EVENT SESSION [deadlocks_eventfile] ON DATABASE - STATE = STOP; -GO -ALTER EVENT SESSION [deadlocks_eventfile] ON DATABASE - STATE = START; -GO -``` - -### Download trace files from the Azure portal - -To view deadlock events that have been collected across multiple files, download the event session files to your local computer and view the files in SSMS. - -> [!NOTE] -> You can also [use Azure Storage Explorer](#use-azure-storage-explorer) to quickly and conveniently download event session files from a blob container in Azure Storage. - -To download the files from the Azure portal: - -1. Navigate to the storage account hosting your container in the Azure portal. -1. Under **Data storage**, select **Containers**. -1. Select the container holding your XEvent trace files. -1. For each file you wish to download, select **...**, then **Download**. - -### View XEvents trace files in SSMS - -If you have downloaded multiple files, you can open events from all of the files together in the XEvents viewer in SSMS. To do so: -1. Open SSMS. -1. Select **File**, then **Open**, then **Merge Extended Events files...**. -1. Select **Add**. -1. Navigate to the directory where you downloaded the files. Use the **Shift** key to select multiple files. 
-1. Select **Open**. -1. Select **OK** in the **Merge Extended Events Files** dialog. - -If you have downloaded a single file, right-click the file and select **Open with**, then **SSMS**. This will open the XEvents viewer in SSMS. - -Navigate between events collected by selecting the relevant timestamp. To view the XML for a deadlock, double-click the `xml_report` row in the lower pane. - -### Query trace files with Transact-SQL - -> [!Important] -> Querying large (1 GB and larger) XEvents trace files using this method is not recommended because it may consume large amounts of memory in your database or elastic pool. - -To query XEvents trace files from an Azure Storage container with Transact-SQL, you must provide the exact file name for the trace file. You must also run the query in the context of the database with the credential to access the storage, in other words, the same database that has created the XEvents files. - -Run the following Transact-SQL to query the currently active XEvents trace file. Before running the query, replace `@tracename` with the name of your XEvents session. 
- -```sql -DECLARE @tracename sysname = N'deadlocks_eventfile', - @filename nvarchar(2000); - -WITH eft as (SELECT CAST(target_data AS XML) as rb - FROM sys.dm_xe_database_sessions AS s - JOIN sys.dm_xe_database_session_targets AS t - ON CAST(t.event_session_address AS BINARY(8)) = CAST(s.address AS BINARY(8)) - WHERE s.name = @tracename and - t.target_name = N'event_file' -) -SELECT @filename = ft.evtdata.value('(@name)[1]','nvarchar(2000)') -FROM eft -CROSS APPLY rb.nodes('EventFileTarget/File') as ft(evtdata); - -WITH xevents AS ( - SELECT cast(event_data as XML) as ed - FROM sys.fn_xe_file_target_read_file(@filename, null, null, null ) -), dx AS ( - SELECT - dxdr.evtdata.query('.') as deadlock_xml_deadlock_report - FROM xevents - CROSS APPLY ed.nodes('/event[@name=''database_xml_deadlock_report'']') AS dxdr(evtdata) -) -SELECT - d.query('/event/data[@name=''deadlock_cycle_id'']/value').value('(/value)[1]', 'int') AS [deadlock_cycle_id], - d.value('(/event/@timestamp)[1]', 'DateTime2') AS [deadlock_timestamp], - d.query('/event/data[@name=''database_name'']/value').value('(/value)[1]', 'nvarchar(256)') AS [database_name], - d.query('/event/data[@name=''xml_report'']/value/deadlock') AS deadlock_xml, - LTRIM(RTRIM(REPLACE(REPLACE(d.value('.', 'nvarchar(2000)'),CHAR(10),' '),CHAR(13),' '))) as query_text -FROM dx -CROSS APPLY deadlock_xml_deadlock_report.nodes('(/event/data/value/deadlock/process-list/process/inputbuf)') AS ib(d) -ORDER BY [deadlock_timestamp] DESC; -GO -``` - -To query non-active files, navigate to the Storage Account and container in the Azure portal to identify the filenames. - -Run the following Transact-SQL query against your database to query a specific XEvents file. 
Before running the query, substitute the storage account name, container name, and filename in the URL for `@filename`: - -```sql -declare @filename nvarchar(2000) = N'https://yourstorageaccountname.blob.core.windows.net/yourcontainername/yourfilename.xel'; - -with xevents AS ( - SELECT cast(event_data as XML) as ed - FROM sys.fn_xe_file_target_read_file(@filename, null, null, null ) -), dx AS ( - SELECT - dxdr.evtdata.query('.') as deadlock_xml_deadlock_report - FROM xevents - CROSS APPLY ed.nodes('/event[@name=''database_xml_deadlock_report'']') AS dxdr(evtdata) -) -SELECT - d.query('/event/data[@name=''deadlock_cycle_id'']/value').value('(/value)[1]', 'int') AS [deadlock_cycle_id], - d.value('(/event/@timestamp)[1]', 'DateTime2') AS [deadlock_timestamp], - d.query('/event/data[@name=''database_name'']/value').value('(/value)[1]', 'nvarchar(256)') AS [database_name], - d.query('/event/data[@name=''xml_report'']/value/deadlock') AS deadlock_xml, - LTRIM(RTRIM(REPLACE(REPLACE(d.value('.', 'nvarchar(2000)'),CHAR(10),' '),CHAR(13),' '))) as query_text -FROM dx -CROSS APPLY deadlock_xml_deadlock_report.nodes('(/event/data/value/deadlock/process-list/process/inputbuf)') AS ib(d) -ORDER BY [deadlock_timestamp] DESC; -GO -``` - ---- - -### View and save a deadlock graph in XML - -Viewing a deadlock graph in XML format allows you to copy the `inputbuffer` of Transact-SQL statements involved in the deadlock. You may also prefer to analyze deadlocks in a text-based format. - -If you have used a Transact-SQL query to return deadlock graph information, to view the deadlock graph XML, select the value in the `deadlock_xml` column from any row to open the deadlock graph's XML in a new window in SSMS. 
- -The XML for this example deadlock graph is: - -```xml - - - - - - - - -unknown - - - UPDATE SalesLT.ProductDescription SET Description = Description - FROM SalesLT.ProductDescription as pd - JOIN SalesLT.ProductModelProductDescription as pmpd on - pd.ProductDescriptionID = pmpd.ProductDescriptionID - JOIN SalesLT.ProductModel as pm on - pmpd.ProductModelID = pm.ProductModelID - JOIN SalesLT.Product as p on - pm.ProductModelID=p.ProductModelID - WHERE p.Color = 'Red' - - - - -unknown - - - UPDATE SalesLT.ProductDescription SET Description = Description - FROM SalesLT.ProductDescription as pd - JOIN SalesLT.ProductModelProductDescription as pmpd on - pd.ProductDescriptionID = pmpd.ProductDescriptionID - JOIN SalesLT.ProductModel as pm on - pmpd.ProductModelID = pm.ProductModelID - JOIN SalesLT.Product as p on - pm.ProductModelID=p.ProductModelID - WHERE p.Color = 'Silver'; - - - - - - - - - - - - - - - - - - - - - -``` - -To save the deadlock graph as an XML file: - -1. Select **File** and **Save As...**. -1. Leave the **Save as type** value as the default **XML Files (*.xml)** -1. Set the **File name** to the name of your choice. -1. Select **Save**. - -### Save a deadlock graph as an XDL file that can be displayed interactively in SSMS - -Viewing an interactive representation of a deadlock graph can be useful to get a quick overview of the processes and resources involved in a deadlock, and quickly identifying the deadlock victim. - -To save a deadlock graph as a file that can be graphically displayed by SSMS: - -1. Select the value in the `deadlock_xml` column from any row to open the deadlock graph's XML in a new window in SSMS. -1. Select **File** and **Save As...**. -1. Set **Save as type** to **All Files**. -1. Set the **File name** to the name of your choice, with the extension set to **.xdl**. -1. Select **Save**. 
- - :::image type="content" source="media/analyze-prevent-deadlocks/ssms-save-deadlock-file-xdl.png" alt-text="A screenshot in SSMS of saving a deadlock graph XML file to a file with the xdl extension." lightbox="media/analyze-prevent-deadlocks/ssms-save-deadlock-file-xdl.png"::: - -1. Close the file by selecting the **X** on the tab at the top of the window, or by selecting **File**, then **Close**. -1. Reopen the file in SSMS by selecting **File**, then **Open**, then **File**. Select the file you saved with the `.xdl` extension. - - The deadlock graph will now display in SSMS with a visual representation of the processes and resources involved in the deadlock. - - :::image type="content" source="media/analyze-prevent-deadlocks/ssms-deadlock-graph-xdl-file-graphic-display.png" alt-text="Screenshot of an xdl file opened in SSMS. The deadlock graph is displayed graphically, with processes indicated by ovals and lock resources as rectangles." lightbox="media/analyze-prevent-deadlocks/ssms-deadlock-graph-xdl-file-graphic-display.png"::: - -## Analyze a deadlock for Azure SQL Database - -A deadlock graph typically has three nodes: - -- **Victim-list**. The deadlock victim process identifier. -- **Process-list**. Information on all the processes involved in the deadlock. Deadlock graphs use the term 'process' to represent a session running a transaction. -- **Resource-list**. Information about the resources involved in the deadlock. - -When analyzing a deadlock, it is useful to step through these nodes. - -### Deadlock victim list - -The deadlock victim list shows the process that was chosen as the deadlock victim. In the visual representation of a deadlock graph, processes are represented by ovals. The deadlock victim process has an "X" drawn over the oval. - -:::image type="content" source="media/analyze-prevent-deadlocks/deadlock-graph-deadlock-victim.png" alt-text="Screenshot of the visual display of a deadlock. 
The oval representing the process selected as victim has an X drawn across it." lightbox="media/analyze-prevent-deadlocks/deadlock-graph-deadlock-victim.png"::: - -In the [XML view of a deadlock graph](#view-and-save-a-deadlock-graph-in-xml), the `victim-list` node gives an ID for the process that was the victim of the deadlock. - -In our example deadlock, the victim process ID is **process24756e75088**. We can use this ID when examining the process-list and resource-list nodes to learn more about the victim process and the resources it was locking or requesting to lock. - -### Deadlock process list - -The deadlock process list is a rich source of information about the transactions involved in the deadlock. - -The graphic representation of the deadlock graph shows only a subset of information contained in the deadlock graph XML. The ovals in the deadlock graph represent the process, and show information including the: - -- Server process ID, also known as the session ID or SPID. -- [Deadlock priority](/sql/t-sql/statements/set-deadlock-priority-transact-sql) of the session. If two sessions have different deadlock priorities, the session with the lower priority is chosen as the deadlock victim. In this example, both sessions have the same deadlock priority. -- The amount of transaction log used by the session in bytes. If both sessions have the same deadlock priority, the deadlock monitor chooses the session that is less expensive to roll back as the deadlock victim. The cost is determined by comparing the number of log bytes written to that point in each transaction. - - In our example deadlock, session_id 89 had used a lower amount of transaction log, and was selected as the deadlock victim. - -Additionally, you can view the [input buffer](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-input-buffer-transact-sql) for the last statement run in each session prior to the deadlock by hovering the mouse over each process. 
The input buffer will appear in a tooltip. - -:::image type="content" source="media/analyze-prevent-deadlocks/deadlock-graph-process-list.png" alt-text="Screenshot of a deadlock graph displayed visually in SSMS. Two ovals represent processes. The inputbuff for one process is shown." lightbox="media/analyze-prevent-deadlocks/deadlock-graph-process-list.png"::: - -Additional information is available for processes in the [XML view of the deadlock graph](#view-and-save-a-deadlock-graph-in-xml), including: - -- Identifying information for the session, such as the client name, host name, and login name. -- The query plan hash for the last statement run by each session prior to the deadlock. The query plan hash is useful for retrieving more information about the query from [Query Store](/sql/relational-databases/performance/monitoring-performance-by-using-the-query-store). - -In our example deadlock: - -- We can see that both sessions were run using the SSMS client under the **chrisqpublic** login. -- The query plan hash of the last statement run prior to the deadlock by our deadlock victim is **0x02b0f58d7730f798**. We can see the text of this statement in the input buffer. -- The query plan hash of the last statement run by the other session in our deadlock is also **0x02b0f58d7730f798**. We can see the text of this statement in the input buffer. In this case, both queries have the same query plan hash because the queries are identical, except for a literal value used as an equality predicate. - -We'll use these values later in this article to [find additional information in Query Store](#find-query-execution-plans-in-query-store). - -#### Limitations of the input buffer in the deadlock process list - -There are some limitations to be aware of regarding input buffer information in the deadlock process list. - -Query text may be truncated in the input buffer. The input buffer is limited to the first 4,000 characters of the statement being executed. 
- -Additionally, some statements involved in the deadlock may not be included in the deadlock graph. In our example, **Session A** ran two update statements within a single transaction. Only the second update statement, the update that caused the deadlock, is included in the deadlock graph. The first update statement run by **Session A** played a part in the deadlock by blocking **Session B**. The input buffer, `query_hash`, and related information for the first statement run by **Session A** is not included in the deadlock graph. - -To identify the full Transact-SQL run in a multi-statement transaction involved in a deadlock, you will need to either find the relevant information in the stored procedure or application code that ran the query, or run a trace using [Extended Events](/sql/relational-databases/extended-events/extended-events) to capture full statements run by sessions involved in a deadlock while it occurs. If a statement involved in the deadlock has been truncated and only partial Transact-SQL appears in the input buffer, you can find the [Transact-SQL for the statement in Query Store with the Execution Plan](#find-query-execution-plans-in-query-store). - -### Deadlock resource list - -The deadlock resource list shows which lock resources are owned and waited on by the processes in the deadlock. - -Resources are represented by rectangles in the visual representation of the deadlock: - -:::image type="content" source="media/analyze-prevent-deadlocks/deadlock-graph-resource-list.png" alt-text="Screenshot of a deadlock graph, displayed visually in SSMS. Rectangles show the resources that are involved in the deadlock." lightbox="media/analyze-prevent-deadlocks/deadlock-graph-resource-list.png"::: - -> [!NOTE] -> You may notice that database names are represented as unique identifiers in deadlock graphs for databases in Azure SQL Database. 
This is the `physical_database_name` for the database listed in the [sys.databases](/sql/relational-databases/system-catalog-views/sys-databases-transact-sql) and [sys.dm_user_db_resource_governance](/sql/relational-databases/system-dynamic-management-views/sys-dm-user-db-resource-governor-azure-sql-database) dynamic management views. - -In this example deadlock: - -- The deadlock victim, which we have referred to as **Session A**: - - Owns an exclusive (X) lock on a key on the `PK_Product_ProductID` index on the `SalesLT.Product` table. - - Requests an update (U) lock on a key on the `PK_ProductDescription_ProductDescriptionID` index on the `SalesLT.ProductDescription` table. - -- The other process, which we have referred to as **Session B**: - - Owns an update (U) lock on a key on the `PK_ProductDescription_ProductDescriptionID` index on the `SalesLT.ProductDescription` table. - - Requests a shared (S) lock on a key on the `PK_ProductDescription_ProductDescriptionID` index on the `SalesLT.ProductDescription` table. - -We can see the same information in the [XML of the deadlock graph](#view-and-save-a-deadlock-graph-in-xml) in the **resource-list** node. - -### Find query execution plans in Query Store - -It is often useful to examine the query execution plans for statements involved in the deadlock. These execution plans can often be found in Query Store using the query plan hash from the XML view of the deadlock graph's [process list](#deadlock-process-list). - -This Transact-SQL query looks for query plans matching the query plan hash we found for our example deadlock. Connect to the user database in Azure SQL Database to run the query. 
- -```sql -DECLARE @query_plan_hash binary(8) = 0x02b0f58d7730f798 - -SELECT - qrsi.end_time as interval_end_time, - qs.query_id, - qp.plan_id, - qt.query_sql_text, - TRY_CAST(qp.query_plan as XML) as query_plan, - qrs.count_executions -FROM sys.query_store_query as qs -JOIN sys.query_store_query_text as qt on qs.query_text_id=qt.query_text_id -JOIN sys.query_store_plan as qp on qs.query_id=qp.query_id -JOIN sys.query_store_runtime_stats qrs on qp.plan_id = qrs.plan_id -JOIN sys.query_store_runtime_stats_interval qrsi on qrs.runtime_stats_interval_id=qrsi.runtime_stats_interval_id -WHERE query_plan_hash = @query_plan_hash -ORDER BY interval_end_time, query_id; -GO -``` - -You may not be able to obtain a query execution plan from Query Store, depending on your Query Store [CLEANUP_POLICY or QUERY_CAPTURE_MODE settings](/sql/t-sql/statements/alter-database-transact-sql-set-options#query-store). In this case, you can often get needed information by [displaying the estimated execution plan](/sql/relational-databases/performance/display-the-estimated-execution-plan) for the query. - -### Look for patterns that increase blocking - -When examining query execution plans involved in deadlocks, look out for patterns that may contribute to blocking and deadlocks. - -- **Table or index scans**. When queries modifying data are run under RCSI, the selection of rows to update is done using a blocking scan where an update (U) lock is taken on the data row as data values are read. If the data row does not meet the update criteria, the update lock is released and the next row is locked and scanned. - - Tuning indexes to help modification queries find rows more efficiently reduces the number of update locks issued. This reduces the chances of blocking and deadlocks. - -- **Indexed views referencing more than one table**. When you modify a table that is referenced in an indexed view, the database engine must also maintain the indexed view. 
This requires taking out more locks and can lead to increased blocking and deadlocks. Indexed views may also cause update operations to internally execute under the read committed isolation level. - -- **Modifications to columns referenced in foreign key constraints**. When you modify columns in a table that are referenced in a FOREIGN KEY constraint, the database engine must look for related rows in the referencing table. Row versions cannot be used for these reads. In cases where cascading updates or deletes are enabled, the isolation level may be escalated to serializable for the duration of the statement to protect against phantom inserts. - -- **Lock hints**. Look for [table hints](/sql/t-sql/queries/hints-transact-sql-table) that specify isolation levels requiring more locks. These hints include `HOLDLOCK` (which is equivalent to serializable), `SERIALIZABLE`, `READCOMMITTEDLOCK` (which disables RCSI), and `REPEATABLEREAD`. Additionally, hints such as `PAGLOCK`, `TABLOCK`, `UPDLOCK`, and `XLOCK` can increase the risks of blocking and deadlocks. - - If these hints are in place, research why the hints were implemented. These hints may prevent race conditions and ensure data validity. It may be possible to leave these hints in place and prevent future deadlocks using an alternate method in the [Prevent a deadlock from reoccurring](#prevent-a-deadlock-from-reoccurring) section of this article if necessary. - - > [!NOTE] - > Learn more about behavior when modifying data using row versioning in the [Transaction locking and row versioning guide](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#behavior-when-modifying-data). - -When examining the full code for a transaction, either in an execution plan or in application query code, look for additional problematic patterns: - -- **User interaction in transactions**. User interaction inside an explicit multi-statement transaction significantly increases the duration of transactions. 
This makes it more likely for these transactions to overlap and for blocking and deadlocks to occur. - - Similarly, holding an open transaction and querying an unrelated database or system mid-transaction significantly increases the chances of blocking and deadlocks. - -- **Transactions accessing objects in different orders**. Deadlocks are less likely to occur when concurrent explicit multi-statement transactions follow the same patterns and access objects in the same order. - -## Prevent a deadlock from reoccurring - -There are multiple techniques available to prevent deadlocks from reoccurring, including index tuning, forcing plans with Query Store, and modifying Transact-SQL queries. - -- **Review the table's clustered index**. Most tables benefit from clustered indexes, but often, tables are implemented as [heaps](/sql/relational-databases/indexes/heaps-tables-without-clustered-indexes) by accident. - - One way to check for a clustered index is by using the [sp_helpindex](/sql/relational-databases/system-stored-procedures/sp-helpindex-transact-sql) system stored procedure. For example, we can view a summary of the indexes on the `SalesLT.Product` table by executing the following statement: - - ```sql - exec sp_helpindex 'SalesLT.Product'; - GO - ``` - - Review the index_description column. A table can have only one clustered index. If a clustered index has been implemented for the table, the index_description will contain the word 'clustered'. - - If no clustered index is present, the table is a heap. In this case, review if the table was intentionally created as a heap to solve a specific performance problem. Consider implementing a clustered index based on the [clustered index design guidelines](/sql/relational-databases/sql-server-index-design-guide#Clustered). - - In some cases, creating or tuning a clustered index may reduce or eliminate blocking in deadlocks. In other cases, you may need to employ an additional technique such as the others in this list. 
- -- **Create or modify nonclustered indexes.** Tuning nonclustered indexes can help your modification queries find the data to update more quickly, which reduces the number of update locks required. - - In our example deadlock, the query execution plan [found in Query Store](#find-query-execution-plans-in-query-store) contains a clustered index scan against the `PK_Product_ProductID` index. The deadlock graph indicates that a shared (S) lock wait on this index is a component in the deadlock. - - :::image type="content" source="media/analyze-prevent-deadlocks/deadlock-execution-plan-clustered-index-scan.png" alt-text="Screenshot of a query execution plan. A clustered index scan is being performed against the PK_Product_ProductID index on the Product table."::: - - This index scan is being performed because our update query needs to modify an indexed view named `vProductAndDescription`. As mentioned in the [Look for patterns that increase blocking](#look-for-patterns-that-increase-blocking) section of this article, indexed views referencing multiple tables may increase blocking and the likelihood of deadlocks. - - If we create the following nonclustered index in the `AdventureWorksLT` database that "covers" the columns from `SalesLT.Product` referenced by the indexed view, this helps the query find rows much more efficiently: - - ```sql - CREATE INDEX ix_Product_ProductID_Name_ProductModelID on SalesLT.Product (ProductID, Name, ProductModelID); - GO - ``` - - After creating this index, the deadlock no longer reoccurs. - - When deadlocks involve modifications to columns referenced in foreign key constraints, ensure that indexes on the referencing table of the FOREIGN KEY support efficiently finding related rows. - - While indexes can dramatically improve query performance in some cases, indexes also have overhead and management costs. 
Review [general index design guidelines](/sql/relational-databases/sql-server-index-design-guide#General_Design) to help assess the benefit of indexes before creating indexes, especially wide indexes and indexes on large tables. - -- **Assess the value of indexed views**. Another option to prevent our example deadlock from reoccurring is to drop the `SalesLT.vProductAndDescription` indexed view. If that indexed view is not being used, this will reduce the overhead of maintaining the indexed view over time. - -- **Use Snapshot isolation**. In some cases, [setting the transaction isolation level](/sql/t-sql/statements/set-transaction-isolation-level-transact-sql) to snapshot for one or more of the transactions involved in a deadlock may prevent blocking and deadlocks from reoccurring. - - This technique is most likely to be successful when used on SELECT statements when [read committed snapshot is disabled in a database](#how-deadlocks-occur-in-azure-sql-database). When read committed snapshot is disabled, SELECT queries using the read committed isolation level require shared (S) locks. Using snapshot isolation on these transactions removes the need for shared locks, which can prevent blocking and deadlocks. - - In databases where read committed snapshot isolation has been enabled, SELECT queries do not require shared (S) locks, so deadlocks are more likely to occur between transactions that are modifying data. In cases where deadlocks occur between multiple transactions modifying data, snapshot isolation may result in an [update conflict](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#behavior-in-summary) instead of a deadlock. This similarly requires one of the transactions to retry its operation. - -- **Force a plan with Query Store**. You may find that one of the queries in the deadlock has multiple execution plans, and the deadlock only occurs when a specific plan is used. 
You can prevent the deadlock from reoccurring by [forcing a plan](/sql/relational-databases/system-stored-procedures/sp-query-store-force-plan-transact-sql) in Query Store. - -- **Modify the Transact-SQL**. You may need to modify Transact-SQL to prevent the deadlock from reoccurring. Modifying Transact-SQL should be done carefully and changes should be rigorously tested to ensure that data is correct when modifications run concurrently. When rewriting Transact-SQL, consider: - - Ordering statements in transactions so that they access objects in the same order. - - Breaking apart transactions into smaller transactions when possible. - - Using query hints, if necessary, to optimize performance. You can apply hints without changing application code [using Query Store](/sql/relational-databases/performance/query-store-hints?view=azuresqldb-current&preserve-view=true). - -Find more ways to [minimize deadlocks in the Transaction locking and row versioning guide](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#deadlock_minimizing). - -> [!NOTE] -> In some cases, you may wish to [adjust the deadlock priority](/sql/t-sql/statements/set-deadlock-priority-transact-sql) of one or more sessions involved in a deadlock if it is important for one of the sessions to complete successfully without retrying, or when one of the queries involved in the deadlock is not critical and should be always chosen as the victim. While this does not prevent the deadlock from reoccurring, it may reduce the impact of future deadlocks. - -## Drop an XEvents session - -You may wish to leave an XEvents session collecting deadlock information running on critical databases for long periods. Be aware that if you use an event file target, this may result in large files if multiple deadlocks occur. You may delete blob files from Azure Storage for an active trace, except for the file that is currently being written to. 
- -When you wish to remove an XEvents session, the Transact-SQL to drop the session is the same, regardless of the target type selected. - -To remove an XEvents session, run the following Transact-SQL. Before running the code, replace the name of the session with the appropriate value. - -```sql -ALTER EVENT SESSION [deadlocks] ON DATABASE - STATE = STOP; -GO - -DROP EVENT SESSION [deadlocks] ON DATABASE; -GO -``` - -## Use Azure Storage Explorer - -[Azure Storage Explorer](../../vs-azure-tools-storage-manage-with-storage-explorer.md) is a standalone application that simplifies working with event file targets stored in blobs in Azure Storage. You can use Storage Explorer to: - -- [Create a blob container](../../vs-azure-tools-storage-explorer-blobs.md#create-a-blob-container) to hold XEvent session data. -- [Get the shared access signature (SAS)](../../vs-azure-tools-storage-explorer-blobs.md#get-the-sas-for-a-blob-container) for a blob container. - - As mentioned in [Collect deadlock graphs in Azure SQL Database with Extended Events](#collect-deadlock-graphs-in-azure-sql-database-with-extended-events), the read, write, and list permissions are required. - - Remove any leading `?` character from the `Query string` to use the value as the secret when [creating a database scoped credential](?tabs=event-file#create-a-database-scoped-credential). -- [View and download](../../vs-azure-tools-storage-explorer-blobs.md#view-a-blob-containers-contents) extended event files from a blob container. - -[Download Azure Storage Explorer](https://azure.microsoft.com/features/storage-explorer/). 
- -## Next steps - -Learn more about performance in Azure SQL Database: - -- [Understand and resolve Azure SQL Database blocking problems](understand-resolve-blocking.md) -- [Transaction Locking and Row Versioning Guide](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide) -- [SET TRANSACTION ISOLATION LEVEL](/sql/t-sql/statements/set-transaction-isolation-level-transact-sql) -- [Azure SQL Database: Improving Performance Tuning with Automatic Tuning](/Shows/Data-Exposed/Azure-SQL-Database-Improving-Performance-Tuning-with-Automatic-Tuning) -- [Deliver consistent performance with Azure SQL](/learn/modules/azure-sql-performance/) -- [Retry logic for transient errors](troubleshoot-common-connectivity-issues.md#retry-logic-for-transient-errors). \ No newline at end of file diff --git a/articles/azure-sql/database/application-authentication-get-client-id-keys.md b/articles/azure-sql/database/application-authentication-get-client-id-keys.md deleted file mode 100644 index 234bbcccd1d8f..0000000000000 --- a/articles/azure-sql/database/application-authentication-get-client-id-keys.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: Get values for app authentication -description: Create a service principal for accessing Azure SQL Database from code. -services: sql-database -ms.service: sql-database -ms.subservice: development -ms.custom: sqldbrb=1, devx-track-azurecli, devx-track-azurepowershell -ms.topic: how-to -author: VanMSFT -ms.author: vanto -ms.reviewer: kendralittle, mathoma -ms.date: 03/12/2019 ---- -# Get the required values for authenticating an application to access Azure SQL Database from code -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -To create and manage Azure SQL Database from code you must register your app in the Azure Active Directory (Azure AD) domain in the subscription where your Azure resources have been created. 
- -## Create a service principal to access resources from an application - -The following examples create the Active Directory (AD) application and the service principal that we need to authenticate our C# app. The script outputs values we need for the preceding C# sample. For detailed information, see [Use Azure PowerShell to create a service principal to access resources](../../active-directory/develop/howto-authenticate-service-principal-powershell.md). - -# [PowerShell](#tab/azure-powershell) - -> [!IMPORTANT] -> The PowerShell Azure Resource Manager (RM) module is still supported by SQL Database, but all future development is for the Az.Sql module. The AzureRM module will continue to receive bug fixes until at least December 2020. The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. For more about their compatibility, see [Introducing the new Azure PowerShell Az module](/powershell/azure/new-azureps-module-az). - -```powershell -# sign in to Azure -Connect-AzAccount - -# for multiple subscriptions, uncomment and set to the subscription you want to work with -#$subscriptionId = "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" -#Set-AzContext -SubscriptionId $subscriptionId - -$appName = "{app-name}" # display name for your app, must be unique in your directory -$uri = "http://{app-name}" # does not need to be a real uri -$secret = "{app-password}" - -# create an AAD app -$azureAdApplication = New-AzADApplication -DisplayName $appName -HomePage $Uri -IdentifierUris $Uri -Password $secret - -# create a Service Principal for the app -$svcprincipal = New-AzADServicePrincipal -ApplicationId $azureAdApplication.ApplicationId - -Start-Sleep -s 15 # to avoid a PrincipalNotFound error, pause here for 15 seconds - -# if you still get a PrincipalNotFound error, then rerun the following until successful. 
-$roleassignment = New-AzRoleAssignment -RoleDefinitionName Contributor -ServicePrincipalName $azureAdApplication.ApplicationId.Guid - -# output the values we need for our C# application to successfully authenticate -Write-Output "Copy these values into the C# sample app" - -Write-Output "_subscriptionId:" (Get-AzContext).Subscription.SubscriptionId -Write-Output "_tenantId:" (Get-AzContext).Tenant.TenantId -Write-Output "_applicationId:" $azureAdApplication.ApplicationId.Guid -Write-Output "_applicationSecret:" $secret -``` - -# [Azure CLI](#tab/azure-cli) - -```azurecli -# sign in to Azure -az login - -# for multiple subscriptions, uncomment and set to the subscription you want to work with -#$subscriptionId = "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" -#az account set --subscription $subscriptionId - -$appName = "{app-name}" # display name for your app, must be unique in your directory -$uri = "http://{app-name}" # does not need to be a real uri -$secret = "{app-password}" - -# create an AAD app -$azureAdApplication = az ad app create --display-name $appName --homepage $Uri --identifier-uris $Uri --password $secret - -# create a Service Principal for the app -$svcprincipal = az ad sp create --id $azureAdApplication.ApplicationId - -Start-Sleep -s 15 # to avoid a PrincipalNotFound error, pause for 15 seconds - -# if you still get a PrincipalNotFound error, then rerun the following until successful. 
-$roleassignment = az role assignment create --role "Contributor" --scope /subscriptions/{Subscription-id}/resourceGroups/{resource-group-name} --assignee $azureAdApplication.ApplicationId.Guid - -# output the values we need for our C# application to successfully authenticate -Write-Output "Copy these values into the C# sample app" - -Write-Output "-subscriptionId:" (az account show --query "id") -Write-Output "_tenantId:" (az account show --query "tenantId") -Write-Output "_applicationId:" $azureAdApplication.ApplicationId.Guid -Write-Output "_applicationSecret:" $secret -``` - -* * * - -## See also - -[Create a database in Azure SQL Database with C#](design-first-database-csharp-tutorial.md) -[Connect to Azure SQL Database by using Azure Active Directory Authentication](authentication-aad-overview.md) diff --git a/articles/azure-sql/database/arm-templates-content-guide.md b/articles/azure-sql/database/arm-templates-content-guide.md deleted file mode 100644 index 604381c331213..0000000000000 --- a/articles/azure-sql/database/arm-templates-content-guide.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Azure Resource Manager templates - Azure SQL Database & SQL Managed Instance -description: Use Azure Resource Manager templates to create and configure Azure SQL Database and Azure SQL Managed Instance. -services: sql-database -ms.service: sql-db-mi -ms.subservice: deployment-configuration -ms.custom: overview-samples sqldbrb=2 -ms.devlang: -ms.topic: guide -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: kendralittle, mathoma -ms.date: 06/30/2021 ---- - -# Azure Resource Manager templates for Azure SQL Database & SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Azure Resource Manager templates enable you to define your infrastructure as code and deploy your solutions to the Azure cloud for Azure SQL Database and Azure SQL Managed Instance. 
- -## [Azure SQL Database](#tab/single-database) - -The following table includes links to Azure Resource Manager templates for Azure SQL Database. - -|Link |Description| -|---|---| -| [SQL Database](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sql/sql-database-transparent-encryption-create) | This Azure Resource Manager template creates a single database in Azure SQL Database and configures server-level IP firewall rules. | -| [Server](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sql/sql-logical-server) | This Azure Resource Manager template creates a server for Azure SQL Database. | -| [Elastic pool](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sql/sql-elastic-pool-create) | This template allows you to deploy an elastic pool and to assign databases to it. | -| [Failover groups](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sql/sql-with-failover-group) | This template creates two servers, a single database, and a failover group in Azure SQL Database.| -| [Threat Detection](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sql/sql-threat-detection-db-policy-multiple-databases) | This template allows you to deploy a server and a set of databases with Threat Detection enabled, with an email address for alerts for each database. Threat Detection is part of the SQL Advanced Threat Protection (ATP) offering and provides a layer of security that responds to potential threats over servers and databases.| -| [Auditing to Azure Blob storage](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sql/sql-auditing-server-policy-to-blob-storage) | This template allows you to deploy a server with auditing enabled to write audit logs to a Blob storage. 
Auditing for Azure SQL Database tracks database events and writes them to an audit log that can be placed in your Azure storage account, OMS workspace, or Event Hubs.| -| [Auditing to Azure Event Hub](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sql/sql-auditing-server-policy-to-eventhub) | This template allows you to deploy a server with auditing enabled to write audit logs to an existing event hub. In order to send audit events to Event Hubs, set auditing settings with `Enabled` `State`, and set `IsAzureMonitorTargetEnabled` as `true`. Also, configure Diagnostic Settings with the `SQLSecurityAuditEvents` log category on the `master` database (for server-level auditing). Auditing tracks database events and writes them to an audit log that can be placed in your Azure storage account, OMS workspace, or Event Hubs.| -| [Azure Web App with SQL Database](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.web/web-app-sql-database) | This sample creates a free Azure web app and a database in Azure SQL Database at the "Basic" service level.| -| [Azure Web App and Redis Cache with SQL Database](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.web/web-app-redis-cache-sql-database) | This template creates a web app, Redis Cache, and database in the same resource group and creates two connection strings in the web app for the database and Redis Cache.| -| [Import data from Blob storage using ADF V2](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.datafactory/data-factory-v2-blob-to-sql-copy) | This Azure Resource Manager template creates an instance of Azure Data Factory V2 that copies data from Azure Blob storage to SQL Database.| -| [HDInsight cluster with a database](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.hdinsight/hdinsight-linux-with-sql-database) | This template allows you to 
create an HDInsight cluster, a logical SQL server, a database, and two tables. This template is used by the [Use Sqoop with Hadoop in HDInsight article](../../hdinsight/hadoop/hdinsight-use-sqoop.md). | -| [Azure Logic App that runs a SQL Stored Procedure on a schedule](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.logic/logic-app-sql-proc) | This template allows you to create a logic app that will run a SQL stored procedure on schedule. Any arguments for the procedure can be put into the body section of the template.| -| [Provision server with Azure AD-only authentication enabled](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sql/sql-logical-server-aad-only-auth) | This template creates a SQL logical server with an Azure AD admin set for the server and Azure AD-only authentication enabled. | - -## [Azure SQL Managed Instance](#tab/managed-instance) - -The following table includes links to Azure Resource Manager templates for Azure SQL Managed Instance. - -|Link|Description| -|---|---| -| [SQL Managed Instance in a new VNet](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sql/sqlmi-new-vnet) | This Azure Resource Manager template creates a new configured Azure virtual network and managed instance in the virtual network. | -| [Network environment for SQL Managed Instance](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sql/sql-managed-instance-azure-environment) | This deployment will create a configured Azure virtual network with two subnets, one that will be dedicated to your managed instances and another where you can place other resources (for example VMs, App Service environments, etc.). This template will create a properly configured networking environment where you can deploy managed instances. 
| -| [SQL Managed Instance with P2S connection](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sql/sqlmi-new-vnet-w-point-to-site-vpn) | This deployment will create an Azure virtual network with two subnets, `ManagedInstance` and `GatewaySubnet`. SQL Managed Instance will be deployed in the ManagedInstance subnet. A virtual network gateway will be created in the `GatewaySubnet` subnet and configured for Point-to-Site VPN connection. | -| [SQL Managed Instance with a virtual machine](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sql/sqlmi-new-vnet-w-jumpbox) | This deployment will create an Azure virtual network with two subnets, `ManagedInstance` and `Management`. SQL Managed Instance will be deployed in the `ManagedInstance` subnet. A virtual machine with the latest version of SQL Server Management Studio (SSMS) will be deployed in the `Management` subnet. | -| [SQL Managed Instance with diagnostic logs enabled](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sql/sqlmi-new-vnet-w-diagnostic-settings) | This deployment creates an Azure Virtual Network with `ManagedInstance` subnet and deploys a Managed Instance inside with enabled diagnostic logs. It will also deploy event hub, diagnostic workspace and storage account for the purpose of sending and storing instance diagnostic logs. | - ---- diff --git a/articles/azure-sql/database/audit-log-format.md b/articles/azure-sql/database/audit-log-format.md deleted file mode 100644 index 10461d129dd68..0000000000000 --- a/articles/azure-sql/database/audit-log-format.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: SQL Database audit log format -description: Understand how Azure SQL Database audit logs are structured. 
-services: sql-database -ms.service: sql-database -ms.subservice: security -ms.topic: reference -author: sravanisaluru -ms.author: srsaluru -ms.date: "03/23/2022" -ms.reviewer: kendralittle, vanto, mathoma -ms.custom: sqldbrb=1 ---- - -# SQL Database audit log format - -[!INCLUDE[appliesto-sqldb-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -[Azure SQL Database auditing](auditing-overview.md) tracks database events and writes them to an audit log in your Azure storage account, or sends them to Event Hub or Log Analytics for downstream processing and analysis. - -## Naming conventions - -### Blob audit - -Audit logs stored in Azure Blob storage are stored in a container named `sqldbauditlogs` in the Azure storage account. The directory hierarchy within the container is of the form `////`. The Blob file name format is `_.xel`, where `CreationTime` is in UTC `hh_mm_ss_ms` format, and `FileNumberInSession` is a running index in case session logs spans across multiple Blob files. - -For example, for database `Database1` on `Server1` the following is a possible valid path: - -`Server1/Database1/SqlDbAuditing_ServerAudit_NoRetention/2019-02-03/12_23_30_794_0.xel` - -[Read-only Replicas](read-scale-out.md) audit logs are stored in the same container. The directory hierarchy within the container is of the form `////RO/`. The Blob file name shares the same format. The Audit Logs of Read-only Replicas are stored in the same container. - - -### Event Hub - -Audit events are written to the namespace and event hub that was defined during auditing configuration, and are captured in the body of [Apache Avro](https://avro.apache.org/) events and stored using JSON formatting with UTF-8 encoding. To read the audit logs, you can use [Avro Tools](../../event-hubs/event-hubs-capture-overview.md#use-avro-tools) or similar tools that process this format. 
- -### Log Analytics - -Audit events are written to Log Analytics workspace defined during auditing configuration, to the `AzureDiagnostics` table with the category `SQLSecurityAuditEvents`. For additional useful information about Log Analytics search language and commands, see [Log Analytics search reference](../../azure-monitor/logs/log-query-overview.md). - -## Audit log fields - -| Name (blob) | Name (Event Hubs/Log Analytics) | Description | Blob type | Event Hubs/Log Analytics type | -|-------------|---------------------------------|-------------|-----------|-------------------------------| -| action_id | action_id_s | ID of the action | varchar(4) | string | -| action_name | action_name_s | Name of the action | N/A | string | -| additional_information | additional_information_s | Any additional information about the event, stored as XML | nvarchar(4000) | string | -| affected_rows | affected_rows_d | Number of rows affected by the query | bigint | int | -| application_name | application_name_s| Name of client application | nvarchar(128) | string | -| audit_schema_version | audit_schema_version_d | Always 1 | int | int | -| class_type | class_type_s | Type of auditable entity that the audit occurs on | varchar(2) | string | -| class_type_desc | class_type_description_s | Description of auditable entity that the audit occurs on | N/A | string | -| client_ip | client_ip_s | Source IP of the client application | nvarchar(128) | string | -| connection_id | N/A | ID of the connection in the server | GUID | N/A | -| data_sensitivity_information | data_sensitivity_information_s | Information types and sensitivity labels returned by the audited query, based on the classified columns in the database. 
Learn more about [Azure SQL Database data discover and classification](data-discovery-and-classification-overview.md) | nvarchar(4000) | string | -| database_name | database_name_s | The database context in which the action occurred | sysname | string | -| database_principal_id | database_principal_id_d | ID of the database user context that the action is performed in | int | int | -| database_principal_name | database_principal_name_s | Name of the database user context in which the action is performed | sysname | string | -| duration_milliseconds | duration_milliseconds_d | Query execution duration in milliseconds | bigint | int | -| event_time | event_time_t | Date and time when the auditable action is fired | datetime2 | datetime | -| host_name | N/A | Client host name | string | N/A | -| is_column_permission | is_column_permission_s | Flag indicating if this is a column level permission. 1 = true, 0 = false | bit | string | -| N/A | is_server_level_audit_s | Flag indicating if this audit is at the server level | N/A | string | -| object_ id | object_id_d | The ID of the entity on which the audit occurred. This includes the : server objects, databases, database objects, and schema objects. 0 if the entity is the server itself or if the audit is not performed at an object level | int | int | -| object_name | object_name_s | The name of the entity on which the audit occurred. This includes the : server objects, databases, database objects, and schema objects. 0 if the entity is the server itself or if the audit is not performed at an object level | sysname | string | -| permission_bitmask | permission_bitmask_s | When applicable, shows the permissions that were granted, denied, or revoked | varbinary(16) | string | -| response_rows | response_rows_d | Number of rows returned in the result set | bigint | int | -| schema_name | schema_name_s | The schema context in which the action occurred. 
NULL for audits occurring outside a schema | sysname | string | -| N/A | securable_class_type_s | Securable object that maps to the class_type being audited | N/A | string | -| sequence_group_id | sequence_group_id_g | Unique identifier | varbinary | GUID | -| sequence_number | sequence_number_d | Tracks the sequence of records within a single audit record that was too large to fit in the write buffer for audits | int | int | -| server_instance_name | server_instance_name_s | Name of the server instance where the audit occurred | sysname | string | -| server_principal_id | server_principal_id_d | ID of the login context in which the action is performed | int | int | -| server_principal_name | server_principal_name_s | Current login | sysname | string | -| server_principal_sid | server_principal_sid_s | Current login SID | varbinary | string | -| session_id | session_id_d | ID of the session on which the event occurred | smallint | int | -| session_server_principal_name | session_server_principal_name_s | Server principal for session | sysname | string | -| statement | statement_s | T-SQL statement that was executed (if any) | nvarchar(4000) | string | -| succeeded | succeeded_s | Indicates whether the action that triggered the event succeeded. For events other than login and batch, this only reports whether the permission check succeeded or failed, not the operation. 1 = success, 0 = fail | bit | string | -| target_database_principal_id | target_database_principal_id_d | The database principal the GRANT/DENY/REVOKE operation is performed on. 0 if not applicable | int | int | -| target_database_principal_name | target_database_principal_name_s | Target user of action. NULL if not applicable | string | string | -| target_server_principal_id | target_server_principal_id_d | Server principal that the GRANT/DENY/REVOKE operation is performed on. 
Returns 0 if not applicable | int | int | -| target_server_principal_name | target_server_principal_name_s | Target login of action. NULL if not applicable | sysname | string | -| target_server_principal_sid | target_server_principal_sid_s | SID of target login. NULL if not applicable | varbinary | string | -| transaction_id | transaction_id_d | SQL Server only (starting with 2016) - 0 for Azure SQL Database | bigint | int | -| user_defined_event_id | user_defined_event_id_d | User defined event ID passed as an argument to sp_audit_write. NULL for system events (default) and non-zero for user-defined event. For more information, see [sp_audit_write (Transact-SQL)](/sql/relational-databases/system-stored-procedures/sp-audit-write-transact-sql) | smallint | int | -| user_defined_information | user_defined_information_s | User defined information passed as an argument to sp_audit_write. NULL for system events (default) and non-zero for user-defined event. For more information, see [sp_audit_write (Transact-SQL)](/sql/relational-databases/system-stored-procedures/sp-audit-write-transact-sql) | nvarchar(4000) | string | - -## Next steps - -Learn more about [Azure SQL Database auditing](auditing-overview.md). 
\ No newline at end of file diff --git a/articles/azure-sql/database/audit-write-storage-account-behind-vnet-firewall.md b/articles/azure-sql/database/audit-write-storage-account-behind-vnet-firewall.md deleted file mode 100644 index 485a540f0a5f3..0000000000000 --- a/articles/azure-sql/database/audit-write-storage-account-behind-vnet-firewall.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: Audit to storage account behind VNet and firewall -description: Configure auditing to write database events on a storage account behind virtual network and firewall -services: sql-database -ms.service: sql-database -ms.subservice: security -ms.topic: how-to -author: sravanisaluru -ms.author: srsaluru -ms.date: "03/23/2022" -ms.reviewer: kendralittle, vanto, mathoma -ms.custom: azure-synapse, subject-rbac-steps ---- -# Write audit to a storage account behind VNet and firewall -[!INCLUDE[appliesto-sqldb-asa](../includes/appliesto-sqldb-asa.md)] - - -Auditing for [Azure SQL Database](sql-database-paas-overview.md) and [Azure Synapse Analytics](../../synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-what-is.md) supports writing database events to an [Azure Storage account](../../storage/common/storage-account-overview.md) behind a virtual network and firewall. - -This article explains two ways to configure Azure SQL Database and Azure storage account for this option. The first uses the Azure portal, the second uses REST. - -## Background - -[Azure Virtual Network (VNet)](../../virtual-network/virtual-networks-overview.md) is the fundamental building block for your private network in Azure. VNet enables many types of Azure resources, such as Azure Virtual Machines (VM), to securely communicate with each other, the internet, and on-premises networks. VNet is similar to a traditional network in your own data center, but brings with it additional benefits of Azure infrastructure such as scale, availability, and isolation. 
- -To learn more about the VNet concepts, Best practices and many more, see [What is Azure Virtual Network](../../virtual-network/virtual-networks-overview.md). - -To learn more about how to create a virtual network, see [Quickstart: Create a virtual network using the Azure portal](../../virtual-network/quick-create-portal.md). - -## Prerequisites - -For audit to write to a storage account behind a VNet or firewall, the following prerequisites are required: - -> [!div class="checklist"] -> -> * A general-purpose v2 storage account. If you have a general-purpose v1 or blob storage account, [upgrade to a general-purpose v2 storage account](../../storage/common/storage-account-upgrade.md). For more information, see [Types of storage accounts](../../storage/common/storage-account-overview.md#types-of-storage-accounts). -> * The storage account must be on the same tenant and at the same location as the [logical SQL server](logical-servers.md) (it's OK to be on different subscriptions). -> * The Azure Storage account requires `Allow trusted Microsoft services to access this storage account`. Set this on the Storage Account **Firewalls and Virtual networks**. -> * You must have `Microsoft.Authorization/roleAssignments/write` permission on the selected storage account. For more information, see [Azure built-in roles](../../role-based-access-control/built-in-roles.md). - -## Configure in Azure portal - -Connect to [Azure portal](https://portal.azure.com) with your subscription. Navigate to the resource group and server. - -1. Click on **Auditing** under the Security heading. Select **On**. - -2. Select **Storage**. Select the storage account where logs will be saved. The storage account must comply with the requirements listed in [Prerequisites](#prerequisites). - -3. 
Open **Storage details** - - > [!NOTE] - > If the selected Storage account is behind VNet, you will see the following message: - > - >`You have selected a storage account that is behind a firewall or in a virtual network. Using this storage requires to enable 'Allow trusted Microsoft services to access this storage account' on the storage account and creates a server managed identity with 'storage blob data contributor' RBAC.` - > - >If you do not see this message, then storage account is not behind a VNet. - -4. Select the number of days for the retention period. Then click **OK**. Logs older than the retention period are deleted. - -5. Select **Save** on your auditing settings. - -You have successfully configured audit to write to a storage account behind a VNet or firewall. - -## Configure with REST commands - -As an alternative to using the Azure portal, you can use REST commands to configure audit to write database events on a storage account behind a VNet and Firewall. - -The sample scripts in this section require you to update the script before you run them. Replace the following values in the scripts: - -|Sample value|Sample description| -|:-----|:-----| -|``| Azure subscription ID| -|``| Resource group| -|``| Server name| -|``| Administrator account | -|``| Complex password for the administrator account| - -To configure SQL Audit to write events to a storage account behind a VNet or Firewall: - -1. Register your server with Azure Active Directory (Azure AD). Use either PowerShell or REST API. 
- - **PowerShell** - - ```powershell - Connect-AzAccount - Select-AzSubscription -SubscriptionId - Set-AzSqlServer -ResourceGroupName -ServerName -AssignIdentity - ``` - - [**REST API**](/rest/api/sql/servers/createorupdate): - - Sample request - - ```html - PUT https://management.azure.com/subscriptions//resourceGroups//providers/Microsoft.Sql/servers/?api-version=2015-05-01-preview - ``` - - Request body - - ```json - { - "identity": { - "type": "SystemAssigned", - }, - "properties": { - "fullyQualifiedDomainName": ".database.windows.net", - "administratorLogin": "", - "administratorLoginPassword": "", - "version": "12.0", - "state": "Ready" - } - } - ``` - -1. Assign the Storage Blob Data Contributor role to the server hosting the database that you registered with Azure Active Directory (Azure AD) in the previous step. - - For detailed steps, see [Assign Azure roles using the Azure portal](../../role-based-access-control/role-assignments-portal.md). - - > [!NOTE] - > Only members with Owner privilege can perform this step. For various Azure built-in roles, refer to [Azure built-in roles](../../role-based-access-control/built-in-roles.md). - -1. 
Configure the [server's blob auditing policy](/rest/api/sql/server%20auditing%20settings/createorupdate), without specifying a *storageAccountAccessKey*: - - Sample request - - ```html - PUT https://management.azure.com/subscriptions//resourceGroups//providers/Microsoft.Sql/servers//auditingSettings/default?api-version=2017-03-01-preview - ``` - - Request body - - ```json - { - "properties": { - "state": "Enabled", - "storageEndpoint": "https://.blob.core.windows.net" - } - } - ``` - -## Using Azure PowerShell - -- [Create or Update Database Auditing Policy (Set-AzSqlDatabaseAudit)](/powershell/module/az.sql/set-azsqldatabaseaudit) -- [Create or Update Server Auditing Policy (Set-AzSqlServerAudit)](/powershell/module/az.sql/set-azsqlserveraudit) - -## Using Azure Resource Manager template - -You can configure auditing to write database events on a storage account behind virtual network and firewall using [Azure Resource Manager](../../azure-resource-manager/management/overview.md) template, as shown in the following example: - -> [!IMPORTANT] -> In order to use storage account behind virtual network and firewall, you need to set **isStorageBehindVnet** parameter to true - -- [Deploy an Azure SQL Server with Auditing enabled to write audit logs to a blob storage](https://azure.microsoft.com/resources/templates/sql-auditing-server-policy-to-blob-storage/) - -> [!NOTE] -> The linked sample is on an external public repository and is provided 'as is', without warranty, and are not supported under any Microsoft support program/service. 
- -## Next steps - -* [Use PowerShell to create a virtual network service endpoint, and then a virtual network rule for Azure SQL Database.](scripts/vnet-service-endpoint-rule-powershell-create.md) -* [Virtual Network Rules: Operations with REST APIs](/rest/api/sql/virtualnetworkrules) -* [Use virtual network service endpoints and rules for servers](vnet-service-endpoint-rule-overview.md) diff --git a/articles/azure-sql/database/auditing-overview.md b/articles/azure-sql/database/auditing-overview.md deleted file mode 100644 index 8d0cc66eff067..0000000000000 --- a/articles/azure-sql/database/auditing-overview.md +++ /dev/null @@ -1,301 +0,0 @@ ---- -title: Azure SQL Auditing for Azure SQL Database and Azure Synapse Analytics -description: Use Azure SQL Database auditing to track database events into an audit log. -services: sql-database -ms.service: sql-database -ms.subservice: security -ms.topic: conceptual -author: sravanisaluru -ms.author: srsaluru -ms.date: "03/23/2022" -ms.reviewer: kendralittle, vanto, mathoma -ms.custom: azure-synapse, sqldbrb=1 ---- -# Auditing for Azure SQL Database and Azure Synapse Analytics -[!INCLUDE[appliesto-sqldb-asa](../includes/appliesto-sqldb-asa.md)] - -Auditing for [Azure SQL Database](sql-database-paas-overview.md) and [Azure Synapse Analytics](../../synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-what-is.md) tracks database events and writes them to an audit log in your Azure storage account, Log Analytics workspace, or Event Hubs. - -Auditing also: - -- Helps you maintain regulatory compliance, understand database activity, and gain insight into discrepancies and anomalies that could indicate business concerns or suspected security violations. - -- Enables and facilitates adherence to compliance standards, although it doesn't guarantee compliance. 
For more information about Azure programs that support standards compliance, see the [Azure Trust Center](https://gallery.technet.microsoft.com/Overview-of-Azure-c1be3942) where you can find the most current list of Azure SQL compliance certifications. - -> [!NOTE] -> For information on Azure SQL Managed Instance auditing, see the following article, [Get started with SQL Managed Instance auditing](../managed-instance/auditing-configure.md). - -## Overview - -You can use SQL Database auditing to: - -- **Retain** an audit trail of selected events. You can define categories of database actions to be audited. -- **Report** on database activity. You can use pre-configured reports and a dashboard to get started quickly with activity and event reporting. -- **Analyze** reports. You can find suspicious events, unusual activity, and trends. - -> [!IMPORTANT] -> Auditing for Azure SQL Database, Azure Synapse and Azure SQL Managed Instance is optimized for availability and performance. During very high activity, or high network load, Azure SQL Database, Azure Synapse and Azure SQL Managed Instance allow operations to proceed and may not record some audited events. - -### Auditing limitations - -- **Premium storage** is currently **not supported**. -- **Hierarchical namespace** for **Azure Data Lake Storage Gen2 storage account** is currently **not supported**. -- Enabling auditing on a paused **Azure Synapse** is not supported. To enable auditing, resume Azure Synapse. -- Auditing for **Azure Synapse SQL pools** supports default audit action groups **only**. -- When you configure the auditing in Azure SQL Server or Azure SQL Database with log destination as the storage account, the target storage account must be enabled with access to storage account keys. If the storage account is configured to use Azure AD authentication only and not configured for access key usage, the auditing cannot be configured. - - -#### Define server-level vs. 
database-level auditing policy - -An auditing policy can be defined for a specific database or as a default [server](logical-servers.md) policy in Azure (which hosts SQL Database or Azure Synapse): - -- A server policy applies to all existing and newly created databases on the server. - -- If *server auditing is enabled*, it *always applies to the database*. The database will be audited, regardless of the database auditing settings. - -- When auditing policy is defined at the database-level to a Log Analytics workspace or an Event Hub destination, the following operations will not keep the source database-level auditing policy: - - [Database copy](database-copy.md) - - [Point-in-time restore](recovery-using-backups.md) - - [Geo-replication](active-geo-replication-overview.md) (Secondary database will not have database-level auditing) - -- Enabling auditing on the database, in addition to enabling it on the server, does *not* override or change any of the settings of the server auditing. Both audits will exist side by side. In other words, the database is audited twice in parallel; once by the server policy and once by the database policy. - - > [!NOTE] - > You should avoid enabling both server auditing and database blob auditing together, unless: - > - > - You want to use a different *storage account*, *retention period* or *Log Analytics Workspace* for a specific database. - > - You want to audit event types or categories for a specific database that differ from the rest of the databases on the server. For example, you might have table inserts that need to be audited only for a specific database. - > - > Otherwise, we recommended that you enable only server-level auditing and leave the database-level auditing disabled for all databases. 
- -#### Remarks - -- Audit logs are written to **Append Blobs** in an Azure Blob storage on your Azure subscription -- Audit logs are in .xel format and can be opened by using [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms). -- To configure an immutable log store for the server or database-level audit events, follow the [instructions provided by Azure Storage](../../storage/blobs/immutable-time-based-retention-policy-overview.md#allow-protected-append-blobs-writes). Make sure you have selected **Allow additional appends** when you configure the immutable blob storage. -- You can write audit logs to a an Azure Storage account behind a VNet or firewall. For specific instructions see, [Write audit to a storage account behind VNet and firewall](audit-write-storage-account-behind-vnet-firewall.md). -- For details about the log format, hierarchy of the storage folder and naming conventions, see the [Blob Audit Log Format Reference](./audit-log-format.md). -- Auditing on [Read-Only Replicas](read-scale-out.md) is automatically enabled. For further details about the hierarchy of the storage folders, naming conventions, and log format, see the [SQL Database Audit Log Format](audit-log-format.md). -- When using Azure AD Authentication, failed logins records will *not* appear in the SQL audit log. To view failed login audit records, you need to visit the [Azure Active Directory portal](../../active-directory/reports-monitoring/concept-sign-ins.md), which logs details of these events. -- Logins are routed by the gateway to the specific instance where the database is located. In the case of AAD logins, the credentials are verified before attempting to use that user to login into the requested database. In the case of failure, the requested database is never accessed, so no auditing occurs. In the case of SQL logins, the credentials are verified on the requested data, so in this case they can be audited. 
Successful logins, which obviously reach the database, are audited in both cases. -- After you've configured your auditing settings, you can turn on the new threat detection feature and configure emails to receive security alerts. When you use threat detection, you receive proactive alerts on anomalous database activities that can indicate potential security threats. For more information, see [Getting started with threat detection](threat-detection-overview.md). -- After a database with auditing enabled is copied to another Azure SQL logical server, you may receive an email notifying you that the audit failed. This is a known issue and auditing should work as expected on the newly copied database. - -## Set up auditing for your server - -The default auditing policy includes all actions and the following set of action groups, which will audit all the queries and stored procedures executed against the database, as well as successful and failed logins: - -- BATCH_COMPLETED_GROUP -- SUCCESSFUL_DATABASE_AUTHENTICATION_GROUP -- FAILED_DATABASE_AUTHENTICATION_GROUP - -You can configure auditing for different types of actions and action groups using PowerShell, as described in the [Manage SQL Database auditing using Azure PowerShell](#manage-auditing) section. - -Azure SQL Database and Azure Synapse Audit stores 4000 characters of data for character fields in an audit record. When the **statement** or the **data_sensitivity_information** values returned from an auditable action contain more than 4000 characters, any data beyond the first 4000 characters will be **truncated and not audited**. -The following section describes the configuration of auditing using the Azure portal. - - > [!NOTE] - > - Enabling auditing on a paused dedicated SQL pool is not possible. To enable auditing, un-pause the dedicated SQL pool. Learn more about [dedicated SQL pool](../..//synapse-analytics/sql/best-practices-dedicated-sql-pool.md). 
- > - When auditing is configured to a Log Analytics workspace or to an Event Hub destination via the Azure portal or PowerShell cmdlet, a [Diagnostic Setting](../../azure-monitor/essentials/diagnostic-settings.md) is created with "SQLSecurityAuditEvents" category enabled. - -1. Go to the [Azure portal](https://portal.azure.com). -2. Navigate to **Auditing** under the Security heading in your **SQL database** or **SQL server** pane. -3. If you prefer to set up a server auditing policy, you can select the **View server settings** link on the database auditing page. You can then view or modify the server auditing settings. Server auditing policies apply to all existing and newly created databases on this server. - - ![Screenshot that shows the View server settings link highlighted on the database auditing page.](./media/auditing-overview/2_auditing_get_started_server_inherit.png) - -4. If you prefer to enable auditing on the database level, switch **Auditing** to **ON**. If server auditing is enabled, the database-configured audit will exist side-by-side with the server audit. - -5. You have multiple options for configuring where audit logs will be written. You can write logs to an Azure storage account, to a Log Analytics workspace for consumption by Azure Monitor logs, or to event hub for consumption using event hub. You can configure any combination of these options, and audit logs will be written to each. - - ![storage options](./media/auditing-overview/auditing-select-destination.png) - -### Auditing of Microsoft Support operations - -Auditing of Microsoft Support operations for Azure SQL Server allows you to audit Microsoft support engineers' operations when they need to access your server during a support request. The use of this capability, along with your auditing, enables more transparency into your workforce and allows for anomaly detection, trend visualization, and data loss prevention. 
- -To enable auditing of Microsoft Support operations navigate to **Auditing** under the Security heading in your Azure **SQL server** pane, and switch **Enable Auditing of Microsoft support operations** to **ON**. - -![Screenshot of Microsoft Support Operations](./media/auditing-overview/support-operations.png) - -To review the audit logs of Microsoft Support operations in your Log Analytics workspace, use the following query: - -```kusto -AzureDiagnostics -| where Category == "DevOpsOperationsAudit" -``` - -You have the option of choosing a different storage destination for this auditing log, or use the same auditing configuration for your server. - -:::image type="content" source="media/auditing-overview/auditing-support-operation-log-destination.png" alt-text="Screenshot of Auditing configuration for auditing Support operations"::: - -### Audit to storage destination - -To configure writing audit logs to a storage account, select **Storage** when you get to the **Auditing** section. Select the Azure storage account where logs will be saved, and then select the retention period by opening **Advanced properties**. Then click **Save**. Logs older than the retention period are deleted. - -- The default value for retention period is 0 (unlimited retention). You can change this value by moving the **Retention (Days)** slider in **Advanced properties** when configuring the storage account for auditing. - - If you change retention period from 0 (unlimited retention) to any other value, please note that retention will only apply to logs written after retention value was changed (logs written during the period when retention was set to unlimited are preserved, even after retention is enabled). - - ![storage account](./media/auditing-overview/auditing_select_storage.png) - -### Audit to Log Analytics destination - -To configure writing audit logs to a Log Analytics workspace, select **Log Analytics** and open **Log Analytics details**. 
Select the Log Analytics workspace where logs will be written and then click **OK**. If you have not created a Log Analytics workspace, see [Create a Log Analytics workspace in the Azure portal](../../azure-monitor/logs/quick-create-workspace.md). - - ![LogAnalyticsworkspace](./media/auditing-overview/auditing_select_oms.png) - -For more details about Azure Monitor Log Analytics workspace, see [Designing your Azure Monitor Logs deployment](../../azure-monitor/logs/design-logs-deployment.md) - -### Audit to Event Hub destination - -To configure writing audit logs to an event hub, select **Event Hub**. Select the event hub where logs will be written and then click **Save**. Be sure that the event hub is in the same region as your database and server. - - ![Eventhub](./media/auditing-overview/auditing_select_event_hub.png) - -## Analyze audit logs and reports - -If you chose to write audit logs to Log Analytics: - -- Use the [Azure portal](https://portal.azure.com). Open the relevant database. At the top of the database's **Auditing** page, select **View audit logs**. - - ![view audit logs](./media/auditing-overview/auditing-view-audit-logs.png) - -- Then, you have two ways to view the logs: - - Clicking on **Log Analytics** at the top of the **Audit records** page will open the Logs view in Log Analytics workspace, where you can customize the time range and the search query. - - ![open in Log Analytics workspace](./media/auditing-overview/auditing-log-analytics.png) - - Clicking **View dashboard** at the top of the **Audit records** page will open a dashboard displaying audit logs info, where you can drill down into Security Insights, Access to Sensitive Data and more. This dashboard is designed to help you gain security insights for your data. - You can also customize the time range and search query. 
- ![View Log Analytics Dashboard](media/auditing-overview/auditing-view-dashboard.png) - - ![Log Analytics Dashboard](media/auditing-overview/auditing-log-analytics-dashboard.png) - - ![Log Analytics Security Insights](media/auditing-overview/auditing-log-analytics-dashboard-data.png) - -- Alternatively, you can also access the audit logs from Log Analytics blade. Open your Log Analytics workspace and under **General** section, click **Logs**. You can start with a simple query, such as: *search "SQLSecurityAuditEvents"* to view the audit logs. - From here, you can also use [Azure Monitor logs](../../azure-monitor/logs/log-query-overview.md) to run advanced searches on your audit log data. Azure Monitor logs gives you real-time operational insights using integrated search and custom dashboards to readily analyze millions of records across all your workloads and servers. For additional useful information about Azure Monitor logs search language and commands, see [Azure Monitor logs search reference](../../azure-monitor/logs/log-query-overview.md). - -If you chose to write audit logs to Event Hub: - -- To consume audit logs data from Event Hub, you will need to set up a stream to consume events and write them to a target. For more information, see [Azure Event Hubs Documentation](../index.yml). -- Audit logs in Event Hub are captured in the body of [Apache Avro](https://avro.apache.org/) events and stored using JSON formatting with UTF-8 encoding. To read the audit logs, you can use [Avro Tools](../../event-hubs/event-hubs-capture-overview.md#use-avro-tools) or similar tools that process this format. - -If you chose to write audit logs to an Azure storage account, there are several methods you can use to view the logs: - -- Audit logs are aggregated in the account you chose during setup. You can explore audit logs by using a tool such as [Azure Storage Explorer](https://storageexplorer.com/). 
In Azure storage, auditing logs are saved as a collection of blob files within a container named **sqldbauditlogs**. For further details about the hierarchy of the storage folders, naming conventions, and log format, see the [SQL Database Audit Log Format](./audit-log-format.md). - -- Use the [Azure portal](https://portal.azure.com). Open the relevant database. At the top of the database's **Auditing** page, click **View audit logs**. - - ![view audit logs](./media/auditing-overview/auditing-view-audit-logs.png) - - **Audit records** opens, from which you'll be able to view the logs. - - - You can view specific dates by clicking **Filter** at the top of the **Audit records** page. - - You can switch between audit records that were created by the *server audit policy* and the *database audit policy* by toggling **Audit Source**. - - ![Screenshot that shows the options for viewing the audit records.]( ./media/auditing-overview/8_auditing_get_started_blob_audit_records.png) - -- Use the system function **sys.fn_get_audit_file** (T-SQL) to return the audit log data in tabular format. For more information on using this function, see [sys.fn_get_audit_file](/sql/relational-databases/system-functions/sys-fn-get-audit-file-transact-sql). - -- Use **Merge Audit Files** in SQL Server Management Studio (starting with SSMS 17): - 1. From the SSMS menu, select **File** > **Open** > **Merge Audit Files**. - - ![Screenshot that shows the Merge Audit Files menu option.](./media/auditing-overview/9_auditing_get_started_ssms_1.png) - 2. The **Add Audit Files** dialog box opens. Select one of the **Add** options to choose whether to merge audit files from a local disk or import them from Azure Storage. You are required to provide your Azure Storage details and account key. - - 3. After all files to merge have been added, click **OK** to complete the merge operation. - - 4. 
The merged file opens in SSMS, where you can view and analyze it, as well as export it to an XEL or CSV file, or to a table. - -- Use Power BI. You can view and analyze audit log data in Power BI. For more information and to access a downloadable template, see [Analyze audit log data in Power BI](https://techcommunity.microsoft.com/t5/azure-database-support-blog/sql-azure-blob-auditing-basic-power-bi-dashboard/ba-p/368895). -- Download log files from your Azure Storage blob container via the portal or by using a tool such as [Azure Storage Explorer](https://storageexplorer.com/). - - After you have downloaded a log file locally, double-click the file to open, view, and analyze the logs in SSMS. - - You can also download multiple files simultaneously via Azure Storage Explorer. To do so, right-click a specific subfolder and select **Save as** to save in a local folder. - -- Additional methods: - - - After downloading several files or a subfolder that contains log files, you can merge them locally as described in the SSMS Merge Audit Files instructions described previously. - - View blob auditing logs programmatically: [Query Extended Events Files](https://sqlscope.wordpress.com/2014/11/15/reading-extended-event-files-using-client-side-tools-only/) by using PowerShell. - -## Production practices - - -### Auditing geo-replicated databases - -With geo-replicated databases, when you enable auditing on the primary database the secondary database will have an identical auditing policy. It is also possible to set up auditing on the secondary database by enabling auditing on the **secondary server**, independently from the primary database. - -- Server-level (**recommended**): Turn on auditing on both the **primary server** as well as the **secondary server** - the primary and secondary databases will each be audited independently based on their respective server-level policy. 
-- Database-level: Database-level auditing for secondary databases can only be configured from Primary database auditing settings. - - Auditing must be enabled on the *primary database itself*, not the server. - - After auditing is enabled on the primary database, it will also become enabled on the secondary database. - - > [!IMPORTANT] - > With database-level auditing, the storage settings for the secondary database will be identical to those of the primary database, causing cross-regional traffic. We recommend that you enable only server-level auditing, and leave the database-level auditing disabled for all databases. - -### Storage key regeneration - -In production, you are likely to refresh your storage keys periodically. When writing audit logs to Azure storage, you need to resave your auditing policy when refreshing your keys. The process is as follows: - -1. Open **Advanced properties** under **Storage**. In the **Storage Access Key** box, select **Secondary**. Then click **Save** at the top of the auditing configuration page. - - ![Screenshot that shows the process for selecting a secondary storage access key.](./media/auditing-overview/5_auditing_get_started_storage_key_regeneration.png) -2. Go to the storage configuration page and regenerate the primary access key. - - ![Navigation pane](./media/auditing-overview/6_auditing_get_started_regenerate_key.png) -3. Go back to the auditing configuration page, switch the storage access key from secondary to primary, and then click **OK**. Then click **Save** at the top of the auditing configuration page. -4. Go back to the storage configuration page and regenerate the secondary access key (in preparation for the next key's refresh cycle). 
- -## Manage Azure SQL Database auditing - -### Using Azure PowerShell - -**PowerShell cmdlets (including WHERE clause support for additional filtering)**: - -- [Create or Update Database Auditing Policy (Set-AzSqlDatabaseAudit)](/powershell/module/az.sql/set-azsqldatabaseaudit) -- [Create or Update Server Auditing Policy (Set-AzSqlServerAudit)](/powershell/module/az.sql/set-azsqlserveraudit) -- [Get Database Auditing Policy (Get-AzSqlDatabaseAudit)](/powershell/module/az.sql/get-azsqldatabaseaudit) -- [Get Server Auditing Policy (Get-AzSqlServerAudit)](/powershell/module/az.sql/get-azsqlserveraudit) -- [Remove Database Auditing Policy (Remove-AzSqlDatabaseAudit)](/powershell/module/az.sql/remove-azsqldatabaseaudit) -- [Remove Server Auditing Policy (Remove-AzSqlServerAudit)](/powershell/module/az.sql/remove-azsqlserveraudit) - -For a script example, see [Configure auditing and threat detection using PowerShell](scripts/auditing-threat-detection-powershell-configure.md). - -### Using REST API - -**REST API**: - -- [Create or Update Database Auditing Policy](/rest/api/sql/database%20auditing%20settings/createorupdate) -- [Create or Update Server Auditing Policy](/rest/api/sql/2017-03-01-preview/server-auditing-settings/create-or-update) -- [Get Database Auditing Policy](/rest/api/sql/database%20auditing%20settings/get) -- [Get Server Auditing Policy](/rest/api/sql/2017-03-01-preview/server-auditing-settings/get) - -Extended policy with WHERE clause support for additional filtering: - -- [Create or Update Database *Extended* Auditing Policy](/rest/api/sql/database%20extended%20auditing%20settings/createorupdate) -- [Create or Update Server *Extended* Auditing Policy](/rest/api/sql/server%20auditing%20settings/createorupdate) -- [Get Database *Extended* Auditing Policy](/rest/api/sql/database%20extended%20auditing%20settings/get) -- [Get Server *Extended* Auditing Policy](/rest/api/sql/server%20auditing%20settings/get) - -### Using Azure CLI - -- [Manage a server's 
auditing policy](/cli/azure/sql/server/audit-policy) -- [Manage a database's auditing policy](/cli/azure/sql/db/audit-policy) - -### Using Azure Resource Manager templates - -You can manage Azure SQL Database auditing using [Azure Resource Manager](../../azure-resource-manager/management/overview.md) templates, as shown in these examples: - -- [Deploy an Azure SQL Database with Auditing enabled to write audit logs to Azure Blob storage account](https://azure.microsoft.com/resources/templates/sql-auditing-server-policy-to-blob-storage/) -- [Deploy an Azure SQL Database with Auditing enabled to write audit logs to Log Analytics](https://azure.microsoft.com/resources/templates/sql-auditing-server-policy-to-oms/) -- [Deploy an Azure SQL Database with Auditing enabled to write audit logs to Event Hubs](https://azure.microsoft.com/resources/templates/sql-auditing-server-policy-to-eventhub/) - -> [!NOTE] -> The linked samples are on an external public repository and are provided 'as is', without warranty, and are not supported under any Microsoft support program/service. - -## See also - -- Data Exposed episode [What's New in Azure SQL Auditing](/Shows/Data-Exposed/Whats-New-in-Azure-SQL-Auditing) on Channel 9. 
-- [Auditing for SQL Managed Instance](../managed-instance/auditing-configure.md) -- [Auditing for SQL Server](/sql/relational-databases/security/auditing/sql-server-audit-database-engine) diff --git a/articles/azure-sql/database/authentication-aad-configure.md b/articles/azure-sql/database/authentication-aad-configure.md deleted file mode 100644 index 737ef3d0dd45c..0000000000000 --- a/articles/azure-sql/database/authentication-aad-configure.md +++ /dev/null @@ -1,563 +0,0 @@ ---- -title: Configure Azure Active Directory authentication -titleSuffix: Azure SQL Database & SQL Managed Instance & Azure Synapse Analytics -description: Learn how to connect to SQL Database, SQL Managed Instance, and Azure Synapse Analytics by using Azure Active Directory authentication, after you configure Azure AD. -services: sql-database -ms.service: sql-db-mi -ms.subservice: security -ms.custom: azure-synapse, has-adal-ref, sqldbrb=2, devx-track-azurepowershell -ms.devlang: -ms.topic: how-to -author: GithubMirek -ms.author: mireks -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 04/09/2022 ---- - -# Configure and manage Azure AD authentication with Azure SQL - -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -This article shows you how to create and populate an Azure Active Directory (Azure AD) instance, and then use Azure AD with [Azure SQL Database](sql-database-paas-overview.md), [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md), and [Azure Synapse Analytics](../../synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-what-is.md). For an overview, see [Azure Active Directory authentication](authentication-aad-overview.md). 
- -## Azure AD authentication methods - -Azure AD authentication supports the following authentication methods: - -- Azure AD cloud-only identities -- Azure AD hybrid identities that support: - - Cloud authentication with two options coupled with seamless single sign-on (SSO) - - Azure AD password hash authentication - - Azure AD pass-through authentication - - Federated authentication - -For more information on Azure AD authentication methods, and which one to choose, see [Choose the right authentication method for your Azure Active Directory hybrid identity solution](../../active-directory/hybrid/choose-ad-authn.md). - -For more information on Azure AD hybrid identities, setup, and synchronization, see: - -- Password hash authentication - [Implement password hash synchronization with Azure AD Connect sync](../../active-directory/hybrid/how-to-connect-password-hash-synchronization.md) -- Pass-through authentication - [Azure Active Directory Pass-through Authentication](../../active-directory/hybrid/how-to-connect-pta-quick-start.md) -- Federated authentication - [Deploying Active Directory Federation Services in Azure](/windows-server/identity/ad-fs/deployment/how-to-connect-fed-azure-adfs) and [Azure AD Connect and federation](../../active-directory/hybrid/how-to-connect-fed-whatis.md) - -## Create and populate an Azure AD instance - -Create an Azure AD instance and populate it with users and groups. Azure AD can be the initial Azure AD managed domain. Azure AD can also be an on-premises Active Directory Domain Services that is federated with the Azure AD. 
- -For more information, see: -- [Integrating your on-premises identities with Azure Active Directory](../../active-directory/hybrid/whatis-hybrid-identity.md) -- [Add your own domain name to Azure AD](../../active-directory/fundamentals/add-custom-domain.md) -- [Microsoft Azure now supports federation with Windows Server Active Directory](https://azure.microsoft.com/blog/windows-azure-now-supports-federation-with-windows-server-active-directory/) -- [What is Azure Active Directory?](../../active-directory/fundamentals/active-directory-whatis.md) -- [Manage Azure AD using Windows PowerShell](/powershell/module/azuread) -- [Hybrid Identity Required Ports and Protocols](../../active-directory/hybrid/reference-connect-ports.md). - -## Associate or add an Azure subscription to Azure Active Directory - -1. Associate your Azure subscription to Azure Active Directory by making the directory a trusted directory for the Azure subscription hosting the database. For details, see [Associate or add an Azure subscription to your Azure Active Directory tenant](../../active-directory/fundamentals/active-directory-how-subscriptions-associated-directory.md). - -2. Use the directory switcher in the Azure portal to switch to the subscription associated with domain. - - > [!IMPORTANT] - > Every Azure subscription has a trust relationship with an Azure AD instance. This means that it trusts that directory to authenticate users, services, and devices. Multiple subscriptions can trust the same directory, but a subscription trusts only one directory. This trust relationship that a subscription has with a directory is unlike the relationship that a subscription has with all other resources in Azure (websites, databases, and so on), which are more like child resources of a subscription. If a subscription expires, then access to those other resources associated with the subscription also stops. 
But the directory remains in Azure, and you can associate another subscription with that directory and continue to manage the directory users. For more information about resources, see [Understanding resource access in Azure](../../active-directory/external-identities/add-users-administrator.md). To learn more about this trusted relationship see [How to associate or add an Azure subscription to Azure Active Directory](../../active-directory/fundamentals/active-directory-how-subscriptions-associated-directory.md). - -## Azure AD admin with a server in SQL Database - -Each [server](logical-servers.md) in Azure (which hosts SQL Database or Azure Synapse) starts with a single server administrator account that is the administrator of the entire server. Create a second administrator account as an Azure AD account. This principal is created as a contained database user in the master database of the server. Administrator accounts are members of the **db_owner** role in every user database, and enter each user database as the **dbo** user. For more information about administrator accounts, see [Managing Databases and Logins](logins-create-manage.md). - -When using Azure Active Directory with geo-replication, the Azure Active Directory administrator must be configured for both the primary and the secondary servers. If a server does not have an Azure Active Directory administrator, then Azure Active Directory logins and users receive a `Cannot connect` to server error. - -> [!NOTE] -> Users that are not based on an Azure AD account (including the server administrator account) cannot create Azure AD-based users, because they do not have permission to validate proposed database users with the Azure AD. - -## Provision Azure AD admin (SQL Managed Instance) - -> [!IMPORTANT] -> Only follow these steps if you are provisioning an Azure SQL Managed Instance. This operation can only be executed by Global Administrator or a Privileged Role Administrator in Azure AD. 
-> -> In **public preview**, you can assign the **Directory Readers** role to a group in Azure AD. The group owners can then add the managed instance identity as a member of this group, which would allow you to provision an Azure AD admin for the SQL Managed Instance. For more information on this feature, see [Directory Readers role in Azure Active Directory for Azure SQL](authentication-aad-directory-readers-role.md). - -Your SQL Managed Instance needs permissions to read Azure AD to successfully accomplish tasks such as authentication of users through security group membership or creation of new users. For this to work, you need to grant the SQL Managed Instance permission to read Azure AD. You can do this using the Azure portal or PowerShell. - -### Azure portal - -To grant your SQL Managed Instance Azure AD read permission using the Azure portal, log in as Global Administrator in Azure AD and follow these steps: - -1. In the [Azure portal](https://portal.azure.com), in the upper-right corner select your account, and then choose **Switch directories** to confirm which Active Directory is currently your active directory. Switch directories, if necessary. - - :::image type="content" source="media/authentication-aad-configure/switch-directory.png" alt-text="Screenshot of the Azure portal showing where to switch your directory"::: - -2. Choose the correct Active Directory as the default Azure AD. - - This step links the subscription associated with Active Directory to the SQL Managed Instance, making sure that the same subscription is used for both the Azure AD instance and the SQL Managed Instance. - -3. Navigate to the SQL Managed Instance you want to use for Azure AD integration. - - ![Screenshot of the Azure portal showing the Active Directory admin page open for the selected SQL managed instance.](./media/authentication-aad-configure/active-directory-pane.png) - -4. 
Select the banner on top of the Active Directory admin page and grant permission to the current user. - - :::image type="content" source="./media/authentication-aad-configure/grant-permissions.png" alt-text="Screenshot of the dialog for granting permissions to a SQL managed instance for accessing Active Directory. The Grant permissions button is selected."::: - -5. After the operation succeeds, the following notification will show up in the top-right corner: - - :::image type="content" source="./media/authentication-aad-configure/success.png" alt-text="Screenshot of a notification confirming that active directory read permissions have been successfully updated for the managed instance."::: - -6. Now you can choose your Azure AD admin for your SQL Managed Instance. For that, on the Active Directory admin page, select **Set admin** command. - - :::image type="content" source="./media/authentication-aad-configure/set-admin.png" alt-text="Screenshot showing the Set admin command highlighted on the Active Directory admin page for the selected SQL managed instance."::: - -7. On the Azure AD admin page, search for a user, select the user or group to be an administrator, and then select **Select**. - - The Active Directory admin page shows all members and groups of your Active Directory. Users or groups that are grayed out can't be selected because they aren't supported as Azure AD administrators. See the list of supported admins in [Azure AD Features and Limitations](authentication-aad-overview.md#azure-ad-features-and-limitations). Azure role-based access control (Azure RBAC) applies only to the Azure portal and isn't propagated to SQL Database, SQL Managed Instance, or Azure Synapse. - - :::image type="content" source="./media/authentication-aad-configure/add-azure-active-directory-admin.png" alt-text="Add Azure Active Directory admin"::: - -8. At the top of the Active Directory admin page, select **Save**. 
- - :::image type="content" source="./media/authentication-aad-configure/save.png" alt-text="Screenshot of the Active Directory admin page with the Save button in the top row next to the Set admin and Remove admin buttons."::: - - The process of changing the administrator may take several minutes. Then the new administrator appears in the Active Directory admin box. - - For Azure AD users and groups, the **Object ID** is displayed next to the admin name. For applications (service principals), the **Application ID** is displayed. - -After provisioning an Azure AD admin for your SQL Managed Instance, you can begin to create Azure AD server principals (logins) with the [CREATE LOGIN](/sql/t-sql/statements/create-login-transact-sql?view=azuresqldb-mi-current&preserve-view=true) syntax. For more information, see [SQL Managed Instance overview](../managed-instance/sql-managed-instance-paas-overview.md#azure-active-directory-integration). - -> [!TIP] -> To later remove an Admin, at the top of the Active Directory admin page, select **Remove admin**, and then select **Save**. - -### PowerShell - -To grant your SQL Managed Instance Azure AD read permission by using the PowerShell, run this script: - -```powershell -# Gives Azure Active Directory read permission to a Service Principal representing the SQL Managed Instance. -# Can be executed only by a "Global Administrator" or "Privileged Role Administrator" type of user. 
- -$aadTenant = "" # Enter your tenant ID -$managedInstanceName = "MyManagedInstance" - -# Get Azure AD role "Directory Readers" and create if it doesn't exist -$roleName = "Directory Readers" -$role = Get-AzureADDirectoryRole | Where-Object {$_.displayName -eq $roleName} -if ($role -eq $null) { - # Instantiate an instance of the role template - $roleTemplate = Get-AzureADDirectoryRoleTemplate | Where-Object {$_.displayName -eq $roleName} - Enable-AzureADDirectoryRole -RoleTemplateId $roleTemplate.ObjectId - $role = Get-AzureADDirectoryRole | Where-Object {$_.displayName -eq $roleName} -} - -# Get service principal for your SQL Managed Instance -$roleMember = Get-AzureADServicePrincipal -SearchString $managedInstanceName -$roleMember.Count -if ($roleMember -eq $null) { - Write-Output "Error: No Service Principals with name '$($managedInstanceName)', make sure that managedInstanceName parameter was entered correctly." - exit -} -if (-not ($roleMember.Count -eq 1)) { - Write-Output "Error: More than one service principal with name pattern '$($managedInstanceName)'" - Write-Output "Dumping selected service principals...." - $roleMember - exit -} - -# Check if service principal is already member of readers role -$allDirReaders = Get-AzureADDirectoryRoleMember -ObjectId $role.ObjectId -$selDirReader = $allDirReaders | where{$_.ObjectId -match $roleMember.ObjectId} - -if ($selDirReader -eq $null) { - # Add principal to readers role - Write-Output "Adding service principal '$($managedInstanceName)' to 'Directory Readers' role..." - Add-AzureADDirectoryRoleMember -ObjectId $role.ObjectId -RefObjectId $roleMember.ObjectId - Write-Output "'$($managedInstanceName)' service principal added to 'Directory Readers' role..." 
- - #Write-Output "Dumping service principal '$($managedInstanceName)':" - #$allDirReaders = Get-AzureADDirectoryRoleMember -ObjectId $role.ObjectId - #$allDirReaders | where{$_.ObjectId -match $roleMember.ObjectId} -} -else { - Write-Output "Service principal '$($managedInstanceName)' is already member of 'Directory Readers' role'." -} -``` - -### PowerShell for SQL Managed Instance - -# [PowerShell](#tab/azure-powershell) - -To run PowerShell cmdlets, you need to have Azure PowerShell installed and running. For detailed information, see [How to install and configure Azure PowerShell](/powershell/azure/). - -> [!IMPORTANT] -> The PowerShell Azure Resource Manager (RM) module is still supported by Azure SQL Managed Instance, but all future development is for the Az.Sql module. The AzureRM module will continue to receive bug fixes until at least December 2020. The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. For more about their compatibility, see [Introducing the new Azure PowerShell Az module](/powershell/azure/new-azureps-module-az). - -To provision an Azure AD admin, execute the following Azure PowerShell commands: - -- Connect-AzAccount -- Select-AzSubscription - -The cmdlets used to provision and manage Azure AD admin for your SQL Managed Instance are listed in the following table: - -| Cmdlet name | Description | -| --- | --- | -| [Set-AzSqlInstanceActiveDirectoryAdministrator](/powershell/module/az.sql/set-azsqlinstanceactivedirectoryadministrator) |Provisions an Azure AD administrator for the SQL Managed Instance in the current subscription. (Must be from the current subscription)| -| [Remove-AzSqlInstanceActiveDirectoryAdministrator](/powershell/module/az.sql/remove-azsqlinstanceactivedirectoryadministrator) |Removes an Azure AD administrator for the SQL Managed Instance in the current subscription. 
| -| [Get-AzSqlInstanceActiveDirectoryAdministrator](/powershell/module/az.sql/get-azsqlinstanceactivedirectoryadministrator) |Returns information about an Azure AD administrator for the SQL Managed Instance in the current subscription.| - -The following command gets information about an Azure AD administrator for a SQL Managed Instance named ManagedInstance01 that is associated with a resource group named ResourceGroup01. - -```powershell -Get-AzSqlInstanceActiveDirectoryAdministrator -ResourceGroupName "ResourceGroup01" -InstanceName "ManagedInstance01" -``` - -The following command provisions an Azure AD administrator group named DBAs for the SQL Managed Instance named ManagedInstance01. This server is associated with resource group ResourceGroup01. - -```powershell -Set-AzSqlInstanceActiveDirectoryAdministrator -ResourceGroupName "ResourceGroup01" -InstanceName "ManagedInstance01" -DisplayName "DBAs" -ObjectId "40b79501-b343-44ed-9ce7-da4c8cc7353b" -``` - -The following command removes the Azure AD administrator for the SQL Managed Instance named ManagedInstanceName01 associated with the resource group ResourceGroup01. - -```powershell -Remove-AzSqlInstanceActiveDirectoryAdministrator -ResourceGroupName "ResourceGroup01" -InstanceName "ManagedInstanceName01" -Confirm -PassThru -``` - -# [Azure CLI](#tab/azure-cli) - -You can also provision an Azure AD admin for the SQL Managed Instance by calling the following CLI commands: - -| Command | Description | -| --- | --- | -|[az sql mi ad-admin create](/cli/azure/sql/mi/ad-admin#az-sql-mi-ad-admin-create) | Provisions an Azure Active Directory administrator for the SQL Managed Instance (must be from the current subscription). | -|[az sql mi ad-admin delete](/cli/azure/sql/mi/ad-admin#az-sql-mi-ad-admin-delete) | Removes an Azure Active Directory administrator for the SQL Managed Instance. 
| -|[az sql mi ad-admin list](/cli/azure/sql/mi/ad-admin#az-sql-mi-ad-admin-list) | Returns information about an Azure Active Directory administrator currently configured for the SQL Managed Instance. | -|[az sql mi ad-admin update](/cli/azure/sql/mi/ad-admin#az-sql-mi-ad-admin-update) | Updates the Active Directory administrator for the SQL Managed Instance. | - -For more information about CLI commands, see [az sql mi](/cli/azure/sql/mi). - -* * * - -## Provision Azure AD admin (SQL Database) - -> [!IMPORTANT] -> Only follow these steps if you are provisioning a [server](logical-servers.md) for SQL Database or Azure Synapse. - -The following two procedures show you how to provision an Azure Active Directory administrator for your server in the Azure portal and by using PowerShell. - -### Azure portal - -1. In the [Azure portal](https://portal.azure.com/), in the upper-right corner, select your connection to drop down a list of possible Active Directories. Choose the correct Active Directory as the default Azure AD. This step links the subscription-associated Active Directory with server making sure that the same subscription is used for both Azure AD and the server. - -2. Search for and select **SQL server**. - - :::image type="content" source="./media/authentication-aad-configure/search-for-and-select-sql-servers.png" alt-text="Search for and select SQL servers"::: - - >[!NOTE] - > On this page, before you select **SQL servers**, you can select the **star** next to the name to *favorite* the category and add **SQL servers** to the left navigation bar. - -3. On the **SQL Server** page, select **Active Directory admin**. - -4. In the **Active Directory admin** page, select **Set admin**. - - :::image type="content" source="./media/authentication-aad-configure/sql-servers-set-active-directory-admin.png" alt-text="SQL servers set Active Directory admin"::: - -5. 
In the **Add admin** page, search for a user, select the user or group to be an administrator, and then select **Select**. The Active Directory admin page shows all members and groups of your Active Directory. Users or groups that are grayed out cannot be selected because they are not supported as Azure AD administrators. See the list of supported admins in the **Azure AD Features and Limitations** section of [Use Azure Active Directory Authentication for authentication with SQL Database or Azure Synapse](authentication-aad-overview.md). Azure role-based access control (Azure RBAC) applies only to the portal and is not propagated to SQL Server. - - :::image type="content" source="./media/authentication-aad-configure/select-azure-active-directory-admin.png" alt-text="Select Azure Active Directory admin"::: - -6. At the top of the **Active Directory admin** page, select **Save**. - - :::image type="content" source="./media/authentication-aad-configure/save-admin.png" alt-text="save admin"::: - - For Azure AD users and groups, the **Object ID** is displayed next to the admin name. For applications (service principals), the **Application ID** is displayed. - -The process of changing the administrator may take several minutes. Then the new administrator appears in the **Active Directory admin** box. - - > [!NOTE] - > When setting up the Azure AD admin, the new admin name (user or group) cannot already be present in the virtual master database as a server authentication user. If present, the Azure AD admin setup will fail; rolling back its creation and indicating that such an admin (name) already exists. Since such a server authentication user is not part of the Azure AD, any effort to connect to the server using Azure AD authentication fails. - -To later remove an Admin, at the top of the **Active Directory admin** page, select **Remove admin**, and then select **Save**. 
- -### PowerShell for SQL Database and Azure Synapse - -# [PowerShell](#tab/azure-powershell) - -To run PowerShell cmdlets, you need to have Azure PowerShell installed and running. For detailed information, see [How to install and configure Azure PowerShell](/powershell/azure/). To provision an Azure AD admin, execute the following Azure PowerShell commands: - -- Connect-AzAccount -- Select-AzSubscription - -Cmdlets used to provision and manage Azure AD admin for SQL Database and Azure Synapse: - -| Cmdlet name | Description | -| --- | --- | -| [Set-AzSqlServerActiveDirectoryAdministrator](/powershell/module/az.sql/set-azsqlserveractivedirectoryadministrator) |Provisions an Azure Active Directory administrator for the server hosting SQL Database or Azure Synapse. (Must be from the current subscription) | -| [Remove-AzSqlServerActiveDirectoryAdministrator](/powershell/module/az.sql/remove-azsqlserveractivedirectoryadministrator) |Removes an Azure Active Directory administrator for the server hosting SQL Database or Azure Synapse.| -| [Get-AzSqlServerActiveDirectoryAdministrator](/powershell/module/az.sql/get-azsqlserveractivedirectoryadministrator) |Returns information about an Azure Active Directory administrator currently configured for the server hosting SQL Database or Azure Synapse. | - -Use PowerShell command get-help to see more information for each of these commands. For example, `get-help Set-AzSqlServerActiveDirectoryAdministrator`. - -The following script provisions an Azure AD administrator group named **DBA_Group** (object ID `40b79501-b343-44ed-9ce7-da4c8cc7353f`) for the **demo_server** server in a resource group named **Group-23**: - -```powershell -Set-AzSqlServerActiveDirectoryAdministrator -ResourceGroupName "Group-23" -ServerName "demo_server" -DisplayName "DBA_Group" -``` - -The **DisplayName** input parameter accepts either the Azure AD display name or the User Principal Name. 
For example, ``DisplayName="John Smith"`` and ``DisplayName="johns@contoso.com"``. For Azure AD groups only the Azure AD display name is supported. - -> [!NOTE] -> The Azure PowerShell command `Set-AzSqlServerActiveDirectoryAdministrator` does not prevent you from provisioning Azure AD admins for unsupported users. An unsupported user can be provisioned, but can not connect to a database. - -The following example uses the optional **ObjectID**: - -```powershell -Set-AzSqlServerActiveDirectoryAdministrator -ResourceGroupName "Group-23" -ServerName "demo_server" ` - -DisplayName "DBA_Group" -ObjectId "40b79501-b343-44ed-9ce7-da4c8cc7353f" -``` - -> [!NOTE] -> The Azure AD **ObjectID** is required when the **DisplayName** is not unique. To retrieve the **ObjectID** and **DisplayName** values, use the Active Directory section of Azure Classic Portal, and view the properties of a user or group. - -The following example returns information about the current Azure AD admin for the server: - -```powershell -Get-AzSqlServerActiveDirectoryAdministrator -ResourceGroupName "Group-23" -ServerName "demo_server" | Format-List -``` - -The following example removes an Azure AD administrator: - -```powershell -Remove-AzSqlServerActiveDirectoryAdministrator -ResourceGroupName "Group-23" -ServerName "demo_server" -``` - -# [Azure CLI](#tab/azure-cli) - -You can provision an Azure AD admin by calling the following CLI commands: - -| Command | Description | -| --- | --- | -|[az sql server ad-admin create](/cli/azure/sql/server/ad-admin#az-sql-server-ad-admin-create) | Provisions an Azure Active Directory administrator for the server hosting SQL Database or Azure Synapse. (Must be from the current subscription) | -|[az sql server ad-admin delete](/cli/azure/sql/server/ad-admin#az-sql-server-ad-admin-delete) | Removes an Azure Active Directory administrator for the server hosting SQL Database or Azure Synapse. 
| -|[az sql server ad-admin list](/cli/azure/sql/server/ad-admin#az-sql-server-ad-admin-list) | Returns information about an Azure Active Directory administrator currently configured for the server hosting SQL Database or Azure Synapse. | -|[az sql server ad-admin update](/cli/azure/sql/server/ad-admin#az-sql-server-ad-admin-update) | Updates the Active Directory administrator for the server hosting SQL Database or Azure Synapse. | - -For more information about CLI commands, see [az sql server](/cli/azure/sql/server). - -* * * - -> [!NOTE] -> You can also provision an Azure Active Directory Administrator by using the REST APIs. For more information, see [Service Management REST API Reference and Operations for Azure SQL Database Operations for Azure SQL Database](/rest/api/sql/) - -## Configure your client computers - -> [!NOTE] -> [System.Data.SqlClient](/dotnet/api/system.data.sqlclient) uses the Azure Active Directory Authentication Library (ADAL), which will be deprecated. If you're using the [System.Data.SqlClient](/dotnet/api/system.data.sqlclient) namespace for Azure Active Directory authentication, migrate applications to [Microsoft.Data.SqlClient](/sql/connect/ado-net/introduction-microsoft-data-sqlclient-namespace) and the [Microsoft Authentication Library (MSAL)](../../active-directory/develop/msal-migration.md). For more information about using Azure AD authentication with SqlClient, see [Using Azure Active Directory authentication with SqlClient](/sql/connect/ado-net/sql/azure-active-directory-authentication). -> -> SSMS and SSDT still uses the Azure Active Directory Authentication Library (ADAL). If you want to continue using *ADAL.DLL* in your applications, you can use the links in this section to install the latest SSMS, ODBC, and OLE DB driver that contains the latest *ADAL.DLL* library. 
- -On all client machines, from which your applications or users connect to SQL Database or Azure Synapse using Azure AD identities, you must install the following software: - -- .NET Framework 4.6 or later from [https://msdn.microsoft.com/library/5a4x27ek.aspx](/dotnet/framework/install/guide-for-developers). -- [Microsoft Authentication Library (MSAL)](../../active-directory/develop/msal-migration.md) or Azure Active Directory Authentication Library for SQL Server (*ADAL.DLL*). Below are the download links to install the latest SSMS, ODBC, and OLE DB driver that contains the *ADAL.DLL* library. - - [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) - - [ODBC Driver 17 for SQL Server](/sql/connect/odbc/download-odbc-driver-for-sql-server?view=sql-server-ver15&preserve-view=true) - - [OLE DB Driver 18 for SQL Server](/sql/connect/oledb/download-oledb-driver-for-sql-server?view=sql-server-ver15&preserve-view=true) - -You can meet these requirements by: - -- Installing the latest version of [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) or [SQL Server Data Tools](/sql/ssdt/download-sql-server-data-tools-ssdt) meets the .NET Framework 4.6 requirement. - - SSMS installs the x86 version of *ADAL.DLL*. - - SSDT installs the amd64 version of *ADAL.DLL*. - - The latest Visual Studio from [Visual Studio Downloads](https://www.visualstudio.com/downloads/download-visual-studio-vs) meets the .NET Framework 4.6 requirement, but does not install the required amd64 version of *ADAL.DLL*. - -## Create contained users mapped to Azure AD identities - -Because SQL Managed Instance supports Azure AD server principals (logins), using contained database users is not required. Azure AD server principals (logins) enable you to create logins from Azure AD users, groups, or applications. 
This means that you can authenticate with your SQL Managed Instance by using the Azure AD server login rather than a contained database user. For more information, see [SQL Managed Instance overview](../managed-instance/sql-managed-instance-paas-overview.md#azure-active-directory-integration). For syntax on creating Azure AD server principals (logins), see [CREATE LOGIN](/sql/t-sql/statements/create-login-transact-sql?view=azuresqldb-mi-current&preserve-view=true). - -However, using Azure Active Directory authentication with SQL Database and Azure Synapse requires using contained database users based on an Azure AD identity. A contained database user does not have a login in the master database, and maps to an identity in Azure AD that is associated with the database. The Azure AD identity can be either an individual user account or a group. For more information about contained database users, see [Contained Database Users- Making Your Database Portable](/sql/relational-databases/security/contained-database-users-making-your-database-portable). - -> [!NOTE] -> Database users (with the exception of administrators) cannot be created using the Azure portal. Azure roles are not propagated to the database in SQL Database, the SQL Managed Instance, or Azure Synapse. Azure roles are used for managing Azure Resources, and do not apply to database permissions. For example, the **SQL Server Contributor** role does not grant access to connect to the database in SQL Database, the SQL Managed Instance, or Azure Synapse. The access permission must be granted directly in the database using Transact-SQL statements. - -> [!WARNING] -> Special characters like colon `:` or ampersand `&` when included as user names in the T-SQL `CREATE LOGIN` and `CREATE USER` statements are not supported. 
- -> [!IMPORTANT] -> Azure AD users and service principals (Azure AD applications) that are members of more than 2048 Azure AD security groups are not supported to log in to the database in SQL Database, Managed Instance, or Azure Synapse. - - -To create an Azure AD-based contained database user (other than the server administrator that owns the database), connect to the database with an Azure AD identity, as a user with at least the **ALTER ANY USER** permission. Then use the following Transact-SQL syntax: - -```sql -CREATE USER [Azure_AD_principal_name] FROM EXTERNAL PROVIDER; -``` - -*Azure_AD_principal_name* can be the user principal name of an Azure AD user or the display name for an Azure AD group. - -**Examples:** -To create a contained database user representing an Azure AD federated or managed domain user: - -```sql -CREATE USER [bob@contoso.com] FROM EXTERNAL PROVIDER; -CREATE USER [alice@fabrikam.onmicrosoft.com] FROM EXTERNAL PROVIDER; -``` - -To create a contained database user representing an Azure AD or federated domain group, provide the display name of a security group: - -```sql -CREATE USER [ICU Nurses] FROM EXTERNAL PROVIDER; -``` - -To create a contained database user representing an application that connects using an Azure AD token: - -```sql -CREATE USER [appName] FROM EXTERNAL PROVIDER; -``` - -> [!NOTE] -> This command requires that SQL access Azure AD (the "external provider") on behalf of the logged-in user. Sometimes, circumstances will arise that cause Azure AD to return an exception back to SQL. In these cases, the user will see SQL error 33134, which should contain the Azure AD-specific error message. Most of the time, the error will say that access is denied, or that the user must enroll in MFA to access the resource, or that access between first-party applications must be handled via preauthorization. 
In the first two cases, the issue is usually caused by Conditional Access policies that are set in the user's Azure AD tenant: they prevent the user from accessing the external provider. Updating the Conditional Access policies to allow access to the application '00000003-0000-0000-c000-000000000000' (the application ID of the Microsoft Graph API) should resolve the issue. In the case that the error says access between first-party applications must be handled via preauthorization, the issue is because the user is signed in as a service principal. The command should succeed if it is executed by a user instead. - -> [!TIP] -> You cannot directly create a user from an Azure Active Directory other than the Azure Active Directory that is associated with your Azure subscription. However, members of other Active Directories that are imported users in the associated Active Directory (known as external users) can be added to an Active Directory group in the tenant Active Directory. By creating a contained database user for that AD group, the users from the external Active Directory can gain access to SQL Database. - -For more information about creating contained database users based on Azure Active Directory identities, see [CREATE USER (Transact-SQL)](/sql/t-sql/statements/create-user-transact-sql). - -> [!NOTE] -> Removing the Azure Active Directory administrator for the server prevents any Azure AD authentication user from connecting to the server. If necessary, unusable Azure AD users can be dropped manually by a SQL Database administrator. - -> [!NOTE] -> If you receive a **Connection Timeout Expired**, you may need to set the `TransparentNetworkIPResolution` -parameter of the connection string to false. For more information, see [Connection timeout issue with .NET Framework 4.6.1 - TransparentNetworkIPResolution](/archive/blogs/dataaccesstechnologies/connection-timeout-issue-with-net-framework-4-6-1-transparentnetworkipresolution). 
- -When you create a database user, that user receives the **CONNECT** permission and can connect to that database as a member of the **PUBLIC** role. Initially the only permissions available to the user are any permissions granted to the **PUBLIC** role, or any permissions granted to any Azure AD groups that they are a member of. Once you provision an Azure AD-based contained database user, you can grant the user additional permissions, the same way as you grant permission to any other type of user. Typically grant permissions to database roles, and add users to roles. For more information, see [Database Engine Permission Basics](https://social.technet.microsoft.com/wiki/contents/articles/4433.database-engine-permission-basics.aspx). For more information about special SQL Database roles, see [Managing Databases and Logins in Azure SQL Database](logins-create-manage.md). -A federated domain user account that is imported into a managed domain as an external user, must use the managed domain identity. - -> [!NOTE] -> Azure AD users are marked in the database metadata with type E (EXTERNAL_USER) and for groups with type X (EXTERNAL_GROUPS). For more information, see [sys.database_principals](/sql/relational-databases/system-catalog-views/sys-database-principals-transact-sql). - -## Connect to the database using SSMS or SSDT - -To confirm the Azure AD administrator is properly set up, connect to the **master** database using the Azure AD administrator account. -To provision an Azure AD-based contained database user (other than the server administrator that owns the database), connect to the database with an Azure AD identity that has access to the database. - -> [!IMPORTANT] -> Support for Azure Active Directory authentication is available with [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms) starting in 2016 and [SQL Server Data Tools](/sql/ssdt/download-sql-server-data-tools-ssdt) starting in 2015. 
The August 2016 release of SSMS also includes support for Active Directory Universal Authentication, which allows administrators to require Multi-Factor Authentication using a phone call, text message, smart cards with pin, or mobile app notification. - -## Using an Azure AD identity to connect using SSMS or SSDT - -The following procedures show you how to connect to SQL Database with an Azure AD identity using SQL Server Management Studio or SQL Server Database Tools. - -### Active Directory integrated authentication - -Use this method if you are logged into Windows using your Azure Active Directory credentials from a federated domain, or a managed domain that is configured for seamless single sign-on for pass-through and password hash authentication. For more information, see [Azure Active Directory Seamless Single Sign-On](../../active-directory/hybrid/how-to-connect-sso.md). - -1. Start Management Studio or Data Tools and in the **Connect to Server** (or **Connect to Database Engine**) dialog box, in the **Authentication** box, select **Azure Active Directory - Integrated**. No password is needed or can be entered because your existing credentials will be presented for the connection. - - ![Select AD Integrated Authentication][11] - -2. Select the **Options** button, and on the **Connection Properties** page, in the **Connect to database** box, type the name of the user database you want to connect to. For more information, see the article [Multi-factor Azure AD auth](authentication-mfa-ssms-overview.md#azure-ad-domain-name-or-tenant-id-parameter) on the differences between the Connection Properties for SSMS 17.x and 18.x. - - ![Select the database name][13] - -### Active Directory password authentication - -Use this method when connecting with an Azure AD principal name using the Azure AD managed domain. You can also use it for federated accounts without access to the domain, for example, when working remotely. 
- -Use this method to authenticate to the database in SQL Database or the SQL Managed Instance with Azure AD cloud-only identity users, or those who use Azure AD hybrid identities. This method supports users who want to use their Windows credential, but their local machine is not joined with the domain (for example, using remote access). In this case, a Windows user can indicate their domain account and password, and can authenticate to the database in SQL Database, the SQL Managed Instance, or Azure Synapse. - -1. Start Management Studio or Data Tools and in the **Connect to Server** (or **Connect to Database Engine**) dialog box, in the **Authentication** box, select **Azure Active Directory - Password**. - -2. In the **User name** box, type your Azure Active Directory user name in the format **username\@domain.com**. User names must be an account from Azure Active Directory or an account from a managed or federated domain with Azure Active Directory. - -3. In the **Password** box, type your user password for the Azure Active Directory account or managed/federated domain account. - - ![Select AD Password Authentication][12] - -4. Select the **Options** button, and on the **Connection Properties** page, in the **Connect to database** box, type the name of the user database you want to connect to. (See the graphic in the previous option.) - -### Active Directory interactive authentication - -Use this method for interactive authentication with or without Multi-Factor Authentication (MFA), with password being requested interactively. This method can be used to authenticate to the database in SQL Database, the SQL Managed Instance, and Azure Synapse for Azure AD cloud-only identity users, or those who use Azure AD hybrid identities. - -For more information, see [Using multi-factor Azure AD authentication with SQL Database and Azure Synapse (SSMS support for MFA)](authentication-mfa-ssms-overview.md). 
- -## Using an Azure AD identity to connect from a client application - -The following procedures show you how to connect to a SQL Database with an Azure AD identity from a client application. - -### Active Directory integrated authentication - -To use integrated Windows authentication, your domain's Active Directory must be federated with Azure Active Directory, or should be a managed domain that is configured for seamless single sign-on for pass-through or password hash authentication. For more information, see [Azure Active Directory Seamless Single Sign-On](../../active-directory/hybrid/how-to-connect-sso.md). - -Your client application (or a service) connecting to the database must be running on a domain-joined machine under a user's domain credentials. - -To connect to a database using integrated authentication and an Azure AD identity, the Authentication keyword in the database connection string must be set to `Active Directory Integrated`. The following C# code sample uses ADO .NET. - -```csharp -string ConnectionString = @"Data Source=n9lxnyuzhv.database.windows.net; Authentication=Active Directory Integrated; Initial Catalog=testdb;"; -SqlConnection conn = new SqlConnection(ConnectionString); -conn.Open(); -``` - -The connection string keyword `Integrated Security=True` is not supported for connecting to Azure SQL Database. When making an ODBC connection, you will need to remove spaces and set Authentication to 'ActiveDirectoryIntegrated'. - -### Active Directory password authentication - -To connect to a database using Azure AD cloud-only identity user accounts, or those who use Azure AD hybrid identities, the Authentication keyword must be set to `Active Directory Password`. The connection string must contain User ID/UID and Password/PWD keywords and values. The following C# code sample uses ADO .NET. 
- -```csharp -string ConnectionString = -@"Data Source=n9lxnyuzhv.database.windows.net; Authentication=Active Directory Password; Initial Catalog=testdb; UID=bob@contoso.onmicrosoft.com; PWD=MyPassWord!"; -SqlConnection conn = new SqlConnection(ConnectionString); -conn.Open(); -``` - -Learn more about Azure AD authentication methods using the demo code samples available at [Azure AD Authentication GitHub Demo](https://github.com/Microsoft/sql-server-samples/tree/master/samples/features/security/azure-active-directory-auth). - -## Azure AD token - -This authentication method allows middle-tier services to obtain [JSON Web Tokens (JWT)](../../active-directory/develop/id-tokens.md) to connect to the database in SQL Database, the SQL Managed Instance, or Azure Synapse by obtaining a token from Azure AD. This method enables various application scenarios including service identities, service principals, and applications using certificate-based authentication. You must complete four basic steps to use Azure AD token authentication: - -1. Register your application with Azure Active Directory and get the client ID for your code. -2. Create a database user representing the application. (Completed earlier in step 6.) -3. Create a certificate on the client computer that runs the application. -4. Add the certificate as a key for your application. - -Sample connection string: - -```csharp -string ConnectionString = @"Data Source=n9lxnyuzhv.database.windows.net; Initial Catalog=testdb;"; -SqlConnection conn = new SqlConnection(ConnectionString); -conn.AccessToken = "Your JWT token"; -conn.Open(); -``` - -For more information, see [SQL Server Security Blog](/archive/blogs/sqlsecurity/token-based-authentication-support-for-azure-sql-db-using-azure-ad-auth). For information about adding a certificate, see [Get started with certificate-based authentication in Azure Active Directory](../../active-directory/authentication/active-directory-certificate-based-authentication-get-started.md). 
- -### sqlcmd - -The following statements connect using version 13.1 of sqlcmd, which is available from the [Download Center](https://www.microsoft.com/download/details.aspx?id=53591). - -> [!NOTE] -> `sqlcmd` with the `-G` command does not work with system identities, and requires a user principal login. - -```cmd -sqlcmd -S Target_DB_or_DW.testsrv.database.windows.net -G -sqlcmd -S Target_DB_or_DW.testsrv.database.windows.net -U bob@contoso.com -P MyAADPassword -G -l 30 -``` - -## Troubleshoot Azure AD authentication - -Guidance on troubleshooting issues with Azure AD authentication can be found in the following blog: - -## Next steps - -- For an overview of logins, users, database roles, and permissions in SQL Database, see [Logins, users, database roles, and user accounts](logins-create-manage.md). -- For more information about database principals, see [Principals](/sql/relational-databases/security/authentication-access/principals-database-engine). -- For more information about database roles, see [Database roles](/sql/relational-databases/security/authentication-access/database-level-roles). -- For more information about firewall rules in SQL Database, see [SQL Database firewall rules](firewall-configure.md). -- For information about how to set an Azure AD guest user as the Azure AD admin, see [Create Azure AD guest users and set as an Azure AD admin](authentication-aad-guest-users.md).
-- For information on how to use service principals with Azure SQL, see [Create Azure AD users using Azure AD applications](authentication-aad-service-principal-tutorial.md) - - - -[11]: ./media/authentication-aad-configure/active-directory-integrated.png -[12]: ./media/authentication-aad-configure/12connect-using-pw-auth2.png -[13]: ./media/authentication-aad-configure/13connect-to-db2.png \ No newline at end of file diff --git a/articles/azure-sql/database/authentication-aad-directory-readers-role-tutorial.md b/articles/azure-sql/database/authentication-aad-directory-readers-role-tutorial.md deleted file mode 100644 index a7b2341f3c812..0000000000000 --- a/articles/azure-sql/database/authentication-aad-directory-readers-role-tutorial.md +++ /dev/null @@ -1,188 +0,0 @@ ---- -title: Assign Directory Readers role to an Azure AD group and manage role assignments -description: This article guides you through enabling the Directory Readers role using Azure AD groups to manage Azure AD role assignments with Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics -ms.service: sql-db-mi -ms.subservice: security -ms.custom: azure-synapse -ms.topic: tutorial -author: GithubMirek -ms.author: mireks -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 12/15/2021 ---- - -# Tutorial: Assign Directory Readers role to an Azure AD group and manage role assignments - -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -This article guides you through creating a group in Azure Active Directory (Azure AD), and assigning that group the [**Directory Readers**](../../active-directory/roles/permissions-reference.md#directory-readers) role.
The Directory Readers permissions allow the group owners to add additional members to the group, such as a [managed identity](../../active-directory/managed-identities-azure-resources/overview.md#managed-identity-types) of [Azure SQL Database](sql-database-paas-overview.md), [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md), and [Azure Synapse Analytics](../../synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-what-is.md). This bypasses the need for a [Global Administrator](../../active-directory/roles/permissions-reference.md#global-administrator) or [Privileged Role Administrator](../../active-directory/roles/permissions-reference.md#privileged-role-administrator) to assign the Directory Readers role directly for each Azure SQL logical server identity in the tenant. - -This tutorial uses the feature introduced in [Use Azure AD groups to manage role assignments](../../active-directory/roles/groups-concept.md). - -For more information on the benefits of assigning the Directory Readers role to an Azure AD group for Azure SQL, see [Directory Readers role in Azure Active Directory for Azure SQL](authentication-aad-directory-readers-role.md). - -> [!NOTE] -> With [Microsoft Graph](/graph/overview) support for Azure SQL, the Directory Readers role can be replaced with using lower level permissions. For more information, see [User-assigned managed identity in Azure AD for Azure SQL](authentication-azure-ad-user-assigned-managed-identity.md). - -## Prerequisites - -- An Azure AD instance. For more information, see [Configure and manage Azure AD authentication with Azure SQL](authentication-aad-configure.md). -- A SQL Database, SQL Managed Instance, or Azure Synapse. - -## Directory Readers role assignment using the Azure portal - -### Create a new group and assign owners and role - -1. 
A user with [Global Administrator](../../active-directory/roles/permissions-reference.md#global-administrator) or [Privileged Role Administrator](../../active-directory/roles/permissions-reference.md#privileged-role-administrator) permissions is required for this initial setup. -1. Have the privileged user sign into the [Azure portal](https://portal.azure.com). -1. Go to the **Azure Active Directory** resource. Under **Manage**, go to **Groups**. Select **New group** to create a new group. -1. Select **Security** as the group type, and fill in the rest of the fields. Make sure that the setting **Azure AD roles can be assigned to the group** is switched to **Yes**. Then assign the Azure AD **Directory readers** role to the group. -1. Assign Azure AD users as owner(s) to the group that was created. A group owner can be a regular AD user without any Azure AD administrative role assigned. The owner should be a user that is managing your SQL Database, SQL Managed Instance, or Azure Synapse. - - :::image type="content" source="media/authentication-aad-directory-readers-role/new-group.png" alt-text="aad-new-group"::: - -1. Select **Create** - -### Checking the group that was created - -> [!NOTE] -> Make sure that the **Group Type** is **Security**. *Microsoft 365* groups are not supported for Azure SQL. - -To check and manage the group that was created, go back to the **Groups** pane in the Azure portal, and search for your group name. Additional owners and members can be added under the **Owners** and **Members** menu of the **Manage** setting after selecting your group. You can also review the **Assigned roles** for the group.
- -:::image type="content" source="media/authentication-aad-directory-readers-role/azure-ad-group-created.png" alt-text="Screenshot of a Group pane with the links that open the Settings menus for Members, Owners, and Assigned roles highlighted."::: - -### Add Azure SQL managed identity to the group - -> [!NOTE] -> We're using SQL Managed Instance for this example, but similar steps can be applied for SQL Database or Azure Synapse to achieve the same results. - -For subsequent steps, the Global Administrator or Privileged Role Administrator user is no longer needed. - -1. Log into the Azure portal as the user who manages SQL Managed Instance and is an owner of the group created earlier. - -1. Find the name of your **SQL managed instance** resource in the Azure portal. - - :::image type="content" source="media/authentication-aad-directory-readers-role/azure-ad-managed-instance.png" alt-text="Screenshot of the SQL managed instances screen with the SQL instance name ssomitest and the Subnet name ManagedInstance highlighted."::: - - During the creation of your SQL Managed Instance, an Azure identity was created for your instance. The created identity has the same name as the prefix of your SQL Managed Instance name. You can find the service principal for your SQL Managed Instance identity that was created as an Azure AD Application by following these steps: - - - Go to the **Azure Active Directory** resource. Under the **Manage** setting, select **Enterprise applications**. The **Object ID** is the identity of the instance. - - :::image type="content" source="media/authentication-aad-directory-readers-role/azure-ad-managed-instance-service-principal.png" alt-text="Screenshot of the Enterprise applications page for an Azure Active Directory resource with the Object ID of the SQL Managed instance highlighted."::: - -1. Go to the **Azure Active Directory** resource. Under **Manage**, go to **Groups**. Select the group that you created.
Under the **Managed** setting of your group, select **Members**. Select **Add members** and add your SQL Managed Instance service principal as a member of the group by searching for the name found above. - - :::image type="content" source="media/authentication-aad-directory-readers-role/azure-ad-add-managed-instance-service-principal.png" alt-text="Screenshot of the Members page for an Azure Active Directory resource with the options highlighted for adding an SQL Managed instance as a new member."::: - -> [!NOTE] -> It can take a few minutes to propagate the service principal permissions through the Azure system, and allow access to Microsoft Graph API. You may have to wait a few minutes before you provision an Azure AD admin for SQL Managed Instance. - -### Remarks - -For SQL Database and Azure Synapse, the server identity can be created during the Azure SQL logical server creation or after the server was created. For more information on how to create or set the server identity in SQL Database or Azure Synapse, see [Enable service principals to create Azure AD users](authentication-aad-service-principal.md#enable-service-principals-to-create-azure-ad-users). - -For SQL Managed Instance, the **Directory Readers** role must be assigned to managed instance identity before you can [set up an Azure AD admin for the managed instance](authentication-aad-configure.md#provision-azure-ad-admin-sql-managed-instance). - -Assigning the **Directory Readers** role to the server identity isn't required for SQL Database or Azure Synapse when setting up an Azure AD admin for the logical server. However, to enable an Azure AD object creation in SQL Database or Azure Synapse on behalf of an Azure AD application, the **Directory Readers** role is required. If the role isn't assigned to the SQL logical server identity, creating Azure AD users in Azure SQL will fail. 
For more information, see [Azure Active Directory service principal with Azure SQL](authentication-aad-service-principal.md). - -## Directory Readers role assignment using PowerShell - -> [!IMPORTANT] -> A [Global Administrator](../../active-directory/roles/permissions-reference.md#global-administrator) or [Privileged Role Administrator](../../active-directory/roles/permissions-reference.md#privileged-role-administrator) will need to run these initial steps. In addition to PowerShell, Azure AD offers Microsoft Graph API to [Create a role-assignable group in Azure AD](../../active-directory/roles/groups-create-eligible.md#microsoft-graph-api). - -1. Download the Azure AD PowerShell module using the following commands. You may need to run PowerShell as an administrator. - - ```powershell - Install-Module azuread - Import-Module azuread - #To verify that the module is ready to use, use the following command: - Get-Module azuread - ``` - -1. Connect to your Azure AD tenant. - - ```powershell - Connect-AzureAD - ``` - -1. Create a security group to assign the **Directory Readers** role. - - - `DirectoryReaderGroup`, `Directory Reader Group`, and `DirRead` can be changed according to your preference. - - ```powershell - $group = New-AzureADMSGroup -DisplayName "DirectoryReaderGroup" -Description "Directory Reader Group" -MailEnabled $False -SecurityEnabled $true -MailNickName "DirRead" -IsAssignableToRole $true - $group - ``` - -1. Assign **Directory Readers** role to the group. - - ```powershell - # Displays the Directory Readers role information - $roleDefinition = Get-AzureADMSRoleDefinition -Filter "displayName eq 'Directory Readers'" - $roleDefinition - ``` - - ```powershell - # Assigns the Directory Readers role to the group - $roleAssignment = New-AzureADMSRoleAssignment -ResourceScope '/' -RoleDefinitionId $roleDefinition.Id -PrincipalId $group.Id - $roleAssignment - ``` - -1. Assign owners to the group. - - - Replace `<username>` with the user you want to own this group.
Several owners can be added by repeating these steps. - - ```powershell - $RefObjectID = Get-AzureADUser -ObjectId "<username>" - $RefObjectID - ``` - - ```powershell - $GrOwner = Add-AzureADGroupOwner -ObjectId $group.ID -RefObjectId $RefObjectID.ObjectID - ``` - - Check owners of the group using the following command: - - ```powershell - Get-AzureADGroupOwner -ObjectId $group.ID - ``` - - You can also verify owners of the group in the [Azure portal](https://portal.azure.com). Follow the steps in [Checking the group that was created](#checking-the-group-that-was-created). - -### Assigning the service principal as a member of the group - -For subsequent steps, the Global Administrator or Privileged Role Administrator user is no longer needed. - -1. Using an owner of the group that also manages the Azure SQL resource, run the following command to connect to your Azure AD. - - ```powershell - Connect-AzureAD - ``` - -1. Assign the service principal as a member of the group that was created. - - - Replace `<ServerName>` with your Azure SQL logical server name, or your Managed Instance name.
For more information, see the section, [Add Azure SQL managed identity to the group](#add-azure-sql-managed-identity-to-the-group) - - ```powershell - # Returns the service principal of your Azure SQL resource - $miIdentity = Get-AzureADServicePrincipal -SearchString "<ServerName>" - $miIdentity - ``` - - ```powershell - # Adds the service principal to the group as a member - Add-AzureADGroupMember -ObjectId $group.ID -RefObjectId $miIdentity.ObjectId - ``` - - The following command will return the service principal Object ID indicating that it has been added to the group: - - ```powershell - Get-AzureADGroupMember -ObjectId $group.ID - ``` - -## Next steps - -- [Directory Readers role in Azure Active Directory for Azure SQL](authentication-aad-directory-readers-role.md) -- [Tutorial: Create Azure AD users using Azure AD applications](authentication-aad-service-principal-tutorial.md) -- [Configure and manage Azure AD authentication with Azure SQL](authentication-aad-configure.md) \ No newline at end of file diff --git a/articles/azure-sql/database/authentication-aad-directory-readers-role.md b/articles/azure-sql/database/authentication-aad-directory-readers-role.md deleted file mode 100644 index 09747cf15d2b3..0000000000000 --- a/articles/azure-sql/database/authentication-aad-directory-readers-role.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Directory Readers role in Azure Active Directory for Azure SQL -description: Learn about the directory reader's role in Azure AD for Azure SQL.
-ms.service: sql-db-mi -ms.subservice: security -ms.custom: azure-synapse -ms.topic: conceptual -author: GithubMirek -ms.author: mireks -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 12/15/2021 ---- - -# Directory Readers role in Azure Active Directory for Azure SQL - -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -Azure Active Directory (Azure AD) has introduced [using Azure AD groups to manage role assignments](../../active-directory/roles/groups-concept.md). This allows for Azure AD roles to be assigned to groups. - -> [!NOTE] -> With [Microsoft Graph](/graph/overview) support for Azure SQL, the Directory Readers role can be replaced with using lower level permissions. For more information, see [User-assigned managed identity in Azure AD for Azure SQL](authentication-azure-ad-user-assigned-managed-identity.md). - -When enabling a [managed identity](../../active-directory/managed-identities-azure-resources/overview.md#managed-identity-types) for Azure SQL Database, Azure SQL Managed Instance, or Azure Synapse Analytics, the Azure AD [**Directory Readers**](../../active-directory/roles/permissions-reference.md#directory-readers) role can be assigned to the identity to allow read access to the [Microsoft Graph API](/graph/overview). The managed identity of SQL Database and Azure Synapse is referred to as the server identity. The managed identity of SQL Managed Instance is referred to as the managed instance identity, and is automatically assigned when the instance is created. For more information on assigning a server identity to SQL Database or Azure Synapse, see [Enable service principals to create Azure AD users](authentication-aad-service-principal.md#enable-service-principals-to-create-azure-ad-users). 
- -The **Directory Readers** role can be used as the server or instance identity to help: - -- Create Azure AD logins for SQL Managed Instance -- Impersonate Azure AD users in Azure SQL -- Migrate SQL Server users that use Windows authentication to SQL Managed Instance with Azure AD authentication (using the [ALTER USER (Transact-SQL)](/sql/t-sql/statements/alter-user-transact-sql?view=azuresqldb-mi-current&preserve-view=true#d-map-the-user-in-the-database-to-an-azure-ad-login-after-migration) command) -- Change the Azure AD admin for SQL Managed Instance -- Allow [service principals (Applications)](authentication-aad-service-principal.md) to create Azure AD users in Azure SQL - -## Assigning the Directory Readers role - -In order to assign the [**Directory Readers**](../../active-directory/roles/permissions-reference.md#directory-readers) role to an identity, a user with [Global Administrator](../../active-directory/roles/permissions-reference.md#global-administrator) or [Privileged Role Administrator](../../active-directory/roles/permissions-reference.md#privileged-role-administrator) permissions is needed. Users who often manage or deploy SQL Database, SQL Managed Instance, or Azure Synapse may not have access to these highly privileged roles. This can often cause complications for users that create unplanned Azure SQL resources, or need help from highly privileged role members that are often inaccessible in large organizations. - -For SQL Managed Instance, the **Directory Readers** role must be assigned to managed instance identity before you can [set up an Azure AD admin for the managed instance](authentication-aad-configure.md#provision-azure-ad-admin-sql-managed-instance). - -Assigning the **Directory Readers** role to the server identity isn't required for SQL Database or Azure Synapse when setting up an Azure AD admin for the logical server. 
However, to enable an Azure AD object creation in SQL Database or Azure Synapse on behalf of an Azure AD application, the **Directory Readers** role is required. If the role isn't assigned to the SQL logical server identity, creating Azure AD users in Azure SQL will fail. For more information, see [Azure Active Directory service principal with Azure SQL](authentication-aad-service-principal.md). - -## Granting the Directory Readers role to an Azure AD group - -You can now have a [Global Administrator](../../active-directory/roles/permissions-reference.md#global-administrator) or [Privileged Role Administrator](../../active-directory/roles/permissions-reference.md#privileged-role-administrator) create an Azure AD group and assign the [**Directory Readers**](../../active-directory/roles/permissions-reference.md#directory-readers) permission to the group. This will allow access to the Microsoft Graph API for members of this group. In addition, Azure AD users who are owners of this group are allowed to assign new members for this group, including identities of the Azure SQL logical servers. - -This solution still requires a high privilege user (Global Administrator or Privileged Role Administrator) to create a group and assign users as a one time activity, but the Azure AD group owners will be able to assign additional members going forward. This eliminates the need to involve a high privilege user in the future to configure all SQL Databases, SQL Managed Instances, or Azure Synapse servers in their Azure AD tenant. 
- -## Next steps - -> [!div class="nextstepaction"] -> [Tutorial: Assign Directory Readers role to an Azure AD group and manage role assignments](authentication-aad-directory-readers-role-tutorial.md) \ No newline at end of file diff --git a/articles/azure-sql/database/authentication-aad-guest-users.md b/articles/azure-sql/database/authentication-aad-guest-users.md deleted file mode 100644 index 4e484e77d7824..0000000000000 --- a/articles/azure-sql/database/authentication-aad-guest-users.md +++ /dev/null @@ -1,151 +0,0 @@ ---- -title: Create Azure AD guest users -description: How to create Azure AD guest users and set them as Azure AD admin without using Azure AD groups in Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics -ms.service: sql-db-mi -ms.subservice: security -ms.custom: azure-synapse -ms.topic: how-to -author: GithubMirek -ms.author: mireks -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 05/10/2021 ---- - -# Create Azure AD guest users and set as an Azure AD admin - -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Guest users in Azure Active Directory (Azure AD) are users that have been imported into the current Azure AD from other Azure Active Directories, or outside of it. For example, guest users can include users from other Azure Active Directories, or from accounts like *\@outlook.com*, *\@hotmail.com*, -*\@live.com*, or *\@gmail.com*. - -This article demonstrates how to create an Azure AD guest user and set that user as an Azure AD admin for Azure SQL Managed Instance or the [logical server in Azure](logical-servers.md) used by Azure SQL Database and Azure Synapse Analytics, without having to add the guest user to a group inside Azure AD. - -## Feature description - -This feature lifts the current limitation that only allows guest users to connect to Azure SQL Database, SQL Managed Instance, or Azure Synapse Analytics when they're members of a group created in Azure AD. 
The group needed to be mapped to a user manually using the [CREATE USER (Transact-SQL)](/sql/t-sql/statements/create-user-transact-sql) statement in a given database. Once a database user has been created for the Azure AD group containing the guest user, the guest user can sign into the database using Azure Active Directory with MFA authentication. Guest users can be created and connect directly to SQL Database, SQL Managed Instance, or Azure Synapse without the requirement of adding them to an Azure AD group first, and then creating a database user for that Azure AD group. - -As part of this feature, you also have the ability to set the Azure AD guest user directly as an AD admin for the logical server or for a managed instance. The existing functionality (which allows the guest user to be part of an Azure AD group that can then be set as the Azure AD admin for the logical server or managed instance) is *not* impacted. Guest users in the database that are a part of an Azure AD group are also not impacted by this change. - -For more information about existing support for guest users using Azure AD groups, see [Using multi-factor Azure Active Directory authentication](authentication-mfa-ssms-overview.md). - -## Prerequisite - -- [Az.Sql 2.9.0](https://www.powershellgallery.com/packages/Az.Sql/2.9.0) module or higher is needed when using PowerShell to set a guest user as an Azure AD admin for the logical server or managed instance. - -## Create database user for Azure AD guest user - -Follow these steps to create a database user using an Azure AD guest user. - -### Create guest user in SQL Database and Azure Synapse - -1. Ensure that the guest user (for example, `user1@gmail.com`) is already added into your Azure AD and an Azure AD admin has been set for the database server. Having an Azure AD admin is required for Azure Active Directory authentication. - -1. 
Connect to the SQL database as the Azure AD admin or an Azure AD user with sufficient SQL permissions to create users, and run the below command on the database where the guest user needs to be added: - - ```sql - CREATE USER [user1@gmail.com] FROM EXTERNAL PROVIDER - ``` - -1. There should now be a database user created for the guest user `user1@gmail.com`. - -1. Run the below command to verify the database user got created successfully: - - ```sql - SELECT * FROM sys.database_principals - ``` - -1. Disconnect and sign into the database as the guest user `user1@gmail.com` using [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms) using the authentication method **Azure Active Directory - Universal with MFA**. For more information, see [Using multi-factor Azure Active Directory authentication](authentication-mfa-ssms-overview.md). - -### Create guest user in SQL Managed Instance - -> [!NOTE] -> SQL Managed Instance supports logins for Azure AD users, as well as Azure AD contained database users. The below steps show how to create a login and user for an Azure AD guest user in SQL Managed Instance. You can also choose to create a [contained database user](/sql/relational-databases/security/contained-database-users-making-your-database-portable) in SQL Managed Instance by using the method in the [Create guest user in SQL Database and Azure Synapse](#create-guest-user-in-sql-database-and-azure-synapse) section. - -1. Ensure that the guest user (for example, `user1@gmail.com`) is already added into your Azure AD and an Azure AD admin has been set for the SQL Managed Instance server. Having an Azure AD admin is required for Azure Active Directory authentication. - -1. 
Connect to the SQL Managed Instance server as the Azure AD admin or an Azure AD user with sufficient SQL permissions to create users, and run the following command on the `master` database to create a login for the guest user: - - ```sql - CREATE LOGIN [user1@gmail.com] FROM EXTERNAL PROVIDER - ``` - -1. There should now be a login created for the guest user `user1@gmail.com` in the `master` database. - -1. Run the below command to verify the login got created successfully: - - ```sql - SELECT * FROM sys.server_principals - ``` - -1. Run the below command on the database where the guest user needs to be added: - - ```sql - CREATE USER [user1@gmail.com] FROM LOGIN [user1@gmail.com] - ``` - -1. There should now be a database user created for the guest user `user1@gmail.com`. - -1. Disconnect and sign into the database as the guest user `user1@gmail.com` using [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms) using the authentication method **Azure Active Directory - Universal with MFA**. For more information, see [Using multi-factor Azure Active Directory authentication](authentication-mfa-ssms-overview.md). - -## Setting a guest user as an Azure AD admin - -Set the Azure AD admin using either the Azure portal, Azure PowerShell, or the Azure CLI. - -### Azure portal - -To setup an Azure AD admin for a logical server or a managed instance using the Azure portal, follow these steps: - -1. Open the [Azure portal](https://portal.azure.com). -1. Navigate to your SQL server or managed instance **Azure Active Directory** settings. -1. Select **Set Admin**. -1. In the Azure AD pop-up prompt, type the guest user, such as `guestuser@gmail.com`. -1. Select this new user, and then save the operation. - -For more information, see [Setting Azure AD admin](authentication-aad-configure.md#azure-ad-admin-with-a-server-in-sql-database). 
- - -### Azure PowerShell (SQL Database and Azure Synapse) - -To set up an Azure AD guest user for a logical server, follow these steps: - -1. Ensure that the guest user (for example, `user1@gmail.com`) is already added into your Azure AD. - -1. Run the following PowerShell command to add the guest user as the Azure AD admin for your logical server: - - - Replace `<ResourceGroupName>` with your Azure Resource Group name that contains the logical server. - - Replace `<ServerName>` with your logical server name. If your server name is `myserver.database.windows.net`, replace `<ServerName>` with `myserver`. - - Replace `<DisplayNameOfGuestUser>` with your guest user name. - - ```powershell - Set-AzSqlServerActiveDirectoryAdministrator -ResourceGroupName <ResourceGroupName> -ServerName <ServerName> -DisplayName <DisplayNameOfGuestUser> - ``` - -You can also use the Azure CLI command [az sql server ad-admin](/cli/azure/sql/server/ad-admin) to set the guest user as an Azure AD admin for your logical server. - -### Azure PowerShell (SQL Managed Instance) - -To set up an Azure AD guest user for a managed instance, follow these steps: - -1. Ensure that the guest user (for example, `user1@gmail.com`) is already added into your Azure AD. - -1. Go to the [Azure portal](https://portal.azure.com), and go to your **Azure Active Directory** resource. Under **Manage**, go to the **Users** pane. Select your guest user, and record the `Object ID`. - -1. Run the following PowerShell command to add the guest user as the Azure AD admin for your SQL Managed Instance: - - - Replace `<ResourceGroupName>` with your Azure Resource Group name that contains the SQL Managed Instance. - - Replace `<ManagedInstanceName>` with your SQL Managed Instance name. - - Replace `<DisplayNameOfGuestUser>` with your guest user name. - - Replace `<AADObjectIDOfGuestUser>` with the `Object ID` gathered earlier. - - ```powershell - Set-AzSqlInstanceActiveDirectoryAdministrator -ResourceGroupName <ResourceGroupName> -InstanceName "<ManagedInstanceName>" -DisplayName <DisplayNameOfGuestUser> -ObjectId <AADObjectIDOfGuestUser> - ``` - -You can also use the Azure CLI command [az sql mi ad-admin](/cli/azure/sql/mi/ad-admin) to set the guest user as an Azure AD admin for your managed instance.
- - -## Next steps - -- [Configure and manage Azure AD authentication with Azure SQL](authentication-aad-configure.md) -- [Using multi-factor Azure Active Directory authentication](authentication-mfa-ssms-overview.md) -- [CREATE USER (Transact-SQL)](/sql/t-sql/statements/create-user-transact-sql) \ No newline at end of file diff --git a/articles/azure-sql/database/authentication-aad-overview.md b/articles/azure-sql/database/authentication-aad-overview.md deleted file mode 100644 index 1d6235d18b7c8..0000000000000 --- a/articles/azure-sql/database/authentication-aad-overview.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -title: Azure Active Directory authentication -description: Learn about how to use Azure Active Directory for authentication with Azure SQL Database, Azure SQL Managed Instance, and Synapse SQL in Azure Synapse Analytics -services: sql-database -ms.service: sql-db-mi -ms.subservice: security -ms.custom: azure-synapse, sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: GithubMirek -ms.author: mireks -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 08/11/2021 ---- - -# Use Azure Active Directory authentication - -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -Azure Active Directory (Azure AD) authentication is a mechanism for connecting to [Azure SQL Database](sql-database-paas-overview.md), [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md), and [Synapse SQL in Azure Synapse Analytics](../../synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-what-is.md) by using identities in Azure AD. - -> [!NOTE] -> This article applies to Azure SQL Database, SQL Managed Instance, and Azure Synapse Analytics. - -With Azure AD authentication, you can centrally manage the identities of database users and other Microsoft services in one central location. Central ID management provides a single place to manage database users and simplifies permission management. 
Benefits include the following: - -- It provides an alternative to SQL Server authentication. -- It helps stop the proliferation of user identities across servers. -- It allows password rotation in a single place. -- Customers can manage database permissions using external (Azure AD) groups. -- It can eliminate storing passwords by enabling integrated Windows authentication and other forms of authentication supported by Azure Active Directory. -- Azure AD authentication uses contained database users to authenticate identities at the database level. -- Azure AD supports token-based authentication for applications connecting to SQL Database and SQL Managed Instance. -- Azure AD authentication supports: - - Azure AD cloud-only identities. - - Azure AD hybrid identities that support: - - Cloud authentication with two options coupled with seamless single sign-on (SSO) **Pass-through** authentication and **password hash** authentication. - - Federated authentication. - - For more information on Azure AD authentication methods and which one to choose, see the following article: - - [Choose the right authentication method for your Azure Active Directory hybrid identity solution](../../active-directory/hybrid/choose-ad-authn.md) - -- Azure AD supports connections from SQL Server Management Studio that use Active Directory Universal Authentication, which includes Multi-Factor Authentication. Multi-Factor Authentication includes strong authentication with a range of easy verification options — phone call, text message, smart cards with pin, or mobile app notification. For more information, see [SSMS support for Azure AD Multi-Factor Authentication with Azure SQL Database, SQL Managed Instance, and Azure Synapse](authentication-mfa-ssms-overview.md) - -- Azure AD supports similar connections from SQL Server Data Tools (SSDT) that use Active Directory Interactive Authentication. 
For more information, see [Azure Active Directory support in SQL Server Data Tools (SSDT)](/sql/ssdt/azure-active-directory) - -> [!NOTE] -> Connecting to a SQL Server instance that's running on an Azure virtual machine (VM) is not supported using Azure Active Directory or Azure Active Directory Domain Services. Use an Active Directory domain account instead. - -The configuration steps include the following procedures to configure and use Azure Active Directory authentication. - -1. Create and populate Azure AD. -2. Optional: Associate or change the active directory that is currently associated with your Azure Subscription. -3. Create an Azure Active Directory administrator. -4. Configure your client computers. -5. Create contained database users in your database mapped to Azure AD identities. -6. Connect to your database by using Azure AD identities. - -> [!NOTE] -> To learn how to create and populate Azure AD, and then configure Azure AD with Azure SQL Database, SQL Managed Instance, and Synapse SQL in Azure Synapse Analytics, see [Configure Azure AD with Azure SQL Database](authentication-aad-configure.md). - -## Trust architecture - -- Only the cloud portion of Azure AD, SQL Database, SQL Managed Instance, and Azure Synapse is considered to support Azure AD native user passwords. -- To support Windows single sign-on credentials (or user/password for Windows credential), use Azure Active Directory credentials from a federated or managed domain that is configured for seamless single sign-on for pass-through and password hash authentication. For more information, see [Azure Active Directory Seamless Single Sign-On](../../active-directory/hybrid/how-to-connect-sso.md). -- To support Federated authentication (or user/password for Windows credentials), the communication with ADFS block is required. 
- -For more information on Azure AD hybrid identities, the setup, and synchronization, see the following articles: - -- Password hash authentication - [Implement password hash synchronization with Azure AD Connect sync](../../active-directory/hybrid/how-to-connect-password-hash-synchronization.md) -- Pass-through authentication - [Azure Active Directory Pass-through Authentication](../../active-directory/hybrid/how-to-connect-pta-quick-start.md) -- Federated authentication - [Deploying Active Directory Federation Services in Azure](/windows-server/identity/ad-fs/deployment/how-to-connect-fed-azure-adfs) and [Azure AD Connect and federation](../../active-directory/hybrid/how-to-connect-fed-whatis.md) - -For a sample federated authentication with ADFS infrastructure (or user/password for Windows credentials), see the diagram below. The arrows indicate communication pathways. - -![aad auth diagram][1] - -The following diagram indicates the federation, trust, and hosting relationships that allow a client to connect to a database by submitting a token. The token is authenticated by an Azure AD, and is trusted by the database. Customer 1 can represent an Azure Active Directory with native users or an Azure AD with federated users. Customer 2 represents a possible solution including imported users, in this example coming from a federated Azure Active Directory with ADFS being synchronized with Azure Active Directory. It's important to understand that access to a database using Azure AD authentication requires that the hosting subscription is associated to the Azure AD. The same subscription must be used to create the Azure SQL Database, SQL Managed Instance, or Azure Synapse resources. - -![subscription relationship][2] - -## Administrator structure - -When using Azure AD authentication, there are two Administrator accounts: the original Azure SQL Database administrator and the Azure AD administrator. The same concepts apply to Azure Synapse. 
Only the administrator based on an Azure AD account can create the first Azure AD contained database user in a user database. The Azure AD administrator login can be an Azure AD user or an Azure AD group. When the administrator is a group account, it can be used by any group member, enabling multiple Azure AD administrators for the server. Using group account as an administrator enhances manageability by allowing you to centrally add and remove group members in Azure AD without changing the users or permissions in SQL Database or Azure Synapse. Only one Azure AD administrator (a user or group) can be configured at any time. - -![admin structure][3] - -## Permissions - -To create new users, you must have the `ALTER ANY USER` permission in the database. The `ALTER ANY USER` permission can be granted to any database user. The `ALTER ANY USER` permission is also held by the server administrator accounts, and database users with the `CONTROL ON DATABASE` or `ALTER ON DATABASE` permission for that database, and by members of the `db_owner` database role. - -To create a contained database user in Azure SQL Database, SQL Managed Instance, or Azure Synapse, you must connect to the database or instance using an Azure AD identity. To create the first contained database user, you must connect to the database by using an Azure AD administrator (who is the owner of the database). This is demonstrated in [Configure and manage Azure Active Directory authentication with SQL Database or Azure Synapse](authentication-aad-configure.md). Azure AD authentication is only possible if the Azure AD admin was created for Azure SQL Database, SQL Managed Instance, or Azure Synapse. If the Azure Active Directory admin was removed from the server, existing Azure Active Directory users created previously inside SQL Server can no longer connect to the database using their Azure Active Directory credentials. 
- -## Azure AD features and limitations - -- The following members of Azure AD can be provisioned for Azure SQL Database: - - - Native members: A member created in Azure AD in the managed domain or in a customer domain. For more information, see [Add your own domain name to Azure AD](../../active-directory/fundamentals/add-custom-domain.md). - - Members of an Active Directory domain federated with Azure Active Directory on a managed domain configured for seamless single sign-on with pass-through or password hash authentication. For more information, see [Microsoft Azure now supports federation with Windows Server Active Directory](https://azure.microsoft.com/blog/windows-azure-now-supports-federation-with-windows-server-active-directory//) and [Azure Active Directory Seamless Single Sign-On](../../active-directory/hybrid/how-to-connect-sso.md). - - Imported members from other Azure AD's who are native or federated domain members. - - Active Directory groups created as security groups. - -- Azure AD users that are part of a group that has `db_owner` server role cannot use the **[CREATE DATABASE SCOPED CREDENTIAL](/sql/t-sql/statements/create-database-scoped-credential-transact-sql)** syntax against Azure SQL Database and Azure Synapse. You will see the following error: - - `SQL Error [2760] [S0001]: The specified schema name 'user@mydomain.com' either does not exist or you do not have permission to use it.` - - Grant the `db_owner` role directly to the individual Azure AD user to mitigate the **CREATE DATABASE SCOPED CREDENTIAL** issue. - -- These system functions return NULL values when executed under Azure AD principals: - - - `SUSER_ID()` - - `SUSER_NAME()` - - `SUSER_SNAME()` - - `SUSER_ID()` - - `SUSER_SID()` - -### SQL Managed Instance - -- Azure AD server principals (logins) and users are supported for [SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md). 
-- Setting Azure AD server principals (logins) mapped to an Azure AD group as database owner is not supported in [SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md). - - An extension of this is that when a group is added as part of the `dbcreator` server role, users from this group can connect to the SQL Managed Instance and create new databases, but will not be able to access the database. This is because the new database owner is SA, and not the Azure AD user. This issue does not manifest if the individual user is added to the `dbcreator` server role. -- SQL Agent management and jobs execution are supported for Azure AD server principals (logins). -- Database backup and restore operations can be executed by Azure AD server principals (logins). -- Auditing of all statements related to Azure AD server principals (logins) and authentication events is supported. -- Dedicated administrator connection for Azure AD server principals (logins) which are members of sysadmin server role is supported. - - Supported through SQLCMD Utility and SQL Server Management Studio. -- Logon triggers are supported for logon events coming from Azure AD server principals (logins). -- Service Broker and DB mail can be setup using an Azure AD server principal (login). 
- -## Connect by using Azure AD identities - -Azure Active Directory authentication supports the following methods of connecting to a database using Azure AD identities: - -- Azure Active Directory Password -- Azure Active Directory Integrated -- Azure Active Directory Universal with Multi-Factor Authentication -- Using Application token authentication - -The following authentication methods are supported for Azure AD server principals (logins): - -- Azure Active Directory Password -- Azure Active Directory Integrated -- Azure Active Directory Universal with Multi-Factor Authentication - -### Additional considerations - -- To enhance manageability, we recommend you provision a dedicated Azure AD group as an administrator. -- Only one Azure AD administrator (a user or group) can be configured for a server in SQL Database or Azure Synapse at any time. - - The addition of Azure AD server principals (logins) for SQL Managed Instance allows the possibility of creating multiple Azure AD server principals (logins) that can be added to the `sysadmin` role. -- Only an Azure AD administrator for the server can initially connect to the server or managed instance using an Azure Active Directory account. The Active Directory administrator can configure subsequent Azure AD database users. -- Azure AD users and service principals (Azure AD applications) that are members of more than 2048 Azure AD security groups are not supported to login into the database in SQL Database, Managed Instance, or Azure Synapse. -- We recommend setting the connection timeout to 30 seconds. -- SQL Server 2016 Management Studio and SQL Server Data Tools for Visual Studio 2015 (version 14.0.60311.1April 2016 or later) support Azure Active Directory authentication. (Azure AD authentication is supported by the **.NET Framework Data Provider for SqlServer**; at least version .NET Framework 4.6). 
Therefore the newest versions of these tools and data-tier applications (DAC and BACPAC) can use Azure AD authentication. -- Beginning with version 15.0.1, [sqlcmd utility](/sql/tools/sqlcmd-utility) and [bcp utility](/sql/tools/bcp-utility) support Active Directory Interactive authentication with Multi-Factor Authentication. -- SQL Server Data Tools for Visual Studio 2015 requires at least the April 2016 version of the Data Tools (version 14.0.60311.1). Currently, Azure AD users are not shown in SSDT Object Explorer. As a workaround, view the users in [sys.database_principals](/sql/relational-databases/system-catalog-views/sys-database-principals-transact-sql). -- [Microsoft JDBC Driver 6.0 for SQL Server](https://www.microsoft.com/download/details.aspx?id=11774) supports Azure AD authentication. Also, see [Setting the Connection Properties](/sql/connect/jdbc/setting-the-connection-properties). -- PolyBase cannot authenticate by using Azure AD authentication. -- Azure AD authentication is supported for Azure SQL Database and Azure Synapse by using the Azure portal **Import Database** and **Export Database** blades. Import and export using Azure AD authentication is also supported from a PowerShell command. -- Azure AD authentication is supported for SQL Database, SQL Managed Instance, and Azure Synapse with using the CLI. For more information, see [Configure and manage Azure AD authentication with SQL Database or Azure Synapse](authentication-aad-configure.md) and [SQL Server - az sql server](/cli/azure/sql/server). - -## Next steps - -- To learn how to create and populate an Azure AD instance and then configure it with Azure SQL Database, SQL Managed Instance, or Azure Synapse, see [Configure and manage Azure Active Directory authentication with SQL Database, SQL Managed Instance, or Azure Synapse](authentication-aad-configure.md). 
-- For a tutorial of using Azure AD server principals (logins) with SQL Managed Instance, see [Azure AD server principals (logins) with SQL Managed Instance](../managed-instance/aad-security-configure-tutorial.md) -- For an overview of logins, users, database roles, and permissions in SQL Database, see [Logins, users, database roles, and permissions](logins-create-manage.md). -- For more information about database principals, see [Principals](/sql/relational-databases/security/authentication-access/principals-database-engine). -- For more information about database roles, see [Database roles](/sql/relational-databases/security/authentication-access/database-level-roles). -- For syntax on creating Azure AD server principals (logins) for SQL Managed Instance, see [CREATE LOGIN](/sql/t-sql/statements/create-login-transact-sql?view=azuresqldb-mi-current&preserve-view=true). -- For more information about firewall rules in SQL Database, see [SQL Database firewall rules](firewall-configure.md). 
- - -[1]: ./media/authentication-aad-overview/1aad-auth-diagram.png -[2]: ./media/authentication-aad-overview/2subscription-relationship.png -[3]: ./media/authentication-aad-overview/3admin-structure.png diff --git a/articles/azure-sql/database/authentication-aad-service-principal-tutorial.md b/articles/azure-sql/database/authentication-aad-service-principal-tutorial.md deleted file mode 100644 index b07607dbfbf40..0000000000000 --- a/articles/azure-sql/database/authentication-aad-service-principal-tutorial.md +++ /dev/null @@ -1,289 +0,0 @@ ---- -title: Create Azure AD users using service principals -description: This tutorial walks you through creating an Azure AD user with an Azure AD applications (service principals) in Azure SQL Database -ms.service: sql-database -ms.subservice: security -ms.topic: tutorial -author: GithubMirek -ms.author: mireks -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 03/29/2022 -ms.custom: devx-track-azurepowershell ---- - -# Tutorial: Create Azure AD users using Azure AD applications - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article takes you through the process of creating Azure AD users in Azure SQL Database, using Azure service principals (Azure AD applications). This functionality already exists in Azure SQL Managed Instance, but is now being introduced in Azure SQL Database. To support this scenario, an Azure AD Identity must be generated and assigned to the Azure SQL logical server. - -For more information on Azure AD authentication for Azure SQL, see the article [Use Azure Active Directory authentication](authentication-aad-overview.md). 
- -In this tutorial, you learn how to: - -> [!div class="checklist"] -> - Assign an identity to the Azure SQL logical server -> - Assign Directory Readers permission to the SQL logical server identity -> - Create a service principal (an Azure AD application) in Azure AD -> - Create a service principal user in Azure SQL Database -> - Create a different Azure AD user in SQL Database using an Azure AD service principal user - -## Prerequisites - -- An existing [Azure SQL Database](single-database-create-quickstart.md) deployment. We assume you have a working SQL Database for this tutorial. -- Access to an already existing Azure Active Directory. -- [Az.Sql 2.9.0](https://www.powershellgallery.com/packages/Az.Sql/2.9.0) module or higher is needed when using PowerShell to set up an individual Azure AD application as Azure AD admin for Azure SQL. Ensure you are upgraded to the latest module. - -## Assign an identity to the Azure SQL logical server - -1. Connect to your Azure Active Directory. You will need to find your Tenant ID. This can be found by going to the [Azure portal](https://portal.azure.com), and going to your **Azure Active Directory** resource. In the **Overview** pane, you should see your **Tenant ID**. Run the following PowerShell command: - - - Replace `` with your **Tenant ID**. - - ```powershell - Connect-AzAccount -Tenant - ``` - - Record the `TenantId` for future use in this tutorial. - -1. Generate and assign an Azure AD Identity to the Azure SQL logical server. Execute the following PowerShell command: - - - Replace `` and `` with your resources. If your server name is `myserver.database.windows.net`, replace `` with `myserver`. - - ```powershell - Set-AzSqlServer -ResourceGroupName -ServerName -AssignIdentity - ``` - - For more information, see the [Set-AzSqlServer](/powershell/module/az.sql/set-azsqlserver) command. 
- - > [!IMPORTANT] - > If an Azure AD Identity is set up for the Azure SQL logical server, the [**Directory Readers**](../../active-directory/roles/permissions-reference.md#directory-readers) permission must be granted to the identity. We will walk through this step in following section. **Do not** skip this step as Azure AD authentication will stop working. - > - > With [Microsoft Graph](/graph/overview) support for Azure SQL, the Directory Readers role can be replaced with using lower level permissions. For more information, see [User-assigned managed identity in Azure AD for Azure SQL](authentication-azure-ad-user-assigned-managed-identity.md). - > - > If a system-assigned or user-assigned managed identity is used as the server or instance identity, deleting the identity will result in the server or instance inability to access Microsoft Graph. Azure AD authentication and other functions will fail. To restore Azure AD functionality, a new SMI or UMI must be assigned to the server with appropriate permissions. - - - If you used the [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) command with the parameter `AssignIdentity` for a new SQL server creation in the past, you'll need to execute the [Set-AzSqlServer](/powershell/module/az.sql/set-azsqlserver) command afterwards as a separate command to enable this property in the Azure fabric. - -1. Check the server identity was successfully assigned. Execute the following PowerShell command: - - - Replace `` and `` with your resources. If your server name is `myserver.database.windows.net`, replace `` with `myserver`. - - ```powershell - $xyz = Get-AzSqlServer -ResourceGroupName -ServerName - $xyz.identity - ``` - - Your output should show you `PrincipalId`, `Type`, and `TenantId`. The identity assigned is the `PrincipalId`. - -1. You can also check the identity by going to the [Azure portal](https://portal.azure.com). - - - Under the **Azure Active Directory** resource, go to **Enterprise applications**. 
Type in the name of your SQL logical server. You will see that it has an **Object ID** attached to the resource. - - :::image type="content" source="media/authentication-aad-service-principals-tutorial/enterprise-applications-object-id.png" alt-text="object-id"::: - -## Assign Directory Readers permission to the SQL logical server identity - -To allow the Azure AD assigned identity to work properly for Azure SQL, the Azure AD `Directory Readers` permission must be granted to the server identity. - -To grant this required permission, run the following script. - -> [!NOTE] -> This script must be executed by an Azure AD `Global Administrator` or a `Privileged Roles Administrator`. -> -> You can assign the `Directory Readers` role to a group in Azure AD. The group owners can then add the managed identity as a member of this group, which would bypass the need for a `Global Administrator` or `Privileged Roles Administrator` to grant the `Directory Readers` role. For more information on this feature, see [Directory Readers role in Azure Active Directory for Azure SQL](authentication-aad-directory-readers-role.md). - -- Replace `` with your `TenantId` gathered earlier. -- Replace `` with your SQL logical server name. If your server name is `myserver.database.windows.net`, replace `` with `myserver`. - -```powershell -# This script grants Azure "Directory Readers" permission to a Service Principal representing the Azure SQL logical server -# It can be executed only by a "Global Administrator" or "Privileged Roles Administrator" type of user. 
-# To check if the "Directory Readers" permission was granted, execute this script again - -Import-Module AzureAD -Connect-AzureAD -TenantId "" #Enter your actual TenantId -$AssignIdentityName = "" #Enter Azure SQL logical server name - -# Get Azure AD role "Directory Users" and create if it doesn't exist -$roleName = "Directory Readers" -$role = Get-AzureADDirectoryRole | Where-Object {$_.displayName -eq $roleName} -if ($role -eq $null) { - # Instantiate an instance of the role template - $roleTemplate = Get-AzureADDirectoryRoleTemplate | Where-Object {$_.displayName -eq $roleName} - Enable-AzureADDirectoryRole -RoleTemplateId $roleTemplate.ObjectId - $role = Get-AzureADDirectoryRole | Where-Object {$_.displayName -eq $roleName} -} - -# Get service principal for server -$roleMember = Get-AzureADServicePrincipal -SearchString $AssignIdentityName -$roleMember.Count -if ($roleMember -eq $null) { - Write-Output "Error: No Service Principals with name '$($AssignIdentityName)', make sure that AssignIdentityName parameter was entered correctly." - exit -} - -if (-not ($roleMember.Count -eq 1)) { - Write-Output "Error: More than one service principal with name pattern '$($AssignIdentityName)'" - Write-Output "Dumping selected service principals...." - $roleMember - exit -} - -# Check if service principal is already member of readers role -$allDirReaders = Get-AzureADDirectoryRoleMember -ObjectId $role.ObjectId -$selDirReader = $allDirReaders | where{$_.ObjectId -match $roleMember.ObjectId} - -if ($selDirReader -eq $null) { - # Add principal to readers role - Write-Output "Adding service principal '$($AssignIdentityName)' to 'Directory Readers' role'..." - Add-AzureADDirectoryRoleMember -ObjectId $role.ObjectId -RefObjectId $roleMember.ObjectId - Write-Output "'$($AssignIdentityName)' service principal added to 'Directory Readers' role'..." 
- - #Write-Output "Dumping service principal '$($AssignIdentityName)':" - #$allDirReaders = Get-AzureADDirectoryRoleMember -ObjectId $role.ObjectId - #$allDirReaders | where{$_.ObjectId -match $roleMember.ObjectId} -} else { - Write-Output "Service principal '$($AssignIdentityName)' is already member of 'Directory Readers' role'." -} -``` - -> [!NOTE] -> The output from this above script will indicate if the Directory Readers permission was granted to the identity. You can re-run the script if you are unsure if the permission was granted. - -For a similar approach on how to set the **Directory Readers** permission for SQL Managed Instance, see [Provision Azure AD admin (SQL Managed Instance)](authentication-aad-configure.md#powershell). - -## Create a service principal (an Azure AD application) in Azure AD - -Register your application if you have not already done so. To register an app, you need to either be an Azure AD admin or a user assigned the Azure AD *Application Developer* role. For more information about assigning roles, see [Assign administrator and non-administrator roles to users with Azure Active Directory](../../active-directory/fundamentals/active-directory-users-assign-role-azure-portal.md). - -Completing an app registration generates and displays an **Application ID**. - -To register your application: - -1. In the Azure portal, select **Azure Active Directory** > **App registrations** > **New registration**. - - ![App registration](./media/active-directory-interactive-connect-azure-sql-db/image1.png) - - After the app registration is created, the **Application ID** value is generated and displayed. - - ![App ID displayed](./media/active-directory-interactive-connect-azure-sql-db/image2.png) - -1. You'll also need to create a client secret for signing in. Follow the guide here to [upload a certificate or create a secret for signing in](../../active-directory/develop/howto-create-service-principal-portal.md#authentication-two-options). - -1. 
Record the following from your application registration. It should be available from your **Overview** pane: - - **Application ID** - - **Tenant ID** - This should be the same as before - -In this tutorial, we'll be using *AppSP* as our main service principal, and *myapp* as the second service principal user that will be created in Azure SQL by *AppSP*. You'll need to create two applications, *AppSP* and *myapp*. - -For more information on how to create an Azure AD application, see the article [How to: Use the portal to create an Azure AD application and service principal that can access resources](../../active-directory/develop/howto-create-service-principal-portal.md). - -## Create the service principal user in Azure SQL Database - -Once a service principal is created in Azure AD, create the user in SQL Database. You'll need to connect to your SQL Database with a valid login with permissions to create users in the database. - -1. Create the user *AppSP* in the SQL Database using the following T-SQL command: - - ```sql - CREATE USER [AppSP] FROM EXTERNAL PROVIDER - GO - ``` - -2. Grant `db_owner` permission to *AppSP*, which allows the user to create other Azure AD users in the database. - - ```sql - EXEC sp_addrolemember 'db_owner', [AppSP] - GO - ``` - - For more information, see [sp_addrolemember](/sql/relational-databases/system-stored-procedures/sp-addrolemember-transact-sql) - - Alternatively, `ALTER ANY USER` permission can be granted instead of giving the `db_owner` role. This will allow the service principal to add other Azure AD users. - - ```sql - GRANT ALTER ANY USER TO [AppSp] - GO - ``` - - > [!NOTE] - > The above setting is not required when *AppSP* is set as an Azure AD admin for the server. To set the service principal as an AD admin for the SQL logical server, you can use the Azure portal, PowerShell, or Azure CLI commands. 
For more information, see [Provision Azure AD admin (SQL Database)](authentication-aad-configure.md?tabs=azure-powershell#powershell-for-sql-database-and-azure-synapse). - -## Create an Azure AD user in SQL Database using an Azure AD service principal - -> [!IMPORTANT] -> The service principal used to login to SQL Database must have a client secret. If it doesn’t have one, follow step 2 of [Create a service principal (an Azure AD application) in Azure AD](#create-a-service-principal-an-azure-ad-application-in-azure-ad). This client secret needs to be added as an input parameter in the script below. - -1. Use the following script to create an Azure AD service principal user *myapp* using the service principal *AppSP*. - - - Replace `` with your `TenantId` gathered earlier. - - Replace `` with your `ClientId` gathered earlier. - - Replace `` with your client secret created earlier. - - Replace `` with your SQL logical server name. If your server name is `myserver.database.windows.net`, replace `` with `myserver`. - - Replace `` with your SQL Database name. 
- - ```powershell - # PowerShell script for creating a new SQL user called myapp using application AppSP with secret - # AppSP is part of an Azure AD admin for the Azure SQL server below - - # Download latest MSAL - https://www.powershellgallery.com/packages/MSAL.PS - Import-Module MSAL.PS - - $tenantId = "" # tenantID (Azure Directory ID) were AppSP resides - $clientId = "" # AppID also ClientID for AppSP - $clientSecret = "" # Client secret for AppSP - $scopes = "https://database.windows.net/.default" # The end-point - - $result = Get-MsalToken -RedirectUri $uri -ClientId $clientId -ClientSecret (ConvertTo-SecureString $clientSecret -AsPlainText -Force) -TenantId $tenantId -Scopes $scopes - - $Tok = $result.AccessToken - #Write-host "token" - $Tok - - $SQLServerName = "" # Azure SQL logical server name - $DatabaseName = "" # Azure SQL database name - - Write-Host "Create SQL connection string" - $conn = New-Object System.Data.SqlClient.SQLConnection - $conn.ConnectionString = "Data Source=$SQLServerName.database.windows.net;Initial Catalog=$DatabaseName;Connect Timeout=30" - $conn.AccessToken = $Tok - - Write-host "Connect to database and execute SQL script" - $conn.Open() - $ddlstmt = 'CREATE USER [myapp] FROM EXTERNAL PROVIDER;' - Write-host " " - Write-host "SQL DDL command" - $ddlstmt - $command = New-Object -TypeName System.Data.SqlClient.SqlCommand($ddlstmt, $conn) - - Write-host "results" - $command.ExecuteNonQuery() - $conn.Close() - ``` - - Alternatively, you can use the code sample in the blog, [Azure AD Service Principal authentication to SQL DB - Code Sample](https://techcommunity.microsoft.com/t5/azure-sql-database/azure-ad-service-principal-authentication-to-sql-db-code-sample/ba-p/481467). Modify the script to execute a DDL statement `CREATE USER [myapp] FROM EXTERNAL PROVIDER`. The same script can be used to create a regular Azure AD user or a group in SQL Database. - - -2. 
Check if the user *myapp* exists in the database by executing the following command: - - ```sql - SELECT name, type, type_desc, CAST(CAST(sid as varbinary(16)) as uniqueidentifier) as appId from sys.database_principals WHERE name = 'myapp' - GO - ``` - - You should see a similar output: - - ```output - name type type_desc appId - myapp E EXTERNAL_USER 6d228f48-xxxx-xxxx-xxxx-xxxxxxxxxxxx - ``` - -## Next steps - -- [Azure Active Directory service principal with Azure SQL](authentication-aad-service-principal.md) -- [What are managed identities for Azure resources?](../../active-directory/managed-identities-azure-resources/overview.md) -- [How to use managed identities for App Service and Azure Functions](../../app-service/overview-managed-identity.md) -- [Azure AD Service Principal authentication to SQL DB - Code Sample](https://techcommunity.microsoft.com/t5/azure-sql-database/azure-ad-service-principal-authentication-to-sql-db-code-sample/ba-p/481467) -- [Application and service principal objects in Azure Active Directory](../../active-directory/develop/app-objects-and-service-principals.md) -- [Create an Azure service principal with Azure PowerShell](/powershell/azure/create-azure-service-principal-azureps) -- [Directory Readers role in Azure Active Directory for Azure SQL](authentication-aad-directory-readers-role.md) diff --git a/articles/azure-sql/database/authentication-aad-service-principal.md b/articles/azure-sql/database/authentication-aad-service-principal.md deleted file mode 100644 index 1975b6c82178d..0000000000000 --- a/articles/azure-sql/database/authentication-aad-service-principal.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -title: Azure Active Directory service principal with Azure SQL -description: Utilize AD Applications (service principals) support Azure AD user creation in Azure SQL Database and Azure SQL Managed Instance -ms.service: sql-db-mi -ms.subservice: security -ms.topic: conceptual -author: GithubMirek -ms.author: mireks -ms.reviewer: 
kendralittle, vanto, mathoma, wiassaf -ms.date: 02/14/2022 ---- - -# Azure Active Directory service principal with Azure SQL - -[!INCLUDE[appliesto-sqldb-sqlmi-asa-dedicated-only](../includes/appliesto-sqldb-sqlmi-asa-dedicated-only.md)] - -Azure Active Directory (Azure AD) supports user creation in Azure SQL Database (SQL DB) on behalf of Azure AD applications (service principals). This is supported for [Azure SQL Database](sql-database-paas-overview.md) and [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md), as well as to both dedicated SQL pools in Azure Synapse workspaces and dedicated SQL pools (formerly SQL DW). - -## Service principal (Azure AD applications) support - -This article applies to applications that are integrated with Azure AD, and are part of Azure AD registration. These applications often need authentication and authorization access to Azure SQL to perform various tasks. This feature allows service principals to create Azure AD users in SQL Database. There was a limitation preventing Azure AD object creation on behalf of Azure AD applications that was removed. - -When an Azure AD application is registered using the Azure portal or a PowerShell command, two objects are created in the Azure AD tenant: - -- An application object -- A service principal object - -For more information on Azure AD applications, see [Application and service principal objects in Azure Active Directory](../../active-directory/develop/app-objects-and-service-principals.md) and [Create an Azure service principal with Azure PowerShell](/powershell/azure/create-azure-service-principal-azureps). - -SQL Database and SQL Managed Instance support the following Azure AD objects: - -- Azure AD users (managed, federated, and guest) -- Azure AD groups (managed and federated) -- Azure AD applications - -The T-SQL command `CREATE USER [Azure_AD_Object] FROM EXTERNAL PROVIDER` on behalf of an Azure AD application is now supported for SQL Database. 
- -## Functionality of Azure AD user creation using service principals - -Supporting this functionality is useful in Azure AD application automation processes where Azure AD objects are created and maintained in SQL Database without human interaction. Service principals can be an Azure AD admin for the SQL logical server, as part of a group or an individual user. The application can automate Azure AD object creation in SQL Database when executed as a system administrator, and does not require any additional SQL privileges. This allows for a full automation of a database user creation. This feature also supports Azure AD system-assigned managed identity and user-assigned managed identity that can be created as users in SQL Database on behalf of service principals. For more information, see [What are managed identities for Azure resources?](../../active-directory/managed-identities-azure-resources/overview.md) - -## Enable service principals to create Azure AD users - -To enable an Azure AD object creation in SQL Database on behalf of an Azure AD application, the following settings are required: - -1. Assign the server identity. The assigned server identity represents the Managed Service Identity (MSI). The server identity can be system-assigned or user-assigned managed identity. For more information, see [User-assigned managed identity in Azure AD for Azure SQL](authentication-azure-ad-user-assigned-managed-identity.md). - - For a new Azure SQL logical server, execute the following PowerShell command: - - ```powershell - New-AzSqlServer -ResourceGroupName -Location -ServerName -ServerVersion "12.0" -SqlAdministratorCredentials (Get-Credential) -AssignIdentity - ``` - - For more information, see the [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) command, or [New-AzSqlInstance](/powershell/module/az.sql/new-azsqlinstance) command for SQL Managed Instance. 
- - - For existing Azure SQL Logical servers, execute the following command: - - ```powershell - Set-AzSqlServer -ResourceGroupName -ServerName -AssignIdentity - ``` - - For more information, see the [Set-AzSqlServer](/powershell/module/az.sql/set-azsqlserver) command, or [Set-AzSqlInstance](/powershell/module/az.sql/set-azsqlinstance) command for SQL Managed Instance. - - - To check if the server identity is assigned to the server, execute the Get-AzSqlServer command. - - > [!NOTE] - > Server identity can be assigned using REST API and CLI commands as well. For more information, see [az sql server create](/cli/azure/sql/server#az-sql-server-create), [az sql server update](/cli/azure/sql/server#az-sql-server-update), and [Servers - REST API](/rest/api/sql/2020-08-01-preview/servers). - - -2. Grant the Azure AD [**Directory Readers**](../../active-directory/roles/permissions-reference.md#directory-readers) permission to the server identity created or assigned to the server. - - To grant this permission, follow the description used for SQL Managed Instance that is available in the following article: [Provision Azure AD admin (SQL Managed Instance)](authentication-aad-configure.md?tabs=azure-powershell#provision-azure-ad-admin-sql-managed-instance) - - The Azure AD user who is granting this permission must be part of the Azure AD **Global Administrator** or **Privileged Roles Administrator** role. - - For dedicated SQL pools in an Azure Synapse workspace, use the workspace's managed identity instead of the Azure SQL server identity. - -> [!IMPORTANT] -> With [Microsoft Graph](/graph/overview) support for Azure SQL, the Directory Readers role can be replaced with using lower level permissions. For more information, see [User-assigned managed identity in Azure AD for Azure SQL](authentication-azure-ad-user-assigned-managed-identity.md) -> -> Steps 1 and 2 must be executed in the above order. 
First, create or assign the server identity, followed by granting the [**Directory Readers**](../../active-directory/roles/permissions-reference.md#directory-readers) permission, or lower level permissions discussed in [User-assigned managed identity in Azure AD for Azure SQL](authentication-azure-ad-user-assigned-managed-identity.md). Omitting one of these steps, or both will cause an execution error during an Azure AD object creation in Azure SQL on behalf of an Azure AD application. -> -> You can assign the **Directory Readers** role to a group in Azure AD. The group owners can then add the managed identity as a member of this group, which would bypass the need for a **Global Administrator** or **Privileged Roles Administrator** to grant the **Directory Readers** role. For more information on this feature, see [Directory Readers role in Azure Active Directory for Azure SQL](authentication-aad-directory-readers-role.md). - -## Troubleshooting and limitations - -- When creating Azure AD objects in Azure SQL on behalf of an Azure AD application without enabling server identity and granting **Directory Readers** permission, or lower level permissions discussed in [User-assigned managed identity in Azure AD for Azure SQL](authentication-azure-ad-user-assigned-managed-identity.md), the operation will fail with the following possible errors. The following example error is for a PowerShell command execution to create a SQL Database user `myapp` in the article [Tutorial: Create Azure AD users using Azure AD applications](authentication-aad-service-principal-tutorial.md). - - `Exception calling "ExecuteNonQuery" with "0" argument(s): "'myapp' is not a valid login or you do not have permission. Cannot find the user 'myapp', because it does not exist, or you do not have permission."` - - `Exception calling "ExecuteNonQuery" with "0" argument(s): "Principal 'myapp' could not be resolved. Error message: - 'Server identity is not configured. 
Please follow the steps in "Assign an Azure AD identity to your server and add - Directory Reader permission to your identity" (https://aka.ms/sqlaadsetup)'"` - - For the above error, follow the steps to [Assign an identity to the Azure SQL logical server](authentication-aad-service-principal-tutorial.md#assign-an-identity-to-the-azure-sql-logical-server) and [Assign Directory Readers permission to the SQL logical server identity](authentication-aad-service-principal-tutorial.md#assign-directory-readers-permission-to-the-sql-logical-server-identity). - - Setting the service principal (Azure AD application) as an Azure AD admin for SQL Database is supported using the Azure portal, [PowerShell](authentication-aad-configure.md?tabs=azure-powershell#powershell-for-sql-database-and-azure-synapse), [REST API](/rest/api/sql/2020-08-01-preview/servers), and [CLI](authentication-aad-configure.md?tabs=azure-cli#powershell-for-sql-database-and-azure-synapse) commands. -- Using an Azure AD application with service principal from another Azure AD tenant will fail when accessing SQL Database or SQL Managed Instance created in a different tenant. A service principal assigned to this application must be from the same tenant as the SQL logical server or Managed Instance. -- [Az.Sql 2.9.0](https://www.powershellgallery.com/packages/Az.Sql/2.9.0) module or higher is needed when using PowerShell to set up an individual Azure AD application as Azure AD admin for Azure SQL. Ensure you are upgraded to the latest module. 
- -## Next steps - -> [!div class="nextstepaction"] -> [Tutorial: Create Azure AD users using Azure AD applications](authentication-aad-service-principal-tutorial.md) diff --git a/articles/azure-sql/database/authentication-azure-ad-logins-tutorial.md b/articles/azure-sql/database/authentication-azure-ad-logins-tutorial.md deleted file mode 100644 index 72dff9759c52d..0000000000000 --- a/articles/azure-sql/database/authentication-azure-ad-logins-tutorial.md +++ /dev/null @@ -1,222 +0,0 @@ ---- -title: Create and utilize Azure Active Directory server logins -description: This article guides you through creating and utilizing Azure Active Directory logins in the virtual master database of Azure SQL -ms.service: sql-db-mi -ms.subservice: security -ms.topic: tutorial -author: GithubMirek -ms.author: mireks -ms.reviewer: vanto -ms.date: 03/14/2022 ---- - -# Tutorial: Create and utilize Azure Active Directory server logins - -[!INCLUDE[appliesto-sqldb-sqlmi-asa-dedicated-only](../includes/appliesto-sqldb-sqlmi-asa-dedicated-only.md)] - -> [!NOTE] -> Azure Active Directory (Azure AD) server principals (logins) are currently in public preview for Azure SQL Database. Azure SQL Managed Instance can already utilize Azure AD logins. - -This article guides you through creating and utilizing [Azure Active Directory (Azure AD) principals (logins)](authentication-azure-ad-logins.md) in the virtual master database of Azure SQL. - -In this tutorial, you learn how to: - -> [!div class="checklist"] -> - Create an Azure AD login in the virtual master database with the new syntax extension for Azure SQL Database -> - Create a user mapped to an Azure AD login in the virtual master database -> - Grant server roles to an Azure AD user -> - Disable an Azure AD login - -## Prerequisites - -- A SQL Database or SQL Managed Instance with a database. 
See [Quickstart: Create an Azure SQL Database single database](single-database-create-quickstart.md) if you haven't already created an Azure SQL Database, or [Quickstart: Create an Azure SQL Managed Instance](../managed-instance/instance-create-quickstart.md). -- Azure AD authentication set up for SQL Database or Managed Instance. For more information, see [Configure and manage Azure AD authentication with Azure SQL](authentication-aad-configure.md). -- This article instructs you on creating an Azure AD login and user within the virtual master database. Only an Azure AD admin can create a user within the virtual master database, so we recommend you use the Azure AD admin account when going through this tutorial. An Azure AD principal with the `loginmanager` role can create a login, but not a user within the virtual master database. - -## Create Azure AD login - -1. Create an Azure SQL Database login for an Azure AD account. In our example, we'll use `bob@contoso.com` that exists in our Azure AD domain called `contoso`. A login can also be created from an Azure AD group or [service principal (applications)](authentication-aad-service-principal.md). For example, `mygroup` that is an Azure AD group consisting of Azure AD accounts that are a member of that group. For more information, see [CREATE LOGIN (Transact-SQL)](/sql/t-sql/statements/create-login-transact-sql?view=azuresqldb-current&preserve-view=true). - - > [!NOTE] - > The first Azure AD login must be created by the Azure Active Directory admin. A SQL login cannot create Azure AD logins. - -1. Using [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms), log into your SQL Database with the Azure AD admin account set up for the server. -1. Run the following query: - - ```sql - Use master - CREATE LOGIN [bob@contoso.com] FROM EXTERNAL PROVIDER - GO - ``` - -1. Check the created login in `sys.server_principals`. 
Execute the following query: - - ```sql - SELECT name, type_desc, type, is_disabled - FROM sys.server_principals - WHERE type_desc like 'external%' - ``` - - You would see a similar output to the following: - - ```output - Name type_desc type is_disabled - bob@contoso.com EXTERNAL_LOGIN E 0 - ``` - -1. The login `bob@contoso.com` has been created in the virtual master database. - -## Create user from an Azure AD login - -1. Now that we've created an Azure AD login, we can create a database-level Azure AD user that is mapped to the Azure AD login in the virtual master database. We'll continue to use our example, `bob@contoso.com` to create a user in the virtual master database, as we want to demonstrate adding the user to special roles. Only an Azure AD admin or SQL server admin can create users in the virtual master database. - -1. We're using the virtual master database, but you can switch to a database of your choice if you want to create users in other databases. Run the following query. - - ```sql - Use master - CREATE USER [bob@contoso.com] FROM LOGIN [bob@contoso.com] - ``` - - > [!TIP] - > Although it is not required to use Azure AD user aliases (for example, `bob@contoso.com`), it is a recommended best practice to use the same alias for Azure AD users and Azure AD logins. - -1. Check the created user in `sys.database_principals`. Execute the following query: - - ```sql - SELECT name, type_desc, type - FROM sys.database_principals - WHERE type_desc like 'external%' - ``` - - You would see a similar output to the following: - - ```output - Name type_desc type - bob@contoso.com EXTERNAL_USER E - ``` - -> [!NOTE] -> The existing syntax to create an Azure AD user without an Azure AD login is still supported, and requires the creation of a contained user inside SQL Database (without login). -> -> For example, `CREATE USER [bob@contoso.com] FROM EXTERNAL PROVIDER`. 
- -## Grant server-level roles to Azure AD logins - -You can add logins to the [built-in server-level roles](security-server-roles.md#built-in-server-level-roles), such as the **##MS_DefinitionReader##**, **##MS_ServerStateReader##**, or **##MS_ServerStateManager##** role. - -> [!NOTE] -> The server-level roles mentioned here are not supported for Azure AD groups. - -```sql -ALTER SERVER ROLE ##MS_DefinitionReader## ADD MEMBER [AzureAD_object]; -``` - -```sql -ALTER SERVER ROLE ##MS_ServerStateReader## ADD MEMBER [AzureAD_object]; -``` - -```sql -ALTER SERVER ROLE ##MS_ServerStateManager## ADD MEMBER [AzureAD_object]; -``` - -Permissions aren't effective until the user reconnects. Flush the DBCC cache as well: - -```sql -DBCC FLUSHAUTHCACHE -DBCC FREESYSTEMCACHE('TokenAndPermUserStore') WITH NO_INFOMSGS -``` - -To check which Azure AD logins are part of server-level roles, run the following query: - -```sql -SELECT roles.principal_id AS RolePID,roles.name AS RolePName, - server_role_members.member_principal_id AS MemberPID, members.name AS MemberPName - FROM sys.server_role_members AS server_role_members - INNER JOIN sys.server_principals AS roles - ON server_role_members.role_principal_id = roles.principal_id - INNER JOIN sys.server_principals AS members - ON server_role_members.member_principal_id = members.principal_id; -``` - -## Grant special roles for Azure AD users - -[Special roles for SQL Database](/sql/relational-databases/security/authentication-access/database-level-roles#special-roles-for--and-azure-synapse) can be assigned to users in the virtual master database. - -In order to grant one of the special database roles to a user, the user must exist in the virtual master database. 
- -To add a user to a role, you can run the following query: - -```sql -ALTER ROLE [dbmanager] ADD MEMBER [AzureAD_object] -``` - -To remove a user from a role, run the following query: - -```sql -ALTER ROLE [dbmanager] DROP MEMBER [AzureAD_object] -``` - -`AzureAD_object` can be an Azure AD user, group, or service principal in Azure AD. - -In our example, we created the user `bob@contoso.com`. Let's give the user the **dbmanager** and **loginmanager** roles. - -1. Run the following query: - - ```sql - ALTER ROLE [dbmanager] ADD MEMBER [bob@contoso.com] - ALTER ROLE [loginmanager] ADD MEMBER [bob@contoso.com] - ``` - -1. Check the database role assignment by running the following query: - - ```sql - SELECT DP1.name AS DatabaseRoleName, - isnull (DP2.name, 'No members') AS DatabaseUserName - FROM sys.database_role_members AS DRM - RIGHT OUTER JOIN sys.database_principals AS DP1 - ON DRM.role_principal_id = DP1.principal_id - LEFT OUTER JOIN sys.database_principals AS DP2 - ON DRM.member_principal_id = DP2.principal_id - WHERE DP1.type = 'R' and DP2.name like 'bob%' - ``` - - You would see a similar output to the following: - - ```output - DatabaseRoleName DatabaseUserName - dbmanager bob@contoso.com - loginmanager bob@contoso.com - ``` - -## Optional - Disable a login - -The [ALTER LOGIN (Transact-SQL)](/sql/t-sql/statements/alter-login-transact-sql?view=azuresqldb-current&preserve-view=true) DDL syntax can be used to enable or disable an Azure AD login in Azure SQL Database. 
- -```sql -ALTER LOGIN [bob@contoso.com] DISABLE -``` - -For the `DISABLE` or `ENABLE` changes to take immediate effect, the authentication cache and the **TokenAndPermUserStore** cache must be cleared using the following T-SQL commands: - -```sql -DBCC FLUSHAUTHCACHE -DBCC FREESYSTEMCACHE('TokenAndPermUserStore') WITH NO_INFOMSGS -``` - -Check that the login has been disabled by executing the following query: - -```sql -SELECT name, type_desc, type -FROM sys.server_principals -WHERE is_disabled = 1 -``` - -A use case for this would be to allow read-only on [geo-replicas](active-geo-replication-overview.md), but deny connection on a primary server. - -## See also - -For more information and examples, see: - -- [Azure Active Directory server principals](authentication-azure-ad-logins.md) -- [CREATE LOGIN (Transact-SQL)](/sql/t-sql/statements/create-login-transact-sql?view=azuresqldb-current&preserve-view=true) -- [CREATE USER (Transact-SQL)](/sql/t-sql/statements/create-user-transact-sql) \ No newline at end of file diff --git a/articles/azure-sql/database/authentication-azure-ad-logins.md b/articles/azure-sql/database/authentication-azure-ad-logins.md deleted file mode 100644 index 8da87483b0fa7..0000000000000 --- a/articles/azure-sql/database/authentication-azure-ad-logins.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -title: Azure Active Directory server principals -description: Using Azure Active Directory server principals (logins) in Azure SQL -ms.service: sql-db-mi -ms.subservice: security -ms.topic: conceptual -author: GithubMirek -ms.author: mireks -ms.reviewer: vanto -ms.date: 03/14/2022 ---- - -# Azure Active Directory server principals - -[!INCLUDE[appliesto-sqldb-sqlmi-asa-dedicated-only](../includes/appliesto-sqldb-sqlmi-asa-dedicated-only.md)] - -> [!NOTE] -> Azure Active Directory (Azure AD) server principals (logins) are currently in public preview for Azure SQL Database. Azure SQL Managed Instance can already utilize Azure AD logins. 
- -You can now create and utilize Azure AD server principals, which are logins in the virtual master database of a SQL Database. There are several benefits of using Azure AD server principals for SQL Database: - -- Support [Azure SQL Database server roles for permission management](security-server-roles.md). -- Support multiple Azure AD users with [special roles for SQL Database](/sql/relational-databases/security/authentication-access/database-level-roles#special-roles-for--and-azure-synapse), such as the `loginmanager` and `dbmanager` roles. -- Functional parity between SQL logins and Azure AD logins. -- Increase functional improvement support, such as utilizing [Azure AD-only authentication](authentication-azure-ad-only-authentication.md). Azure AD-only authentication allows SQL authentication to be disabled, which includes the SQL server admin, SQL logins and users. -- Allows Azure AD principals to support geo-replicas. Azure AD principals will be able to connect to the geo-replica of a user database, with a *read-only* permission and *deny* permission to the primary server. -- Ability to use Azure AD service principal logins with special roles to execute a full automation of user and database creation, as well as maintenance provided by Azure AD applications. -- Closer functionality between Managed Instance and SQL Database, as Managed Instance already supports Azure AD logins in the master database. - -For more information on Azure AD authentication in Azure SQL, see [Use Azure Active Directory authentication](authentication-aad-overview.md) - -## Permissions - -The following permissions are required to utilize or create Azure AD logins in the virtual master database. - -- Azure AD admin permission or membership in the `loginmanager` server role. The first Azure AD login can only be created by the Azure AD admin. 
-- Must be a member of Azure AD within the same directory used for Azure SQL Database - -By default, the standard permission granted to newly created Azure AD login in the `master` database is **VIEW ANY DATABASE**. - -## Azure AD logins syntax - -New syntax for Azure SQL Database to use Azure AD server principals has been introduced with this feature release. - -### Create login syntax - -```syntaxsql -CREATE LOGIN login_name { FROM EXTERNAL PROVIDER | WITH [,..] }   - - ::=      -    PASSWORD = {'password'}   -    | , SID = sid, ] -``` - -The *login_name* specifies the Azure AD principal, which is an Azure AD user, group, or application. - -For more information, see [CREATE LOGIN (Transact-SQL)](/sql/t-sql/statements/create-login-transact-sql?view=azuresqldb-current&preserve-view=true). - -### Create user syntax - -The below T-SQL syntax is already available in SQL Database, and can be used for creating database-level Azure AD principals mapped to Azure AD logins in the virtual master database. - -To create an Azure AD user from an Azure AD login, use the following syntax. Only the Azure AD admin can execute this command in the virtual master database. - -```syntaxsql -CREATE USER user_name FROM LOGIN login_name -``` - -For more information, see [CREATE USER (Transact-SQL)](/sql/t-sql/statements/create-user-transact-sql). - -### Disable or enable a login using ALTER LOGIN syntax - -The [ALTER LOGIN (Transact-SQL)](/sql/t-sql/statements/alter-login-transact-sql?view=azuresqldb-current&preserve-view=true) DDL syntax can be used to enable or disable an Azure AD login in Azure SQL Database. - -```syntaxsql -ALTER LOGIN login_name DISABLE -``` - -The Azure AD principal `login_name` won't be able to log into any user database in the SQL Database logical server where an Azure AD user principal, `user_name` mapped to login `login_name` was created. - -> [!NOTE] -> - `ALTER LOGIN login_name DISABLE` is not supported for contained users. 
-> - `ALTER LOGIN login_name DISABLE` is not supported for Azure AD groups. -> - An individual disabled login cannot belong to a user who is part of a login group created in the master database (for example, an Azure AD admin group). -> - For the `DISABLE` or `ENABLE` changes to take immediate effect, the authentication cache and the **TokenAndPermUserStore** cache must be cleared using the T-SQL commands. -> -> ```sql -> DBCC FLUSHAUTHCACHE -> DBCC FREESYSTEMCACHE('TokenAndPermUserStore') WITH NO_INFOMSGS -> ``` - -## Roles for Azure AD principals - -[Special roles for SQL Database](/sql/relational-databases/security/authentication-access/database-level-roles#special-roles-for--and-azure-synapse) can be assigned to *users* in the virtual master database for Azure AD principals, including **dbmanager** and **loginmanager**. - -[Azure SQL Database server roles](security-server-roles.md) can be assigned to *logins* in the virtual master database. - -For a tutorial on how to grant these roles, see [Tutorial: Create and utilize Azure Active Directory server logins](authentication-azure-ad-logins-tutorial.md). - - -## Limitations and remarks - -- The SQL server admin can’t create Azure AD logins or users in any databases. -- Changing a database ownership to an Azure AD group as database owner isn't supported. - - `ALTER AUTHORIZATION ON database:: TO [my_aad_group]` fails with an error message: - ```output - Msg 33181, Level 16, State 1, Line 4 - The new owner cannot be Azure Active Directory group. - ``` - - Changing a database ownership to an individual user is supported. 
-- A SQL admin or SQL user can’t execute the following Azure AD operations: - - `CREATE LOGIN [bob@contoso.com] FROM EXTERNAL PROVIDER` - - `CREATE USER [bob@contoso.com] FROM EXTERNAL PROVIDER` - - `EXECUTE AS USER [bob@contoso.com]` - - `ALTER AUTHORIZATION ON securable::name TO [bob@contoso.com]` -- Impersonation of Azure AD server-level principals (logins) isn't supported: - - [EXECUTE AS Clause (Transact-SQL)](/sql/t-sql/statements/execute-as-clause-transact-sql) - - [EXECUTE AS (Transact-SQL)](/sql/t-sql/statements/execute-as-transact-sql) - - Impersonation of Azure AD database-level principals (users) at a user database-level is still supported. -- Azure AD logins overlapping with Azure AD administrator aren't supported. Azure AD admin takes precedence over any login. If an Azure AD account already has access to the server as an Azure AD admin, either directly or as a member of the admin group, the login created for this user won't have any effect. The login creation isn't blocked through T-SQL. After the account authenticates to the server, the login will have the effective permissions of an Azure AD admin, and not of a newly created login. -- Changing permissions on specific Azure AD login object isn't supported: - - `GRANT ON LOGIN :: TO ` -- When permissions are altered for an Azure AD login with existing open connections to an Azure SQL Database, permissions aren't effective until the user reconnects. Also [flush the authentication cache and the TokenAndPermUserStore cache](#disable-or-enable-a-login-using-alter-login-syntax). This applies to server role membership change using the [ALTER SERVER ROLE](/sql/t-sql/statements/alter-server-role-transact-sql) statement. -- Setting an Azure AD login mapped to an Azure AD group as the database owner isn't supported. -- [Azure SQL Database server roles](security-server-roles.md) aren't supported for Azure AD groups. 
- -## Next steps - -> [!div class="nextstepaction"] -> [Tutorial: Create and utilize Azure Active Directory server logins](authentication-azure-ad-logins-tutorial.md) \ No newline at end of file diff --git a/articles/azure-sql/database/authentication-azure-ad-only-authentication-create-server.md b/articles/azure-sql/database/authentication-azure-ad-only-authentication-create-server.md deleted file mode 100644 index 319a3714e718c..0000000000000 --- a/articles/azure-sql/database/authentication-azure-ad-only-authentication-create-server.md +++ /dev/null @@ -1,740 +0,0 @@ ---- -title: Create server with Azure Active Directory only authentication enabled -description: This article guides you through creating an Azure SQL logical server or managed instance with Azure Active Directory (Azure AD) only authentication enabled, which disables connectivity using SQL Authentication -titleSuffix: Azure SQL Database & Azure SQL Managed Instance -ms.service: sql-db-mi -ms.subservice: security -ms.topic: how-to -author: GithubMirek -ms.author: mireks -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 04/06/2022 ---- - -# Create server with Azure AD-only authentication enabled in Azure SQL - -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - - -This how-to guide outlines the steps to create a [logical server](logical-servers.md) for Azure SQL Database or [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md) with [Azure AD-only authentication](authentication-azure-ad-only-authentication.md) enabled during provisioning. The Azure AD-only authentication feature prevents users from connecting to the server or managed instance using SQL authentication, and only allows connection using Azure AD authentication. - -## Prerequisites - -- Version 2.26.1 or later is needed when using The Azure CLI. For more information on the installation and the latest version, see [Install the Azure CLI](/cli/azure/install-azure-cli). 
-- [Az 6.1.0](https://www.powershellgallery.com/packages/Az/6.1.0) module or higher is needed when using PowerShell. -- If you're provisioning a managed instance using the Azure CLI, PowerShell, or REST API, a virtual network and subnet needs to be created before you begin. For more information, see [Create a virtual network for Azure SQL Managed Instance](../managed-instance/virtual-network-subnet-create-arm-template.md). - -## Permissions - -To provision a logical server or managed instance, you'll need to have the appropriate permissions to create these resources. Azure users with higher permissions, such as subscription [Owners](../../role-based-access-control/built-in-roles.md#owner), [Contributors](../../role-based-access-control/built-in-roles.md#contributor), [Service Administrators](../../role-based-access-control/rbac-and-directory-admin-roles.md#classic-subscription-administrator-roles), and [Co-Administrators](../../role-based-access-control/rbac-and-directory-admin-roles.md#classic-subscription-administrator-roles) have the privilege to create a SQL server or managed instance. To create these resources with the least privileged Azure RBAC role, use the [SQL Server Contributor](../../role-based-access-control/built-in-roles.md#sql-server-contributor) role for SQL Database and [SQL Managed Instance Contributor](../../role-based-access-control/built-in-roles.md#sql-managed-instance-contributor) role for SQL Managed Instance. - -The [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) Azure RBAC role doesn't have enough permissions to create a server or instance with Azure AD-only authentication enabled. The [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role will be required to manage the Azure AD-only authentication feature after server or instance creation. 
- -## Provision with Azure AD-only authentication enabled - -The following section provides you with examples and scripts on how to create a logical server or managed instance with an Azure AD admin set for the server or instance, and have Azure AD-only authentication enabled during server creation. For more information on the feature, see [Azure AD-only authentication](authentication-azure-ad-only-authentication.md). - -In our examples, we're enabling Azure AD-only authentication during server or managed instance creation, with a system assigned server admin and password. This will prevent server admin access when Azure AD-only authentication is enabled, and only allows the Azure AD admin to access the resource. It's optional to add parameters to the APIs to include your own server admin and password during server creation. However, the password can’t be reset until you disable Azure AD-only authentication. An example of how to use these optional parameters to specify the server admin login name is presented in the [PowerShell](?tabs=azure-powershell#azure-sql-database) tab on this page. - -> [!NOTE] -> To change the existing properties after server or managed instance creation, other existing APIs should be used. For more information, see [Managing Azure AD-only authentication using APIs](authentication-azure-ad-only-authentication.md#managing-azure-ad-only-authentication-using-apis) and [Configure and manage Azure AD authentication with Azure SQL](authentication-aad-configure.md). -> -> If Azure AD-only authentication is set to false, which it is by default, a server admin and password will need to be included in all APIs during server or managed instance creation. - -## Azure SQL Database - -# [Portal](#tab/azure-portal) - -1. Browse to the [Select SQL deployment](https://portal.azure.com/#create/Microsoft.AzureSQL) option page in the Azure portal. - -1. If you aren't already signed in to Azure portal, sign in when prompted. - -1. 
Under **SQL databases**, leave **Resource type** set to **Single database**, and select **Create**. - -1. On the **Basics** tab of the **Create SQL Database** form, under **Project details**, select the desired Azure **Subscription**. - -1. For **Resource group**, select **Create new**, enter a name for your resource group, and select **OK**. - -1. For **Database name**, enter a name for your database. - -1. For **Server**, select **Create new**, and fill out the new server form with the following values: - - - **Server name**: Enter a unique server name. Server names must be globally unique for all servers in Azure, not just unique within a subscription. Enter a value, and the Azure portal will let you know if it's available or not. - - **Location**: Select a location from the dropdown list - - **Authentication method**: Select **Use only Azure Active Directory (Azure AD) authentication**. - - Select **Set admin**, which brings up a menu to select an Azure AD principal as your logical server Azure AD administrator. When you're finished, use the **Select** button to set your admin. - - :::image type="content" source="media/authentication-azure-ad-only-authentication/azure-ad-portal-create-server.png" alt-text="screenshot of creating a server with Azure AD-only authentication enabled"::: - -1. Select **Next: Networking** at the bottom of the page. - -1. On the **Networking** tab, for **Connectivity method**, select **Public endpoint**. - -1. For **Firewall rules**, set **Add current client IP address** to **Yes**. Leave **Allow Azure services and resources to access this server** set to **No**. - -1. Leave **Connection policy** and **Minimum TLS version** settings as their default value. - -1. Select **Next: Security** at the bottom of the page. Configure any of the settings for **Microsoft Defender for SQL**, **Ledger**, **Identity**, and **Transparent data encryption** for your environment. You can also skip these settings. 
- - > [!NOTE] - > Using a user-assigned managed identity (UMI) is not supported with Azure AD-only authentication. Do not set the server identity in the **Identity** section as a UMI. - -1. Select **Review + create** at the bottom of the page. - -1. On the **Review + create** page, after reviewing, select **Create**. - -# [The Azure CLI](#tab/azure-cli) - -The Azure CLI command `az sql server create` is used to provision a new logical server. The below command will provision a new server with Azure AD-only authentication enabled. - -The server SQL Administrator login will be automatically created and the password will be set to a random password. Since SQL Authentication connectivity is disabled with this server creation, the SQL Administrator login won't be used. - -The server Azure AD admin will be the account you set for ``, and can be used to manage the server. - -Replace the following values in the example: - -- ``: Can be an Azure AD user or group. For example, `DummyLogin` -- ``: The Azure AD Object ID for the user -- ``: Name of the resource group for your logical server -- ``: Use a unique logical server name - -```azurecli -az sql server create --enable-ad-only-auth --external-admin-principal-type User --external-admin-name --external-admin-sid -g -n -``` - -For more information, see [az sql server create](/cli/azure/sql/server#az-sql-server-create). - -To check the server status after creation, see the following command: - -```azurecli -az sql server show --name --resource-group --expand-ad-admin -``` - -# [PowerShell](#tab/azure-powershell) - -The PowerShell command `New-AzSqlServer` is used to provision a new Azure SQL logical server. The below command will provision a new server with Azure AD-only authentication enabled. - -The server SQL Administrator login will be automatically created and the password will be set to a random password. 
Since SQL Authentication connectivity is disabled with this server creation, the SQL Administrator login won't be used.
-
-The server Azure AD admin will be the account you set for ``, and can be used to manage the server.
-
-Replace the following values in the example:
-
-- ``: Name of the resource group for your logical server
-- ``: Location of the server, such as `West US`, or `Central US`
-- ``: Use a unique logical server name
-- ``: Can be an Azure AD user or group. For example, `DummyLogin`
-
-```powershell
-New-AzSqlServer -ResourceGroupName "" -Location "" -ServerName "" -ServerVersion "12.0" -ExternalAdminName "" -EnableActiveDirectoryOnlyAuthentication
-```
-
-Here's an example of specifying the server admin name (instead of letting the server admin name be automatically created) at the time of logical server creation. As mentioned earlier, this login isn't usable when Azure AD-only authentication is enabled.
-
-```powershell
-$cred = Get-Credential
-New-AzSqlServer -ResourceGroupName "" -Location "" -ServerName "" -ServerVersion "12.0" -ExternalAdminName "" -EnableActiveDirectoryOnlyAuthentication -SqlAdministratorCredentials $cred
-```
-
-For more information, see [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver).
-
-# [REST API](#tab/rest-api)
-
-The [Servers - Create Or Update](/rest/api/sql/2020-11-01-preview/servers/create-or-update) REST API can be used to create a logical server with Azure AD-only authentication enabled during provisioning.
-
-The script below will provision a logical server, set the Azure AD admin as ``, and enable Azure AD-only authentication. The server SQL Administrator login will also be created automatically and the password will be set to a random password. Since SQL Authentication connectivity is disabled with this provisioning, the SQL Administrator login won't be used.
-
-The Azure AD admin, `` can be used to manage the server when the provisioning is complete.
-
-Replace the following values in the example:
-
-- ``: Can be found by going to the [Azure portal](https://portal.azure.com), and going to your **Azure Active Directory** resource. In the **Overview** pane, you should see your **Tenant ID**
-- ``: Your subscription ID can be found in the Azure portal
-- ``: Use a unique logical server name
-- ``: Name of the resource group for your logical server
-- ``: Can be an Azure AD user or group. For example, `DummyLogin`
-- ``: Location of the server, such as `westus2`, or `centralus`
-- ``: Can be found by going to the [Azure portal](https://portal.azure.com), and going to your **Azure Active Directory** resource. In the **User** pane, search for the Azure AD user and find their **Object ID**. If you're using an application (service principal) as the Azure AD admin, replace this value with the **Application ID**. You will need to update the `principalType` as well.
-
-```rest
-Import-Module Azure
-Import-Module MSAL.PS
-
-$tenantId = ''
-$clientId = '1950a258-227b-4e31-a9cf-717495945fc2' # Static Microsoft client ID used for getting a token
-$subscriptionId = ''
-$uri = "urn:ietf:wg:oauth:2.0:oob"
-$authUrl = "https://login.windows.net/$tenantId"
-$serverName = ""
-$resourceGroupName = ""
-
-Login-AzAccount -tenantId $tenantId
-
-# Login as a user with SQL Server Contributor role or higher
-
-# Get a token
-
-$result = Get-MsalToken -RedirectUri $uri -ClientId $clientId -TenantId $tenantId -Scopes "https://management.core.windows.net/.default"
-
-# Authentication header
-$authHeader = @{
-'Content-Type'='application/json'
-'Authorization'=$result.CreateAuthorizationHeader()
-}
-
-# Enable Azure AD-only auth
-# No server admin is specified, and only Azure AD admin and Azure AD-only authentication is set to true
-# Server admin (login and password) is generated by the system
-
-# Authentication body
-# The sid is the Azure AD Object ID for the user or group, and Application ID for applications. Update the principalType as well
-
-$body = '{
-"location": "",
-"properties": { "administrators":{ "login":"", "sid":"", "tenantId":"", "principalType":"User", "azureADOnlyAuthentication":true }
- }
-}'
-
-# Provision the server
-
-Invoke-RestMethod -Uri https://management.azure.com/subscriptions/$subscriptionId/resourceGroups/$resourceGroupName/providers/Microsoft.Sql/servers/$serverName/?api-version=2020-11-01-preview -Method PUT -Headers $authHeader -Body $body -ContentType "application/json"
-```
-
-To check the server status, you can use the following script:
-
-```rest
-$uri = 'https://management.azure.com/subscriptions/'+$subscriptionId+'/resourceGroups/'+$resourceGroupName+'/providers/Microsoft.Sql/servers/'+$serverName+'?api-version=2020-11-01-preview&$expand=administrators/activedirectory'
-
-$response = Invoke-WebRequest -Uri $uri -Method GET -Headers $authHeader
-
-$response.StatusCode
-
-$response.Content
-```
-
-# [ARM Template](#tab/arm-template)
-
-For more information and ARM templates, see [Azure Resource Manager templates for Azure SQL Database & SQL Managed Instance](arm-templates-content-guide.md).
-
-To provision a logical server with an Azure AD admin set for the server and Azure AD-only authentication enabled using an ARM Template, see our [Azure SQL logical server with Azure AD-only authentication](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sql/sql-logical-server-aad-only-auth) quickstart template.
-
-You can also use the following template. Use a [Custom deployment in the Azure portal](https://portal.azure.com/#create/Microsoft.Template), and **Build your own template in the editor**. Next, **Save** the configuration once you've pasted in the example.
- -```json -{ - "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.1", - "parameters": { - "server": { - "type": "string", - "defaultValue": "[uniqueString('sql', resourceGroup().id)]", - "metadata": { - "description": "The name of the logical server." - } - }, - "location": { - "type": "string", - "defaultValue": "[resourceGroup().location]", - "metadata": { - "description": "Location for all resources." - } - }, - "aad_admin_name": { - "type": "String", - "metadata": { - "description": "The name of the Azure AD admin for the SQL server." - } - }, - "aad_admin_objectid": { - "type": "String", - "metadata": { - "description": "The Object ID of the Azure AD admin if the admin is a user or group. For Applications, use the Application ID." - } - }, - "aad_admin_tenantid": { - "type": "String", - "defaultValue": "[subscription().tenantId]", - "metadata": { - "description": "The Tenant ID of the Azure Active Directory" - } - }, - "aad_admin_type": { - "defaultValue": "User", - "allowedValues": [ - "User", - "Group", - "Application" - ], - "type": "String" - }, - "aad_only_auth": { - "defaultValue": true, - "type": "Bool" - } - }, - "resources": [ - { - "type": "Microsoft.Sql/servers", - "apiVersion": "2020-11-01-preview", - "name": "[parameters('server')]", - "location": "[parameters('location')]", - "properties": { - "administrators": { - "login": "[parameters('aad_admin_name')]", - "sid": "[parameters('aad_admin_objectid')]", - "tenantId": "[parameters('aad_admin_tenantid')]", - "principalType": "[parameters('aad_admin_type')]", - "azureADOnlyAuthentication": "[parameters('aad_only_auth')]" - } - } - } - ] -} -``` - ---- - -## Azure SQL Managed Instance - -# [Portal](#tab/azure-portal) - -1. Browse to the [Select SQL deployment](https://portal.azure.com/#create/Microsoft.AzureSQL) option page in the Azure portal. - -1. If you aren't already signed in to Azure portal, sign in when prompted. - -1. 
Under **SQL managed instances**, leave **Resource type** set to **Single instance**, and select **Create**. - -1. Fill out the mandatory information required on the **Basics** tab for **Project details** and **Managed Instance details**. This is a minimum set of information required to provision a SQL Managed Instance. - - :::image type="content" source="media/authentication-azure-ad-only-authentication/azure-ad-only-managed-instance-create-basic.png" alt-text="Azure portal screenshot of the create SQL Managed Instance basic tab "::: - - For more information on the configuration options, see [Quickstart: Create an Azure SQL Managed Instance](../managed-instance/instance-create-quickstart.md). - -1. Under **Authentication**, select **Use only Azure Active Directory (Azure AD) authentication** for the **Authentication method**. - -1. Select **Set admin**, which brings up a menu to select an Azure AD principal as your managed instance Azure AD administrator. When you're finished, use the **Select** button to set your admin. - - :::image type="content" source="media/authentication-azure-ad-only-authentication/azure-ad-only-managed-instance-create-basic-choose-authentication.png" alt-text="Azure portal screenshot of the create SQL Managed Instance basic tab and choosing Azure AD only authentication"::: - -1. You can leave the rest of the settings default. For more information on the **Networking**, **Security**, or other tabs and settings, follow the guide in the article [Quickstart: Create an Azure SQL Managed Instance](../managed-instance/instance-create-quickstart.md). - -1. Once you're done with configuring your settings, select **Review + create** to proceed. Select **Create** to start provisioning the managed instance. - -# [The Azure CLI](#tab/azure-cli) - -The Azure CLI command `az sql mi create` is used to provision a new Azure SQL Managed Instance. The below command will provision a new managed instance with Azure AD-only authentication enabled. 
- -> [!NOTE] -> The script requires a virtual network and subnet be created as a prerequisite. - -The managed instance SQL Administrator login will be automatically created and the password will be set to a random password. Since SQL Authentication connectivity is disabled with this provision, the SQL Administrator login won't be used. - -The Azure AD admin will be the account you set for ``, and can be used to manage the instance when the provisioning is complete. - -Replace the following values in the example: - -- ``: Can be an Azure AD user or group. For example, `DummyLogin` -- ``: The Azure AD Object ID for the user -- ``: Name the managed instance you want to create -- ``: Name of the resource group for your managed instance. The resource group should also include the virtual network and subnet created -- The `subnet` parameter needs to be updated with the ``, ``, ``, and ``. Your subscription ID can be found in the Azure portal - -```azurecli -az sql mi create --enable-ad-only-auth --external-admin-principal-type User --external-admin-name --external-admin-sid -g -n --subnet /subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/ -``` - -For more information, see [az sql mi create](/cli/azure/sql/mi#az-sql-mi-create). - -# [PowerShell](#tab/azure-powershell) - -The PowerShell command `New-AzSqlInstance` is used to provision a new Azure SQL Managed Instance. The below command will provision a new managed instance with Azure AD-only authentication enabled. - -> [!NOTE] -> The script requires a virtual network and subnet be created as a prerequisite. - -The managed instance SQL Administrator login will be automatically created and the password will be set to a random password. Since SQL Authentication connectivity is disabled with this provision, the SQL Administrator login won't be used. - -The Azure AD admin will be the account you set for ``, and can be used to manage the instance when the provisioning is complete. 
- -Replace the following values in the example: - -- ``: Name the managed instance you want to create -- ``: Name of the resource group for your managed instance. The resource group should also include the virtual network and subnet created -- ``: Location of the server, such as `West US`, or `Central US` -- ``: Can be an Azure AD user or group. For example, `DummyLogin` -- The `SubnetId` parameter needs to be updated with the ``, ``, ``, and ``. Your subscription ID can be found in the Azure portal - - -```powershell -New-AzSqlInstance -Name "" -ResourceGroupName "" -ExternalAdminName "" -EnableActiveDirectoryOnlyAuthentication -Location "" -SubnetId "/subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/" -LicenseType LicenseIncluded -StorageSizeInGB 1024 -VCore 16 -Edition "GeneralPurpose" -ComputeGeneration Gen4 -``` - -For more information, see [New-AzSqlInstance](/powershell/module/az.sql/new-azsqlinstance). - -# [REST API](#tab/rest-api) - -The [Managed Instances - Create Or Update](/rest/api/sql/2020-11-01-preview/managed-instances/create-or-update) REST API can be used to create a managed instance with Azure AD-only authentication enabled during provisioning. - -> [!NOTE] -> The script requires a virtual network and subnet be created as a prerequisite. - -The script below will provision a managed instance, set the Azure AD admin as ``, and enable Azure AD-only authentication. The instance SQL Administrator login will also be created automatically and the password will be set to a random password. Since SQL Authentication connectivity is disabled with this provisioning, the SQL Administrator login won't be used. - -The Azure AD admin, `` can be used to manage the instance when the provisioning is complete. - -Replace the following values in the example: - -- ``: Can be found by going to the [Azure portal](https://portal.azure.com), and going to your **Azure Active Directory** resource. 
In the **Overview** pane, you should see your **Tenant ID** -- ``: Your subscription ID can be found in the Azure portal -- ``: Use a unique managed instance name -- ``: Name of the resource group for your logical server -- ``: Can be an Azure AD user or group. For example, `DummyLogin` -- ``: Location of the server, such as `westus2`, or `centralus` -- ``: Can be found by going to the [Azure portal](https://portal.azure.com), and going to your **Azure Active Directory** resource. In the **User** pane, search for the Azure AD user and find their **Object ID**. If you're using an application (service principal) as the Azure AD admin, replace this value with the **Application ID**. You'll need to update the `principalType` as well. -- The `subnetId` parameter needs to be updated with the ``, the `Subscription ID`, ``, and `` - - -```rest -Import-Module Azure -Import-Module MSAL.PS - -$tenantId = '' -$clientId = '1950a258-227b-4e31-a9cf-717495945fc2' # Static Microsoft client ID used for getting a token -$subscriptionId = '' -$uri = "urn:ietf:wg:oauth:2.0:oob" -$instanceName = "" -$resourceGroupName = "" -$scopes ="https://management.core.windows.net/.default" - -Login-AzAccount -tenantId $tenantId - -# Login as an Azure AD user with permission to provision a managed instance - -$result = Get-MsalToken -RedirectUri $uri -ClientId $clientId -TenantId $tenantId -Scopes $scopes - -$authHeader = @{ -'Content-Type'='application\json; ' -'Authorization'=$result.CreateAuthorizationHeader() -} - -$body = '{ -"name": "", "type": "Microsoft.Sql/managedInstances", "identity": { "type": "SystemAssigned"},"location": "", "sku": {"name": "GP_Gen5", "tier": "GeneralPurpose", "family":"Gen5","capacity": 8}, -"properties": {"administrators":{ "login":"", "sid":"", "tenantId":"", "principalType":"User", "azureADOnlyAuthentication":true }, -"subnetId": "/subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", -"licenseType": "LicenseIncluded", "vCores": 8, 
"storageSizeInGB": 2048, "collation": "SQL_Latin1_General_CP1_CI_AS", "proxyOverride": "Proxy", "timezoneId": "UTC", "privateEndpointConnections": [], "storageAccountType": "GRS", "zoneRedundant": false
- }
-}'
-
-# To provision the instance, execute the `PUT` command
-
-Invoke-RestMethod -Uri https://management.azure.com/subscriptions/$subscriptionId/resourceGroups/$resourceGroupName/providers/Microsoft.Sql/managedInstances/$instanceName/?api-version=2020-11-01-preview -Method PUT -Headers $authHeader -Body $body -ContentType "application/json"
-
-```
-
-To check the results, execute the `GET` command:
-
-```rest
-Invoke-RestMethod -Uri https://management.azure.com/subscriptions/$subscriptionId/resourceGroups/$resourceGroupName/providers/Microsoft.Sql/managedInstances/$instanceName/?api-version=2020-11-01-preview -Method GET -Headers $authHeader | Format-List
-```
-
-# [ARM Template](#tab/arm-template)
-
-To provision a new managed instance, virtual network and subnet, with an Azure AD admin set for the instance and Azure AD-only authentication enabled, use the following template.
-
-Use a [Custom deployment in the Azure portal](https://portal.azure.com/#create/Microsoft.Template), and **Build your own template in the editor**. Next, **Save** the configuration once you've pasted in the example.
-
-```json
-{
-  "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#",
-  "contentVersion": "1.0.0.1",
-  "parameters": {
-    "managedInstanceName": {
-      "type": "String",
-      "metadata": {
-        "description": "Enter managed instance name."
-      }
-    },
-    "aad_admin_name": {
-      "type": "String",
-      "metadata": {
-        "description": "The name of the Azure AD admin for the SQL managed instance."
-      }
-    },
-    "aad_admin_objectid": {
-      "type": "String",
-      "metadata": {
-        "description": "The Object ID of the Azure AD admin if the admin is a user or group. For Applications, use the Application ID."
- } - }, - "aad_admin_tenantid": { - "type": "String", - "defaultValue": "[subscription().tenantId]", - "metadata": { - "description": "The Tenant ID of the Azure Active Directory" - } - }, - "aad_admin_type": { - "defaultValue": "User", - "allowedValues": [ - "User", - "Group", - "Application" - ], - "type": "String" - }, - "aad_only_auth": { - "defaultValue": true, - "type": "Bool" - }, - "location": { - "defaultValue": "[resourceGroup().location]", - "type": "String", - "metadata": { - "description": "Enter location. If you leave this field blank resource group location would be used." - } - }, - "virtualNetworkName": { - "type": "String", - "defaultValue": "SQLMI-VNET", - "metadata": { - "description": "Enter virtual network name. If you leave this field blank name will be created by the template." - } - }, - "addressPrefix": { - "defaultValue": "10.0.0.0/16", - "type": "String", - "metadata": { - "description": "Enter virtual network address prefix." - } - }, - "subnetName": { - "type": "String", - "defaultValue": "ManagedInstances", - "metadata": { - "description": "Enter subnet name. If you leave this field blank name will be created by the template." - } - }, - "subnetPrefix": { - "defaultValue": "10.0.0.0/24", - "type": "String", - "metadata": { - "description": "Enter subnet address prefix." - } - }, - "skuName": { - "defaultValue": "GP_Gen5", - "allowedValues": [ - "GP_Gen5", - "BC_Gen5" - ], - "type": "String", - "metadata": { - "description": "Enter sku name." - } - }, - "vCores": { - "defaultValue": 16, - "allowedValues": [ - 8, - 16, - 24, - 32, - 40, - 64, - 80 - ], - "type": "Int", - "metadata": { - "description": "Enter number of vCores." - } - }, - "storageSizeInGB": { - "defaultValue": 256, - "minValue": 32, - "maxValue": 8192, - "type": "Int", - "metadata": { - "description": "Enter storage size." 
- } - }, - "licenseType": { - "defaultValue": "LicenseIncluded", - "allowedValues": [ - "BasePrice", - "LicenseIncluded" - ], - "type": "String", - "metadata": { - "description": "Enter license type." - } - } - }, - "variables": { - "networkSecurityGroupName": "[concat('SQLMI-', parameters('managedInstanceName'), '-NSG')]", - "routeTableName": "[concat('SQLMI-', parameters('managedInstanceName'), '-Route-Table')]" - }, - "resources": [ - { - "type": "Microsoft.Network/networkSecurityGroups", - "apiVersion": "2020-06-01", - "name": "[variables('networkSecurityGroupName')]", - "location": "[parameters('location')]", - "properties": { - "securityRules": [ - { - "name": "allow_tds_inbound", - "properties": { - "description": "Allow access to data", - "protocol": "Tcp", - "sourcePortRange": "*", - "destinationPortRange": "1433", - "sourceAddressPrefix": "VirtualNetwork", - "destinationAddressPrefix": "*", - "access": "Allow", - "priority": 1000, - "direction": "Inbound" - } - }, - { - "name": "allow_redirect_inbound", - "properties": { - "description": "Allow inbound redirect traffic to SQL Managed Instance inside the virtual network", - "protocol": "Tcp", - "sourcePortRange": "*", - "destinationPortRange": "11000-11999", - "sourceAddressPrefix": "VirtualNetwork", - "destinationAddressPrefix": "*", - "access": "Allow", - "priority": 1100, - "direction": "Inbound" - } - }, - { - "name": "deny_all_inbound", - "properties": { - "description": "Deny all other inbound traffic", - "protocol": "*", - "sourcePortRange": "*", - "destinationPortRange": "*", - "sourceAddressPrefix": "*", - "destinationAddressPrefix": "*", - "access": "Deny", - "priority": 4096, - "direction": "Inbound" - } - }, - { - "name": "deny_all_outbound", - "properties": { - "description": "Deny all other outbound traffic", - "protocol": "*", - "sourcePortRange": "*", - "destinationPortRange": "*", - "sourceAddressPrefix": "*", - "destinationAddressPrefix": "*", - "access": "Deny", - "priority": 4096, - 
"direction": "Outbound" - } - } - ] - } - }, - { - "type": "Microsoft.Network/routeTables", - "apiVersion": "2020-06-01", - "name": "[variables('routeTableName')]", - "location": "[parameters('location')]", - "properties": { - "disableBgpRoutePropagation": false - } - }, - { - "type": "Microsoft.Network/virtualNetworks", - "apiVersion": "2020-06-01", - "name": "[parameters('virtualNetworkName')]", - "location": "[parameters('location')]", - "dependsOn": [ - "[variables('routeTableName')]", - "[variables('networkSecurityGroupName')]" - ], - "properties": { - "addressSpace": { - "addressPrefixes": [ - "[parameters('addressPrefix')]" - ] - }, - "subnets": [ - { - "name": "[parameters('subnetName')]", - "properties": { - "addressPrefix": "[parameters('subnetPrefix')]", - "routeTable": { - "id": "[resourceId('Microsoft.Network/routeTables', variables('routeTableName'))]" - }, - "networkSecurityGroup": { - "id": "[resourceId('Microsoft.Network/networkSecurityGroups', variables('networkSecurityGroupName'))]" - }, - "delegations": [ - { - "name": "miDelegation", - "properties": { - "serviceName": "Microsoft.Sql/managedInstances" - } - } - ] - } - } - ] - } - }, - { - "type": "Microsoft.Sql/managedInstances", - "apiVersion": "2020-11-01-preview", - "name": "[parameters('managedInstanceName')]", - "location": "[parameters('location')]", - "dependsOn": [ - "[parameters('virtualNetworkName')]" - ], - "sku": { - "name": "[parameters('skuName')]" - }, - "identity": { - "type": "SystemAssigned" - }, - "properties": { - "subnetId": "[resourceId('Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworkName'), parameters('subnetName'))]", - "storageSizeInGB": "[parameters('storageSizeInGB')]", - "vCores": "[parameters('vCores')]", - "licenseType": "[parameters('licenseType')]", - "administrators": { - "login": "[parameters('aad_admin_name')]", - "sid": "[parameters('aad_admin_objectid')]", - "tenantId": "[parameters('aad_admin_tenantid')]", - "principalType": 
"[parameters('aad_admin_type')]", - "azureADOnlyAuthentication": "[parameters('aad_only_auth')]" - } - } - } - ] -} -``` - ---- - -### Grant Directory Readers permissions - -Once the deployment is complete for your managed instance, you may notice that the SQL Managed Instance needs **Read** permissions to access Azure Active Directory. Read permissions can be granted by clicking on the displayed message in the Azure portal by a person with enough privileges. For more information, see [Directory Readers role in Azure Active Directory for Azure SQL](authentication-aad-directory-readers-role.md). - -:::image type="content" source="media/authentication-azure-ad-only-authentication/azure-ad-portal-read-permissions.png" alt-text="screenshot of the Active Directory admin menu in Azure portal showing Read permissions needed"::: - -## Limitations - -- To reset the server administrator password, Azure AD-only authentication must be disabled. -- If Azure AD-only authentication is disabled, you must create a server with a server admin and password when using all APIs. - -## Next steps - -- If you already have a SQL server or managed instance, and just want to enable Azure AD-only authentication, see [Tutorial: Enable Azure Active Directory only authentication with Azure SQL](authentication-azure-ad-only-authentication-tutorial.md). -- For more information on the Azure AD-only authentication feature, see [Azure AD-only authentication with Azure SQL](authentication-azure-ad-only-authentication.md). 
-- If you're looking to enforce server creation with Azure AD-only authentication enabled, see [Azure Policy for Azure Active Directory only authentication with Azure SQL](authentication-azure-ad-only-authentication-policy.md) diff --git a/articles/azure-sql/database/authentication-azure-ad-only-authentication-policy-how-to.md b/articles/azure-sql/database/authentication-azure-ad-only-authentication-policy-how-to.md deleted file mode 100644 index 6a5db41172160..0000000000000 --- a/articles/azure-sql/database/authentication-azure-ad-only-authentication-policy-how-to.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: Using Azure Policy to enforce Azure Active Directory only authentication -description: This article guides you through using Azure Policy to enforce Azure Active Directory (Azure AD) only authentication with Azure SQL Database and Azure SQL Managed Instance -titleSuffix: Azure SQL Database & Azure SQL Managed Instance -ms.service: sql-db-mi -ms.subservice: security -ms.topic: how-to -author: GithubMirek -ms.author: mireks -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 09/22/2021 ---- - -# Using Azure Policy to enforce Azure Active Directory only authentication with Azure SQL - -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -> [!NOTE] -> The **Azure AD-only authentication** and associated Azure Policy feature discussed in this article is in **public preview**. - -This article guides you through creating an Azure Policy that would enforce Azure AD-only authentication when users create an Azure SQL Managed Instance, or a [logical server](logical-servers.md) for Azure SQL Database. To learn more about Azure AD-only authentication during resource creation, see [Create server with Azure AD-only authentication enabled in Azure SQL](authentication-azure-ad-only-authentication-create-server.md). 
- -In this article, you learn how to: - -> [!div class="checklist"] -> - Create an Azure Policy that enforces logical server or managed instance creation with [Azure AD-only authentication](authentication-azure-ad-only-authentication.md) enabled -> - Check Azure Policy compliance - -## Prerequisite - -- Have permissions to manage Azure Policy. For more information, see [Azure RBAC permissions in Azure Policy](../../governance/policy/overview.md#azure-rbac-permissions-in-azure-policy). - -## Create an Azure Policy - -Start off by creating an Azure Policy enforcing SQL Database or Managed Instance provisioning with Azure AD-only authentication enabled. - -1. Go to the [Azure portal](https://portal.azure.com). -1. Search for the service **Policy**. -1. Under the Authoring settings, select **Definitions**. -1. In the **Search** box, search for *Azure Active Directory only authentication*. - - There are two built-in policies available to enforce Azure AD-only authentication. One is for SQL Database, and the other is for Managed Instance. - - - Azure SQL Database should have Azure Active Directory Only Authentication enabled - - Azure SQL Managed Instance should have Azure Active Directory Only Authentication enabled - - :::image type="content" source="media/authentication-azure-ad-only-authentication-policy/policy-azure-ad-only-authentication.png" alt-text="Screenshot of Azure Policy for Azure AD-only authentication"::: - -1. Select the policy name for your service. In this example, we'll use Azure SQL Database. Select **Azure SQL Database should have Azure Active Directory Only Authentication enabled**. -1. Select **Assign** in the new menu. - - > [!NOTE] - > The JSON script in the menu shows the built-in policy definition that can be used as a template to build a custom Azure Policy for SQL Database. The default is set to `Audit`. 
-
-   :::image type="content" source="media/authentication-azure-ad-only-authentication-policy/assign-policy-azure-ad-only-authentication.png" alt-text="Screenshot of assigning Azure Policy for Azure AD-only authentication":::
-
-1. In the **Basics** tab, add a **Scope** by using the selector (**...**) on the side of the box.
-
-   :::image type="content" source="media/authentication-azure-ad-only-authentication-policy/selecting-scope-policy-azure-ad-only-authentication.png" alt-text="Screenshot of selecting Azure Policy scope for Azure AD-only authentication":::
-
-1. In the **Scope** pane, select your **Subscription** from the drop-down menu, and select a **Resource Group** for this policy. Once you're done, use the **Select** button to save the selection.
-
-   > [!NOTE]
-   > If you do not select a resource group, the policy will apply to the whole subscription.
-
-   :::image type="content" source="media/authentication-azure-ad-only-authentication-policy/adding-scope-policy-azure-ad-only-authentication.png" alt-text="Screenshot of adding Azure Policy scope for Azure AD-only authentication":::
-
-1. Once you're back on the **Basics** tab, customize the **Assignment name** and provide an optional **Description**. Make sure the **Policy enforcement** is **Enabled**.
-1. Go over to the **Parameters** tab. Unselect the option **Only show parameters that require input**.
-1. Under **Effect**, select **Deny**. This setting will prevent a logical server creation without Azure AD-only authentication enabled.
-
-   :::image type="content" source="media/authentication-azure-ad-only-authentication-policy/deny-policy-azure-ad-only-authentication.png" alt-text="Screenshot of Azure Policy effect parameter for Azure AD-only authentication":::
-
-1. In the **Non-compliance messages** tab, you can customize the policy message that displays if a violation of the policy has occurred. The message will let users know what policy was enforced during server creation.
- - :::image type="content" source="media/authentication-azure-ad-only-authentication-policy/non-compliance-message-policy-azure-ad-only-authentication.png" alt-text="Screenshot of Azure Policy non-compliance message for Azure AD-only authentication"::: - -1. Select **Review + create**. Review the policy and select the **Create** button. - -> [!NOTE] -> It may take some time for the newly created policy to be enforced. - -## Check policy compliance - -You can check the **Compliance** setting under the **Policy** service to see the compliance state. - -Search for the assignment name that you have given earlier to the policy. - -:::image type="content" source="media/authentication-azure-ad-only-authentication-policy/compliance-policy-azure-ad-only-authentication.png" alt-text="Screenshot of Azure Policy compliance for Azure AD-only authentication"::: - -Once the logical server is created with Azure AD-only authentication, the policy report will increase the counter under the **Resources by compliance state** visual. You'll be able to see which resources are compliant, or non-compliant. - -If the resource group that the policy was chosen to cover contains already created servers, the policy report will indicate those resources that are compliant and non-compliant. - -> [!NOTE] -> Updating the compliance report may take some time. Changes related to resource creation or Azure AD-only authentication settings are not reported immediately. - -## Provision a server - -You can then try to provision a logical server or managed instance in the resource group that you assigned the Azure Policy. If Azure AD-only authentication is enabled during server creation, the provision will succeed. When Azure AD-only authentication isn't enabled, the provision will fail. - -For more information, see [Create server with Azure AD-only authentication enabled in Azure SQL](authentication-azure-ad-only-authentication-create-server.md). 
- -## Next steps - -- Overview of [Azure Policy for Azure AD-only authentication](authentication-azure-ad-only-authentication-policy.md) -- [Create server with Azure AD-only authentication enabled in Azure SQL](authentication-azure-ad-only-authentication-create-server.md) -- Overview of [Azure AD-only authentication](authentication-azure-ad-only-authentication.md) \ No newline at end of file diff --git a/articles/azure-sql/database/authentication-azure-ad-only-authentication-policy.md b/articles/azure-sql/database/authentication-azure-ad-only-authentication-policy.md deleted file mode 100644 index 28ab47ac2121c..0000000000000 --- a/articles/azure-sql/database/authentication-azure-ad-only-authentication-policy.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: Azure Policy for Azure Active Directory only authentication -description: This article provides information on how to enforce an Azure policy to create an Azure SQL Database or Azure SQL Managed Instance with Azure Active Directory (Azure AD) only authentication enabled -titleSuffix: Azure SQL Database & Azure SQL Managed Instance -ms.service: sql-db-mi -ms.subservice: security -ms.topic: conceptual -author: GithubMirek -ms.author: mireks -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 11/02/2021 ---- - -# Azure Policy for Azure Active Directory only authentication with Azure SQL - -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Azure Policy can enforce the creation of an Azure SQL Database or Azure SQL Managed Instance with [Azure AD-only authentication](authentication-azure-ad-only-authentication.md) enabled during provisioning. With this policy in place, any attempts to create a [logical server in Azure](logical-servers.md) or managed instance will fail if it isn't created with Azure AD-only authentication enabled. - -The Azure Policy can be applied to the whole Azure subscription, or just within a resource group. 
- -Two new built-in policies have been introduced in Azure Policy: - -- Azure SQL Database should have Azure Active Directory Only Authentication enabled -- Azure SQL Managed Instance should have Azure Active Directory Only Authentication enabled - -For more information on Azure Policy, see [What is Azure Policy?](../../governance/policy/overview.md) and [Azure Policy definition structure](../../governance/policy/concepts/definition-structure.md). - -## Permissions - -For an overview of the permissions needed to manage Azure Policy, see [Azure RBAC permissions in Azure Policy](../../governance/policy/overview.md#azure-rbac-permissions-in-azure-policy). - -### Actions - -If you're using a custom role to manage Azure Policy, the following [Actions](../../role-based-access-control/role-definitions.md#actions) are needed. - -- */read -- Microsoft.Authorization/policyassignments/* -- Microsoft.Authorization/policydefinitions/* -- Microsoft.Authorization/policyexemptions/* -- Microsoft.Authorization/policysetdefinitions/* -- Microsoft.PolicyInsights/* - -For more information on custom roles, see [Azure custom roles](../../role-based-access-control/custom-roles.md). - -## Manage Azure Policy for Azure AD-only authentication - -The Azure AD-only authentication policies can be managed by going to the [Azure portal](https://portal.azure.com), and searching for the **Policy** service. Under **Definitions**, search for *Azure Active Directory only authentication*. - -:::image type="content" source="media/authentication-azure-ad-only-authentication-policy/policy-azure-ad-only-authentication.png" alt-text="Screenshot of Azure Policy for Azure AD-only authentication"::: - -For a guide on how to add an Azure Policy for Azure AD-only authentication, see [Using Azure Policy to enforce Azure Active Directory only authentication with Azure SQL](authentication-azure-ad-only-authentication-policy-how-to.md). 
- -There are three effects for these policies: - -- **Audit** - The default setting, and will only capture an audit report in the Azure Policy activity logs -- **Deny** - Prevents logical server or managed instance creation without [Azure AD-only authentication](authentication-azure-ad-only-authentication.md) enabled -- **Disabled** - Will disable the policy, and won't restrict users from creating a logical server or managed instance without Azure AD-only authentication enabled - -If the Azure Policy for Azure AD-only authentication is set to **Deny**, Azure SQL logical server or managed instance creation will fail. The details of this failure will be recorded in the **Activity log** of the resource group. - -## Policy compliance - -You can view the **Compliance** setting under the **Policy** service to see the compliance state. The **Compliance state** will tell you whether the server or managed instance is currently in compliance with having Azure AD-only authentication enabled. - -The Azure Policy can prevent a new logical server or managed instance from being created without having Azure AD-only authentication enabled, but the feature can be changed after server or managed instance creation. If a user has disabled Azure AD-only authentication after the server or managed instance was created, the compliance state will be `Non-compliant` if the Azure Policy is set to **Deny**. - -:::image type="content" source="media/authentication-azure-ad-only-authentication-policy/check-compliance-policy-azure-ad-only-authentication.png" alt-text="Screenshot of Azure Policy Compliance menu for Azure AD-only authentication"::: - -## Limitations - -- Azure Policy enforces Azure AD-only authentication during logical server or managed instance creation. Once the server is created, authorized Azure AD users with special roles (for example, SQL Security Manager) can disable the Azure AD-only authentication feature. 
The Azure Policy allows it, but in this case, the server or managed instance will be listed in the compliance report as `Non-compliant` and the report will indicate the server or managed instance name. -- For more remarks, known issues, and permissions needed, see [Azure AD-only authentication](authentication-azure-ad-only-authentication.md). - -## Next steps - -> [!div class="nextstepaction"] -> [Using Azure Policy to enforce Azure Active Directory only authentication with Azure SQL](authentication-azure-ad-only-authentication-policy-how-to.md) \ No newline at end of file diff --git a/articles/azure-sql/database/authentication-azure-ad-only-authentication-tutorial.md b/articles/azure-sql/database/authentication-azure-ad-only-authentication-tutorial.md deleted file mode 100644 index 7b84f44edc1cb..0000000000000 --- a/articles/azure-sql/database/authentication-azure-ad-only-authentication-tutorial.md +++ /dev/null @@ -1,416 +0,0 @@ ---- -title: Enable Azure Active Directory only authentication -description: This article guides you through enabling the Azure Active Directory (Azure AD) only authentication feature with Azure SQL Database and Azure SQL Managed Instance -titleSuffix: Azure SQL Database & Azure SQL Managed Instance -ms.service: sql-db-mi -ms.subservice: security -ms.topic: tutorial -author: GithubMirek -ms.author: mireks -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 11/02/2021 ---- - -# Tutorial: Enable Azure Active Directory only authentication with Azure SQL - -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -This article guides you through enabling the [Azure AD-only authentication](authentication-azure-ad-only-authentication.md) feature within Azure SQL Database and Azure SQL Managed Instance. 
If you are looking to provision a SQL Database or SQL Managed Instance with Azure AD-only authentication enabled, see [Create server with Azure AD-only authentication enabled in Azure SQL](authentication-azure-ad-only-authentication-create-server.md). - -In this tutorial, you learn how to: - -> [!div class="checklist"] -> - Assign role to enable Azure AD-only authentication -> - Enable Azure AD-only authentication using the Azure portal, Azure CLI, or PowerShell -> - Check whether Azure AD-only authentication is enabled -> - Test connecting to Azure SQL -> - Disable Azure AD-only authentication using the Azure portal, Azure CLI, or PowerShell - - -## Prerequisites - -- An Azure AD instance. For more information, see [Configure and manage Azure AD authentication with Azure SQL](authentication-aad-configure.md). -- A SQL Database or SQL Managed Instance with a database, and logins or users. See [Quickstart: Create an Azure SQL Database single database](single-database-create-quickstart.md) if you haven't already created an Azure SQL Database, or [Quickstart: Create an Azure SQL Managed Instance](../managed-instance/instance-create-quickstart.md). - -## Assign role to enable Azure AD-only authentication - -In order to enable or disable Azure AD-only authentication, selected built-in roles are required for the Azure AD users executing these operations in this tutorial. We're going to assign the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role to the user in this tutorial. 
- -For more information on how to assign a role to an Azure AD account, see [Assign administrator and non-administrator roles to users with Azure Active Directory](../../active-directory/fundamentals/active-directory-users-assign-role-azure-portal.md). - -For more information on the required permission to enable or disable Azure AD-only authentication, see the [Permissions section of Azure AD-only authentication](authentication-azure-ad-only-authentication.md#permissions) article. - -1. In our example, we'll assign the **SQL Security Manager** role to the user `UserSqlSecurityManager@contoso.onmicrosoft.com`. Using a privileged user that can assign Azure AD roles, sign into the [Azure portal](https://portal.azure.com/). -1. Go to your SQL server resource, and select **Access control (IAM)** in the menu. Select the **Add** button and then **Add role assignment** in the drop-down menu. - - :::image type="content" source="media/authentication-azure-ad-only-authentication/azure-ad-only-authentication-access-control.png" alt-text="Access control pane in the Azure portal"::: - -1. In the **Add role assignment** pane, select the Role **SQL Security Manager**, and select the user that you want to have the ability to enable or disable Azure AD-only authentication. - - :::image type="content" source="media/authentication-azure-ad-only-authentication/azure-ad-only-authentication-access-control-add-role.png" alt-text="Add role assignment pane in the Azure portal"::: - -1. Click **Save**. - -## Enable Azure AD-only authentication - -# [Portal](#tab/azure-portal) - -## Enable in SQL Database using Azure portal - -To enable Azure AD-only authentication in the Azure portal, see the steps below. - -1. Using the user with the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role, go to the [Azure portal](https://portal.azure.com/). -1. Go to your SQL server resource, and select **Azure Active Directory** under the **Settings** menu. 
- - :::image type="content" source="media/authentication-azure-ad-only-authentication/azure-ad-only-authentication-portal.png" alt-text="Enable Azure AD only auth menu"::: - -1. If you haven't added an **Azure Active Directory admin**, you'll need to set this before you can enable Azure AD-only authentication. -1. Select the **Support only Azure Active Directory authentication for this server** checkbox. -1. The **Enable Azure AD authentication only** popup will show. Click **Yes** to enable the feature and **Save** the setting. - -## Enable in SQL Managed Instance using Azure portal - -To enable Azure AD-only authentication auth in the Azure portal, see the steps below. - -1. Using the user with the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role, go to the [Azure portal](https://portal.azure.com/). -1. Go to your **SQL managed instance** resource, and select **Active Directory admin** under the **Settings** menu. - -1. If you haven't added an **Azure Active Directory admin**, you'll need to set this before you can enable Azure AD-only authentication. -1. Select the **Support only Azure Active Directory authentication for this managed instance** checkbox. -1. The **Enable Azure AD authentication only** popup will show. Click **Yes** to enable the feature and **Save** the setting. - -# [The Azure CLI](#tab/azure-cli) - -## Enable in SQL Database using Azure CLI - -To enable Azure AD-only authentication in Azure SQL Database using Azure CLI, see the commands below. [Install the latest version of Azure CLI](/cli/azure/install-azure-cli-windows). You must have Azure CLI version **2.14.2** or higher. For more information on these commands, see [az sql server ad-only-auth](/cli/azure/sql/server/ad-only-auth). 
- -For more information on managing Azure AD-only authentication using APIs, see [Managing Azure AD-only authentication using APIs](authentication-azure-ad-only-authentication.md#managing-azure-ad-only-authentication-using-apis). - -> [!NOTE] -> The Azure AD admin must be set for the server before enabling Azure AD-only authentication. Otherwise, the Azure CLI command will fail. -> -> For permissions and actions required of the user performing these commands to enable Azure AD-only authentication, see the [Azure AD-only authentication](authentication-azure-ad-only-authentication.md#permissions) article. - -1. [Sign into Azure](/cli/azure/authenticate-azure-cli) using the account with the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role. - - ```azurecli - az login - ``` - -1. Run the following command, replacing `` with your SQL server name, and `` with your Azure Resource that holds the SQL server. - - ```azurecli - az sql server ad-only-auth enable --resource-group --name - ``` - -## Enable in SQL Managed Instance using Azure CLI - -To enable Azure AD-only authentication in Azure SQL Managed Instance using Azure CLI, see the commands below. [Install the latest version of Azure CLI](/cli/azure/install-azure-cli-windows). - -1. [Sign into Azure](/cli/azure/authenticate-azure-cli) using the account with the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role. - - ```azurecli - az login - ``` - -1. Run the following command, replacing `` with your SQL server name, and `` with your Azure Resource that holds the SQL server. - - ```azurecli - az sql mi ad-only-auth enable --resource-group --name - ``` - -# [PowerShell](#tab/azure-powershell) - -## Enable in SQL Database using PowerShell - -To enable Azure AD-only authentication in Azure SQL Database using PowerShell, see the commands below. 
[Az.Sql 2.10.0](https://www.powershellgallery.com/packages/Az.Sql/2.10.0) module or higher is required to execute these commands. For more information on these commands, see [Enable-AzSqlInstanceActiveDirectoryOnlyAuthentication](/powershell/module/az.sql/enable-azsqlinstanceactivedirectoryonlyauthentication). - -For more information on managing Azure AD-only authentication using APIs, see [Managing Azure AD-only authentication using APIs](authentication-azure-ad-only-authentication.md#managing-azure-ad-only-authentication-using-apis) - -> [!NOTE] -> The Azure AD admin must be set for the server before enabling Azure AD-only authentication. Otherwise, the PowerShell command will fail. -> -> For permissions and actions required of the user performing these commands to enable Azure AD-only authentication, see the [Azure AD-only authentication](authentication-azure-ad-only-authentication.md#permissions) article. If the user has insufficient permissions, you will get the following error: -> -> ```output -> Enable-AzSqlServerActiveDirectoryOnlyAuthentication : The client -> 'UserSqlServerContributor@contoso.onmicrosoft.com' with object id -> '' does not have authorization to perform -> action 'Microsoft.Sql/servers/azureADOnlyAuthentications/write' over scope -> '/subscriptions/...' -> ``` - -1. [Sign into Azure](/powershell/azure/authenticate-azureps) using the account with the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role. - - ```powershell - Connect-AzAccount - ``` - -1. Run the following command, replacing `` with your SQL server name, and `` with your Azure Resource that holds the SQL server. - - ```powershell - Enable-AzSqlServerActiveDirectoryOnlyAuthentication -ServerName -ResourceGroupName - ``` - -## Enable in SQL Managed Instance using PowerShell - -To enable Azure AD-only authentication in Azure SQL Managed Instance using PowerShell, see the commands below. 
[Az.Sql 2.10.0](https://www.powershellgallery.com/packages/Az.Sql/2.10.0) module or higher is required to execute these commands. - -For more information on managing Azure AD-only authentication using APIs, see [Managing Azure AD-only authentication using APIs](authentication-azure-ad-only-authentication.md#managing-azure-ad-only-authentication-using-apis). - - -1. [Sign into Azure](/powershell/azure/authenticate-azureps) using the account with the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role. - - ```powershell - Connect-AzAccount - ``` - -1. Run the following command, replacing `` with your SQL Managed Instance name, and `` with your Azure Resource that holds the **SQL managed instance**. - - ```powershell - Enable-AzSqlInstanceActiveDirectoryOnlyAuthentication -InstanceName -ResourceGroupName - ``` - ---- - -## Check the Azure AD-only authentication status - -Check whether Azure AD-only authentication is enabled for your server or instance. - -# [Portal](#tab/azure-portal) - -## Check status in SQL Database - -Go to your **SQL server** resource in the [Azure portal](https://portal.azure.com/). Select **Azure Active Directory** under the **Settings** menu. - -## Check status in SQL Managed Instance - -Go to your **SQL managed instance** resource in the [Azure portal](https://portal.azure.com/). Select **Active Directory admin** under the **Settings** menu. - -# [The Azure CLI](#tab/azure-cli) - -These commands can be used to check whether Azure AD-only authentication is enabled for your [logical server](logical-servers.md) for Azure SQL Database, or SQL Managed Instance. 
Members of the [SQL Server Contributor](../../role-based-access-control/built-in-roles.md#sql-server-contributor) and [SQL Managed Instance Contributor](../../role-based-access-control/built-in-roles.md#sql-managed-instance-contributor) roles can use these commands to check the status of Azure AD-only authentication, but can't enable or disable the feature. - -## Check status in SQL Database - -1. [Sign into Azure](/cli/azure/authenticate-azure-cli) using the account with the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role. For more information on managing Azure AD-only authentication using APIs, see [Managing Azure AD-only authentication using APIs](authentication-azure-ad-only-authentication.md#managing-azure-ad-only-authentication-using-apis). - - ```azurecli - az login - ``` - -1. Run the following command, replacing `` with your SQL server name, and `` with your Azure Resource that holds the SQL server. - - ```azurecli - az sql server ad-only-auth get --resource-group --name - ``` - -1. You should see the following output: - - ```json - { - "azureAdOnlyAuthentication": true, - "id": "/subscriptions//resourceGroups/mygroup/providers/Microsoft.Sql/servers/myserver/azureADOnlyAuthentications/Default", - "name": "Default", - "resourceGroup": "myresource", - "type": "Microsoft.Sql/servers" - } - ``` - -## Check status in SQL Managed Instance - -1. [Sign into Azure](/cli/azure/authenticate-azure-cli) using the account with the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role. - - ```azurecli - az login - ``` - -1. Run the following command, replacing `` with your SQL server name, and `` with your Azure Resource that holds the SQL server. - - ```azurecli - az sql mi ad-only-auth get --resource-group --name - ``` - -1. 
You should see the following output: - - ```json - { - "azureAdOnlyAuthentication": true, - "id": "/subscriptions//resourceGroups/myresource/providers/Microsoft.Sql/managedInstances/myinstance/azureADOnlyAuthentications/Default", - "name": "Default", - "resourceGroup": "myresource", - "type": "Microsoft.Sql/managedInstances" - } - ``` - -# [PowerShell](#tab/azure-powershell) - -These commands can be used to check whether Azure AD-only authentication is enabled for your [logical server](logical-servers.md) for Azure SQL Database, or SQL Managed Instance. Members of the [SQL Server Contributor](../../role-based-access-control/built-in-roles.md#sql-server-contributor) and [SQL Managed Instance Contributor](../../role-based-access-control/built-in-roles.md#sql-managed-instance-contributor) roles can use these commands to check the status of Azure AD-only authentication, but can't enable or disable the feature. - -The status will return **True** if the feature is enabled, and **False** if disabled. - -## Check status in SQL Database - -1. [Sign into Azure](/powershell/azure/authenticate-azureps) using the account with the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role. For more information on managing Azure AD-only authentication using APIs, see [Managing Azure AD-only authentication using APIs](authentication-azure-ad-only-authentication.md#managing-azure-ad-only-authentication-using-apis) - - ```powershell - Connect-AzAccount - ``` - -1. Run the following command, replacing `` with your SQL server name, and `` with your Azure Resource that holds the SQL server. - - ```powershell - Get-AzSqlServerActiveDirectoryOnlyAuthentication -ServerName -ResourceGroupName - ``` - -## Check status in SQL Managed Instance - -1. [Sign into Azure](/powershell/azure/authenticate-azureps) using the account with the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role. 
- - ```powershell - Connect-AzAccount - ``` - -1. Run the following command, replacing `` with your SQL Managed Instance name, and `` with your Azure Resource that holds the **SQL managed instance**. - - ```powershell - Get-AzSqlInstanceActiveDirectoryOnlyAuthentication -InstanceName -ResourceGroupName - ``` - ---- - -## Test SQL authentication with connection failure - -After enabling Azure AD-only authentication, test with [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms) to [connect to your SQL Database or SQL Managed Instance](connect-query-ssms.md). Use SQL authentication for the connection. - -You should see a login failed message similar to the following output: - -```output -Cannot connect to .database.windows.net. -Additional information: - Login failed for user 'username'. Reason: Azure Active Directory only authentication is enabled. - Please contact your system administrator. (Microsoft SQL Server, Error: 18456) -``` - -## Disable Azure AD-only authentication - -By disabling the Azure AD-only authentication feature, you allow both SQL authentication and Azure AD authentication for Azure SQL. - -# [Portal](#tab/azure-portal) - -## Disable in SQL Database using Azure portal - -1. Using the user with the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role, go to the [Azure portal](https://portal.azure.com/). -1. Go to your SQL server resource, and select **Azure Active Directory** under the **Settings** menu. -1. To disable the Azure AD-only authentication feature, uncheck the **Support only Azure Active Directory authentication for this server** checkbox and **Save** the setting. - -## Disable in SQL Managed Instance using Azure portal - -1. Using the user with the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role, go to the [Azure portal](https://portal.azure.com/). -1. 
Go to your **SQL managed instance** resource, and select **Active Directory admin** under the **Settings** menu. -1. To disable the Azure AD-only authentication feature, uncheck the **Support only Azure Active Directory authentication for this managed instance** checkbox and **Save** the setting. - -# [The Azure CLI](#tab/azure-cli) - -## Disable in SQL Database using Azure CLI - -To disable Azure AD-only authentication in Azure SQL Database using Azure CLI, see the commands below. - -1. [Sign into Azure](/cli/azure/authenticate-azure-cli) using the account with the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role. - - ```azurecli - az login - ``` - -1. Run the following command, replacing `` with your SQL server name, and `` with your Azure Resource that holds the SQL server. - - ```azurecli - az sql server ad-only-auth disable --resource-group --name - ``` - -1. After disabling Azure AD-only authentication, you should see the following output when you check the status: - - ```json - { - "azureAdOnlyAuthentication": false, - "id": "/subscriptions//resourceGroups/mygroup/providers/Microsoft.Sql/servers/myserver/azureADOnlyAuthentications/Default", - "name": "Default", - "resourceGroup": "myresource", - "type": "Microsoft.Sql/servers" - } - ``` - -## Disable in SQL Managed Instance using Azure CLI - -To disable Azure AD-only authentication in Azure SQL Managed Instance using Azure CLI, see the commands below. - -1. [Sign into Azure](/cli/azure/authenticate-azure-cli) using the account with the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role. - - ```azurecli - az login - ``` - -1. Run the following command, replacing `` with your SQL Managed Instance name, and `` with your Azure Resource that holds the managed instance. - - ```azurecli - az sql mi ad-only-auth disable --resource-group --name - ``` - -1. 
After disabling Azure AD-only authentication, you should see the following output when you check the status: - - ```json - { - "azureAdOnlyAuthentication": false, - "id": "/subscriptions//resourceGroups/myresource/providers/Microsoft.Sql/managedInstances/myinstance/azureADOnlyAuthentications/Default", - "name": "Default", - "resourceGroup": "myresource", - "type": "Microsoft.Sql/managedInstances" - } - ``` - -# [PowerShell](#tab/azure-powershell) - -## Disable in SQL Database using PowerShell - -To disable Azure AD-only authentication in Azure SQL Database using PowerShell, see the commands below. - -1. [Sign into Azure](/powershell/azure/authenticate-azureps) using the account with the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role. - - ```powershell - Connect-AzAccount - ``` - -1. Run the following command, replacing `` with your SQL server name, and `` with your Azure Resource that holds the SQL server. - - ```powershell - Disable-AzSqlServerActiveDirectoryOnlyAuthentication -ServerName -ResourceGroupName - ``` - -## Disable in SQL Managed Instance using PowerShell - -To disable Azure AD-only authentication in Azure SQL Managed Instance using PowerShell, see the commands below. - -1. [Sign into Azure](/powershell/azure/authenticate-azureps) using the account with the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role. - - ```powershell - Connect-AzAccount - ``` - -1. Run the following command, replacing `` with your SQL Managed Instance name, and `` with your Azure Resource that holds the managed instance. - - ```powershell - Disable-AzSqlInstanceActiveDirectoryOnlyAuthentication -InstanceName -ResourceGroupName - ``` - ---- - -## Test connecting to Azure SQL again - -After disabling Azure AD-only authentication, test connecting using a SQL authentication login. You should now be able to connect to your server or instance. 
- -## Next steps - -- [Azure AD-only authentication with Azure SQL](authentication-azure-ad-only-authentication.md) -- [Create server with Azure AD-only authentication enabled in Azure SQL](authentication-azure-ad-only-authentication-create-server.md) -- [Using Azure Policy to enforce Azure Active Directory only authentication with Azure SQL](authentication-azure-ad-only-authentication-policy-how-to.md) diff --git a/articles/azure-sql/database/authentication-azure-ad-only-authentication.md b/articles/azure-sql/database/authentication-azure-ad-only-authentication.md deleted file mode 100644 index 2d9613d11c0d3..0000000000000 --- a/articles/azure-sql/database/authentication-azure-ad-only-authentication.md +++ /dev/null @@ -1,426 +0,0 @@ ---- -title: Azure Active Directory-only authentication -description: This article provides information on the Azure AD-only authentication feature with Azure SQL -titleSuffix: Azure SQL Database & Azure SQL Managed Instance -ms.service: sql-db-mi -ms.subservice: security -ms.topic: conceptual -author: GithubMirek -ms.author: mireks -ms.reviewer: kendralittle, vanto, mathoma, wiassaf -ms.date: 04/01/2022 -ms.custom: ignite-fall-2021, devx-track-azurecli ---- - -# Azure AD-only authentication with Azure SQL - -[!INCLUDE[appliesto-sqldb-sqlmi-asa-dedicated-only](../includes/appliesto-sqldb-sqlmi-asa-dedicated-only.md)] - -Azure AD-only authentication is a feature within [Azure SQL](../azure-sql-iaas-vs-paas-what-is-overview.md) that allows the service to only support Azure AD authentication, and is supported for [Azure SQL Database](sql-database-paas-overview.md) and [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md). - -Azure AD-only authentication is also available for dedicated SQL pools (formerly SQL DW) in standalone servers. Azure AD-only authentication can be enabled for the Azure Synapse workspace. 
For more information, see [Azure AD-only authentication with Azure Synapse workspaces](../../synapse-analytics/sql/active-directory-authentication.md). - -SQL authentication is disabled when enabling Azure AD-only authentication in the Azure SQL environment, including connections from SQL server administrators, logins, and users. Only users using [Azure AD authentication](authentication-aad-overview.md) are authorized to connect to the server or database. - -Azure AD-only authentication can be enabled or disabled using the Azure portal, Azure CLI, PowerShell, or REST API. Azure AD-only authentication can also be configured during server creation with an Azure Resource Manager (ARM) template. - -For more information on Azure SQL authentication, see [Authentication and authorization](logins-create-manage.md#authentication-and-authorization). - -## Feature description - -When enabling Azure AD-only authentication, [SQL authentication](logins-create-manage.md#authentication-and-authorization) is disabled at the server or managed instance level and prevents any authentication based on any SQL authentication credentials. SQL authentication users won't be able to connect to the [logical server](logical-servers.md) for Azure SQL Database or managed instance, including all of its databases. Although SQL authentication is disabled, new SQL authentication logins and users can still be created by Azure AD accounts with proper permissions. Newly created SQL authentication accounts won't be allowed to connect to the server. Enabling Azure AD-only authentication doesn't remove existing SQL authentication login and user accounts. The feature only prevents these accounts from connecting to the server, and any database created for this server. - -You can also force servers to be created with Azure AD-only authentication enabled using Azure Policy. For more information, see [Azure Policy for Azure AD-only authentication](authentication-azure-ad-only-authentication-policy.md). 
- -## Permissions - -Azure AD-only authentication can be enabled or disabled by Azure AD users who are members of high privileged [Azure AD built-in roles](../../role-based-access-control/built-in-roles.md), such as Azure subscription [Owners](../../role-based-access-control/built-in-roles.md#owner), [Contributors](../../role-based-access-control/built-in-roles.md#contributor), and [Global Administrators](../../active-directory/roles/permissions-reference.md#global-administrator). Additionally, the role [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) can also enable or disable the Azure AD-only authentication feature. - -The [SQL Server Contributor](../../role-based-access-control/built-in-roles.md#sql-server-contributor) and [SQL Managed Instance Contributor](../../role-based-access-control/built-in-roles.md#sql-managed-instance-contributor) roles won't have permissions to enable or disable the Azure AD-only authentication feature. This is consistent with the [Separation of Duties](security-best-practice.md#implement-separation-of-duties) approach, where users who can create an Azure SQL server or create an Azure AD admin, can't enable or disable security features. - -### Actions required - -The following actions are added to the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role to allow management of the Azure AD-only authentication feature. - -- Microsoft.Sql/servers/azureADOnlyAuthentications/* -- Microsoft.Sql/servers/administrators/read - required only for users accessing the Azure portal **Azure Active Directory** menu -- Microsoft.Sql/managedInstances/azureADOnlyAuthentications/* -- Microsoft.Sql/managedInstances/read - -The above actions can also be added to a custom role to manage Azure AD-only authentication. For more information, see [Create and assign a custom role in Azure Active Directory](../../active-directory/roles/custom-create.md). 
- -## Managing Azure AD-only authentication using APIs - -> [!IMPORTANT] -> The Azure AD admin must be set before enabling Azure AD-only authentication. - -# [Azure CLI](#tab/azure-cli) - -You must have Azure CLI version **2.14.2** or higher. - -`name` corresponds to the prefix of the server or instance name (for example, `myserver`) and `resource-group` corresponds to the resource the server belongs to (for example, `myresource`). - -## Azure SQL Database - -For more information, see [az sql server ad-only-auth](/cli/azure/sql/server/ad-only-auth). - -### Enable or disable in SQL Database - -**Enable** - -```azurecli -az sql server ad-only-auth enable --resource-group myresource --name myserver -``` - -**Disable** - -```azurecli -az sql server ad-only-auth disable --resource-group myresource --name myserver -``` - -### Check the status in SQL Database - -```azurecli -az sql server ad-only-auth get --resource-group myresource --name myserver -``` - -## Azure SQL Managed Instance - -For more information, see [az sql mi ad-only-auth](/cli/azure/sql/mi/ad-only-auth). - -**Enable** - -```azurecli -az sql mi ad-only-auth enable --resource-group myresource --name myserver -``` - -**Disable** - -```azurecli -az sql mi ad-only-auth disable --resource-group myresource --name myserver -``` - -### Check the status in SQL Managed Instance - -```azurecli -az sql mi ad-only-auth get --resource-group myresource --name myserver -``` - -# [PowerShell](#tab/azure-powershell) - -[Az.Sql 2.10.0](https://www.powershellgallery.com/packages/Az.Sql/2.10.0) module or higher is required. - -`ServerName` or `InstanceName` correspond to the prefix of the server name (for example, `myserver` or `myinstance`) and `ResourceGroupName` corresponds to the resource the server belongs to (for example, `myresource`). 
- -## Azure SQL Database - -### Enable or disable in SQL Database - -**Enable** - -For more information, see [Enable-AzSqlServerActiveDirectoryOnlyAuthentication](/powershell/module/az.sql/enable-azsqlserveractivedirectoryonlyauthentication). You can also run `get-help Enable-AzSqlServerActiveDirectoryOnlyAuthentication -full`. - -```powershell -Enable-AzSqlServerActiveDirectoryOnlyAuthentication -ServerName myserver -ResourceGroupName myresource -``` - -You can also use the following command: - -```powershell -Get-AzSqlServer -ServerName myserver | Enable-AzSqlServerActiveDirectoryOnlyAuthentication -``` - -**Disable** - -For more information, see [Disable-AzSqlServerActiveDirectoryOnlyAuthentication](/powershell/module/az.sql/disable-azsqlserveractivedirectoryonlyauthentication). - -```powershell -Disable-AzSqlServerActiveDirectoryOnlyAuthentication -ServerName myserver -ResourceGroupName myresource -``` - -### Check the status in SQL Database - -```powershell -Get-AzSqlServerActiveDirectoryOnlyAuthentication -ServerName myserver -ResourceGroupName myresource -``` - -You can also use the following command: - -```powershell -Get-AzSqlServer -ServerName myserver | Get-AzSqlServerActiveDirectoryOnlyAuthentication -``` - -## Azure SQL Managed Instance - -### Enable or disable in SQL Managed Instance - -**Enable** - -For more information, see [Enable-AzSqlInstanceActiveDirectoryOnlyAuthentication](/powershell/module/az.sql/enable-azsqlinstanceactivedirectoryonlyauthentication). - -```powershell -Enable-AzSqlInstanceActiveDirectoryOnlyAuthentication -InstanceName myinstance -ResourceGroupName myresource -``` - -You can also use the following command: - -```powershell -Get-AzSqlInstance -InstanceName myinstance | Enable-AzSqlInstanceActiveDirectoryOnlyAuthentication -``` - -For more information on these PowerShell commands, run `get-help Enable-AzSqlInstanceActiveDirectoryOnlyAuthentication -full`. 
- -**Disable** - -For more information, see [Disable-AzSqlInstanceActiveDirectoryOnlyAuthentication](/powershell/module/az.sql/disable-azsqlinstanceactivedirectoryonlyauthentication). - -```powershell -Disable-AzSqlInstanceActiveDirectoryOnlyAuthentication -InstanceName myinstance -ResourceGroupName myresource -``` - -### Check the status in SQL Managed Instance - -```powershell -Get-AzSqlInstanceActiveDirectoryOnlyAuthentication -InstanceName myinstance -ResourceGroupName myresource -``` - -You can also use the following command: - -```powershell -Get-AzSqlInstance -InstanceName myinstance | Get-AzSqlInstanceActiveDirectoryOnlyAuthentication -``` - -# [REST API](#tab/rest-api) - -The following parameters will need to be defined: - -- `<subscriptionId>` can be found by navigating to **Subscriptions** in the Azure portal. -- `<serverName>` corresponds to the prefix of the server or instance name (for example, `myserver`). -- `<resourceGroupName>` corresponds to the resource the server belongs to (for example, `myresource`) - -To use the latest MSAL, download it from https://www.powershellgallery.com/packages/MSAL.PS. - -```rest -$subscriptionId = '<subscriptionId>' -$serverName = "<serverName>" -$resourceGroupName = "<resourceGroupName>" -``` - -## Azure SQL Database - -For more information, see the [Server Azure AD Only Authentications](/rest/api/sql/2021-02-01-preview/serverazureadonlyauthentications) REST API documentation. 
- -### Enable or disable in SQL Database - -**Enable** - -```rest -$body = @{ properties = @{ azureADOnlyAuthentication = 1 } } | ConvertTo-Json -Invoke-RestMethod -Uri "https://management.azure.com/subscriptions/$subscriptionId/resourceGroups/$resourceGroupName/providers/Microsoft.Sql/servers/$serverName/azureADOnlyAuthentications/default?api-version=2020-02-02-preview" -Method PUT -Headers $authHeader -Body $body -ContentType "application/json" -``` - -**Disable** - -```rest -$body = @{ properties = @{ azureADOnlyAuthentication = 0 } } | ConvertTo-Json -Invoke-RestMethod -Uri "https://management.azure.com/subscriptions/$subscriptionId/resourceGroups/$resourceGroupName/providers/Microsoft.Sql/servers/$serverName/azureADOnlyAuthentications/default?api-version=2020-02-02-preview" -Method PUT -Headers $authHeader -Body $body -ContentType "application/json" -``` - -### Check the status in SQL Database - -```rest -Invoke-RestMethod -Uri "https://management.azure.com/subscriptions/$subscriptionId/resourceGroups/$resourceGroupName/providers/Microsoft.Sql/servers/$serverName/azureADOnlyAuthentications/default?api-version=2020-02-02-preview" -Method GET -Headers $authHeader | Format-List -``` - -## Azure SQL Managed Instance - -For more information, see the [Managed Instance Azure AD Only Authentications](/rest/api/sql/2021-02-01-preview/managedinstanceazureadonlyauthentications) REST API documentation. 
- -### Enable or disable in SQL Managed Instance - -**Enable** - -```rest -$body = @{ properties = @{ azureADOnlyAuthentication = 1 } } | ConvertTo-Json -Invoke-RestMethod -Uri "https://management.azure.com/subscriptions/$subscriptionId/resourceGroups/$resourceGroupName/providers/Microsoft.Sql/managedInstances/$serverName/azureADOnlyAuthentications/default?api-version=2020-02-02-preview" -Method PUT -Headers $authHeader -Body $body -ContentType "application/json" -``` - -**Disable** - -```rest -$body = @{ properties = @{ azureADOnlyAuthentication = 0 } } | ConvertTo-Json -Invoke-RestMethod -Uri "https://management.azure.com/subscriptions/$subscriptionId/resourceGroups/$resourceGroupName/providers/Microsoft.Sql/managedInstances/$serverName/azureADOnlyAuthentications/default?api-version=2020-02-02-preview" -Method PUT -Headers $authHeader -Body $body -ContentType "application/json" -``` - -### Check the status in SQL Managed Instance - -```rest -Invoke-RestMethod -Uri "https://management.azure.com/subscriptions/$subscriptionId/resourceGroups/$resourceGroupName/providers/Microsoft.Sql/managedInstances/$serverName/azureADOnlyAuthentications/default?api-version=2020-02-02-preview" -Method GET -Headers $authHeader | Format-List -``` - -# [ARM Template](#tab/arm) - -- Input the Azure AD admin for the deployment. You will find the user Object ID by going to the [Azure portal](https://portal.azure.com) and navigating to your **Azure Active Directory** resource. Under **Manage**, select **Users**. Search for the user you want to set as the Azure AD admin for your Azure SQL server. Select the user, and under their **Profile** page, you will see the **Object ID**. -- The Tenant ID can be found in the **Overview** page of your **Azure Active Directory** resource. - -## Azure SQL Database - -### Enable - -The below ARM Template enables Azure AD-only authentication in your Azure SQL Database. 
To disable Azure AD-only authentication, set the `azureADOnlyAuthentication` property to `false`. - -```json -{ - "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#", - "contentVersion": "1.0.0.1", - "parameters": { - "sqlServer_name": { - "type": "String" - }, - "aad_admin_name": { - "type": "String" - }, - "aad_admin_objectid": { - "type": "String" - }, - "aad_admin_tenantid": { - "type": "String" - } - }, - "resources": [ - { - "type": "Microsoft.Sql/servers/administrators", - "apiVersion": "2020-02-02-preview", - "name": "[concat(parameters('sqlServer_name'), '/ActiveDirectory')]", - "properties": { - "administratorType": "ActiveDirectory", - "login": "[parameters('aad_admin_name')]", - "sid": "[parameters('aad_admin_objectid')]", - "tenantId": "[parameters('aad_admin_tenantId')]" - } - }, - { - "type": "Microsoft.Sql/servers/azureADOnlyAuthentications", - "apiVersion": "2020-02-02-preview", - "name": "[concat(parameters('sqlServer_name'), '/Default')]", - "dependsOn": [ - "[resourceId('Microsoft.Sql/servers/administrators', parameters('sqlServer_name'), 'ActiveDirectory')]" - ], - "properties": { - "azureADOnlyAuthentication": true - } - } - ] -} -``` - -For more information, see [Microsoft.Sql servers/azureADOnlyAuthentications](/azure/templates/microsoft.sql/servers/azureadonlyauthentications). - -## Azure SQL Managed Instance - -### Enable - -The below ARM Template enables Azure AD-only authentication in your Azure SQL Managed Instance. To disable Azure AD-only authentication, set the `azureADOnlyAuthentication` property to `false`. 
- -```json -{ - "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#", - "contentVersion": "1.0.0.1", - "parameters": { - "instance": { - "type": "String" - }, - "aad_admin_name": { - "type": "String" - }, - "aad_admin_objectid": { - "type": "String" - }, - "aad_admin_tenantid": { - "type": "String" - } - }, - "resources": [ - { - "type": "Microsoft.Sql/managedInstances/administrators", - "apiVersion": "2020-02-02-preview", - "name": "[concat(parameters('instance'), '/ActiveDirectory')]", - "properties": { - "administratorType": "ActiveDirectory", - "login": "[parameters('aad_admin_name')]", - "sid": "[parameters('aad_admin_objectid')]", - "tenantId": "[parameters('aad_admin_tenantId')]" - } - }, - { - "type": "Microsoft.Sql/managedInstances/azureADOnlyAuthentications", - "apiVersion": "2020-02-02-preview", - "name": "[concat(parameters('instance'), '/Default')]", - "dependsOn": [ - "[resourceId('Microsoft.Sql/managedInstances/administrators', parameters('instance'), 'ActiveDirectory')]" - ], - "properties": { - "azureADOnlyAuthentication": true - } - } - ] -} - -``` - -For more information, see [Microsoft.Sql managedInstances/azureADOnlyAuthentications](/azure/templates/microsoft.sql/managedinstances/azureadonlyauthentications). - ---- - -### Checking Azure AD-only authentication using T-SQL - -The [SERVERPROPERTY](/sql/t-sql/functions/serverproperty-transact-sql) `IsExternalAuthenticationOnly` has been added to check if Azure AD-only authentication is enabled for your server or managed instance. `1` indicates that the feature is enabled, and `0` represents the feature is disabled. - -```sql -SELECT SERVERPROPERTY('IsExternalAuthenticationOnly') -``` - -## Remarks - -- A [SQL Server Contributor](../../role-based-access-control/built-in-roles.md#sql-server-contributor) can set or remove an Azure AD admin, but can't set the **Azure Active Directory authentication only** setting. 
The [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) can't set or remove an Azure AD admin, but can set the **Azure Active Directory authentication only** setting. Only accounts with higher Azure RBAC roles or custom roles that contain both permissions can set or remove an Azure AD admin and set the **Azure Active Directory authentication only** setting. One such role is the [Contributor](../../role-based-access-control/built-in-roles.md#contributor) role. -- After enabling or disabling **Azure Active Directory authentication only** in the Azure portal, an **Activity log** entry can be seen in the **SQL server** menu. - :::image type="content" source="media/authentication-azure-ad-only-authentication/azure-ad-only-authentication-portal-sql-server-activity-log.png" alt-text="Activity log entry in the Azure portal"::: -- The **Azure Active Directory authentication only** setting can only be enabled or disabled by users with the right permissions if the **Azure Active Directory admin** is specified. If the Azure AD admin isn't set, the **Azure Active Directory authentication only** setting remains inactive and cannot be enabled or disabled. Using APIs to enable Azure AD-only authentication will also fail if the Azure AD admin hasn't been set. -- Changing an Azure AD admin when Azure AD-only authentication is enabled is supported for users with the appropriate permissions. -- Changing an Azure AD admin and enabling or disabling Azure AD-only authentication is allowed in the Azure portal for users with the appropriate permissions. Both operations can be completed with one **Save** in the Azure portal. The Azure AD admin must be set in order to enable Azure AD-only authentication. -- Removing an Azure AD admin when the Azure AD-only authentication feature is enabled isn't supported. Using an API to remove an Azure AD admin will fail if Azure AD-only authentication is enabled. 
- - If the **Azure Active Directory authentication only** setting is enabled, the **Remove admin** button is inactive in the Azure portal. -- Removing an Azure AD admin and disabling the **Azure Active Directory authentication only** setting is allowed, but requires the right user permission to complete the operations. Both operations can be completed with one **Save** in the Azure portal. -- Azure AD users with proper permissions can impersonate existing SQL users. - - Impersonation continues working between SQL authentication users even when the Azure AD-only authentication feature is enabled. - -### Limitations for Azure AD-only authentication in SQL Database - -When Azure AD-only authentication is enabled for SQL Database, the following features aren't supported: - -- [Azure SQL Database server roles](security-server-roles.md) are supported for [Azure AD server principals](authentication-azure-ad-logins.md), but not if the Azure AD login is a group. -- [Elastic jobs](job-automation-overview.md) -- [SQL Data Sync](sql-data-sync-data-sql-server-sql-database.md) -- [Change data capture (CDC)](/sql/relational-databases/track-changes/about-change-data-capture-sql-server) - If you create a database in Azure SQL Database as an Azure AD user and enable change data capture on it, a SQL user will not be able to disable or make changes to CDC artifacts. However, another Azure AD user will be able to enable or disable CDC on the same database. 
Similarly, if you create an Azure SQL Database as a SQL user, enabling or disabling CDC as an Azure AD user won't work -- [Transactional replication](../managed-instance/replication-transactional-overview.md) - Since SQL authentication is required for connectivity between replication participants, when Azure AD-only authentication is enabled, transactional replication is not supported for SQL Database for scenarios where transactional replication is used to push changes made in an Azure SQL Managed Instance, on-premises SQL Server, or an Azure VM SQL Server instance to a database in Azure SQL Database -- [SQL Insights (preview)](../../azure-monitor/insights/sql-insights-overview.md) -- EXEC AS statement for Azure AD group member accounts - -### Limitations for Azure AD-only authentication in Managed Instance - -When Azure AD-only authentication is enabled for Managed Instance, the following features aren't supported: - -- [Transactional replication](../managed-instance/replication-transactional-overview.md) -- [SQL Agent Jobs in Managed Instance](../managed-instance/job-automation-managed-instance.md) supports Azure AD-only authentication. However, the Azure AD user who is a member of an Azure AD group that has access to the managed instance cannot own SQL Agent Jobs -- [SQL Insights (preview)](../../azure-monitor/insights/sql-insights-overview.md) -- EXEC AS statement for Azure AD group member accounts - -For more limitations, see [T-SQL differences between SQL Server & Azure SQL Managed Instance](../managed-instance/transact-sql-tsql-differences-sql-server.md#logins-and-users). 
- -## Next steps - -> [!div class="nextstepaction"] -> [Tutorial: Enable Azure Active Directory only authentication with Azure SQL](authentication-azure-ad-only-authentication-tutorial.md) - -> [!div class="nextstepaction"] -> [Create server with Azure AD-only authentication enabled in Azure SQL](authentication-azure-ad-only-authentication-create-server.md) diff --git a/articles/azure-sql/database/authentication-azure-ad-user-assigned-managed-identity-create-server.md b/articles/azure-sql/database/authentication-azure-ad-user-assigned-managed-identity-create-server.md deleted file mode 100644 index 05bdb6dbcf591..0000000000000 --- a/articles/azure-sql/database/authentication-azure-ad-user-assigned-managed-identity-create-server.md +++ /dev/null @@ -1,348 +0,0 @@ ---- -title: Create an Azure SQL logical server using a user-assigned managed identity -description: This article guides you through creating an Azure SQL logical server using a user-assigned managed identity -titleSuffix: Azure SQL Database -ms.service: sql-database -ms.subservice: security -ms.topic: conceptual -author: GithubMirek -ms.author: mireks -ms.reviewer: vanto -ms.date: 12/15/2021 ---- - -# Create an Azure SQL Database server with a user-assigned managed identity - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!NOTE] -> User-assigned managed identity for Azure SQL is in **public preview**. If you're looking for a guide on Azure SQL Managed Instance, see [Create an Azure SQL Managed Instance with a user-assigned managed identity](../managed-instance/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance.md). - -This how-to guide outlines the steps to create a [logical server](logical-servers.md) for Azure SQL Database with a [user-assigned managed identity](../../active-directory/managed-identities-azure-resources/overview.md#managed-identity-types). 
For more information on the benefits of using a user-assigned managed identity for the server identity in Azure SQL Database, see [User-assigned managed identity in Azure AD for Azure SQL](authentication-azure-ad-user-assigned-managed-identity.md). - -## Prerequisites - -- To provision a SQL Database server with a user-assigned managed identity, the [SQL Server Contributor](../../role-based-access-control/built-in-roles.md#sql-server-contributor) role (or a role with greater permissions), along with an Azure RBAC role containing the following action is required: - - Microsoft.ManagedIdentity/userAssignedIdentities/*/assign/action - For example, the [Managed Identity Operator](../../role-based-access-control/built-in-roles.md#managed-identity-operator) has this action. -- Create a user-assigned managed identity and assign it the necessary permission to be a server or managed instance identity. For more information, see [Manage user-assigned managed identities](../../active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities.md) and [user-assigned managed identity permissions for Azure SQL](authentication-azure-ad-user-assigned-managed-identity.md#permissions). -- [Az.Sql module 3.4](https://www.powershellgallery.com/packages/Az.Sql/3.4.0) or higher is required when using PowerShell for user-assigned managed identities. -- [The Azure CLI 2.26.0](/cli/azure/install-azure-cli) or higher is required to use the Azure CLI with user-assigned managed identities. -- For a list of limitations and known issues with using user-assigned managed identity, see [User-assigned managed identity in Azure AD for Azure SQL](authentication-azure-ad-user-assigned-managed-identity.md#limitations-and-known-issues) - -## Create server configured with a user-assigned managed identity - -The following steps outline the process of creating a new Azure SQL Database logical server and a new database with a user-assigned managed identity assigned. 
- -> [!NOTE] -> Multiple user-assigned managed identities can be added to the server, but only one identity can be the primary identity at any given time. In this example, the system-assigned managed identity is disabled, but it can be enabled as well. - -# [Portal](#tab/azure-portal) - -1. Browse to the [Select SQL deployment](https://portal.azure.com/#create/Microsoft.AzureSQL) option page in the Azure portal. - -2. If you aren't already signed in to Azure portal, sign in when prompted. - -3. Under **SQL databases**, leave **Resource type** set to **Single database**, and select **Create**. - -4. On the **Basics** tab of the **Create SQL Database** form, under **Project details**, select the desired Azure **Subscription**. - -5. For **Resource group**, select **Create new**, enter a name for your resource group, and select **OK**. - -6. For **Database name** enter your desired database name. - -7. For **Server**, select **Create new**, and fill out the **New server** form with the following values: - - - **Server name**: Enter a unique server name. Server names must be globally unique for all servers in Azure, not just unique within a subscription. - - **Server admin login**: Enter an admin login name, for example: `azureuser`. - - **Password**: Enter a password that meets the password requirements, and enter it again in the **Confirm password** field. - - **Location**: Select a location from the dropdown list - -8. Select **Next: Networking** at the bottom of the page. - -9. On the **Networking** tab, for **Connectivity method**, select **Public endpoint**. - -10. For **Firewall rules**, set **Add current client IP address** to **Yes**. Leave **Allow Azure services and resources to access this server** set to **No**. - -11. Select **Next: Security** at the bottom of the page. - -12. On the Security tab, under **Identity (preview)**, select **Configure Identities**. 
- - :::image type="content" source="media/authentication-azure-ad-user-assigned-managed-identity/create-server-configure-identities.png" alt-text="Screenshot of Azure portal security settings of the create database process"::: - -13. On the **Identity (preview)** blade, under **User assigned managed identity**, select **Add**. Select the desired **Subscription** and then under **User assigned managed identities** select the desired user assigned managed identity from the selected subscription. Then select the **Select** button. - - :::image type="content" source="media/authentication-azure-ad-user-assigned-managed-identity/user-assigned-managed-identity-configuration.png" alt-text="Azure portal screenshot of adding user assigned managed identity when configuring server identity"::: - - :::image type="content" source="media/authentication-azure-ad-user-assigned-managed-identity/select-a-user-assigned-managed-identity.png" alt-text="Azure portal screenshot of user assigned managed identity when configuring server identity"::: - -14. Under **Primary identity**, select the same user-assigned managed identity selected in the previous step. - - :::image type="content" source="media/authentication-azure-ad-user-assigned-managed-identity/select-a-primary-identity.png" alt-text="Azure portal screenshot of selecting primary identity for server"::: - - > [!NOTE] - > If the system-assigned managed identity is the primary identity, the **Primary identity** field must be empty. - -15. Select **Apply** - -16. Select **Review + create** at the bottom of the page - -17. On the **Review + create** page, after reviewing, select **Create**. - -# [The Azure CLI](#tab/azure-cli) - -The Azure CLI command `az sql server create` is used to provision a new logical server. The below command will provision a new server with a user-assigned managed identity. 
The example will also enable [Azure AD-only authentication](authentication-azure-ad-only-authentication.md), and set an Azure AD admin for the server. - -The server SQL Administrator login will be automatically created and the password will be set to a random password. Since SQL Authentication connectivity is disabled with this server creation, the SQL Administrator login won't be used. - -The server Azure AD admin will be the account you set for `<azureADAccount>`, and can be used to manage the server. - -Replace the following values in the example: - -- `<subscriptionId>`: Your subscription ID can be found in the Azure portal -- `<resourceGroupName>`: Name of the resource group for your logical server -- `<managedIdentity>`: The user-assigned managed identity. Can also be used as the primary identity. -- `<primaryIdentity>`: The primary identity you want to use as the server identity -- `<azureADAccount>`: Can be an Azure AD user or group. For example, `DummyLogin` -- `<objectId>`: The Azure AD Object ID for the user -- `<serverName>`: Use a unique logical server name -- `<location>`: Location of the server, such as `westus`, or `centralus` - -```azurecli -az sql server create --assign-identity --identity-type UserAssigned --user-assigned-identity-id /subscriptions/<subscriptionId>/resourceGroups/<resourceGroupName>/providers/Microsoft.ManagedIdentity/userAssignedIdentities/<managedIdentity> --primary-user-assigned-identity-id /subscriptions/<subscriptionId>/resourceGroups/<resourceGroupName>/providers/Microsoft.ManagedIdentity/userAssignedIdentities/<primaryIdentity> --enable-ad-only-auth --external-admin-principal-type User --external-admin-name <azureADAccount> --external-admin-sid <objectId> -g <resourceGroupName> -n <serverName> -l <location> -``` - -For more information, see [az sql server create](/cli/azure/sql/server#az-sql-server-create). - -> [!NOTE] -> The above example provisions a server with only a user-assigned managed identity. You could set the `--identity-type` to be `UserAssigned,SystemAssigned` if you wanted both types of managed identities to be created with the server. 
- -To check the server status after creation, see the following command: - -```azurecli -az sql server show --name --resource-group --expand-ad-admin -``` - -# [PowerShell](#tab/azure-powershell) - -The PowerShell command `New-AzSqlServer` is used to provision a new Azure SQL logical server. The below command will provision a new server with a user-assigned managed identity. The example will also enable [Azure AD-only authentication](authentication-azure-ad-only-authentication.md), and set an Azure AD admin for the server. - -The server SQL Administrator login will be automatically created and the password will be set to a random password. Since SQL Authentication connectivity is disabled with this server creation, the SQL Administrator login won't be used. - -The server Azure AD admin will be the account you set for ``, and can be used to manage the server. - -Replace the following values in the example: - -- ``: Name of the resource group for your logical server -- ``: Location of the server, such as `West US`, or `Central US` -- ``: Use a unique logical server name -- ``: Your subscription ID can be found in the Azure portal -- ``: The user-assigned managed identity. Can also be used as the primary identity -- ``: The primary identity you want to use as the server identity -- ``: Can be an Azure AD user or group. For example, `DummyLogin` - -```powershell -New-AzSqlServer -ResourceGroupName "" -Location "" -ServerName "" -ServerVersion "12.0" -AssignIdentity -IdentityType "UserAssigned" -UserAssignedIdentityId "/subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/" -PrimaryUserAssignedIdentityId "/subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/" -ExternalAdminName "" -EnableActiveDirectoryOnlyAuthentication -``` - -For more information, see [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver). 
- -> [!NOTE] -> The above example provisions a server with only a user-assigned managed identity. You could set the `-IdentityType` to be `"UserAssigned,SystemAssigned"` if you wanted both types of managed identities to be created with the server. - -To check the server status after creation, see the following command: - -```powershell -Get-AzSqlServer -ResourceGroupName "" -ServerName "" -ExpandActiveDirectoryAdministrator -``` - -# [REST API](#tab/rest-api) - -The [Servers - Create Or Update](/rest/api/sql/2020-11-01-preview/servers/create-or-update) REST API can be used to create a logical server with a user-assigned managed identity. - -The script below will provision a logical server, set the Azure AD admin as ``, and enable [Azure AD-only authentication](authentication-azure-ad-only-authentication.md). The server SQL Administrator login will also be created automatically and the password will be set to a random password. Since SQL Authentication connectivity is disabled with this provisioning, the SQL Administrator login won't be used. - -The Azure AD admin, `` can be used to manage the server when the provisioning is complete. - -Replace the following values in the example: - -- ``: Can be found by going to the [Azure portal](https://portal.azure.com), and going to your **Azure Active Directory** resource. In the **Overview** pane, you should see your **Tenant ID** -- ``: Your subscription ID can be found in the Azure portal -- ``: Use a unique logical server name -- ``: Name of the resource group for your logical server -- ``: Can be an Azure AD user or group. For example, `DummyLogin` -- ``: Location of the server, such as `westus2`, or `centralus` -- ``: Can be found by going to the [Azure portal](https://portal.azure.com), and going to your **Azure Active Directory** resource. In the **User** pane, search for the Azure AD user and find their **Object ID** -- ``: The user-assigned managed identity. 
Can also be used as the primary identity -- ``: The primary identity you want to use as the server identity - -```rest -Import-Module Azure -Import-Module MSAL.PS - -$tenantId = '' -$clientId = '1950a258-227b-4e31-a9cf-717495945fc2' # Static Microsoft client ID used for getting a token -$subscriptionId = '' -$uri = "urn:ietf:wg:oauth:2.0:oob" -$authUrl = "https://login.windows.net/$tenantId" -$serverName = "" -$resourceGroupName = "" - -Login-AzAccount -tenantId $tenantId - -# login as a user with SQL Server Contributor role or higher - -# Get a token - -$result = Get-MsalToken -RedirectUri $uri -ClientId $clientId -TenantId $tenantId -Scopes "https://management.core.windows.net/.default" - -#Authetication header -$authHeader = @{ -'Content-Type'='application\json; ' -'Authorization'=$result.CreateAuthorizationHeader() -} - -# Enable Azure AD-only auth and sets a user-managed identity as the server identity -# No server admin is specified, and only Azure AD admin and Azure AD-only authentication is set to true -# Server admin (login and password) is generated by the system -# The sid is the Azure AD Object ID for the user -# Replace all values in a <> - -$body = '{ -"location": "", -"identity": {"type" : "UserAssigned", "UserAssignedIdentities" : {"/subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/" : {}}}, -"properties": { "PrimaryUserAssignedIdentityId":"/subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/","administrators":{ "login":"", "sid":"", "tenantId":"", "principalType":"User", "azureADOnlyAuthentication":true } - } -}' - -# Provision the server - -Invoke-RestMethod -Uri https://management.azure.com/subscriptions/$subscriptionId/resourceGroups/$resourceGroupName/providers/Microsoft.Sql/servers/$serverName/?api-version=2020-11-01-preview -Method PUT -Headers $authHeader -Body $body -ContentType "application/json" -``` - -> [!NOTE] -> The above example provisions a server with 
only a user-assigned managed identity. You could set the `"type"` to be `"UserAssigned,SystemAssigned"` if you wanted both types of managed identities to be created with the server. - -To check the server status, you can use the following script: - -```rest -$uri = 'https://management.azure.com/subscriptions/'+$subscriptionId+'/resourceGroups/'+$resourceGroupName+'/providers/Microsoft.Sql/servers/'+$serverName+'?api-version=2020-11-01-preview&$expand=administrators/activedirectory' - -$responce=Invoke-WebRequest -Uri $uri -Method PUT -Headers $authHeader -Body $body -ContentType "application/json" - -$responce.statuscode - -$responce.content -``` - -# [ARM Template](#tab/arm-template) - -Here's an example of an ARM template that creates an Azure SQL logical server with a user-assigned managed identity. The template also adds an Azure AD admin set for the server and enables [Azure AD-only authentication](authentication-azure-ad-only-authentication.md), but this can be removed from the template example. - -For more information and ARM templates, see [Azure Resource Manager templates for Azure SQL Database & SQL Managed Instance](arm-templates-content-guide.md). - -Use a [Custom deployment in the Azure portal](https://portal.azure.com/#create/Microsoft.Template), and **Build your own template in the editor**. Next, **Save** the configuration once you pasted in the example. - -To get your user-assigned managed identity **Resource ID**, search for **Managed Identities** in the [Azure portal](https://portal.azure.com). Find your managed identity, and go to **Properties**. An example of your UMI **Resource ID** will look like `/subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/`. 
- -```json -{ - "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.1", - "parameters": { - "server": { - "type": "string", - "defaultValue": "[uniqueString('sql', resourceGroup().id)]", - "metadata": { - "description": "The name of the logical server." - } - }, - "location": { - "type": "string", - "defaultValue": "[resourceGroup().location]", - "metadata": { - "description": "Location for all resources." - } - }, - "aad_admin_name": { - "type": "String", - "metadata": { - "description": "The name of the Azure AD admin for the SQL server." - } - }, - "aad_admin_objectid": { - "type": "String", - "metadata": { - "description": "The Object ID of the Azure AD admin." - } - }, - "aad_admin_tenantid": { - "type": "String", - "defaultValue": "[subscription().tenantId]", - "metadata": { - "description": "The Tenant ID of the Azure Active Directory" - } - }, - "aad_admin_type": { - "defaultValue": "User", - "allowedValues": [ - "User", - "Group", - "Application" - ], - "type": "String" - }, - "aad_only_auth": { - "defaultValue": true, - "type": "Bool" - }, - "user_identity_resource_id": { - "defaultValue": "", - "type": "String", - "metadata": { - "description": "The Resource ID of the user-assigned managed identity, in the form of /subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/." 
- } - }, - "AdminLogin": { - "minLength": 1, - "type": "String" - }, - "AdminLoginPassword": { - "type": "SecureString" - } - }, - "resources": [ - { - "type": "Microsoft.Sql/servers", - "apiVersion": "2020-11-01-preview", - "name": "[parameters('server')]", - "location": "[parameters('location')]", - "identity": { - "type": "UserAssigned", - "UserAssignedIdentities": { - "[parameters('user_identity_resource_id')]": {} - } - }, - "properties": { - "administratorLogin": "[parameters('AdminLogin')]", - "administratorLoginPassword": "[parameters('AdminLoginPassword')]", - "PrimaryUserAssignedIdentityId": "[parameters('user_identity_resource_id')]", - "administrators": { - "login": "[parameters('aad_admin_name')]", - "sid": "[parameters('aad_admin_objectid')]", - "tenantId": "[parameters('aad_admin_tenantid')]", - "principalType": "[parameters('aad_admin_type')]", - "azureADOnlyAuthentication": "[parameters('aad_only_auth')]" - } - } - } - ] -} - -``` - ---- - -## See also - -- [User-assigned managed identity in Azure AD for Azure SQL](authentication-azure-ad-user-assigned-managed-identity.md) -- [Create an Azure SQL Managed Instance with a user-assigned managed identity](../managed-instance/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance.md). \ No newline at end of file diff --git a/articles/azure-sql/database/authentication-azure-ad-user-assigned-managed-identity.md b/articles/azure-sql/database/authentication-azure-ad-user-assigned-managed-identity.md deleted file mode 100644 index b371dec175607..0000000000000 --- a/articles/azure-sql/database/authentication-azure-ad-user-assigned-managed-identity.md +++ /dev/null @@ -1,197 +0,0 @@ ---- -title: User-assigned managed identity in Azure AD for Azure SQL -description: User-assigned managed identities (UMI) in Azure AD (AD) for Azure SQL Database, SQL Managed Instance, and dedicated SQL pools in Azure Synapse Analytics. 
-titleSuffix: Azure SQL Database & Azure SQL Managed Instance -ms.service: sql-db-mi -ms.subservice: security -ms.topic: conceptual -author: GithubMirek -ms.author: mireks -ms.reviewer: vanto -ms.date: 03/09/2022 ---- - -# User-assigned managed identity in Azure AD for Azure SQL - -[!INCLUDE[appliesto-sqldb-sqlmi-asa-dedicated-only](../includes/appliesto-sqldb-sqlmi-asa-dedicated-only.md)] - -> [!NOTE] -> User-assigned managed identity for Azure SQL is in **public preview**. - -Azure Active Directory (AD) supports two types of managed identities: System-assigned managed identity (SMI) and user-assigned managed identity (UMI). For more information, see [Managed identity types](../../active-directory/managed-identities-azure-resources/overview.md#managed-identity-types). - -A system-assigned managed identity is automatically assigned to a managed instance when it is created. When using Azure AD authentication with Azure SQL Managed Instance, a managed identity must be assigned to the server identity. Previously, only a system-assigned managed identity could be assigned to the Managed Instance or SQL Database server identity. With support for user-assigned managed identity, the UMI can be assigned to Azure SQL Managed Instance or Azure SQL Database as the instance or server identity. This feature is now supported for SQL Database. - -> [!NOTE] -> This article applies only to dedicated SQL pools (formerly SQL DW) in standalone Azure SQL servers. For more information on user-assigned managed identities for dedicated pools in Azure Synapse workspaces, see [Using a user-assigned managed identity](../../synapse-analytics/security/workspaces-encryption.md#using-a-user-assigned-managed-identity). - -## Benefits of using user-assigned managed identities - -There are several benefits of using UMI as a server identity. - -- User flexibility to create and maintain their own user-assigned managed identities for a given tenant. UMI can be used as server identities for Azure SQL. 
UMI is managed by the user, compared to an SMI, whose identity is uniquely defined per server, and assigned by the system. -- In the past, the Azure AD [Directory Readers](authentication-aad-directory-readers-role.md) role was required when using SMI as the server or instance identity. With the introduction of accessing Azure AD using [Microsoft Graph](/graph/api/resources/azure-ad-overview), users concerned with giving high-level permissions such as the Directory Readers role to the SMI or UMI can alternatively give lower-level permissions so that the server or instance identity can access [Microsoft Graph](/graph/api/resources/azure-ad-overview). For more information on providing Directory Readers permissions and its function, see [Directory Readers role in Azure Active Directory for Azure SQL](authentication-aad-directory-readers-role.md). -- Users can choose a specific UMI to be the server or instance identity for all SQL Databases or Managed Instances in the tenant, or have multiple UMIs assigned to different servers or instances. For example, different UMIs can be used in different servers representing different features. For example, a UMI serving transparent data encryption in one server, and a UMI serving Azure AD authentication in another server. -- UMI is needed to create an Azure SQL logical server configured with transparent data encryption (TDE) with customer-managed keys (CMK). For more information, see [Customer-managed transparent data encryption using user-assigned managed identity](transparent-data-encryption-byok-identity.md). -- User-assigned managed identities are independent from logical servers or managed instances. When a logical server or instance is deleted, the system-assigned managed identity is deleted as well. User-assigned managed identities aren't deleted with the server. - -> [!NOTE] -> The instance identity (SMI or UMI) must be enabled to allow support for Azure AD authentication in Managed Instance. 
For SQL Database, enabling the server identity is optional and required only if an Azure AD service principal (Azure AD application) oversees creating and managing Azure AD users, groups, or application in the server. For more information, see [Azure Active Directory service principal with Azure SQL](authentication-aad-service-principal.md). - -## Creating a user-assigned managed identity - -For information on how to create a user-assigned managed identity, see [Manage user-assigned managed identities](../../active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities.md). - -## Permissions - -Once the UMI is created, some permissions are needed to allow the UMI to read from [Microsoft Graph](/graph/api/resources/azure-ad-overview) as the server identity. Grant the permissions below, or give the UMI the [Directory Readers](authentication-aad-directory-readers-role-tutorial.md) role. These permissions should be granted before provisioning an Azure SQL logical server or managed instance. Once the permissions are granted to the UMI, they're enabled for all servers or instances that are created with the UMI assigned as a server identity. - -> [!IMPORTANT] -> Only a [Global Administrator](../../active-directory/roles/permissions-reference.md#global-administrator) or [Privileged Role Administrator](../../active-directory/roles/permissions-reference.md#privileged-role-administrator) can grant these permissions. - -- [**User.Read.All**](/graph/permissions-reference#user-permissions) - allows access to Azure AD user information -- [**GroupMember.Read.All**](/graph/permissions-reference#group-permissions) – allows access to Azure AD group information -- [**Application.Read.ALL**](/graph/permissions-reference#application-resource-permissions) – allows access to Azure AD service principal (applications) information - -### Grant permissions - -The following is a sample PowerShell script that will grant the necessary permissions for UMI or SMI. 
This sample will assign permissions to the UMI `umiservertest`. To execute the script, you must sign in as a user with a "Global Administrator" or "Privileged Role Administrator" role, and have the following [Microsoft Graph permissions](/graph/auth/auth-concepts#microsoft-graph-permissions): -- User.Read.All -- GroupMember.Read.All -- Application.Read.ALL - -```powershell -# Script to assign permissions to the UMI "umiservertest" - -import-module AzureAD -$tenantId = '' # Your Azure AD tenant ID - -Connect-AzureAD -TenantID $tenantId -# Login as a user with a "Global Administrator" or "Privileged Role Administrator" role -# Script to assign permissions to existing UMI -# The following Microsoft Graph permissions are required: -# User.Read.All -# GroupMember.Read.All -# Application.Read.ALL - -# Search for Microsoft Graph -$AAD_SP = Get-AzureADServicePrincipal -SearchString "Microsoft Graph"; -$AAD_SP -# Use Microsoft Graph; in this example, this is the first element $AAD_SP[0] - -#Output - -#ObjectId AppId DisplayName -#-------- ----- ----------- -#47d73278-e43c-4cc2-a606-c500b66883ef 00000003-0000-0000-c000-000000000000 Microsoft Graph -#44e2d3f6-97c3-4bc7-9ccd-e26746638b6d 0bf30f3b-4a52-48df-9a82-234910c4a086 Microsoft Graph #Change - -$MSIName = ""; # Name of your user-assigned or system-assigned managed identity -$MSI = Get-AzureADServicePrincipal -SearchString $MSIName -if($MSI.Count -gt 1) -{ -Write-Output "More than 1 principal found, please find your principal and copy the right object ID. 
Now use the syntax $MSI = Get-AzureADServicePrincipal -ObjectId " - -# Choose the right UMI or SMI - -Exit -} - -# If you have more UMIs with similar names, you have to use the proper $MSI[ ]array number - -# Assign the app roles - -$AAD_AppRole = $AAD_SP.AppRoles | Where-Object {$_.Value -eq "User.Read.All"} -New-AzureADServiceAppRoleAssignment -ObjectId $MSI.ObjectId -PrincipalId $MSI.ObjectId -ResourceId $AAD_SP.ObjectId[0] -Id $AAD_AppRole.Id -$AAD_AppRole = $AAD_SP.AppRoles | Where-Object {$_.Value -eq "GroupMember.Read.All"} -New-AzureADServiceAppRoleAssignment -ObjectId $MSI.ObjectId -PrincipalId $MSI.ObjectId -ResourceId $AAD_SP.ObjectId[0] -Id $AAD_AppRole.Id -$AAD_AppRole = $AAD_SP.AppRoles | Where-Object {$_.Value -eq "Application.Read.All"} -New-AzureADServiceAppRoleAssignment -ObjectId $MSI.ObjectId -PrincipalId $MSI.ObjectId -ResourceId $AAD_SP.ObjectId[0] -Id $AAD_AppRole.Id -``` - -In the final steps of the script, if you have more UMIs with similar names, you have to use the proper `$MSI[ ]array` number, for example, `$AAD_SP.ObjectId[0]`. - -### Check permissions for user-assigned managed identity - -To check permissions for a UMI, go to the [Azure portal](https://portal.azure.com). In the **Azure Active Directory** resource, go to **Enterprise applications**. Select **All Applications** for the **Application type**, and search for the UMI that was created. - -:::image type="content" source="media/authentication-azure-ad-user-assigned-managed-identity/azure-ad-search-enterprise-applications.png" alt-text="Screenshot of Azure portal Enterprise application settings"::: - -Select the UMI, and go to the **Permissions** settings under **Security**. 
- -:::image type="content" source="media/authentication-azure-ad-user-assigned-managed-identity/azure-ad-check-user-assigned-managed-identity-permissions.png" alt-text="Screenshot of user-assigned managed identity permissions"::: - -## Managing a managed identity for a server or instance - -To create an Azure SQL logical server with a user-assigned managed identity, see the following guide: [Create an Azure SQL logical server using a user-assigned managed identity](authentication-azure-ad-user-assigned-managed-identity-create-server.md) - -### Set managed identities in the Azure portal - -To set the identity for the SQL server or SQL managed instance in the [Azure portal](https://portal.azure.com): - -1. Go to your **SQL server** or **SQL managed instance** resource. -1. Under **Security**, select the **Identity (preview)** setting. -1. Under **User assigned managed identity**, select **Add**. -1. Select the desired **Subscription** and then under **User assigned managed identities** select the desired user assigned managed identity from the selected subscription. Then select the **Select** button. - -:::image type="content" source="media/authentication-azure-ad-user-assigned-managed-identity/existing-server-select-managed-identity.png" alt-text="Azure portal screenshot of user assigned managed identity when configuring existing server identity"::: - -### Create or set a managed identity using the Azure CLI - -The Azure CLI 2.26.0 (or higher) is required to run these commands with UMI. - -#### Azure SQL Database - -- To provision a new server with UMI, use the [az sql server create](/cli/azure/sql/server#az-sql-server-create) command. -- To obtain the UMI server information, use the [az sql server show](/cli/azure/sql/server#az-sql-server-show) command. -- To update the UMI server setting, use the [az sql server update](/cli/azure/sql/server#az-sql-server-update) command. 
- -#### Azure SQL Managed Instance - -- To provision a new managed instance with UMI, use the [az sql mi create](/cli/azure/sql/mi#az-sql-mi-create) command. -- To obtain the UMI managed instance information, use the [az sql server show](/cli/azure/sql/mi#az-sql-mi-show) command. -- To update the UMI managed instance setting, use the [az sql mi update](/cli/azure/sql/mi#az-sql-mi-update) command. - -### Create or set a managed identity using PowerShell - -[Az.Sql module 3.4](https://www.powershellgallery.com/packages/Az.Sql/3.4.0) or greater is required when using PowerShell with UMI. - -#### Azure SQL Database - -- To provision a new server with UMI, use the [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) command. -- To obtain the UMI server information, use the [Get-AzSqlServer](/powershell/module/az.sql/get-azsqlserver) command. -- To update the UMI server setting, use the [Set-AzSqlServer](/powershell/module/az.sql/set-azsqlserver) command. - -#### Azure SQL Managed Instance - -- To provision a new managed instance with UMI, use the [New-AzSqlInstance](/powershell/module/az.sql/new-azsqlinstance) command. -- To obtain the UMI managed instance information, use the [Get-AzSqlInstance](/powershell/module/az.sql/get-azsqlinstance) command. -- To update the UMI managed instance setting, use the [Set-AzSqlInstance](/powershell/module/az.sql/set-azsqlinstance) command. - -### Create or set a managed identity using REST API - -The REST API provisioning script used in [Creating an Azure SQL logical server using a user-assigned managed identity](authentication-azure-ad-user-assigned-managed-identity-create-server.md) or [Create an Azure SQL Managed Instance with a user-assigned managed identity](../managed-instance/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance.md) can also be used to update the UMI settings for the server. 
Rerun the provisioning command in the guide with the updated user-assigned managed identity property that you want to update. - -### Create or set a managed identity using an ARM template - -The ARM template used in [Creating an Azure SQL logical server using a user-assigned managed identity](authentication-azure-ad-user-assigned-managed-identity-create-server.md) or [Create an Azure SQL Managed Instance with a user-assigned managed identity](../managed-instance/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance.md) can also be used to update the UMI settings for the server. Rerun the provisioning command in the guide with the updated user-assigned managed identity property that you want to update. - -> [!NOTE] -> You can't change the SQL server administrator or password, nor the Azure AD admin by re-running the provisioning command for the ARM template. - -## Limitations and known issues - -- After a Managed Instance is created, the **Active Directory admin** blade in the Azure portal shows a warning: `Managed Instance needs permissions to access Azure Active Directory. Click here to grant "Read" permissions to your Managed Instance.` If the user-assigned managed identity was given the appropriate permissions discussed in the above [Permissions](#permissions) section, this warning can be ignored. -- If a system-assigned or user-assigned managed identity is used as the server or instance identity, deleting the identity will result in the server or instance inability to access Microsoft Graph. Azure AD authentication and other functions will fail. To restore Azure AD functionality, a new SMI or UMI must be assigned to the server with appropriate permissions. -- Permissions to access Microsoft Graph using UMI or SMI can only be granted using PowerShell. These permissions can't be granted using the Azure portal. 
- -## Next steps - -> [!div class="nextstepaction"] -> [Create an Azure SQL logical server using a user-assigned managed identity](authentication-azure-ad-user-assigned-managed-identity-create-server.md) - -> [!div class="nextstepaction"] -> [Create an Azure SQL Managed Instance with a user-assigned managed identity](../managed-instance/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance.md) - -> [!div class="nextstepaction"] -> [Using a user-assigned managed identity in Azure Synapse workspaces](../../synapse-analytics/security/workspaces-encryption.md#using-a-user-assigned-managed-identity) \ No newline at end of file diff --git a/articles/azure-sql/database/authentication-mfa-ssms-configure.md b/articles/azure-sql/database/authentication-mfa-ssms-configure.md deleted file mode 100644 index 8f17e346c162a..0000000000000 --- a/articles/azure-sql/database/authentication-mfa-ssms-configure.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: Configure multi-factor authentication -titleSuffix: Azure SQL Database & SQL Managed Instance & Azure Synapse Analytics -description: Learn how to use multi-factored authentication with SSMS for Azure SQL Database, Azure SQL Managed Instance and Azure Synapse Analytics. -services: sql-database -ms.service: sql-db-mi -ms.subservice: security -ms.custom: sqldbrb=3 -ms.devlang: -ms.topic: how-to -author: GithubMirek -ms.author: mireks -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 08/27/2019 ---- - -# Configure multi-factor authentication for SQL Server Management Studio and Azure AD -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -This article shows you how to use Azure Active Directory (Azure AD) multi-factor authentication (MFA) with SQL Server Management Studio (SSMS). 
Azure AD MFA can be used when connecting SSMS or SqlPackage.exe to [Azure SQL Database](sql-database-paas-overview.md), [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md) and [Azure Synapse Analytics](../../synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-what-is.md). For an overview of multi-factor authentication, see [Universal Authentication with SQL Database, SQL Managed Instance, and Azure Synapse (SSMS support for MFA)](../database/authentication-mfa-ssms-overview.md). - -> [!IMPORTANT] -> Databases in Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse are referred to collectively in the remainder of this article as databases, and the server is referring to the [server](logical-servers.md) that hosts databases for Azure SQL Database and Azure Synapse. - -## Configuration steps - -1. **Configure an Azure Active Directory** - For more information, see [Administering your Azure AD directory](/previous-versions/azure/azure-services/hh967611(v=azure.100)), [Integrating your on-premises identities with Azure Active Directory](../../active-directory/hybrid/whatis-hybrid-identity.md), [Add your own domain name to Azure AD](https://azure.microsoft.com/blog/20../../windows-azure-now-supports-federation-with-windows-server-active-directory/), [Microsoft Azure now supports federation with Windows Server Active Directory](https://azure.microsoft.com/blog/20../../windows-azure-now-supports-federation-with-windows-server-active-directory/), and [Manage Azure AD using Windows PowerShell](/previous-versions/azure/jj151815(v=azure.100)). -2. **Configure MFA** - For step-by-step instructions, see [What is Azure AD Multi-Factor Authentication?](../../active-directory/authentication/concept-mfa-howitworks.md), [Conditional Access (MFA) with Azure SQL Database and Data Warehouse](conditional-access-configure.md). (Full Conditional Access requires a Premium Azure Active Directory. 
Limited MFA is available with a standard Azure AD.) -3. **Configure Azure AD Authentication** - For step-by-step instructions, see [Connecting to SQL Database, SQL Managed Instance, or Azure Synapse using Azure Active Directory Authentication](authentication-aad-overview.md). -4. **Download SSMS** - On the client computer, download the latest SSMS, from [Download SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms). - -## Connecting by using universal authentication with SSMS - -The following steps show how to connect using the latest SSMS. - -[!INCLUDE[ssms-connect-azure-ad](../includes/ssms-connect-azure-ad.md)] - -1. To connect using Universal Authentication, on the **Connect to Server** dialog box in SQL Server Management Studio (SSMS), select **Active Directory - Universal with MFA support**. (If you see **Active Directory Universal Authentication** you are not on the latest version of SSMS.) - - ![Screenshot of the Connection Properties tab in the Connect to Server dialog in S S M S. "MyDatabase" is selected in the Connect to database dropdown.](./media/authentication-mfa-ssms-configure/mfa-no-tenant-ssms.png) -2. Complete the **User name** box with the Azure Active Directory credentials, in the format `user_name@domain.com`. - - ![Screenshot of the Connect to Server dialog settings for Server type, Server name, Authentication, and User name.](./media/authentication-mfa-ssms-configure/1mfa-universal-connect-user.png) -3. If you are connecting as a guest user, you no longer need to complete the AD domain name or tenant ID field for guest users because SSMS 18.x or later automatically recognizes it. For more information, see [Universal Authentication with SQL Database, SQL Managed Instance, and Azure Synapse (SSMS support for MFA)](../database/authentication-mfa-ssms-overview.md). - - ![Screenshot of the Connection Properties tab in the Connect to Server dialog in S S M S. 
"MyDatabase" is selected in the Connect to database dropdown.](./media/authentication-mfa-ssms-configure/mfa-no-tenant-ssms.png) - - However, If you are connecting as a guest user using SSMS 17.x or older, you must click **Options**, and on the **Connection Property** dialog box, and complete the **AD domain name or tenant ID** box. - - ![Screenshot of the Connection Properties tab in the Connect to Server dialog in S S M S.The option AD domain name or tenant ID property is filled in.](./media/authentication-mfa-ssms-configure/mfa-tenant-ssms.png) - -4. Select **Options** and specify the database on the **Options** dialog box. (If the connected user is a guest user (i.e. joe@outlook.com), you must check the box and add the current AD domain name or tenant ID as part of Options. See [Universal Authentication with SQL Database and Azure Synapse Analytics (SSMS support for MFA)](../database/authentication-mfa-ssms-overview.md). Then click **Connect**. -5. When the **Sign in to your account** dialog box appears, provide the account and password of your Azure Active Directory identity. No password is required if a user is part of a domain federated with Azure AD. - - ![Screenshot of the Sign in to your account dialog for Azure SQL Database and Data Warehouse. The account and password are filled in.](./media/authentication-mfa-ssms-configure/2mfa-sign-in.png) - - > [!NOTE] - > For Universal Authentication with an account that does not require MFA, you connect at this point. For users requiring MFA, continue with the following steps: - > - -6. Two MFA setup dialog boxes might appear. This one time operation depends on the MFA administrator setting, and therefore may be optional. For an MFA enabled domain this step is sometimes pre-defined (for example, the domain requires users to use a smartcard and pin). 
- - ![Screenshot of the Sign in to your account dialog for Azure SQL Database and Data Warehouse with a prompt to set up additional security verification.](./media/authentication-mfa-ssms-configure/3mfa-setup.png) - -7. The second possible one time dialog box allows you to select the details of your authentication method. The possible options are configured by your administrator. - - ![Screenshot of the Additional security verification dialog with options for selecting and configuring an authentication method.](./media/authentication-mfa-ssms-configure/4mfa-verify-1.png) -8. The Azure Active Directory sends the confirming information to you. When you receive the verification code, enter it into the **Enter verification code** box, and click **Sign in**. - - ![Screenshot of the Sign in to your account dialog for Azure SQL Database and Data Warehouse with a prompt to Enter a verification code.](./media/authentication-mfa-ssms-configure/5mfa-verify-2.png) - -When verification is complete, SSMS connects normally presuming valid credentials and firewall access. - -## Next steps - -- For an overview of multi-factor authentication, see [Universal Authentication with SQL Database, SQL Managed Instance, and Azure Synapse (SSMS support for MFA)](../database/authentication-mfa-ssms-overview.md). 
-- Grant others access to your database: [SQL Database Authentication and Authorization: Granting Access](logins-create-manage.md) -- Make sure others can connect through the firewall: [Configure a server-level firewall rule using the Azure portal](./firewall-configure.md) \ No newline at end of file diff --git a/articles/azure-sql/database/authentication-mfa-ssms-overview.md b/articles/azure-sql/database/authentication-mfa-ssms-overview.md deleted file mode 100644 index d02ff519e6f19..0000000000000 --- a/articles/azure-sql/database/authentication-mfa-ssms-overview.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: Using multi-factor Azure Active Directory authentication -description: Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics support connections from SQL Server Management Studio (SSMS) using Active Directory Universal Authentication. -ms.service: sql-db-mi -ms.subservice: security -titleSuffix: Azure SQL Database & SQL Managed Instance & Azure Synapse Analytics -ms.custom: seoapril2019, sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: GithubMirek -ms.author: mireks -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 12/15/2021 -tags: azure-synapse ---- - -# Using multi-factor Azure Active Directory authentication -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics support connections from [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms) using *Azure Active Directory - Universal with MFA* authentication. This article discusses the differences between the various authentication options, and also the limitations associated with using Universal Authentication in Azure Active Directory (Azure AD) for Azure SQL. 
- -**Download the latest SSMS** - On the client computer, download the latest version of SSMS, from [Download SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms). - -[!INCLUDE[ssms-connect-azure-ad](../includes/ssms-connect-azure-ad.md)] - - -For all the features discussed in this article, use at least July 2017, version 17.2. The most recent connection dialog box, should look similar to the following image: - - ![Screenshot of the Connect to Server dialog in SQL Server Management Studio, showing settings for Server type, Server name, and Authentication.](./media/authentication-mfa-ssms-overview/1mfa-universal-connect.png) - -## Authentication options - -There are two non-interactive authentication models for Azure AD, which can be used in many different applications (ADO.NET, JDCB, ODC, and so on). These two methods never result in pop-up dialog boxes: - -- `Azure Active Directory - Password` -- `Azure Active Directory - Integrated` - -The interactive method that also supports Azure AD multi-factor authentication (MFA) is: - -- `Azure Active Directory - Universal with MFA` - -Azure AD MFA helps safeguard access to data and applications while meeting user demand for a simple sign-in process. It delivers strong authentication with a range of easy verification options (phone call, text message, smart cards with pin, or mobile app notification), allowing users to choose the method they prefer. Interactive MFA with Azure AD can result in a pop-up dialog box for validation. - -For a description of Azure AD multi-factor authentication, see [multi-factor authentication](../../active-directory/authentication/concept-mfa-howitworks.md). -For configuration steps, see [Configure Azure SQL Database multi-factor authentication for SQL Server Management Studio](authentication-mfa-ssms-configure.md). 
- -### Azure AD domain name or tenant ID parameter - -Beginning with [SSMS version 17](/sql/ssms/download-sql-server-management-studio-ssms), users that are imported into the current Azure AD from other Azure Active Directories as guest users, can provide the Azure AD domain name, or tenant ID when they connect. Guest users include users invited from other Azure ADs, Microsoft accounts such as outlook.com, hotmail.com, live.com, or other accounts like gmail.com. This information allows `Azure Active Directory - Universal with MFA` authentication to identify the correct authenticating authority. This option is also required to support Microsoft accounts (MSA) such as outlook.com, hotmail.com, live.com, or non-MSA accounts. - -All guest users who want to be authenticated using Universal Authentication must enter their Azure AD domain name or tenant ID. This parameter represents the current Azure AD domain name or tenant ID that the Azure SQL logical server is associated with. For example, if the SQL logical server is associated with the Azure AD domain `contosotest.onmicrosoft.com`, where user `joe@contosodev.onmicrosoft.com` is hosted as an imported user from the Azure AD domain `contosodev.onmicrosoft.com`, the domain name required to authenticate this user is `contosotest.onmicrosoft.com`. When the user is a native user of the Azure AD associated to SQL logical server, and is not an MSA account, no domain name or tenant ID is required. To enter the parameter (beginning with SSMS version 17.2): - - -1. Open a connection in SSMS. Input your server name, and select **Azure Active Directory - Universal with MFA** authentication. Add the **User name** that you want to sign in with. -1. Select the **Options** box, and go over to the **Connection Properties** tab. In the **Connect to Database** dialog box, complete the dialog box for your database. 
Check the **AD domain name or tenant ID** box, and provide authenticating authority, such as the domain name (**contosotest.onmicrosoft.com**) or the GUID of the tenant ID. - - ![Screenshot of the Connection Properties tab highlighting the settings for Connect to database and AD domain name or tenant ID.](./media/authentication-mfa-ssms-overview/mfa-tenant-ssms.png) - -If you are running SSMS 18.x or later, the AD domain name or tenant ID is no longer needed for guest users because 18.x or later automatically recognizes it. - - ![Screenshot of the Connection Properties tab in the Connect to Server dialog in S S M S. "MyDatabase" is selected in the Connect to database field.](./media/authentication-mfa-ssms-overview/mfa-no-tenant-ssms.png) - -### Azure AD business to business support - -Azure AD users that are supported for Azure AD B2B scenarios as guest users (see [What is Azure B2B collaboration](../../active-directory/external-identities/what-is-b2b.md)) can connect to SQL Database and Azure Synapse as individual users or members of an Azure AD group created in the associated Azure AD, and mapped manually using the [CREATE USER (Transact-SQL)](/sql/t-sql/statements/create-user-transact-sql) statement in a given database. - -For example, if `steve@gmail.com` is invited to Azure AD `contosotest` (with the Azure AD domain `contosotest.onmicrosoft.com`), a user `steve@gmail.com` must be created for a specific database (such as **MyDatabase**) by an Azure AD SQL administrator or Azure AD DBO by executing the Transact-SQL `create user [steve@gmail.com] FROM EXTERNAL PROVIDER` statement. If `steve@gmail.com` is part of an Azure AD group, such as `usergroup` then this group must be created for a specific database (such as **MyDatabase**) by an Azure AD SQL administrator, or Azure AD DBO by executing the Transact-SQL statement `create user [usergroup] FROM EXTERNAL PROVIDER` statement. 
- -After the database user or group is created, then the user `steve@gmail.com` can sign into `MyDatabase` using the SSMS authentication option `Azure Active Directory – Universal with MFA`. By default, the user or group only has connect permission. Any further data access will need to be [granted](/sql/t-sql/statements/grant-transact-sql) in the database by a user with enough privilege. - -> [!NOTE] -> For SSMS 17.x, using `steve@gmail.com` as a guest user, you must check the **AD domain name or tenant ID** box and add the AD domain name `contosotest.onmicrosoft.com` in the **Connection Property** dialog box. The **AD domain name or tenant ID** option is only supported for the **Azure Active Directory - Universal with MFA** authentication. Otherwise, the check box it is greyed out. - -## Universal Authentication limitations - -- SSMS and SqlPackage.exe are the only tools currently enabled for MFA through Active Directory Universal Authentication. -- SSMS version 17.2 supports multi-user concurrent access using Universal Authentication with MFA. For SSMS version 17.0 and 17.1, the tool restricts a login for an instance of SSMS using Universal Authentication to a single Azure Active Directory account. To sign in as another Azure AD account, you must use another instance of SSMS. This restriction is limited to Active Directory Universal Authentication; you can sign into a different server using `Azure Active Directory - Password` authentication, `Azure Active Directory - Integrated` authentication, or `SQL Server Authentication`. -- SSMS supports Active Directory Universal Authentication for Object Explorer, Query Editor, and Query Store visualization. -- SSMS version 17.2 provides DacFx Wizard support for Export/Extract/Deploy Data database. Once a specific user is authenticated through the initial authentication dialog using Universal Authentication, the DacFx Wizard functions the same way it does for all other authentication methods. 
-- The SSMS Table Designer does not support Universal Authentication. -- There are no additional software requirements for Active Directory Universal Authentication except that you must use a supported version of SSMS. -- See the following link for the latest Microsoft Authentication Library (MSAL) version for Universal authentication: [Overview of the Microsoft Authentication Library (MSAL)](../../active-directory/develop/msal-overview.md#languages-and-frameworks). - -## Next steps - -- For configuration steps, see [Configure Azure SQL Database multi-factor authentication for SQL Server Management Studio](authentication-mfa-ssms-configure.md). -- Grant others access to your database: [SQL Database Authentication and Authorization: Granting Access](logins-create-manage.md) -- Make sure others can connect through the firewall: [Configure a server-level firewall rule using the Azure portal](firewall-configure.md) -- [Configure and manage Azure Active Directory authentication with SQL Database or Azure Synapse](authentication-aad-configure.md) -- [Create Azure AD guest users and set as an Azure AD admin](authentication-aad-guest-users.md) -- [Microsoft SQL Server Data-Tier Application Framework (17.0.0 GA)](https://www.microsoft.com/download/details.aspx?id=55088) -- [SQLPackage.exe](/sql/tools/sqlpackage) -- [Import a BACPAC file to a new database](database-import.md) -- [Export a database to a BACPAC file](database-export.md) -- C# interface [IUniversalAuthProvider Interface](/dotnet/api/microsoft.sqlserver.dac.iuniversalauthprovider) \ No newline at end of file diff --git a/articles/azure-sql/database/auto-failover-group-configure-sql-db.md b/articles/azure-sql/database/auto-failover-group-configure-sql-db.md deleted file mode 100644 index 662db1d56948a..0000000000000 --- a/articles/azure-sql/database/auto-failover-group-configure-sql-db.md +++ /dev/null @@ -1,433 +0,0 @@ ---- -title: Configure an auto-failover group -titleSuffix: Azure SQL Database -description: 
Learn how to configure an auto-failover group for a single or pooled database in Azure SQL Database using the Azure portal and PowerShell. -services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: devx-track-azurecli, sql-db-mi-split -ms.topic: how-to -ms.devlang: -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 03/01/2022 -zone_pivot_groups: azure-sql-deployment-option-single-elastic ---- -# Configure an auto-failover group for Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!div class="op_single_selector"] -> * [Azure SQL Database](auto-failover-group-configure-sql-db.md) -> * [Azure SQL Managed Instance](../managed-instance/auto-failover-group-configure-sql-mi.md) - -This topic teaches you how to configure an [auto-failover group](auto-failover-group-sql-db.md) for single and pooled databases in Azure SQL Database by using the Azure portal and Azure PowerShell. For an end-to-end experience, review the [Auto-failover group tutorial](failover-group-add-single-database-tutorial.md). - -> [!NOTE] -> This article covers auto-failover groups for Azure SQL Database. For Azure SQL Managed Instance, see [Configure auto-failover groups in Azure SQL Managed Instance](../managed-instance/auto-failover-group-configure-sql-mi.md). - - -::: zone pivot="azure-sql-single-db" - - -## Prerequisites - -Consider the following prerequisites for creating your failover group for a single database: - -- The server login and firewall settings for the secondary server must match that of your primary server. - -## Create failover group - -# [Portal](#tab/azure-portal) - -Create your failover group and add your single database to it using the Azure portal. - -1. Select **Azure SQL** in the left-hand menu of the [Azure portal](https://portal.azure.com). If **Azure SQL** is not in the list, select **All services**, then type Azure SQL in the search box. 
(Optional) Select the star next to **Azure SQL** to favorite it and add it as an item in the left-hand navigation. -1. Select the database you want to add to the failover group. -1. Select the name of the server under **Server name** to open the settings for the server. - - ![Open server for single db](./media/auto-failover-group-configure-sql-db/open-sql-db-server.png) - -1. Select **Failover groups** under the **Settings** pane, and then select **Add group** to create a new failover group. - - ![Add new failover group](./media/auto-failover-group-configure-sql-db/sqldb-add-new-failover-group.png) - -1. On the **Failover Group** page, enter or select the required values, and then select **Create**. - - - **Databases within the group**: Choose the database you want to add to your failover group. Adding the database to the failover group will automatically start the geo-replication process. - - ![Add SQL Database to failover group](./media/auto-failover-group-configure-sql-db/add-sqldb-to-failover-group.png) - -# [PowerShell](#tab/azure-powershell) - -Create your failover group and add your database to it using PowerShell. - - ```powershell-interactive - $subscriptionId = "" - $resourceGroupName = "" - $location = "" - $adminLogin = "" - $password = "" - $serverName = "" - $databaseName = "" - $drLocation = "" - $drServerName = "" - $failoverGroupName = "" - - # Create a secondary server in the failover region - Write-host "Creating a secondary server in the failover region..." - $drServer = New-AzSqlServer -ResourceGroupName $resourceGroupName ` - -ServerName $drServerName ` - -Location $drLocation ` - -SqlAdministratorCredentials $(New-Object -TypeName System.Management.Automation.PSCredential ` - -ArgumentList $adminlogin, $(ConvertTo-SecureString -String $password -AsPlainText -Force)) - $drServer - - # Create a failover group between the servers - $failovergroup = Write-host "Creating a failover group between the primary and secondary server..." 
- New-AzSqlDatabaseFailoverGroup ` - ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -PartnerServerName $drServerName ` - FailoverGroupName $failoverGroupName ` - FailoverPolicy Automatic ` - -GracePeriodWithDataLossHours 2 - $failovergroup - - # Add the database to the failover group - Write-host "Adding the database to the failover group..." - Get-AzSqlDatabase ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -DatabaseName $databaseName | ` - Add-AzSqlDatabaseToFailoverGroup ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -FailoverGroupName $failoverGroupName - Write-host "Successfully added the database to the failover group..." - ``` - ---- - -## Test failover - -Test failover of your failover group using the Azure portal or PowerShell. - -# [Portal](#tab/azure-portal) - -Test failover of your failover group using the Azure portal. - -1. Select **Azure SQL** in the left-hand menu of the [Azure portal](https://portal.azure.com). If **Azure SQL** is not in the list, select **All services**, then type "Azure SQL" in the search box. (Optional) Select the star next to **Azure SQL** to favorite it and add it as an item in the left-hand navigation. -1. Select the database you want to add to the failover group. - - ![Open server for single db](./media/auto-failover-group-configure-sql-db/open-sql-db-server.png) - -1. Select **Failover groups** under the **Settings** pane and then choose the failover group you just created. - - ![Select the failover group from the portal](./media/auto-failover-group-configure-sql-db/select-failover-group.png) - -1. Review which server is primary and which server is secondary. -1. Select **Failover** from the task pane to fail over your failover group containing your database. -1. Select **Yes** on the warning that notifies you that TDS sessions will be disconnected. 
- - ![Fail over your failover group containing your database](./media/auto-failover-group-configure-sql-db/failover-sql-db.png) - -1. Review which server is now primary and which server is secondary. If failover succeeded, the two servers should have swapped roles. -1. Select **Failover** again to fail the servers back to their original roles. - -# [PowerShell](#tab/azure-powershell) - -Test failover of your failover group using PowerShell. - -Check the role of the secondary replica: - - ```powershell-interactive - # Set variables - $resourceGroupName = "" - $serverName = "" - $failoverGroupName = "" - - # Check role of secondary replica - Write-host "Confirming the secondary replica is secondary...." - (Get-AzSqlDatabaseFailoverGroup ` - -FailoverGroupName $failoverGroupName ` - -ResourceGroupName $resourceGroupName ` - -ServerName $drServerName).ReplicationRole - ``` - -Fail over to the secondary server: - - ```powershell-interactive - # Set variables - $resourceGroupName = "" - $serverName = "" - $failoverGroupName = "" - - # Failover to secondary server - Write-host "Failing over failover group to the secondary..." - Switch-AzSqlDatabaseFailoverGroup ` - -ResourceGroupName $resourceGroupName ` - -ServerName $drServerName ` - -FailoverGroupName $failoverGroupName - Write-host "Failed failover group to successfully to" $drServerName - ``` - -Revert failover group back to the primary server: - - ```powershell-interactive - # Set variables - $resourceGroupName = "" - $serverName = "" - $failoverGroupName = "" - - # Revert failover to primary server - Write-host "Failing over failover group to the primary...." - Switch-AzSqlDatabaseFailoverGroup ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -FailoverGroupName $failoverGroupName - Write-host "Failed failover group successfully to back to" $serverName - ``` - ---- - -> [!IMPORTANT] -> If you need to delete the secondary database, remove it from the failover group before deleting it. 
Deleting a secondary database before it is removed from the failover group can cause unpredictable behavior. - -::: zone-end - -::: zone pivot="azure-sql-elastic-pool" - - -## Prerequisites - -Consider the following prerequisites for creating your failover group for a pooled database: - -- The server login and firewall settings for the secondary server must match that of your primary server. - -## Create failover group - -Create the failover group for your elastic pool using the Azure portal or PowerShell. - -# [Portal](#tab/azure-portal) - -Create your failover group and add your elastic pool to it using the Azure portal. - -1. Select **Azure SQL** in the left-hand menu of the [Azure portal](https://portal.azure.com). If **Azure SQL** is not in the list, select **All services**, then type "Azure SQL" in the search box. (Optional) Select the star next to **Azure SQL** to favorite it and add it as an item in the left-hand navigation. -1. Select the elastic pool you want to add to the failover group. -1. On the **Overview** pane, select the name of the server under **Server name** to open the settings for the server. - - ![Open server for elastic pool](./media/auto-failover-group-configure-sql-db/server-for-elastic-pool.png) - -1. Select **Failover groups** under the **Settings** pane, and then select **Add group** to create a new failover group. - - ![Add new failover group](./media/auto-failover-group-configure-sql-db/sqldb-add-new-failover-group.png) - -1. On the **Failover Group** page, enter or select the required values, and then select **Create**. Either create a new secondary server, or select an existing secondary server. - -1. Select **Databases within the group** then choose the elastic pool you want to add to the failover group. If an elastic pool does not already exist on the secondary server, a warning appears prompting you to create an elastic pool on the secondary server. 
Select the warning, and then select **OK** to create the elastic pool on the secondary server. - - ![Add elastic pool to failover group](./media/auto-failover-group-configure-sql-db/add-elastic-pool-to-failover-group.png) - -1. Select **Select** to apply your elastic pool settings to the failover group, and then select **Create** to create your failover group. Adding the elastic pool to the failover group will automatically start the geo-replication process. - -# [PowerShell](#tab/azure-powershell) - -Create your failover group and add your elastic pool to it using PowerShell. - - ```powershell-interactive - $subscriptionId = "" - $resourceGroupName = "" - $location = "" - $adminLogin = "" - $password = "" - $serverName = "" - $databaseName = "" - $poolName = "myElasticPool" - $drLocation = "" - $drServerName = "" - $failoverGroupName = "" - - # Create a failover group between the servers - Write-host "Creating failover group..." - New-AzSqlDatabaseFailoverGroup ` - ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -PartnerServerName $drServerName ` - FailoverGroupName $failoverGroupName ` - FailoverPolicy Automatic ` - -GracePeriodWithDataLossHours 2 - Write-host "Failover group created successfully." - - # Add elastic pool to the failover group - Write-host "Enumerating databases in elastic pool...." - $FailoverGroup = Get-AzSqlDatabaseFailoverGroup ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -FailoverGroupName $failoverGroupName - $databases = Get-AzSqlElasticPoolDatabase ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -ElasticPoolName $poolName - Write-host "Adding databases to failover group..." - $failoverGroup = $failoverGroup | Add-AzSqlDatabaseToFailoverGroup ` - -Database $databases - Write-host "Databases added to failover group successfully." - ``` - ---- - -## Test failover - -Test failover of your elastic pool using the Azure portal or PowerShell. 
- -# [Portal](#tab/azure-portal) - -Fail your failover group over to the secondary server, and then fail back using the Azure portal. - -1. Select **Azure SQL** in the left-hand menu of the [Azure portal](https://portal.azure.com). If **Azure SQL** is not in the list, select **All services**, then type "Azure SQL" in the search box. (Optional) Select the star next to **Azure SQL** to favorite it and add it as an item in the left-hand navigation. -1. Select the elastic pool you want to add to the failover group. -1. On the **Overview** pane, select the name of the server under **Server name** to open the settings for the server. - - ![Open server for elastic pool](./media/auto-failover-group-configure-sql-db/server-for-elastic-pool.png) -1. Select **Failover groups** under the **Settings** pane and then choose the failover group you created in section 2. - - ![Select the failover group from the portal](./media/auto-failover-group-configure-sql-db/select-failover-group.png) - -1. Review which server is primary, and which server is secondary. -1. Select **Failover** from the task pane to fail over your failover group containing your elastic pool. -1. Select **Yes** on the warning that notifies you that TDS sessions will be disconnected. - - ![Fail over your failover group containing your database](./media/auto-failover-group-configure-sql-db/failover-sql-db.png) - -1. Review which server is primary, which server is secondary. If failover succeeded, the two servers should have swapped roles. -1. Select **Failover** again to fail the failover group back to the original settings. - -# [PowerShell](#tab/azure-powershell) - -Test failover of your failover group using PowerShell. - -Check the role of the secondary replica: - - ```powershell-interactive - # Set variables - $resourceGroupName = "" - $serverName = "" - $failoverGroupName = "" - - # Check role of secondary replica - Write-host "Confirming the secondary replica is secondary...." 
- (Get-AzSqlDatabaseFailoverGroup ` - -FailoverGroupName $failoverGroupName ` - -ResourceGroupName $resourceGroupName ` - -ServerName $drServerName).ReplicationRole - ``` - -Fail over to the secondary server: - - ```powershell-interactive - # Set variables - $resourceGroupName = "" - $serverName = "" - $failoverGroupName = "" - - # Failover to secondary server - Write-host "Failing over failover group to the secondary..." - Switch-AzSqlDatabaseFailoverGroup ` - -ResourceGroupName $resourceGroupName ` - -ServerName $drServerName ` - -FailoverGroupName $failoverGroupName - Write-host "Failed failover group to successfully to" $drServerName - ``` - ---- - -> [!IMPORTANT] -> If you need to delete the secondary database, remove it from the failover group before deleting it. Deleting a secondary database before it is removed from the failover group can cause unpredictable behavior. - -::: zone-end - -## Use Private Link - -Using a private link allows you to associate a logical server to a specific private IP address within the virtual network and subnet. - -To use a private link with your failover group, do the following: - -1. Ensure your primary and secondary servers are in a [paired region](../../availability-zones/cross-region-replication-azure.md). -1. Create the virtual network and subnet in each region to host private endpoints for primary and secondary servers such that they have non-overlapping IP address spaces. For example, the primary virtual network address range of 10.0.0.0/16 and the secondary virtual network address range of 10.0.0.1/16 overlaps. For more information about virtual network address ranges, see the blog [designing Azure virtual networks](https://devblogs.microsoft.com/premier-developer/understanding-cidr-notation-when-designing-azure-virtual-networks-and-subnets/). -1. Create a [private endpoint and Azure Private DNS zone for the primary server](../../private-link/create-private-endpoint-portal.md#create-a-private-endpoint). -1. 
Create a private endpoint for the secondary server as well, but this time choose to reuse the same Private DNS zone that was created for the primary server. -1. Once the private link is established, you can create the failover group following the steps outlined previously in this article. - - -## Locate listener endpoint - -Once your failover group is configured, update the connection string for your application to the listener endpoint. This will keep your application connected to the failover group listener, rather than the primary database, elastic pool, or instance database. That way, you don't have to manually update the connection string every time your database entity fails over, and traffic is routed to whichever entity is currently primary. - -The listener endpoint is in the form of `fog-name.database.windows.net`, and is visible in the Azure portal, when viewing the failover group: - -![Failover group connection string](./media/auto-failover-group-configure-sql-db/find-failover-group-connection-string.png) - -## Change the secondary region - -To illustrate the change sequence, we will assume that server A is the primary server, server B is the existing secondary server, and server C is the new secondary in the third region. To make the transition, follow these steps: - -1. Create additional secondaries of each database on server A to server C using [active geo-replication](active-geo-replication-overview.md). Each database on server A will have two secondaries, one on server B and one on server C. This will guarantee that the primary databases remain protected during the transition. -1. Delete the failover group. At this point login attempts using failover group endpoints will be failing. -1. Re-create the failover group with the same name between servers A and C. -1. Add all primary databases on server A to the new failover group. At this point the login attempts will stop failing. -1. Delete server B. All databases on B will be deleted automatically. 
- -## Change the primary region - -To illustrate the change sequence, we will assume server A is the primary server, server B is the existing secondary server, and server C is the new primary in the third region. To make the transition, follow these steps: - -1. Perform a planned geo-failover to switch the primary server to B. Server A will become the new secondary server. The failover may result in several minutes of downtime. The actual time will depend on the size of failover group. -1. Create additional secondaries of each database on server B to server C using [active geo-replication](active-geo-replication-overview.md). Each database on server B will have two secondaries, one on server A and one on server C. This will guarantee that the primary databases remain protected during the transition. -1. Delete the failover group. At this point login attempts using failover group endpoints will be failing. -1. Re-create the failover group with the same name between servers B and C. -1. Add all primary databases on B to the new failover group. At this point the login attempts will stop failing. -1. Perform a planned geo-failover of the failover group to switch B and C. Now server C will become the primary and B the secondary. All secondary databases on server A will be automatically linked to the primaries on C. As in step 1, the failover may result in several minutes of downtime. -1. Delete server A. All databases on A will be deleted automatically. - -> [!IMPORTANT] -> When the failover group is deleted, the DNS records for the listener endpoints are also deleted. At that point, there is a non-zero probability of somebody else creating a failover group or a server DNS alias with the same name. Because failover group names and DNS aliases must be globally unique, this will prevent you from using the same name again. To minimize this risk, don't use generic failover group names. 
- -## Permissions - - - -Permissions for a failover group are managed via [Azure role-based access control (Azure RBAC)](../../role-based-access-control/overview.md). - -Azure RBAC write access is necessary to create and manage failover groups. The [SQL Server Contributor role](../../role-based-access-control/built-in-roles.md#sql-server-contributor) has all the necessary permissions to manage failover groups. - -The following table lists specific permission scopes for Azure SQL Database: - -| **Action** | **Permission** | **Scope**| -| :---- | :---- | :---- | -| **Create failover group**| Azure RBAC write access | Primary server
    Secondary server
    All databases in failover group | -| **Update failover group** | Azure RBAC write access | Failover group
    All databases on the current primary server| -| **Fail over failover group** | Azure RBAC write access | Failover group on new server | - - -## Remarks - -- Removing a failover group for a single or pooled database does not stop replication, and it does not delete the replicated database. You will need to manually stop geo-replication and delete the database from the secondary server if you want to add a single or pooled database back to a failover group after it's been removed. Failing to do either may result in an error similar to `The operation cannot be performed due to multiple errors` when attempting to add the database to the failover group. -- Auto-failover group name is subject to [naming restrictions](../../azure-resource-manager/management/resource-name-rules.md). - -## Next steps - -For detailed steps configuring a failover group, see the following tutorials: - -- [Add a single database to a failover group](failover-group-add-single-database-tutorial.md) -- [Add an elastic pool to a failover group](failover-group-add-elastic-pool-tutorial.md) -- [Add a managed instance to a failover group](../managed-instance/failover-group-add-instance-tutorial.md) - -For an overview of Azure SQL Database high availability options, see [geo-replication](active-geo-replication-overview.md) and [auto-failover groups](auto-failover-group-overview.md). diff --git a/articles/azure-sql/database/auto-failover-group-sql-db.md b/articles/azure-sql/database/auto-failover-group-sql-db.md deleted file mode 100644 index e7d4139f58981..0000000000000 --- a/articles/azure-sql/database/auto-failover-group-sql-db.md +++ /dev/null @@ -1,270 +0,0 @@ ---- -title: Auto-failover groups overview & best practices -description: Auto-failover groups let you manage geo-replication and automatic / coordinated failover of a group of databases on a server for both single and pooled database in Azure SQL Database. 
-services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: sql-db-mi-split -ms.topic: conceptual -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 03/01/2022 ---- - -# Auto-failover groups overview & best practices (Azure SQL Database) -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!div class="op_single_selector"] -> * [Azure SQL Database](auto-failover-group-sql-db.md) -> * [Azure SQL Managed Instance](../managed-instance/auto-failover-group-sql-mi.md) - -The auto-failover groups feature allows you to manage the replication and failover of some or all databases on a [logical server](logical-servers.md) to another region. This article focuses on using the Auto-failover group feature with Azure SQL Database and some best practices. - -To get started, review [Configure auto-failover group](auto-failover-group-configure-sql-db.md). For an end-to-end experience, see the [Auto-failover group tutorial](failover-group-add-single-database-tutorial.md). - - -> [!NOTE] -> - This article covers auto-failover groups for Azure SQL Database. For Azure SQL Managed Instance, see [Auto-failover groups in Azure SQL Managed Instance](../managed-instance/auto-failover-group-sql-mi.md). -> - Auto-failover groups support geo-replication of all databases in the group to only one secondary server in a different region. If you need to create multiple Azure SQL Database geo-secondary replicas (in the same or different regions) for the same primary replica, use [active geo-replication](active-geo-replication-overview.md). 
-> - -## Overview - -[!INCLUDE [auto-failover-groups-overview](../includes/auto-failover-group-overview.md)] - - -## Terminology and capabilities - - - - -- **Failover group (FOG)** - - A failover group is a named group of databases managed by a single server that can fail over as a unit to another Azure region in case all or some primary databases become unavailable due to an outage in the primary region. - - > [!IMPORTANT] - > The name of the failover group must be globally unique within the `.database.windows.net` domain. - -- **Servers** - - Some or all of the user databases on a [logical server](logical-servers.md) can be placed in a failover group. Also, a server supports multiple failover groups on a single server. - -- **Primary** - - The server that hosts the primary databases in the failover group. - -- **Secondary** - - The server that hosts the secondary databases in the failover group. The secondary cannot be in the same Azure region as the primary. - -- **Adding single databases to failover group** - - You can put several single databases on the same server into the same failover group. If you add a single database to the failover group, it automatically creates a secondary database using the same edition and compute size on secondary server. You specified that server when the failover group was created. If you add a database that already has a secondary database in the secondary server, that geo-replication link is inherited by the group. When you add a database that already has a secondary database in a server that is not part of the failover group, a new secondary is created in the secondary server. - - > [!IMPORTANT] - > Make sure that the secondary server doesn't have a database with the same name unless it is an existing secondary database. - -- **Adding databases in elastic pool to failover group** - - You can put all or several databases within an elastic pool into the same failover group. 
If the primary database is in an elastic pool, the secondary is automatically created in the elastic pool with the same name (secondary pool). You must ensure that the secondary server contains an elastic pool with the same exact name and enough free capacity to host the secondary databases that will be created by the failover group. If you add a database in the pool that already has a secondary database in the secondary pool, that geo-replication link is inherited by the group. When you add a database that already has a secondary database in a server that is not part of the failover group, a new secondary is created in the secondary pool. - -- **Failover group read-write listener** - - A DNS CNAME record that points to the current primary. It is created automatically when the failover group is created and allows the read-write workload to transparently reconnect to the primary when the primary changes after failover. When the failover group is created on a server, the DNS CNAME record for the listener URL is formed as `.database.windows.net`. - -- **Failover group read-only listener** - - A DNS CNAME record that points to the current secondary. It is created automatically when the failover group is created and allows the read-only SQL workload to transparently connect to the secondary when the secondary changes after failover. When the failover group is created on a server, the DNS CNAME record for the listener URL is formed as `.secondary.database.windows.net`. - -- **Multiple failover groups** - - You can configure multiple failover groups for the same pair of servers to control the scope of geo-failovers. Each group fails over independently. If your tenant-per-database application is deployed in multiple regions and uses elastic pools, you can use this capability to mix primary and secondary databases in each pool. This way you may be able to reduce the impact of an outage to only some tenant databases. 
- -[!INCLUDE [auto-failover-group-terminology](../includes/auto-failover-group-terminology.md)] - -## Failover group architecture - -A failover group in Azure SQL Database can include one or multiple databases, typically used by the same application. When you are using auto-failover groups with automatic failover policy, an outage that impacts one or several of the databases in the group will result in an automatic geo-failover. - -The auto-failover group must be configured on the primary server and will connect it to the secondary server in a different Azure region. The groups can include all or some databases in these servers. The following diagram illustrates a typical configuration of a geo-redundant cloud application using multiple databases and auto-failover group. - -![Diagram shows a typical configuration of a geo-redundant cloud application using multiple databases and auto-failover group.](./media/auto-failover-group-overview/auto-failover-group.png) - -When designing a service with business continuity in mind, follow the general guidelines and best practices outlined in this article. When configuring a failover group, ensure that authentication and network access on the secondary is set up to function correctly after geo-failover, when the geo-secondary becomes the new primary. For details, see [SQL Database security after disaster recovery](active-geo-replication-security-configure.md). For more information about designing solutions for disaster recovery, see [Designing Cloud Solutions for Disaster Recovery Using active geo-replication](designing-cloud-solutions-for-disaster-recovery.md). - -For information about using point-in-time restore with failover groups, see [Point in Time Recovery (PITR)](recovery-using-backups.md#point-in-time-restore). - - -## Initial seeding - -When adding databases or elastic pools to a failover group, there is an initial seeding phase before data replication starts. 
The initial seeding phase is the longest and most expensive operation. Once initial seeding completes, data is synchronized, and then only subsequent data changes are replicated. The time it takes for the initial seeding to complete depends on the size of your data, number of replicated databases, the load on primary databases, and the speed of the link between the primary and secondary. Under normal circumstances, possible seeding speed is up to 500 GB an hour for SQL Database. Seeding is performed for all databases in parallel. - - -## Use multiple failover groups to failover multiple databases - -One or many failover groups can be created between two servers in different regions (primary and secondary servers). Each group can include one or several databases that are recovered as a unit in case all or some primary databases become unavailable due to an outage in the primary region. Creating a failover group creates geo-secondary databases with the same service objective as the primary. If you add an existing geo-replication relationship to a failover group, make sure the geo-secondary is configured with the same service tier and compute size as the primary. - -## Use the read-write listener (primary) - -For read-write workloads, use `.database.windows.net` as the server name in the connection string. Connections will be automatically directed to the primary. This name does not change after failover. Note the failover involves updating the DNS record so the client connections are redirected to the new primary only after the client DNS cache is refreshed. The time to live (TTL) of the primary and secondary listener DNS record is 30 seconds. - -## Use the read-only listener (secondary) - -If you have logically isolated read-only workloads that are tolerant to data latency, you can run them on the geo-secondary. For read-only sessions, use `.secondary.database.windows.net` as the server name in the connection string. 
Connections will be automatically directed to the geo-secondary. It is also recommended that you indicate read intent in the connection string by using `ApplicationIntent=ReadOnly`. - -In Premium, Business Critical, and Hyperscale service tiers, SQL Database supports the use of [read-only replicas](read-scale-out.md) to offload read-only query workloads, using the `ApplicationIntent=ReadOnly` parameter in the connection string. When you have configured a geo-secondary, you can use this capability to connect to either a read-only replica in the primary location or in the geo-replicated location: -- To connect to a read-only replica in the secondary location, use `ApplicationIntent=ReadOnly` and `.secondary.database.windows.net`. - -## Potential performance degradation after failover - -A typical Azure application uses multiple Azure services and consists of multiple components. The automatic geo-failover of the failover group is triggered based on the state the Azure SQL components alone. Other Azure services in the primary region may not be affected by the outage and their components may still be available in that region. Once the primary databases switch to the secondary (DR) region, the latency between the dependent components may increase. To avoid the impact of higher latency on the application's performance, ensure the redundancy of all the application's components in the DR region, follow these [network security guidelines](#failover-groups-and-network-security), and orchestrate the geo-failover of relevant application components together with the database. - -## Potential data loss after failover - -If an outage occurs in the primary region, recent transactions may not be able to replicate to the geo-secondary. If the automatic failover policy is configured, the system waits for the period you specified by `GracePeriodWithDataLossHours` before initiating an automatic geo-failover. The default value is 1 hour. 
This favors database availability over no data loss. Setting `GracePeriodWithDataLossHours` to a larger number, such as 24 hours, or disabling automatic geo-failover lets you reduce the likelihood of data loss at the expense of database availability. - -> [!IMPORTANT] -> Elastic pools with 800 or fewer DTUs or 8 or fewer vCores, and more than 250 databases may encounter issues including longer planned geo-failovers and degraded performance. These issues are more likely to occur for write intensive workloads, when geo-replicas are widely separated by geography, or when multiple secondary geo-replicas are used for each database. A symptom of these issues is an increase in geo-replication lag over time, potentially leading to a more extensive data loss in an outage. This lag can be monitored using [sys.dm_geo_replication_link_status](/sql/relational-databases/system-dynamic-management-views/sys-dm-geo-replication-link-status-azure-sql-database). If these issues occur, then mitigation includes scaling up the pool to have more DTUs or vCores, or reducing the number of geo-replicated databases in the pool. - - -## Failover groups and network security - -For some applications the security rules require that the network access to the data tier is restricted to a specific component or components such as a VM, web service, etc. This requirement presents some challenges for business continuity design and the use of failover groups. Consider the following options when implementing such restricted access. - -### Use failover groups and virtual network service endpoints - -If you are using [Virtual Network service endpoints and rules](vnet-service-endpoint-rule-overview.md) to restrict access to your database in SQL Database, be aware that each virtual network service endpoint applies to only one Azure region. The endpoint does not enable other regions to accept communication from the subnet. 
Therefore, only the client applications deployed in the same region can connect to the primary database. Since a geo-failover results in the SQL Database client sessions being rerouted to a server in a different (secondary) region, these sessions will fail if originated from a client outside of that region. For that reason, the automatic failover policy cannot be enabled if the participating servers or instances are included in the Virtual Network rules. To support manual failover, follow these steps: - -1. Provision the redundant copies of the front-end components of your application (web service, virtual machines etc.) in the secondary region. -2. Configure the [virtual network rules](vnet-service-endpoint-rule-overview.md) individually for primary and secondary server. -3. Enable the [front-end failover using a Traffic manager configuration](designing-cloud-solutions-for-disaster-recovery.md#scenario-1-using-two-azure-regions-for-business-continuity-with-minimal-downtime). -4. Initiate manual geo-failover when the outage is detected. This option is optimized for the applications that require consistent latency between the front-end and the data tier and supports recovery when either front end, data tier or both are impacted by the outage. - -> [!NOTE] -> If you are using the **read-only listener** to load-balance a read-only workload, make sure that this workload is executed in a VM or other resource in the secondary region so it can connect to the secondary database. - -### Use failover groups and firewall rules - -If your business continuity plan requires failover using groups with automatic failover, you can restrict access to your database in SQL Database by using public IP firewall rules. To support automatic failover, follow these steps: - -1. [Create a public IP](../../virtual-network/ip-services/virtual-network-public-ip-address.md#create-a-public-ip-address). -2. 
[Create a public load balancer](../../load-balancer/quickstart-load-balancer-standard-public-portal.md) and assign the public IP to it. -3. [Create a virtual network and the virtual machines](../../load-balancer/quickstart-load-balancer-standard-public-portal.md) for your front-end components. -4. [Create network security group](../../virtual-network/network-security-groups-overview.md) and configure inbound connections. -5. Ensure that the outbound connections are open to Azure SQL Database in a region by using an `Sql.` [service tag](../../virtual-network/network-security-groups-overview.md#service-tags). -6. Create a [SQL Database firewall rule](firewall-configure.md) to allow inbound traffic from the public IP address you create in step 1. - -For more information on how to configure outbound access and what IP to use in the firewall rules, see [Load balancer outbound connections](../../load-balancer/load-balancer-outbound-connections.md). - -The above configuration will ensure that an automatic geo-failover will not block connections from the front-end components and assumes that the application can tolerate the longer latency between the front end and the data tier. - -> [!IMPORTANT] -> To guarantee business continuity during regional outages you must ensure geographic redundancy for both front-end components and databases. - -## Scale primary database - -You can scale up or scale down the primary database to a different compute size (within the same service tier) without disconnecting any geo-secondaries. When scaling up, we recommend that you scale up the geo-secondary first, and then scale up the primary. When scaling down, reverse the order: scale down the primary first, and then scale down the secondary. When you scale a database to a different service tier, this recommendation is enforced. 
- -This sequence is recommended specifically to avoid the problem where the geo-secondary at a lower SKU gets overloaded and must be re-seeded during an upgrade or downgrade process. You could also avoid the problem by making the primary read-only, at the expense of impacting all read-write workloads against the primary. - -> [!NOTE] -> If you created a geo-secondary as part of the failover group configuration it is not recommended to scale down the geo-secondary. This is to ensure your data tier has sufficient capacity to process your regular workload after a geo-failover. - -## Prevent loss of critical data - - - -Due to the high latency of wide area networks, geo-replication uses an asynchronous replication mechanism. Asynchronous replication makes the possibility of data loss unavoidable if the primary fails. To protect critical transactions from data loss, an application developer can call the [sp_wait_for_database_copy_sync](/sql/relational-databases/system-stored-procedures/active-geo-replication-sp-wait-for-database-copy-sync) stored procedure immediately after committing the transaction. Calling `sp_wait_for_database_copy_sync` blocks the calling thread until the last committed transaction has been transmitted and hardened in the transaction log of the secondary database. However, it does not wait for the transmitted transactions to be replayed (redone) on the secondary. `sp_wait_for_database_copy_sync` is scoped to a specific geo-replication link. Any user with the connection rights to the primary database can call this procedure. - -> [!NOTE] -> `sp_wait_for_database_copy_sync` prevents data loss after geo-failover for specific transactions, but does not guarantee full synchronization for read access. The delay caused by a `sp_wait_for_database_copy_sync` procedure call can be significant and depends on the size of the not yet transmitted transaction log on the primary at the time of the call. 
- - -## Permissions - - - -Permissions for a failover group are managed via [Azure role-based access control (Azure RBAC)](../../role-based-access-control/overview.md). - -Azure RBAC write access is necessary to create and manage failover groups. The [SQL Server Contributor role](../../role-based-access-control/built-in-roles.md#sql-server-contributor) has all the necessary permissions to manage failover groups. - -For specific permission scopes, review how to [configure auto-failover groups in Azure SQL Database](auto-failover-group-sql-db.md#permissions). - -## Limitations - -Be aware of the following limitations: - -- Failover groups cannot be created between two servers in the same Azure region. -- Failover groups cannot be renamed. You will need to delete the group and re-create it with a different name. -- Database rename is not supported for databases in failover group. You will need to temporarily delete failover group to be able to rename a database, or remove the database from the failover group. - -## Programmatically manage failover groups - -As discussed previously, auto-failover groups can also be managed programmatically using Azure PowerShell, Azure CLI, and REST API. The following tables describe the set of commands available. Active geo-replication includes a set of Azure Resource Manager APIs for management, including the [Azure SQL Database REST API](/rest/api/sql/) and [Azure PowerShell cmdlets](/powershell/azure/). These APIs require the use of resource groups and support Azure role-based access control (Azure RBAC). For more information on how to implement access roles, see [Azure role-based access control (Azure RBAC)](../../role-based-access-control/overview.md). 
- - -# [PowerShell](#tab/azure-powershell) - -| Cmdlet | Description | -| --- | --- | -| [New-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/new-azsqldatabasefailovergroup) |This command creates a failover group and registers it on both primary and secondary servers| -| [Remove-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/remove-azsqldatabasefailovergroup) | Removes a failover group from the server | -| [Get-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/get-azsqldatabasefailovergroup) | Retrieves a failover group's configuration | -| [Set-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/set-azsqldatabasefailovergroup) |Modifies configuration of a failover group | -| [Switch-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/switch-azsqldatabasefailovergroup) | Triggers failover of a failover group to the secondary server | -| [Add-AzSqlDatabaseToFailoverGroup](/powershell/module/az.sql/add-azsqldatabasetofailovergroup)|Adds one or more databases to a failover group| - -# [Azure CLI](#tab/azure-cli) - -| Command | Description | -| --- | --- | -| [az sql failover-group create](/cli/azure/sql/failover-group#az-sql-failover-group-create) |This command creates a failover group and registers it on both primary and secondary servers| -| [az sql failover-group delete](/cli/azure/sql/failover-group#az-sql-failover-group-delete) | Removes a failover group from the server | -| [az sql failover-group show](/cli/azure/sql/failover-group#az-sql-failover-group-show) | Retrieves a failover group configuration | -| [az sql failover-group update](/cli/azure/sql/failover-group#az-sql-failover-group-update) |Modifies a failover group's configuration and/or adds one or more databases to a failover group| -| [az sql failover-group set-primary](/cli/azure/sql/failover-group#az-sql-failover-group-set-primary) | Triggers failover of a failover group to the secondary server | - -# [REST API](#tab/rest-api) - -| API | Description | -| --- | --- | -| [Create or 
Update Failover Group](/rest/api/sql/failovergroups/createorupdate) | Creates or updates a failover group | -| [Delete Failover Group](/rest/api/sql/failovergroups/delete) | Removes a failover group from the server | -| [Failover (Planned)](/rest/api/sql/failovergroups/failover) | Triggers failover from the current primary server to the secondary server with full data synchronization.| -| [Force Failover Allow Data Loss](/rest/api/sql/failovergroups/forcefailoverallowdataloss) | Triggers failover from the current primary server to the secondary server without synchronizing data. This operation may result in data loss. | -| [Get Failover Group](/rest/api/sql/failovergroups/get) | Retrieves a failover group's configuration. | -| [List Failover Groups By Server](/rest/api/sql/failovergroups/listbyserver) | Lists the failover groups on a server. | -| [Update Failover Group](/rest/api/sql/failovergroups/update) | Updates a failover group's configuration. | - ---- - - - -## Next steps - -- For detailed tutorials, see - - [Add SQL Database to a failover group](failover-group-add-single-database-tutorial.md) - - [Add an elastic pool to a failover group](failover-group-add-elastic-pool-tutorial.md) -- For sample scripts, see: - - [Use PowerShell to configure active geo-replication for Azure SQL Database](scripts/setup-geodr-and-failover-database-powershell.md) - - [Use PowerShell to configure active geo-replication for a pooled database in Azure SQL Database](scripts/setup-geodr-and-failover-elastic-pool-powershell.md) - - [Use PowerShell to add an Azure SQL Database to a failover group](scripts/add-database-to-failover-group-powershell.md) -- For a business continuity overview and scenarios, see [Business continuity overview](business-continuity-high-availability-disaster-recover-hadr-overview.md) -- To learn about Azure SQL Database automated backups, see [SQL Database automated backups](automated-backups-overview.md). 
-- To learn about using automated backups for recovery, see [Restore a database from the service-initiated backups](recovery-using-backups.md). -- To learn about authentication requirements for a new primary server and database, see [SQL Database security after disaster recovery](active-geo-replication-security-configure.md). diff --git a/articles/azure-sql/database/automated-backups-overview.md b/articles/azure-sql/database/automated-backups-overview.md deleted file mode 100644 index 8490255dcbebc..0000000000000 --- a/articles/azure-sql/database/automated-backups-overview.md +++ /dev/null @@ -1,709 +0,0 @@ ---- -title: Automatic, geo-redundant backups -titleSuffix: Azure SQL Database & Azure SQL Managed Instance -description: Azure SQL Database and Azure SQL Managed Instance automatically create a local database backup every few minutes and use Azure read-access geo-redundant storage for geo-redundancy. -services: sql-database -ms.service: sql-db-mi -ms.subservice: backup-restore -ms.custom: references_regions, devx-track-azurepowershell, devx-track-azurecli -ms.topic: conceptual -author: SudhirRaparla -ms.author: nvraparl -ms.reviewer: kendralittle, mathoma, wiassaf, danil -ms.date: 01/10/2022 ---- -# Automated backups - Azure SQL Database & Azure SQL Managed Instance - -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -[!INCLUDE [GDPR-related guidance](../../../includes/gdpr-intro-sentence.md)] - -## What is a database backup? - -Database backups are an essential part of any business continuity and disaster recovery strategy, because they protect your data from corruption or deletion. These backups enable database restore to a point in time within the configured retention period. If your data protection rules require that your backups are available for an extended time (up to 10 years), you can configure [long-term retention](long-term-retention-overview.md) for both single and pooled databases. 
- -## Backup and restore essentials - -Databases in Azure SQL Managed instance and non-Hyperscale databases in Azure SQL Database use SQL Server engine technology to back up and restore data. Hyperscale databases have a unique architecture and leverage a different technology for backup and restore: see [Hyperscale backups and storage redundancy](#hyperscale-backups-and-storage-redundancy). - -### Backup frequency - -Both Azure SQL Database and Azure SQL Managed Instance use SQL Server technology to create [full backups](/sql/relational-databases/backup-restore/full-database-backups-sql-server) every week, [differential backups](/sql/relational-databases/backup-restore/differential-backups-sql-server) every 12-24 hours, and [transaction log backups](/sql/relational-databases/backup-restore/transaction-log-backups-sql-server) every 5 to 10 minutes. The frequency of transaction log backups is based on the compute size and the amount of database activity. - -When you restore a database, the service determines which full, differential, and transaction log backups need to be restored. - -Hyperscale databases use [snapshot backup technology](#hyperscale-backups-and-storage-redundancy). - -### Backup storage redundancy - -By default, Azure SQL Database and Azure SQL Managed Instance store data in geo-redundant [storage blobs](../../storage/common/storage-redundancy.md) that are replicated to a [paired region](../../availability-zones/cross-region-replication-azure.md). Geo-redundancy helps to protect against outages impacting backup storage in the primary region and allows you to restore your server to a different region in the event of a disaster. - -The option to configure backup storage redundancy provides the flexibility to choose between locally redundant, zone-redundant, or geo-redundant storage blobs. 
To ensure that your data stays within the same region where your managed instance or database in Azure SQL Database is deployed, you can change the default geo-redundant backup storage redundancy and configure either locally redundant or zone-redundant storage blobs for backups. Storage redundancy mechanisms store multiple copies of your data so that it is protected from planned and unplanned events, including transient hardware failure, network or power outages, or massive natural disasters. The configured backup storage redundancy is applied to both short-term backup retention settings that are used for point in time restore (PITR) and long-term retention backups used for long-term backups (LTR). - -For Azure SQL Database, backup storage redundancy can be configured at the time of database creation or can be updated for an existing database; the changes made to an existing database apply to future backups only. After the backup storage redundancy of an existing database is updated, it may take up to 48 hours for the changes to be applied. Geo-restore is disabled as soon as a database is updated to use local or zone redundant storage. For Hyperscale databases, the selected storage redundancy option will be used for the lifetime of the database for both data storage redundancy and backup storage redundancy. Learn more in [Hyperscale backups and storage redundancy](#hyperscale-backups-and-storage-redundancy). - -> [!IMPORTANT] -> Zone-redundant storage is currently only available in [certain regions](../../storage/common/storage-redundancy.md#zone-redundant-storage). - -### Backup usage - -You can use these backups to: - -- **Point-in-time restore of existing database** - [Restore an existing database to a point in time in the past](recovery-using-backups.md#point-in-time-restore) within the retention period by using the Azure portal, Azure PowerShell, Azure CLI, or REST API. 
For SQL Database, this operation creates a new database on the same server as the original database, but uses a different name to avoid overwriting the original database. After restore completes, you can delete the original database. Alternatively, you can [rename](/sql/relational-databases/databases/rename-a-database) both the original database, and then rename the restored database to the original database name. Similarly, for SQL Managed Instance, this operation creates a copy of the database on the same or different managed instance in the same subscription and same region. -- **Point-in-time restore of deleted database** - [Restore a deleted database to the time of deletion](recovery-using-backups.md#deleted-database-restore) or to any point in time within the retention period. The deleted database can be restored only on the same server or managed instance where the original database was created. When deleting a database, the service takes a final transaction log backup before deletion, to prevent any data loss. -- **Geo-restore** - [Restore a database to another geographic region](recovery-using-backups.md#geo-restore). Geo-restore allows you to recover from a geographic disaster when you cannot access your database or backups in the primary region. It creates a new database on any existing server or managed instance, in any Azure region. - > [!IMPORTANT] - > Geo-restore is available only for databases in Azure SQL Database or managed instances configured with geo-redundant backup storage. If you are not currently using geo-replicated backups for a database, you can change this by [configuring backup storage redundancy](#configure-backup-storage-redundancy). -- **Restore from long-term backup** - [Restore a database from a specific long-term backup](long-term-retention-overview.md) of a single database or pooled database, if the database has been configured with a long-term retention policy (LTR). 
LTR allows you to [restore an old version of the database](long-term-backup-retention-configure.md) by using the Azure portal, Azure CLI, or Azure PowerShell to satisfy a compliance request or to run an old version of the application. For more information, see [Long-term retention](long-term-retention-overview.md). - -> [!NOTE] -> In Azure Storage, the term *replication* refers to copying blobs from one location to another. In SQL, *database replication* refers to various technologies used to keep multiple secondary databases synchronized with a primary database. - -### Restore capabilities and features of Azure SQL Database and Azure SQL Managed Instance - -This table summarizes the capabilities and features of [point in time restore (PITR)](recovery-using-backups.md#point-in-time-restore), [geo-restore](recovery-using-backups.md#geo-restore), and [long-term retention backups](long-term-retention-overview.md). - -| **Backup Properties** | Point in time recovery (PITR) | Geo-restore | Long-term backup restore | -|---|---|---|---| -| **Types of SQL backup** | Full, Differential, Log | Replicated copies of PITR backups | Only the full backups | -| **Recovery Point Objective (RPO)** |  5-10 minutes, based on compute size and amount of database activity. | Up to 1 hour, based on geo-replication.\*  |  One week (or user's policy).| -| **Recovery Time Objective (RTO)** | Restore usually takes <12 hours, but could take longer dependent on size and activity. See [Recovery](recovery-using-backups.md#recovery-time). | Restore usually takes <12 hours, but could take longer dependent on size and activity. See [Recovery](recovery-using-backups.md#recovery-time). | Restore usually takes <12 hours, but could take longer dependent on size and activity. See [Recovery](recovery-using-backups.md#recovery-time). | -| **Retention** | 7 days by default, Up to 35 days |  Enabled by default, same as source.\*\* | Not enabled by default, Retention Up to 10 years. 
| -| **Azure storage**  | Geo-redundant by default. Can optionally configure zone or locally redundant storage. | Available when PITR backup storage redundancy is set to Geo-redundant. Not available when PITR backup store is zone or locally redundant storage. | Geo-redundant by default. Can configure zone or locally redundant storage. | -| **Use to create new database in same region** | Supported | Supported | Supported | -| **Use to create new database in another region** | Not Supported | Supported in any Azure region | Supported in any Azure region | -| **Use to create new database in another Subscription** | Not Supported | Not Supported\*\*\* | Not Supported\*\*\* | -| **Restore via Azure portal**|Yes|Yes|Yes| -| **Restore via PowerShell** |Yes|Yes|Yes| -| **Restore via Azure CLI** |Yes|Yes|Yes| - - -\* For business-critical applications that require large databases and must ensure business continuity, use [Auto-failover groups](auto-failover-group-overview.md). - -\*\* All PITR backups are stored on geo-redundant storage by default. Hence, geo-restore is enabled by default. - -\*\*\* Workaround is to restore to a new server and use Resource Move to move the server to another Subscription. - -### Restoring a database from backups - -To perform a restore, see [Restore database from backups](recovery-using-backups.md). You can try backup configuration and restore operations using the following examples: - -| Operation | Azure portal | Azure CLI | Azure PowerShell | -|---|---|---|---| -| **Change backup retention** | [SQL Database](#change-the-short-term-retention-policy-using-the-azure-portal)
    [SQL Managed Instance](#change-the-short-term-retention-policy-using-the-azure-portal) | [SQL Database](#change-the-short-term-retention-policy-using-azure-cli)
    [SQL Managed Instance](#change-the-short-term-retention-policy-using-azure-cli) | [SQL Database](#change-the-short-term-retention-policy-using-powershell)
    [SQL Managed Instance](#change-the-short-term-retention-policy-using-powershell) | -| **Change long-term backup retention** | [SQL Database](long-term-backup-retention-configure.md#create-long-term-retention-policies)
    [SQL Managed Instance](../managed-instance/long-term-backup-retention-configure.md) | [SQL Database](long-term-backup-retention-configure.md)
    [SQL Managed Instance](../managed-instance/long-term-backup-retention-configure.md) | [SQL Database](long-term-backup-retention-configure.md)
    [SQL Managed Instance](../managed-instance/long-term-backup-retention-configure.md) | -| **Restore a database from a point in time** | [SQL Database](recovery-using-backups.md#point-in-time-restore)
    [SQL Managed Instance](../managed-instance/point-in-time-restore.md) | [SQL Database](/cli/azure/sql/db#az-sql-db-restore)
    [SQL Managed Instance](/cli/azure/sql/midb#az-sql-midb-restore) | [SQL Database](/powershell/module/az.sql/restore-azsqldatabase)
    [SQL Managed Instance](/powershell/module/az.sql/restore-azsqlinstancedatabase) | -| **Restore a deleted database** | [SQL Database](recovery-using-backups.md)
    [SQL Managed Instance](../managed-instance/point-in-time-restore.md#restore-a-deleted-database) | [SQL Database](long-term-backup-retention-configure.md#restore-from-ltr-backups)
    [SQL Managed Instance](../managed-instance/long-term-backup-retention-configure.md#restore-from-ltr-backups) | [SQL Database](/powershell/module/az.sql/get-azsqldeleteddatabasebackup)
    [SQL Managed Instance](/powershell/module/az.sql/get-azsqldeletedinstancedatabasebackup)| -| **Restore a database from Azure Blob storage** | | |
    [SQL Managed Instance](../managed-instance/restore-sample-database-quickstart.md) | - -## Backup scheduling - -The first full backup is scheduled immediately after a new database is created or restored. This backup usually completes within 30 minutes, but it can take longer when the database is large. For example, the initial backup can take longer on a restored database or a database copy, which would typically be larger than a new database. After the first full backup, all further backups are scheduled and managed automatically. The exact timing of all database backups is determined by the SQL Database or SQL Managed Instance service as it balances the overall system workload. You cannot change the schedule of backup jobs or disable them. - -> [!IMPORTANT] -> For a new, restored, or copied database, point-in-time restore capability becomes available from the time when the initial transaction log backup that follows the initial full backup is created. - -## Backup storage consumption - -With SQL Server backup and restore technology, restoring a database to a point in time requires an uninterrupted backup chain consisting of one full backup, optionally one differential backup, and one or more transaction log backups. Azure SQL Database and Azure SQL Managed Instance backup schedules include one full backup every week. Therefore, to provide PITR within the entire retention period, the system must store additional full, differential, and transaction log backups for up to a week longer than the configured retention period. - -In other words, for any point in time during the retention period, there must be a full backup that is older than the oldest time of the retention period, as well as an uninterrupted chain of differential and transaction log backups from that full backup until the next full backup. - -> [!NOTE] -> To provide PITR, additional backups are stored for up to a week longer than the configured retention period. 
Backup storage is charged at the same rate for all backups. - -Backups that are no longer needed to provide PITR functionality are automatically deleted. Because differential backups and log backups require an earlier full backup to be restorable, all three backup types are purged together in weekly sets. - -For all databases including [TDE encrypted](transparent-data-encryption-tde-overview.md) databases, backups are compressed to reduce backup storage consumption and costs. Average backup compression ratio is 3-4 times, however it can be significantly lower or higher depending on the nature of the data and whether data compression is used in the database. - -Azure SQL Database and Azure SQL Managed Instance compute your total used backup storage as a cumulative value. Every hour, this value is reported to the Azure billing pipeline, which is responsible for aggregating this hourly usage to calculate your consumption at the end of each month. After the database is deleted, consumption decreases as backups age out and are deleted. Once all backups are deleted and PITR is no longer possible, billing stops. - -> [!IMPORTANT] -> Backups of a database are retained to provide PITR even if the database has been deleted. While deleting and re-creating a database may save storage and compute costs, it may increase backup storage costs, because the service retains backups for each deleted database, every time it is deleted. - -### Monitor consumption - -For vCore databases in Azure SQL Database, the storage consumed by each type of backup (full, differential, and log) is reported on the database monitoring pane as a separate metric. The following diagram shows how to monitor the backup storage consumption for a single database. This feature is currently not available for managed instances. 
- -![Monitor database backup consumption in the Azure portal](./media/automated-backups-overview/backup-metrics.png) - -### Fine-tune backup storage consumption - -Backup storage consumption up to the maximum data size for a database is not charged. Excess backup storage consumption will depend on the workload and maximum size of the individual databases. Consider some of the following tuning techniques to reduce your backup storage consumption: - -- Reduce the [backup retention period](#change-the-short-term-retention-policy-using-the-azure-portal) to the minimum possible for your needs. -- Avoid doing large write operations, like index rebuilds, more frequently than you need to. -- For large data load operations, consider using [clustered columnstore indexes](/sql/relational-databases/indexes/columnstore-indexes-overview) and following related [best practices](/sql/relational-databases/indexes/columnstore-indexes-data-loading-guidance), and/or reduce the number of non-clustered indexes. -- In the General Purpose service tier, the provisioned data storage is less expensive than the price of the backup storage. If you have continually high excess backup storage costs, you might consider increasing data storage to save on the backup storage. -- Use TempDB instead of permanent tables in your application logic for storing temporary results and/or transient data. -- Use locally redundant backup storage whenever possible (for example dev/test environments) - -## Backup retention - -Azure SQL Database and Azure SQL Managed Instance provide both short-term and long-term retention of backups. Short-term retention backups allow Point-In-Time-Restore (PITR) within the retention period for the database, while long-term retention provides backups for various compliance requirements. 
- -### Short-term retention - -For all new, restored, and copied databases, Azure SQL Database and Azure SQL Managed Instance retain sufficient backups to allow PITR within the last seven days by default. Regular full, differential and log backups are taken to ensure databases are restorable to any point-in-time within the retention period defined for the database or managed instance. Additionally, for Azure SQL Databases, differential backups can be configured to either a 12-hour or a 24-hour frequency. - -> [!NOTE] -> A 24-hour differential backup frequency may increase the time required to restore the database. - -Except for Hyperscale and Basic tier databases, you can [change backup retention period](#change-the-short-term-retention-policy) per each active database in the 1-35 day range. As described in [Backup storage consumption](#backup-storage-consumption), backups stored to enable PITR may be older than the retention period. For Azure SQL Managed Instance only, it is possible to set the PITR backup retention rate once a database has been deleted in the 0-35 days range. - -If you delete a database, the system keeps backups in the same way it would for an online database with its specific retention period. You cannot change backup retention period for a deleted database. - -> [!IMPORTANT] -> If you delete a server or a managed instance, all databases on that server or managed instance are also deleted and cannot be recovered. You cannot restore a deleted server or managed instance. But if you had configured long-term retention (LTR) for a database or managed instance, long-term retention backups are not deleted, and can be used to restore databases on a different server or managed instance in the same subscription, to a point in time when a long-term retention backup was taken. - -Backup retention for purposes of PITR within the last 1-35 days is sometimes called short-term backup retention. 
If you need to keep backups for longer than the maximum short-term retention period of 35 days, you can enable [Long-term retention](long-term-retention-overview.md). - -### Long-term retention - -For both SQL Database and SQL Managed Instance, you can configure full backup long-term retention (LTR) for up to 10 years in Azure Blob storage. After the LTR policy is configured, full backups are automatically copied to a different storage container weekly. To meet various compliance requirements, you can select different retention periods for weekly, monthly, and/or yearly full backups. Storage consumption depends on the selected frequency and retention periods of LTR backups. You can use the [LTR pricing calculator](https://azure.microsoft.com/pricing/calculator/?service=sql-database) to estimate the cost of LTR storage. - -> [!IMPORTANT] -> Updating the backup storage redundancy for an existing Azure SQL Database, only applies to the future backups taken for the database. All existing LTR backups for the database will continue to reside in the existing storage blob and new backups will be stored on the requested storage blob type. - -For more information about LTR, see [Long-term backup retention](long-term-retention-overview.md). - -## Backup storage costs - -The price for backup storage varies and depends on your purchasing model (DTU or vCore), chosen backup storage redundancy option, and also on your region. The backup storage is charged per GB/month consumed, for pricing see [Azure SQL Database pricing](https://azure.microsoft.com/pricing/details/sql-database/single/) page and [Azure SQL Managed Instance pricing](https://azure.microsoft.com/pricing/details/azure-sql/sql-managed-instance/single/) page. - -For more on purchasing models, see [Choose between the vCore and DTU purchasing models](purchasing-models.md). - -> [!NOTE] -> Azure invoice will show only the excess backup storage consumed, not the entire backup storage consumption. 
For example, in a hypothetical scenario, if you have provisioned 4TB of data storage, you will get 4TB of free backup storage space. In case that you have used the total of 5.8TB of backup storage space, Azure invoice will show only 1.8TB, as only excess backup storage used is charged. - -### DTU model - -In the DTU model, there's no additional charge for backup storage for databases and elastic pools. The price of backup storage is a part of database or pool price. - -### vCore model - -For single databases in SQL Database, a backup storage amount equal to 100 percent of the maximum data storage size for the database is provided at no extra charge. For elastic pools and managed instances, a backup storage amount equal to 100 percent of the maximum data storage for the pool or the maximum instance storage size, respectively, is provided at no extra charge. - -For single databases, this equation is used to calculate the total billable backup storage usage: - -`Total billable backup storage size = (size of full backups + size of differential backups + size of log backups) – maximum data storage` - -For pooled databases, the total billable backup storage size is aggregated at the pool level and is calculated as follows: - -`Total billable backup storage size = (total size of all full backups + total size of all differential backups + total size of all log backups) - maximum pool data storage` - -For managed instances, the total billable backup storage size is aggregated at the instance level and is calculated as follows: - -`Total billable backup storage size = (total size of full backups + total size of differential backups + total size of log backups) – maximum instance data storage` - -Total billable backup storage, if any, will be charged in GB/month as per the rate of the backup storage redundancy used. This backup storage consumption will depend on the workload and size of individual databases, elastic pools, and managed instances. 
Heavily modified databases have larger differential and log backups, because the size of these backups is proportional to the amount of changed data. Therefore, such databases will have higher backup charges. - -Azure SQL Database and Azure SQL Managed Instance compute your total billable backup storage as a cumulative value across all backup files. Every hour, this value is reported to the Azure billing pipeline, which aggregates this hourly usage to get your backup storage consumption at the end of each month. If a database is deleted, backup storage consumption will gradually decrease as older backups age out and are deleted. Because differential backups and log backups require an earlier full backup to be restorable, all three backup types are purged together in weekly sets. Once all backups are deleted, billing stops. - -As a simplified example, assume a database has accumulated 744 GB of backup storage and that this amount stays constant throughout an entire month because the database is completely idle. To convert this cumulative storage consumption to hourly usage, divide it by 744.0 (31 days per month * 24 hours per day). SQL Database will report to Azure billing pipeline that the database consumed 1 GB of PITR backup each hour, at a constant rate. Azure billing will aggregate this consumption and show a usage of 744 GB for the entire month. The cost will be based on the amount/GB/month rate in your region. - -Now, a more complex example. Suppose the same idle database has its retention increased from seven days to 14 days in the middle of the month. This increase results in the total backup storage doubling to 1,488 GB. SQL Database would report 1 GB of usage for hours 1 through 372 (the first half of the month). It would report the usage as 2 GB for hours 373 through 744 (the second half of the month). This usage would be aggregated to a final bill of 1,116 GB/month. - -Actual backup billing scenarios are more complex. 
Because the rate of changes in the database depends on the workload and is variable over time, the size of each differential and log backup will vary as well, causing the hourly backup storage consumption to fluctuate accordingly. Furthermore, each differential backup contains all changes made in the database since the last full backup, thus the total size of all differential backups gradually increases over the course of a week, and then drops sharply once an older set of full, differential, and log backups ages out. For example, if a heavy write activity such as index rebuild has been run just after a full backup completed, then the modifications made by the index rebuild will be included in the transaction log backups taken over the duration of rebuild, in the next differential backup, and in every differential backup taken until the next full backup occurs. For the latter scenario in larger databases, an optimization in the service creates a full backup instead of a differential backup if a differential backup would be excessively large otherwise. This reduces the size of all differential backups until the following full backup. - -You can monitor total backup storage consumption for each backup type (full, differential, transaction log) over time as described in [Monitor consumption](#monitor-consumption). - -### Backup storage redundancy - -Backup storage redundancy impacts backup costs in the following way: -- locally redundant price = x -- zone-redundant price = 1.25x -- geo-redundant price = 2x - -For more details about backup storage pricing visit [Azure SQL Database pricing page](https://azure.microsoft.com/pricing/details/sql-database/single/) and [Azure SQL Managed Instance pricing page](https://azure.microsoft.com/pricing/details/azure-sql/sql-managed-instance/single/). - -> [!IMPORTANT] -> Backup storage redundancy for Hyperscale can only be set during database creation. This setting cannot be modified once the resource is provisioned. 
[Database copy](database-copy.md) process can be used to update the backup storage redundancy settings for an existing Hyperscale database. Learn more in [Hyperscale backups and storage redundancy](#hyperscale-backups-and-storage-redundancy). - -### Monitor costs - -To understand backup storage costs, go to **Cost Management + Billing** in the Azure portal, select **Cost Management**, and then select **Cost analysis**. Select the desired subscription as the **Scope**, and then filter for the time period and service that you're interested in as follows: - -1. Add a filter for **Service name**. -2. In the drop-down list select **sql database** for a single database or an elastic database pool, or select **sql managed instance** for managed instance. -3. Add another filter for **Meter subcategory**. -4. To monitor PITR backup costs, in the drop-down list select **single/elastic pool pitr backup storage** for a single database or an elastic database pool, or select **managed instance pitr backup storage** for managed instance. Meters will only show up if there exists consumption. -5. To monitor LTR backup costs, in the drop-down list select **ltr backup storage** for a single database or an elastic database pool, or select **sql managed instance - ltr backup storage** for managed instance. Meters will only show up if there exists consumption. - -The **Storage** and **compute** subcategories might interest you as well, but they're not associated with backup storage costs. - -![Backup storage cost analysis](./media/automated-backups-overview/check-backup-storage-cost-sql-mi.png) - - >[!IMPORTANT] - > Meters are only visible for counters that are currently in use. If a counter is not available, it is likely that the category is not currently being used. For example, managed instance counters will not be present for customers who do not have a managed instance deployed. Likewise, storage counters will not be visible for resources that are not consuming storage. 
For example, if there is no PITR or LTR backup storage consumption, these meters won't be shown. - -For more information, see [Azure SQL Database cost management](cost-management.md). - -## Encrypted backups - -If your database is encrypted with TDE, backups are automatically encrypted at rest, including LTR backups. All new databases in Azure SQL are configured with TDE enabled by default. For more information on TDE, see [Transparent Data Encryption with SQL Database & SQL Managed Instance](/sql/relational-databases/security/encryption/transparent-data-encryption-azure-sql). - -## Backup integrity - -On an ongoing basis, the Azure SQL engineering team automatically tests the restore of automated database backups. (This testing is not currently available in SQL Managed Instance. You should schedule DBCC CHECKDB on your databases in SQL Managed Instance, scheduled around your workload.) - -Upon point-in-time restore, databases also receive DBCC CHECKDB integrity checks. - -Any issues found during the integrity check will result in an alert to the engineering team. For more information, see [Data Integrity in SQL Database](https://azure.microsoft.com/blog/data-integrity-in-azure-sql-database/). - -All database backups are taken with the CHECKSUM option to provide additional backup integrity. - -## Compliance - -When you migrate your database from a DTU-based service tier to a vCore-based service tier, the PITR retention is preserved to ensure that your application's data recovery policy isn't compromised. If the default retention doesn't meet your compliance requirements, you can change the PITR retention period. For more information, see [Change the PITR backup retention period](#change-the-short-term-retention-policy). 
- -[!INCLUDE [GDPR-related guidance](../../../includes/gdpr-intro-sentence.md)] - -## Change the short-term retention policy - -You can change the default PITR backup retention period and the differential backup frequency by using the Azure portal, PowerShell, or the REST API. The following examples illustrate how to change the PITR retention to 28 days and the differential backups to 24 hour interval. - -> [!WARNING] -> If you reduce the current retention period, you lose the ability to restore to points in time older than the new retention period. Backups that are no longer needed to provide PITR within the new retention period are deleted. If you increase the current retention period, you do not immediately gain the ability to restore to older points in time within the new retention period. You gain that ability over time, as the system starts to retain backups for longer. - -> [!NOTE] -> These APIs will affect only the PITR retention period. If you configured LTR for your database, it won't be affected. For information about how to change LTR retention periods, see [Long-term retention](long-term-retention-overview.md). - -### Change the short-term retention policy using the Azure portal - -To change the PITR backup retention period or the differential backup frequency for active databases by using the Azure portal, go to the server or managed instance with the databases whose retention period you want to change. Select **Backups** in the left pane, then select the **Retention policies** tab. Select the database(s) for which you want to change the PITR backup retention. Then select **Configure retention** from the action bar. 
- -#### [SQL Database](#tab/single-database) - -![Change PITR retention, server level](./media/automated-backups-overview/configure-backup-retention-sqldb.png) - -#### [SQL Managed Instance](#tab/managed-instance) - -![Change PITR retention, managed instance](./media/automated-backups-overview/configure-backup-retention-sqlmi.png) - ---- - -### Change the short-term retention policy using Azure CLI - -Prepare your environment for the Azure CLI. - -[!INCLUDE[azure-cli-prepare-your-environment-no-header](../../../includes/azure-cli-prepare-your-environment-no-header.md)] - -#### [SQL Database](#tab/single-database) - -Change the PITR backup retention and differential backup frequency for active Azure SQL Databases by using the following example. - -```azurecli -# Set new PITR differential backup frequency on an active individual database -# Valid backup retention must be between 1 and 35 days -# Valid differential backup frequency must be either 12 or 24 -az sql db str-policy set \ - --resource-group myresourcegroup \ - --server myserver \ - --name mydb \ - --retention-days 28 \ - --diffbackup-hours 24 -``` - -#### [SQL Managed Instance](#tab/managed-instance) - -Use the following example to change the PITR backup retention of a **single active** database in a SQL Managed Instance. - -```azurecli -# Set new PITR backup retention period on an active individual database -# Valid backup retention must be between 1 and 35 days -az sql midb short-term-retention-policy set \ - --resource-group myresourcegroup \ - --managed-instance myinstance \ - --name mymanageddb \ - --retention-days 1 -``` - -Use the following example to change the PITR backup retention for **all active** databases in a SQL Managed Instance. 
- -```azurecli -# Set new PITR backup retention period for ALL active databases -# Valid backup retention must be between 1 and 35 days -az sql midb short-term-retention-policy set \ - --resource-group myresourcegroup \ - --managed-instance myinstance \ - --retention-days 1 -``` - ---- - -### Change the short-term retention policy using PowerShell - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] -> [!IMPORTANT] -> The PowerShell AzureRM module is still supported by SQL Database and SQL Managed Instance, but all future development is for the Az.Sql module. For more information, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module are substantially identical to those in the AzureRm modules. - -#### [SQL Database](#tab/single-database) - -To change the PITR backup retention and differential backup frequency for active Azure SQL Databases, use the following PowerShell example. - -```powershell -# SET new PITR backup retention period on an active individual database -# Valid backup retention must be between 1 and 35 days -Set-AzSqlDatabaseBackupShortTermRetentionPolicy -ResourceGroupName resourceGroup -ServerName testserver -DatabaseName testDatabase -RetentionDays 28 -``` - -```powershell -# SET new PITR differential backup frequency on an active individual database -# Valid differential backup frequency must be either 12 or 24. -Set-AzSqlDatabaseBackupShortTermRetentionPolicy -ResourceGroupName resourceGroup -ServerName testserver -DatabaseName testDatabase -RetentionDays 28 -DiffBackupIntervalInHours 24 -``` - -#### [SQL Managed Instance](#tab/managed-instance) - -To change the PITR backup retention for a **single active** database in a SQL Managed Instance, use the following PowerShell example. 
- -```powershell -# SET new PITR backup retention period on an active individual database -# Valid backup retention must be between 1 and 35 days -Set-AzSqlInstanceDatabaseBackupShortTermRetentionPolicy -ResourceGroupName resourceGroup -InstanceName testserver -DatabaseName testDatabase -RetentionDays 1 -``` - -To change the PITR backup retention for **all active** databases in a SQL Managed Instance, use the following PowerShell example. - -```powershell -# SET new PITR backup retention period for ALL active databases -# Valid backup retention must be between 1 and 35 days -Get-AzSqlInstanceDatabase -ResourceGroupName resourceGroup -InstanceName testserver | Set-AzSqlInstanceDatabaseBackupShortTermRetentionPolicy -RetentionDays 1 -``` - -To change the PITR backup retention for a **single deleted** database in a SQL Managed Instance, use the following PowerShell example. - -```powershell -# SET new PITR backup retention on an individual deleted database -# Valid backup retention must be between 0 (no retention) and 35 days. Valid retention can only be lower than the retention period when the database was active, or the remaining backup days of a deleted database. -Get-AzSqlDeletedInstanceDatabaseBackup -ResourceGroupName resourceGroup -InstanceName testserver -DatabaseName testDatabase | Set-AzSqlInstanceDatabaseBackupShortTermRetentionPolicy -RetentionDays 0 -``` - -To change the PITR backup retention for **all deleted** databases in a SQL Managed Instance, use the following PowerShell example. - -```powershell -# SET new PITR backup retention for ALL deleted databases -# Valid backup retention must be between 0 (no retention) and 35 days. 
Valid retention rate can only be lower than the period of the retention period when database was active, or remaining backup days of a deleted database -Get-AzSqlDeletedInstanceDatabaseBackup -ResourceGroupName resourceGroup -InstanceName testserver | Set-AzSqlInstanceDatabaseBackupShortTermRetentionPolicy -RetentionDays 0 -``` - -Zero (0) days retention would denote that backup is immediately deleted and no longer kept for a deleted database. -Once PITR backup retention has been reduced for a deleted database, it no longer can be increased. - ---- - -### Change the short-term retention policy using the REST API - -The below request updates the retention period to 28 days and also sets the differential backup frequency to 24 hours. - - -#### [SQL Database](#tab/single-database) - -#### Sample Request - -```http -PUT https://management.azure.com/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/resourceGroup/providers/Microsoft.Sql/servers/testserver/databases/testDatabase/backupShortTermRetentionPolicies/default?api-version=2021-02-01-preview -``` - -#### Request Body - -```json -{ - "properties":{ - "retentionDays":28, - "diffBackupIntervalInHours":24 - } -} -``` - -#### Sample Response: - -```json -{ - "id": "/subscriptions/00000000-1111-2222-3333-444444444444/providers/Microsoft.Sql/resourceGroups/resourceGroup/servers/testserver/databases/testDatabase/backupShortTermRetentionPolicies/default", - "name": "default", - "type": "Microsoft.Sql/resourceGroups/servers/databases/backupShortTermRetentionPolicies", - "properties": { - "retentionDays": 28, - "diffBackupIntervalInHours":24 - } -} -``` - - -For more information, see [Backup Retention REST API](/rest/api/sql/backupshorttermretentionpolicies). 
- -#### [SQL Managed Instance](#tab/managed-instance) - -#### Sample request - -```http -PUT https://management.azure.com/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/resourceGroup/providers/Microsoft.Sql/servers/testserver/databases/testDatabase/backupShortTermRetentionPolicies/default?api-version=2017-10-01-preview -``` - -#### Request body - -```json -{ - "properties":{ - "retentionDays":28 - } -} -``` - -#### Sample response - -Status code: 200 - -```json -{ - "id": "/subscriptions/00000000-1111-2222-3333-444444444444/providers/Microsoft.Sql/resourceGroups/resourceGroup/servers/testserver/databases/testDatabase/backupShortTermRetentionPolicies/default", - "name": "default", - "type": "Microsoft.Sql/resourceGroups/servers/databases/backupShortTermRetentionPolicies", - "properties": { - "retentionDays": 28 - } -} -``` - -For more information, see [Backup Retention REST API](/rest/api/sql/backupshorttermretentionpolicies). - ---- - -## Hyperscale backups and storage redundancy - -Hyperscale databases in Azure SQL Database use a [unique architecture](service-tier-hyperscale.md#distributed-functions-architecture) with highly scalable storage and compute performance tiers. - -Hyperscale backups are snapshot based and are nearly instantaneous. Log generated is stored in long term Azure storage for the backup retention period. Hyperscale architecture does not use full database backups or log backups and the backup and restore considerations described in the previous sections of this article do not apply. - -### Backup and restore performance for Hyperscale databases - -Storage and compute separation enables Hyperscale to push down backup and restore operation to the storage layer to reduce the processing burden on the primary compute replica. As a result, database backups don't impact performance of the primary compute node. 
- -Backup and restore operations for Hyperscale databases are fast regardless of data size due to the use of storage snapshots. A database can be restored to any point in time within its backup retention period. Point in time recovery (PITR) is achieved by reverting to file snapshots, and as such is not a size of data operation. Restore of a Hyperscale database within the same Azure region is a constant-time operation, and even multiple-terabyte databases can be restored in minutes instead of hours or days. Creation of new databases by restoring an existing backup or copying the database also takes advantage of this feature: creating database copies for development or testing purposes, even of multi-terabyte databases, is doable in minutes within the same region when the same storage type is used. - -### Hyperscale backup retention - -Hyperscale backup retention is currently seven days; long-term retention policies aren't currently supported. - -### Hyperscale storage redundancy applies to both data storage and backup storage - -Hyperscale supports configurable storage redundancy. When creating a Hyperscale database, you can choose your preferred storage type: read-access geo-redundant storage (RA-GRS), zone-redundant storage (ZRS), or locally redundant storage (LRS) Azure standard storage. The selected storage redundancy option will be used for the lifetime of the database for both data storage redundancy and backup storage redundancy. - -### Consider storage redundancy carefully when you create a Hyperscale database - -Backup storage redundancy for Hyperscale databases can only be set during database creation. This setting cannot be modified once the resource is provisioned. Geo-restore is only available when geo-redundant storage (RA-GRS) has been chosen for backup storage redundancy. The [database copy](database-copy.md) process can be used to update the storage redundancy settings for an existing Hyperscale database. 
Copying a database to a different storage type will be a size-of-data operation. Find example code in [configure backup storage redundancy](#configure-backup-storage-redundancy). - -> [!IMPORTANT] -> Zone-redundant storage is currently only available in [certain regions](../../storage/common/storage-redundancy.md#zone-redundant-storage). - -### Restoring a Hyperscale database to a different region - -If you need to restore a Hyperscale database in Azure SQL Database to a region other than the one it's currently hosted in, as part of a disaster recovery operation or drill, relocation, or any other reason, the primary method is to do a geo-restore of the database. This involves exactly the same steps as what you would use to restore any other database in SQL Database to a different region: - -1. Create a [server](logical-servers.md) in the target region if you don't already have an appropriate server there. This server should be owned by the same subscription as the original (source) server. -2. Follow the instructions in the [geo-restore](./recovery-using-backups.md#geo-restore) section of the page on restoring a database in Azure SQL Database from automatic backups. - -> [!NOTE] -> Because the source and target are in separate regions, the database cannot share snapshot storage with the source database as in non-geo restores, which complete quickly regardless of database size. In the case of a geo-restore of a Hyperscale database, it will be a size-of-data operation, even if the target is in the paired region of the geo-replicated storage. Therefore, a geo-restore will take time proportional to the size of the database being restored. If the target is in the paired region, data transfer will be within a region, which will be significantly faster than a cross-region data transfer, but it will still be a size-of-data operation. - -If you prefer, you can copy the database to a different region as well. 
Learn about [Database Copy for Hyperscale](database-copy.md#database-copy-for-azure-sql-hyperscale). - -## Configure backup storage redundancy - -Backup storage redundancy for databases in Azure SQL Database can be configured at the time of database creation or can be updated for an existing database; the changes made to an existing database apply to future backups only. The default value is geo-redundant storage. For differences in pricing between locally redundant, zone-redundant and geo-redundant backup storage visit [managed instance pricing page](https://azure.microsoft.com/pricing/details/azure-sql/sql-managed-instance/single/). Storage redundancy for Hyperscale databases is unique: learn more in [Hyperscale backups and storage redundancy](#hyperscale-backups-and-storage-redundancy). - -For Azure SQL Managed Instance, backup storage redundancy is set at the instance level, and it is applied for all belonging managed databases. It can be configured at the time of an instance creation or updated for existing instances; the backup storage redundancy change would trigger then a new full backup per database and the change will apply for all future backups. The default storage redundancy type is geo-redundancy (RA-GRS). - -> [!NOTE] -> Backup storage redundancy change for SQL Managed Instance is currently available only for the Public cloud via Azure Portal. - -### Configure backup storage redundancy by using the Azure portal - -#### [SQL Database](#tab/single-database) - -In Azure portal, you can configure the backup storage redundancy on the **Create SQL Database** pane. The option is available under the Backup Storage Redundancy section. - -![Open Create SQL Database pane](./media/automated-backups-overview/sql-database-backup-storage-redundancy.png) - -#### [SQL Managed Instance](#tab/managed-instance) - -In the Azure portal, during an instance creation, the default option for the backup storage redundancy is Geo-redundancy. 
The option to change it is located on the **Compute + storage** pane accessible from the **Configure Managed Instance** option on the **Basics** tab. - -![Open Compute+Storage configuration-pane](./media/automated-backups-overview/open-configuration-blade-managed-instance.png) - -Find the option to select backup storage redundancy on the **Compute + storage** pane. - -![Configure backup storage redundancy](./media/automated-backups-overview/select-backup-storage-redundancy-managed-instance.png) - -To change the Backup storage redundancy option for an existing instance, go to the **Compute + storage** pane, choose the new backup option and select **Apply**. For now, this change will be applied only for PITR backups, while LTR backups will retain the old storage redundancy type. The time to perform the backup redundancy change depends on the size of the all the databases within a single managed instance. Changing the backup redundancy will take more time for instances that have large databases. It's possible to combine the backup storage redundancy change operation with the UpdateSLO operation. Use the **Notification** pane of the Azure portal to view the status of the change operation. - -:::image type="content" source="./media/automated-backups-overview/change-backup-storage-redundancy-managed-instance-notification.png" alt-text="Change backup storage redundancy notification"::: - ---- - -### Configure backup storage redundancy by using the Azure CLI - -#### [SQL Database](#tab/single-database) - -To configure backup storage redundancy when creating a new database, you can specify the `--backup-storage-redundancy` parameter with the `az sql db create` command. Possible values are `Geo`, `Zone`, and `Local`. By default, all databases in Azure SQL Database use geo-redundant storage for backups. Geo-restore is disabled if a database is created or updated with local or zone redundant backup storage. 
- -This example creates a database in the [General Purpose](service-tier-general-purpose.md) service tier with local backup redundancy: - -```azurecli -az sql db create \ - --resource-group myresourcegroup \ - --server myserver \ - --name mydb \ - --tier GeneralPurpose \ - --backup-storage-redundancy Local -``` - -Carefully consider the configuration option for `--backup-storage-redundancy` when creating a Hyperscale database. Storage redundancy can only be specified during the database creation process for Hyperscale databases. The selected storage redundancy option will be used for the lifetime of the database for both data storage redundancy and backup storage redundancy. Learn more in [Hyperscale backups and storage redundancy](#hyperscale-backups-and-storage-redundancy). - -Existing Hyperscale databases can migrate to different storage redundancy using [database copy](database-copy.md) or point in time restore: sample code to copy a Hyperscale database follows in this section. - -This example creates a database in the [Hyperscale](service-tier-general-purpose.md) service tier with Zone redundancy: - -```azurecli -az sql db create \ - --resource-group myresourcegroup \ - --server myserver \ - --name mydb \ - --tier Hyperscale \ - --backup-storage-redundancy Zone -``` -For more information, see [az sql db create](/cli/azure/sql/db#az-sql-db-create) and [az sql db update](/cli/azure/sql/db#az-sql-db-update). - -Except for Hyperscale and Basic tier databases, you can update the backup storage redundancy setting for an existing database with the `--backup-storage-redundancy` parameter and the `az sql db update` command. It may take up to 48 hours for the changes to be applied on the database. Switching from geo-redundant backup storage to local or zone redundant storage disables geo-restore. - -This example code changes the backup storage redundancy to `Local`. 
- -```azurecli -az sql db update \ - --resource-group myresourcegroup \ - --server myserver \ - --name mydb \ - --backup-storage-redundancy Local -``` - -You cannot update the backup storage redundancy of a Hyperscale database directly. However, you can change it using [the database copy command](database-copy.md) with the `--backup-storage-redundancy` parameter. This example copies a Hyperscale database to a new database using Gen5 hardware and two vCores. The new database has the backup redundancy set to `Zone`. - -```azurecli -az sql db copy \ - --resource-group myresourcegroup \ - --server myserver - --name myHSdb - --dest-resource-group mydestresourcegroup - --dest-server destdb - --dest-name myHSdb - --service-objective HS_Gen5_2 - --read-replicas 0 - --backup-storage-redundancy Zone -``` - -For syntax details, see [az sql db copy](/cli/azure/sql/db#az-sql-db-copy). For an overview of database copy, visit [Copy a transactionally consistent copy of a database in Azure SQL Database](database-copy.md). - -#### [SQL Managed Instance](#tab/managed-instance) - -Configuring backup storage redundancy is not available for a SQL Managed Instance when using the Azure CLI. For more information, see the [Azure portal](#configure-backup-storage-redundancy-by-using-the-azure-portal) or [PowerShell](#configure-backup-storage-redundancy-by-using-powershell) options. - ---- - -### Configure backup storage redundancy by using PowerShell - -#### [SQL Database](#tab/single-database) - -To configure backup storage redundancy when creating a new database, you can specify the `-BackupStorageRedundancy` parameter with the `New-AzSqlDatabase` cmdlet. Possible values are `Geo`, `Zone`, and `Local`. By default, all databases in Azure SQL Database use geo-redundant storage for backups. Geo-restore is disabled if a database is created with local or zone redundant backup storage. 
- -This example creates a database in the [General Purpose](service-tier-general-purpose.md) service tier with local backup redundancy: - -```powershell -# Create a new database with geo-redundant backup storage. -New-AzSqlDatabase -ResourceGroupName "ResourceGroup01" -ServerName "Server01" -DatabaseName "Database03" -Edition "GeneralPurpose" -Vcore 2 -ComputeGeneration "Gen5" -BackupStorageRedundancy Local -``` - -Carefully consider the configuration option for `--backup-storage-redundancy` when creating a Hyperscale database. Storage redundancy can only be specified during the database creation process for Hyperscale databases. The selected storage redundancy option will be used for the lifetime of the database for both data storage redundancy and backup storage redundancy. Learn more in [Hyperscale backups and storage redundancy](#hyperscale-backups-and-storage-redundancy). - -Existing databases can migrate to different storage redundancy using [database copy](database-copy.md) or point in time restore: sample code to copy a Hyperscale database follows in this section. - -This example creates a database in the [Hyperscale](service-tier-general-purpose.md) service tier with Zone redundancy: - -```powershell -# Create a new database with geo-redundant backup storage. -New-AzSqlDatabase -ResourceGroupName "ResourceGroup01" -ServerName "Server01" -DatabaseName "Database03" -Edition "Hyperscale" -Vcore 2 -ComputeGeneration "Gen5" -BackupStorageRedundancy Zone -``` - -For syntax details visit [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase). - -Except for Hyperscale and Basic tier databases, you can use the `-BackupStorageRedundancy` parameter with the `Set-AzSqlDatabase` cmdlet to update the backup storage redundancy setting for an existing database. Possible values are Geo, Zone, and Local. It may take up to 48 hours for the changes to be applied on the database. 
Switching from geo-redundant backup storage to local or zone redundant storage disables geo-restore. - -This example code changes the backup storage redundancy to `Local`. - -```powershell -# Change the backup storage redundancy for Database01 to zone-redundant. -Set-AzSqlDatabase -ResourceGroupName "ResourceGroup01" -DatabaseName "Database01" -ServerName "Server01" -BackupStorageRedundancy Local -``` - -For details visit [Set-AzSqlDatabase](/powershell/module/az.sql/set-azsqldatabase) - -Backup storage redundancy of an existing Hyperscale database cannot be updated. However, you can use the [database copy command](database-copy.md) to create a copy of the database and use the `-BackupStorageRedundancy` parameter to update the backup storage redundancy. This example copies a Hyperscale database to a new database using Gen5 hardware and two vCores. The new database has the backup redundancy set to `Zone`. - -```powershell -# Change the backup storage redundancy for Database01 to zone-redundant. -New-AzSqlDatabaseCopy -ResourceGroupName "ResourceGroup01" -ServerName "Server01" -DatabaseName "HSSourceDB" -CopyResourceGroupName "DestResourceGroup" -CopyServerName "DestServer" -CopyDatabaseName "HSDestDB" -Vcore 2 -ComputeGeneration "Gen5" -ComputeModel Provisioned -BackupStorageRedundancy Zone -``` - -For syntax details, visit [New-AzSqlDatabaseCopy](/powershell/module/az.sql/new-azsqldatabasecopy). - -For an overview of database copy, visit [Copy a transactionally consistent copy of a database in Azure SQL Database](database-copy.md). - -> [!NOTE] -> To use -BackupStorageRedundancy parameter with database restore, database copy or create secondary operations, use Azure PowerShell version Az.Sql 2.11.0. - - -#### [SQL Managed Instance](#tab/managed-instance) - -For configuring backup storage redundancy during managed instance creation, you can specify -BackupStorageRedundancy parameter. Possible values are Geo, Zone, and Local. 
- -```powershell -New-AzSqlInstance -Name managedInstance2 -ResourceGroupName ResourceGroup01 -Location westcentralus -AdministratorCredential (Get-Credential) -SubnetId "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/resourcegroup01/providers/Microsoft.Network/virtualNetworks/vnet_name/subnets/subnet_name" -LicenseType LicenseIncluded -StorageSizeInGB 1024 -VCore 16 -Edition "GeneralPurpose" -ComputeGeneration Gen4 -BackupStorageRedundancy Geo -``` - -For more information, see [New-AzSqlInstance](/powershell/module/az.sql/new-azsqlinstance). - ---- - -## Use Azure Policy to enforce backup storage redundancy - -If you have data residency requirements that require you to keep all your data in a single Azure region, you may want to enforce zone-redundant or locally redundant backups for your SQL Database or Managed Instance using Azure Policy. -Azure Policy is a service that you can use to create, assign, and manage policies that apply rules to Azure resources. Azure Policy helps you to keep these resources compliant with your corporate standards and service level agreements. For more information, see [Overview of Azure Policy](../../governance/policy/overview.md). - -### Built-in backup storage redundancy policies - -Following new built-in policies are added, which can be assigned at the subscription or resource group level to block creation of new database(s) or instance(s) with geo-redundant backup storage. 
- -[SQL Database should avoid using GRS backup redundancy](https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fb219b9cf-f672-4f96-9ab0-f5a3ac5e1c13) - - -[SQL Managed Instances should avoid using GRS backup redundancy](https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyDetailBlade/definitionId/%2Fproviders%2FMicrosoft.Authorization%2FpolicyDefinitions%2Fa9934fd7-29f2-4e6d-ab3d-607ea38e9079) - -A full list of built-in policy definitions for SQL Database and Managed Instance can be found [here](./policy-reference.md). - -To enforce data residency requirements at an organizational level, these policies can be assigned to a subscription. After these policies are assigned at a subscription level, users in the given subscription will not be able to create a database or a managed instance with geo-redundant backup storage via Azure portal or Azure PowerShell. - -> [!IMPORTANT] -> Azure policies are not enforced when creating a database via T-SQL. To enforce data residency when creating a database using T-SQL, [use 'LOCAL' or 'ZONE' as input to BACKUP_STORAGE_REDUNDANCY paramater in CREATE DATABASE statement](/sql/t-sql/statements/create-database-transact-sql#create-database-using-zone-redundancy-for-backups). - -Learn how to assign policies using the [Azure portal](../../governance/policy/assign-policy-portal.md) or [Azure PowerShell](../../governance/policy/assign-policy-powershell.md) - -## Next steps - -- Database backups are an essential part of any business continuity and disaster recovery strategy because they protect your data from accidental corruption or deletion. To learn about the other SQL Database business continuity solutions, see [Business continuity overview](business-continuity-high-availability-disaster-recover-hadr-overview.md). 
-- For information about how to configure, manage, and restore from long-term retention of automated backups in Azure Blob storage by using the Azure portal, see [Manage long-term backup retention by using the Azure portal](long-term-backup-retention-configure.md). -- For information about how to configure, manage, and restore from long-term retention of automated backups in Azure Blob storage by using PowerShell, see [Manage long-term backup retention by using PowerShell](long-term-backup-retention-configure.md). -- Get more information about how to [restore a database to a point in time by using the Azure portal](recovery-using-backups.md). -- Get more information about how to [restore a database to a point in time by using PowerShell](scripts/restore-database-powershell.md). -- To learn all about backup storage consumption on Azure SQL Managed Instance, see [Backup storage consumption on Managed Instance explained](https://aka.ms/mi-backup-explained). -- To learn how to fine-tune backup storage retention and costs for Azure SQL Managed Instance, see [Fine tuning backup storage costs on Managed Instance](https://aka.ms/mi-backup-tuning). diff --git a/articles/azure-sql/database/automatic-tuning-email-notifications-configure.md b/articles/azure-sql/database/automatic-tuning-email-notifications-configure.md deleted file mode 100644 index 54cdaf6e1e094..0000000000000 --- a/articles/azure-sql/database/automatic-tuning-email-notifications-configure.md +++ /dev/null @@ -1,249 +0,0 @@ ---- -title: Automatic tuning email notifications how-to guide -description: Enable e-mail notifications for Azure SQL Database automatic query tuning. 
-services: sql-database -ms.service: sql-db-mi -ms.subservice: performance -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: -ms.topic: how-to -author: NikaKinska -ms.author: nnikolic -ms.reviewer: kendralittle, mathoma, wiassaf -ms.date: 06/03/2019 ---- -# Email notifications for automatic tuning -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - - -Azure SQL Database tuning recommendations are generated by Azure SQL Database [automatic tuning](automatic-tuning-overview.md). This solution continuously monitors and analyzes workloads of databases providing customized tuning recommendations for each individual database related to index creation, index deletion, and optimization of query execution plans. - -Azure SQL Database automatic tuning recommendations can be viewed in the [Azure portal](database-advisor-find-recommendations-portal.md), retrieved with [REST API](/rest/api/sql/databaserecommendedactions/listbydatabaseadvisor) calls, or by using [T-SQL](https://azure.microsoft.com/blog/automatic-tuning-introduces-automatic-plan-correction-and-t-sql-management/) and [PowerShell](/powershell/module/az.sql/get-azsqldatabaserecommendedaction) commands. This article is based on using a PowerShell script to retrieve automatic tuning recommendations. - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported by Azure SQL Database, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. - -## Automate email notifications for automatic tuning recommendations - -The following solution automates the sending of email notifications containing automatic tuning recommendations. 
The solution described consists of automating execution of a PowerShell script for retrieving tuning recommendations using [Azure Automation](../../automation/automation-intro.md), and automation of scheduling email delivery job using [Microsoft Power Automate](https://flow.microsoft.com). - -## Create Azure Automation account - -To use Azure Automation, the first step is to create an automation account and to configure it with Azure resources to use for execution of the PowerShell script. To learn more about Azure Automation and its capabilities, see [Getting started with Azure automation](../../automation/index.yml). - -Follow these steps to create an Azure Automation Account through the method of selecting and configuring an Automation app from Azure Marketplace: - -1. Log into the Azure portal. -1. Click on "**+ Create a resource**" in the upper left corner. -1. Search for "**Automation**" (press enter). -1. Click on the Automation app in the search results. - - ![Adding Azure automation](./media/automatic-tuning-email-notifications-configure/howto-email-01.png) - -1. Once inside the "Create an Automation Account" pane, click on "**Create**". -1. Populate the required information: enter a name for this automation account, select your Azure subscription ID and Azure resources to be used for the PowerShell script execution. -1. For the "**Create Azure Run As account**" option, select **Yes** to configure the type of account under which PowerShell script runs with the help of Azure Automation. To learn more about account types, see [Run As account](../../automation/manage-runas-account.md). -1. Conclude creation of the automation account by clicking on **Create**. - -> [!TIP] -> Record your Azure Automation account name, subscription ID, and resources (such as copy-paste to a notepad) exactly as entered while creating the Automation app. You need this information later. 
- -If you have several Azure subscriptions for which you would like to build the same automation, you need to repeat this process for your other subscriptions. - -## Update Azure Automation modules - -The PowerShell script to retrieve automatic tuning recommendation uses [Get-AzResource](/powershell/module/az.Resources/Get-azResource) and [Get-AzSqlDatabaseRecommendedAction](/powershell/module/az.Sql/Get-azSqlDatabaseRecommendedAction) commands for which Azure Module version 4 and above is required. - -- In case your Azure Modules need updating, see [Az module support in Azure Automation](../../automation/shared-resources/modules.md). - -## Create Azure Automation runbook - -The next step is to create a Runbook in Azure Automation inside which the PowerShell script for retrieval of tuning recommendations resides. - -Follow these steps to create a new Azure Automation runbook: - -1. Access the Azure Automation account you created in the previous step. -1. Once in the automation account pane, click on the "**Runbooks**" menu item on the left-hand side to create a new Azure Automation runbook with the PowerShell script. To learn more about creating automation runbooks, see [Create a new runbook](../../automation/manage-runbooks.md#create-a-runbook). -1. To add a new runbook, click on the "**+Add a runbook**" menu option, and then click on the "**Quick create – Create a new runbook**".. -1. In the Runbook pane, type in the name of your runbook (for the purpose of this example, "**AutomaticTuningEmailAutomation**" is used), select the type of runbook as **PowerShell** and write a description of this runbook to describe its purpose. -1. Click on the **Create** button to finish creating a new runbook. - - ![Add Azure automation runbook](./media/automatic-tuning-email-notifications-configure/howto-email-03.png) - -Follow these steps to load a PowerShell script inside the runbook created: - -1. 
Inside the "**Edit PowerShell Runbook**" pane, select "**RUNBOOKS**" on the menu tree and expand the view until you see the name of your runbook (in this example "**AutomaticTuningEmailAutomation**"). Select this runbook. -1. On the first line of the "Edit PowerShell Runbook" (starting with the number 1), copy-paste the following PowerShell script code. This PowerShell script is provided as-is to get you started. Modify the script to suite your needs. - -In the header of the provided PowerShell script, you need to replace `` with your Azure subscription ID. To learn how to retrieve your Azure subscription ID, see [Getting your Azure Subscription GUID](/archive/blogs/mschray/getting-your-azure-subscription-guid-new-portal). - -In the case of several subscriptions, you can add them as comma-delimited to the "$subscriptions" property in the header of the script. - -```powershell -# PowerShell script to retrieve Azure SQL Database automatic tuning recommendations. -# -# Provided "as-is" with no implied warranties or support. -# The script is released to the public domain. -# -# Replace in the header with your Azure subscription ID. -# -# Microsoft Azure SQL Database team, 2018-01-22. 
- -# Set subscriptions : IMPORTANT – REPLACE WITH YOUR SUBSCRIPTION ID -$subscriptions = ("", "", "") - -# Get credentials -$Conn = Get-AutomationConnection -Name AzureRunAsConnection -Connect-AzAccount -ServicePrincipal -Tenant $Conn.TenantID -ApplicationId $Conn.ApplicationID -CertificateThumbprint $Conn.CertificateThumbprint - -# Define the resource types -$resourceTypes = ("Microsoft.Sql/servers/databases") -$advisors = ("CreateIndex", "DropIndex"); -$results = @() - -# Loop through all subscriptions -foreach($subscriptionId in $subscriptions) { - Select-AzSubscription -SubscriptionId $subscriptionId - $rgs = Get-AzResourceGroup - - # Loop through all resource groups - foreach($rg in $rgs) { - $rgname = $rg.ResourceGroupName; - - # Loop through all resource types - foreach($resourceType in $resourceTypes) { - $resources = Get-AzResource -ResourceGroupName $rgname -ResourceType $resourceType - - # Loop through all databases - # Extract resource groups, servers and databases - foreach ($resource in $resources) { - $resourceId = $resource.ResourceId - if ($resourceId -match ".*RESOURCEGROUPS/(?.*)/PROVIDERS.*") { - $ResourceGroupName = $matches['content'] - } else { - continue - } - if ($resourceId -match ".*SERVERS/(?.*)/DATABASES.*") { - $ServerName = $matches['content'] - } else { - continue - } - if ($resourceId -match ".*/DATABASES/(?.*)") { - $DatabaseName = $matches['content'] - } else { - continue - } - - # Skip if master - if ($DatabaseName -eq "master") { - continue - } - - # Loop through all automatic tuning recommendation types - foreach ($advisor in $advisors) { - $recs = Get-AzSqlDatabaseRecommendedAction -ResourceGroupName $ResourceGroupName -ServerName $ServerName -DatabaseName $DatabaseName -AdvisorName $advisor - foreach ($r in $recs) { - if ($r.State.CurrentValue -eq "Active") { - $object = New-Object -TypeName PSObject - $object | Add-Member -Name 'SubscriptionId' -MemberType Noteproperty -Value $subscriptionId - $object | Add-Member -Name 
'ResourceGroupName' -MemberType Noteproperty -Value $r.ResourceGroupName - $object | Add-Member -Name 'ServerName' -MemberType Noteproperty -Value $r.ServerName - $object | Add-Member -Name 'DatabaseName' -MemberType Noteproperty -Value $r.DatabaseName - $object | Add-Member -Name 'Script' -MemberType Noteproperty -Value $r.ImplementationDetails.Script - $results += $object - } - } - } - } - } - } -} - -# Format and output results for the email -$table = $results | Format-List -Write-Output $table -``` - -Click the "**Save**" button in the upper right corner to save the script. When you are satisfied with the script, click the "**Publish**" button to publish this runbook. - -At the main runbook pane, you can choose to click on the "**Start**" button to **test** the script. Click on the "**Output**" to view results of the script executed. This output is going to be the content of your email. The sample output from the script can be seen in the following screenshot. - -![Run view automatic tuning recommendations with Azure Automation](./media/automatic-tuning-email-notifications-configure/howto-email-04.png) - -Ensure to adjust the content by customizing the PowerShell script to your needs. - -With the above steps, the PowerShell script to retrieve automatic tuning recommendations is loaded in Azure Automation. The next step is to automate and schedule the email delivery job. - -## Automate the email jobs with Microsoft Power Automate - -To complete the solution, as the final step, create an automation flow in Microsoft Power Automate consisting of three actions (jobs): - -- "**Azure Automation - Create job**" – used to execute the PowerShell script to retrieve automatic tuning recommendations inside the Azure Automation runbook. -- "**Azure Automation - Get job output**" – used to retrieve output from the executed PowerShell script. -- "**Office 365 Outlook – Send an email**" – used to send out email. 
E-mails are sent out using the work or school account of the individual creating the flow. - -To learn more about Microsoft Power Automate capabilities, see [Getting started with Microsoft Power Automate](/power-automate/getting-started). - -Prerequisite for this step is to sign up for a [Microsoft Power Automate](https://flow.microsoft.com) account and to log in. Once inside the solution, follow these steps to set up a **new flow**: - -1. Access "**My flows**" menu item. -1. Inside My flows, select the "**+Create from blank**" link at the top of the page. -1. Click on the link "**Search for hundreds of connectors and triggers**" at the bottom of the page. -1. In the search field type "**recurrence**", and select "**Schedule - Recurrence**" from the search results to schedule the email delivery job to run. -1. In the Recurrence pane in the Frequency field, select the scheduling frequency for this flow to execute, such as send automated email each Minute, Hour, Day, Week, etc. - -The next step is to add three jobs (create, get output and send email) to the newly created recurring flow. To accomplish adding the required jobs to the flow, follow these steps: - -1. Create action to execute PowerShell script to retrieve tuning recommendations - - - Select "**+New step**", followed by "**Add an action**" inside the Recurrence flow pane. - - In the search field type "**automation**" and select "**Azure Automation – Create job**" from the search results. - - In the Create job pane, configure the job properties. For this configuration, you will need details of your Azure subscription ID, Resource Group and Automation Account **previously recorded** at the **Automation Account pane**. To learn more about options available in this section, see [Azure Automation - Create Job](/connectors/azureautomation/#create-job). - - Complete creating this action by clicking on "**Save flow**". - -2. 
Create an action to retrieve output from the executed PowerShell script - - - Select "**+New step**", followed by "**Add an action**" inside the Recurrence flow pane - - In the search field type "**automation**" and select "**Azure Automation – Get job output**" from the search results. To learn more about options available in this section, see [Azure Automation – Get job output](/connectors/azureautomation/#get-job-output). - - Populate fields required (similar to creating the previous job) - populate your Azure subscription ID, Resource Group, and Automation Account (as entered in the Automation Account pane). - - Click inside the field "**Job ID**" for the "**Dynamic content**" menu to show up. From within this menu, select the option "**Job ID**". - - Complete creating this action by clicking on "**Save flow**". - -3. Create an action to send out email using Office 365 integration - - - Select "**+New step**", followed by "**Add an action**" inside the Recurrence flow pane. - - In the search field type "**send an email**" and select "**Office 365 Outlook – Send an email**" from the search results. - - In the "**To**" field type in the email address to which you need to send the notification email. - - In the "**Subject**" field type in the subject of your email, for example "Automatic tuning recommendations email notification". - - Click inside the field "**Body**" for the "**Dynamic content**" menu to show up. From within this menu, under "**Get job output**", select "**Content**". - - Complete creating this action by clicking on "**Save flow**". - -> [!TIP] -> To send automated emails to different recipients, create separate flows. In these additional flows, change the recipient email address in the "To" field, and the email subject line in the "Subject" field. 
Creating new runbooks in Azure Automation with customized PowerShell scripts (such as with change of Azure subscription ID) enables further customization of automated scenarios, such as for example emailing separate recipients on Automated tuning recommendations for separate subscriptions. - -The above concludes steps required to configure the email delivery job workflow. The entire flow consisting of three actions built is shown in the following image. - -![View automatic tuning email notifications flow](./media/automatic-tuning-email-notifications-configure/howto-email-05.png) - -To test the flow, click on "**Run Now**" in the upper right corner inside the flow pane. - -Statistics of running the automated jobs, showing success of email notifications sent out, can be seen from the Flow analytics pane. - -![Running flow for automatic tuning email notifications](./media/automatic-tuning-email-notifications-configure/howto-email-06.png) - -The Flow analytics pane is helpful for monitoring the success of job executions, and if required for troubleshooting. In the case of troubleshooting, you also might want to examine the PowerShell script execution log accessible through the Azure Automation app. - -The final output of the automated email looks similar to the following email received after building and running this solution: - -![Sample email output from automatic tuning email notifications](./media/automatic-tuning-email-notifications-configure/howto-email-07.png) - -By adjusting the PowerShell script, you can adjust the output and formatting of the automated email to your needs. - -You might further customize the solution to build email notifications based on a specific tuning event, and to multiple recipients, for multiple subscriptions or databases, depending on your custom scenarios. - -## Next steps - -- Learn more on how automatic tuning can help you improve database performance, see [Automatic tuning in Azure SQL Database](automatic-tuning-overview.md). 
-- To enable automatic tuning in Azure SQL Database to manage your workload, see [Enable automatic tuning](automatic-tuning-enable.md). -- To manually review and apply automatic tuning recommendations, see [Find and apply performance recommendations](database-advisor-find-recommendations-portal.md). diff --git a/articles/azure-sql/database/automatic-tuning-enable.md b/articles/azure-sql/database/automatic-tuning-enable.md deleted file mode 100644 index 2a954a6852ba1..0000000000000 --- a/articles/azure-sql/database/automatic-tuning-enable.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -title: Enable automatic tuning -description: You can enable automatic tuning on your database easily using the Azure portal. -services: sql-database -ms.service: sql-db-mi -ms.subservice: performance -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: NikaKinska -ms.author: nnikolic -ms.reviewer: kendralittle, mathoma, wiassaf -ms.date: 03/03/2021 ---- -# Enable automatic tuning in the Azure portal to monitor queries and improve workload performance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Azure SQL Database automatically manages data services that constantly monitor your queries and identifies the action that you can perform to improve performance of your workload. You can review recommendations and manually apply them, or let Azure SQL Database automatically apply corrective actions - this is known as **automatic tuning mode**. 
- -Automatic tuning can be enabled at the server or the database level through: - -- The [Azure portal](automatic-tuning-enable.md#azure-portal) -- [REST API](automatic-tuning-enable.md#rest-api) calls -- [T-SQL](/sql/t-sql/statements/alter-database-transact-sql-set-options?view=azuresqldb-current&preserve-view=true) commands - -> [!NOTE] -> For Azure SQL Managed Instance, the supported option FORCE_LAST_GOOD_PLAN can only be configured through [T-SQL](https://azure.microsoft.com/blog/automatic-tuning-introduces-automatic-plan-correction-and-t-sql-management). The Azure portal based configuration and automatic index tuning options described in this article do not apply to Azure SQL Managed Instance. - -> [!NOTE] -> Configuring automatic tuning options through the ARM (Azure Resource Manager) template is not supported at this time. - -## Enable automatic tuning on server - -On the server level you can choose to inherit automatic tuning configuration from "Azure Defaults" or not to inherit the configuration. Azure defaults are FORCE_LAST_GOOD_PLAN is enabled, CREATE_INDEX is disabled, and DROP_INDEX is disabled. - -> [!IMPORTANT] -> As of March, 2020 new Azure defaults for automatic tuning are as follows: -> -> - FORCE_LAST_GOOD_PLAN = enabled, CREATE_INDEX = disabled, and DROP_INDEX = disabled. -> - Existing servers with no automatic tuning preferences configured are automatically configured to INHERIT the Azure defaults. This applies to all customers currently having server settings for automatic tuning in an undefined state. -> - New servers created will automatically be configured to INHERIT the Azure defaults (unlike earlier when automatic tuning configuration was in an undefined state upon new server creation). - -### Azure portal - -To enable automatic tuning on a [server](logical-servers.md) in Azure SQL Database, navigate to the server in the Azure portal and then select **Automatic tuning** in the menu. 
- -![Screenshot shows Automatic tuning in the Azure portal, where you can apply options for a server.](./media/automatic-tuning-enable/server.png) - -> [!NOTE] -> Please note that the **DROP_INDEX** option at this time is not compatible with applications using partition switching and index hints and should not be enabled in these cases. Dropping unused indexes is not supported for Premium and Business Critical service tiers. - -Select the automatic tuning options you want to enable and select **Apply**. - -Automatic tuning options on a server are applied to all databases on this server. By default, all databases inherit configuration from their parent server, but this can be overridden and specified for each database individually. - -### REST API - -To find out more about using a REST API to enable automatic tuning on a **server**, see [Server automatic tuning UPDATE and GET HTTP methods](/rest/api/sql/serverautomatictuning). - -## Enable automatic tuning on an individual database - -Azure SQL Database enables you to individually specify the automatic tuning configuration for each database. On the database level you can choose to inherit automatic tuning configuration from the parent server, "Azure Defaults" or not to inherit the configuration. Azure Defaults are set to FORCE_LAST_GOOD_PLAN is enabled, CREATE_INDEX is disabled, and DROP_INDEX is disabled. - -> [!TIP] -> The general recommendation is to manage the automatic tuning configuration at **server level** so the same configuration settings can be applied on every database automatically. Configure automatic tuning on an individual database only if you need that database to have different settings than others inheriting settings from the same server. - -### Azure portal - -To enable automatic tuning on a **single database**, navigate to the database in the Azure portal and select **Automatic tuning**. - -Individual automatic tuning settings can be separately configured for each database. 
You can manually configure an individual automatic tuning option, or specify that an option inherits its settings from the server. - -![Screenshot shows Automatic tuning in the Azure portal, where you can apply options for a single database.](./media/automatic-tuning-enable/database.png) - -Please note that DROP_INDEX option at this time is not compatible with applications using partition switching and index hints and should not be enabled in these cases. - -Once you have selected your desired configuration, click **Apply**. - -### REST API - -To find out more about using a REST API to enable automatic tuning on a single database, see [Azure SQL Database automatic tuning UPDATE and GET HTTP methods](/rest/api/sql/databaseautomatictuning). - -### T-SQL - -To enable automatic tuning on a single database via T-SQL, connect to the database and execute the following query: - -```SQL -ALTER DATABASE current SET AUTOMATIC_TUNING = AUTO | INHERIT | CUSTOM -``` - -Setting automatic tuning to AUTO will apply Azure Defaults. Setting it to INHERIT, automatic tuning configuration will be inherited from the parent server. Choosing CUSTOM, you will need to manually configure automatic tuning. - -To configure individual automatic tuning options via T-SQL, connect to the database and execute the query such as this one: - -```SQL -ALTER DATABASE current SET AUTOMATIC_TUNING (FORCE_LAST_GOOD_PLAN = ON, CREATE_INDEX = ON, DROP_INDEX = OFF) -``` - -Setting the individual tuning option to ON will override any setting that database inherited and enable the tuning option. Setting it to OFF will also override any setting that database inherited and disable the tuning option. Automatic tuning option, for which DEFAULT is specified, will inherit the automatic tuning configuration from the server level settings. - -> [!IMPORTANT] -> In the case of [active geo-replication](auto-failover-group-overview.md), Automatic tuning needs to be configured on the primary database only. 
Automatically applied tuning actions, such as for example index create or delete will be automatically replicated to the read-only secondary. Attempting to enable Automatic tuning via T-SQL on the read-only secondary will result in a failure as having a different tuning configuration on the read-only secondary is unsupported. -> - -To find out more about T-SQL options to configure automatic tuning, see [ALTER DATABASE SET Options (Transact-SQL)](/sql/t-sql/statements/alter-database-transact-sql-set-options?view=azuresqldb-current&preserve-view=true). - -## Troubleshooting - -### Automated recommendation management is disabled - -In case of error messages that automated recommendation management has been disabled, or simply disabled by system, the most common causes are: -- Query Store is not enabled, or -- Query Store is in read-only mode for a specified database, or -- Query Store stopped running because it used the allocated storage space. - -The following steps can be considered to rectify this issue: -- Clean up the Query Store, or modify the data retention period to "auto" by using T-SQL. See how to [configure recommended retention and capture policy for Query Store](./query-performance-insight-use.md#recommended-retention-and-capture-policy). -- Use SQL Server Management Studio (SSMS) and follow these steps: - - Connect to the Azure SQL Database - - Right click on the database - - Go to Properties and click on Query Store - - Change the Operation Mode to Read-Write - - Change the Store Capture Mode to Auto - - Change the Size Based Cleanup Mode to Auto - -### Permissions - -As automatic tuning is an Azure feature, to use it you will need to use Azure's built-in roles. Using SQL Authentication only will not be sufficient to use the feature from the Azure portal.
- -To use automatic tuning, the minimum required permission to grant to the user is Azure's built-in [SQL Database contributor](../../role-based-access-control/built-in-roles.md#sql-db-contributor) role. You can also consider using higher privilege roles such as SQL Server Contributor, SQL Managed Instance Contributor, Contributor, and Owner. - -## Configure automatic tuning e-mail notifications - -To receive automated email notifications on recommendations made by the automatic tuning, see the [automatic tuning e-mail notifications](automatic-tuning-email-notifications-configure.md) guide. - -## Next steps - -- Read the [Automatic tuning article](automatic-tuning-overview.md) to learn more about automatic tuning and how it can help you improve your performance. -- See [Performance recommendations](database-advisor-implement-performance-recommendations.md) for an overview of Azure SQL Database performance recommendations. -- See [Query Performance Insights](query-performance-insight-use.md) to learn about viewing the performance impact of your top queries. diff --git a/articles/azure-sql/database/automatic-tuning-overview.md b/articles/azure-sql/database/automatic-tuning-overview.md deleted file mode 100644 index 44a68eeec99b0..0000000000000 --- a/articles/azure-sql/database/automatic-tuning-overview.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Automatic tuning overview -description: Azure SQL Database and Azure SQL Managed Instance analyze SQL queries and automatically adapt to user workload.
-services: sql-database -ms.service: sql-db-mi -ms.subservice: performance -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: conceptual -author: NikaKinska -ms.author: nnikolic -ms.reviewer: kendralittle, mathoma, wiassaf -ms.date: 10/18/2021 ---- -# Automatic tuning in Azure SQL Database and Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Azure SQL Database and Azure SQL Managed Instance automatic tuning provides peak performance and stable workloads through continuous performance tuning based on AI and machine learning. - -Automatic tuning is a fully managed intelligent performance service that uses built-in intelligence to continuously monitor queries executed on a database, and it automatically improves their performance. This is achieved through dynamically adapting a database to changing workloads and applying tuning recommendations. Automatic tuning learns horizontally from all databases on Azure through AI and it dynamically improves its tuning actions. The longer a database runs with automatic tuning on, the better it performs. - -Azure SQL Database and Azure SQL Managed Instance automatic tuning might be one of the most important features that you can enable to provide stable and peak performing database workloads. - -## What can automatic tuning do for you - -- Automated performance tuning of databases -- Automated verification of performance gains -- Automated rollback and self-correction -- Tuning history -- Tuning action Transact-SQL (T-SQL) scripts for manual deployments -- Proactive workload performance monitoring -- Scale out capability on hundreds of thousands of databases -- Positive impact to DevOps resources and the total cost of ownership - -## Safe, Reliable, and Proven - -Tuning operations applied to databases in Azure SQL Database are fully safe for the performance of your most intense workloads. The system has been designed with care not to interfere with the user workloads. 
Automated tuning recommendations are applied only at the times of a low utilization. The system can also temporarily disable automatic tuning operations to protect the workload performance. In such case, "Disabled by the system" message will be shown in Azure portal. Automatic tuning regards workloads with the highest resource priority. - -Automatic tuning mechanisms are mature and have been perfected on several million databases running on Azure. Automated tuning operations applied are verified automatically to ensure there is a positive improvement to the workload performance. Regressed performance recommendations are dynamically detected and promptly reverted. Through the tuning history recorded, there exists a clear trace of tuning improvements made to each database in Azure SQL Database and Azure SQL Managed Instance. - -![How does automatic tuning work](./media/automatic-tuning-overview/how-does-automatic-tuning-work.png) - -Azure SQL Database automatic tuning is sharing its core logic with the SQL Server automatic tuning feature in the database engine. For additional technical information on the built-in intelligence mechanism, see [SQL Server automatic tuning](/sql/relational-databases/automatic-tuning/automatic-tuning). - -## Enable automatic tuning - -- You [enable automatic tuning for Azure SQL Database in the Azure portal](automatic-tuning-enable.md) or by using the [ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql-set-options?view=azuresqldb-current&preserve-view=true) T-SQL statement. -- You enable automatic tuning for Azure SQL Managed Instance by using the [ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql-set-options?view=azuresqldb-mi-current&preserve-view=true) T-SQL statement. 
- -## Automatic tuning options - -The automatic tuning options available in Azure SQL Database and Azure SQL Managed Instance are: - -| Automatic tuning option | Single database and pooled database support | Instance database support | -| :----------------------------- | ----- | ----- | -| **CREATE INDEX** - Identifies indexes that may improve performance of your workload, creates indexes, and automatically verifies that performance of queries has improved. | Yes | No | -| **DROP INDEX** - Drops unused (over the last 90 days) and duplicate indexes. Unique indexes, including indexes supporting primary key and unique constraints, are never dropped. This option may be automatically disabled when queries with index hints are present in the workload, or when the workload performs partition switching. On Premium and Business Critical service tiers, this option will never drop unused indexes, but will drop duplicate indexes, if any. | Yes | No | -| **FORCE LAST GOOD PLAN** (automatic plan correction) - Identifies Azure SQL queries using an execution plan that is slower than the previous good plan, and queries using the last known good plan instead of the regressed plan. | Yes | Yes | - -### Automatic tuning for SQL Database - -Automatic tuning for Azure SQL Database uses the **CREATE INDEX**, **DROP INDEX**, and **FORCE LAST GOOD PLAN** database advisor recommendations to optimize your database performance. For more information, see [Database advisor recommendations in the Azure portal](database-advisor-find-recommendations-portal.md), in [PowerShell](/powershell/module/az.sql/get-azsqldatabaserecommendedaction), and in the [REST API](/rest/api/sql/serverautomatictuning). - -You can either manually apply tuning recommendations using the Azure portal or you can let automatic tuning autonomously apply tuning recommendations for you. 
The benefits of letting the system autonomously apply tuning recommendations for you is that it automatically validates there exists a positive gain to the workload performance, and if there is no significant performance improvement detected, it will automatically revert the tuning recommendation. Please note that in case of queries affected by tuning recommendations that are not executed frequently, the validation phase can take up to 72 hrs by design. - -In case you are applying tuning recommendations through T-SQL, the automatic performance validation, and reversal mechanisms are not available. Recommendations applied in such way will remain active and shown in the list of tuning recommendations for 24-48 hours before the system automatically withdraws them. If you would like to remove a recommendation sooner, you can discard it from Azure portal. - -Automatic tuning options can be independently enabled or disabled per database, or they can be configured at the server-level and applied on every database that inherits settings from the server. Servers can inherit Azure defaults for automatic tuning settings. Azure defaults at this time are set to FORCE_LAST_GOOD_PLAN is enabled, CREATE_INDEX is enabled, and DROP_INDEX is disabled. - -> [!IMPORTANT] -> As of March, 2020 changes to Azure defaults for automatic tuning will take effect as follows: -> -> - New Azure defaults will be FORCE_LAST_GOOD_PLAN = enabled, CREATE_INDEX = disabled, and DROP_INDEX = disabled. -> - Existing servers with no automatic tuning preferences configured will be automatically configured to INHERIT the new Azure defaults. This applies to all customers currently having server settings for automatic tuning in an undefined state. -> - New servers created will automatically be configured to INHERIT the new Azure defaults (unlike earlier when automatic tuning configuration was in an undefined state upon new server creation). 
- -Configuring automatic tuning options on a server and inheriting settings for databases belonging to the parent server is a recommended method for configuring automatic tuning as it simplifies management of automatic tuning options for a large number of databases. - -To learn about building email notifications for automatic tuning recommendations, see [Email notifications for automatic tuning](automatic-tuning-email-notifications-configure.md). - -### Automatic tuning for Azure SQL Managed Instance - -Automatic tuning for SQL Managed Instance only supports **FORCE LAST GOOD PLAN**. For more information about configuring automatic tuning options through T-SQL, see [Automatic tuning introduces automatic plan correction](https://azure.microsoft.com/blog/automatic-tuning-introduces-automatic-plan-correction-and-t-sql-management/) and [Automatic plan correction](/sql/relational-databases/automatic-tuning/automatic-tuning#automatic-plan-correction). - -## Next steps - -- Read the blog post [Artificial Intelligence tunes Azure SQL Database](https://azure.microsoft.com/blog/artificial-intelligence-tunes-azure-sql-databases/). -- Learn how automatic tuning works under the hood in [Automatically indexing millions of databases in Microsoft Azure SQL Database](https://www.microsoft.com/research/uploads/prod/2019/02/autoindexing_azuredb.pdf). -- Learn how automatic tuning can proactively help you [Diagnose and troubleshoot high CPU on Azure SQL Database](high-cpu-diagnose-troubleshoot.md) \ No newline at end of file diff --git a/articles/azure-sql/database/automation-manage.md b/articles/azure-sql/database/automation-manage.md deleted file mode 100644 index 049a62d75c91f..0000000000000 --- a/articles/azure-sql/database/automation-manage.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Manage databases with Azure Automation -description: Learn about how the Azure Automation service can be used to manage Azure SQL Database at scale. 
-services: sql-database -ms.service: sql-db-mi -ms.subservice: deployment-configuration -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: LitKnd -ms.author: kendralittle -ms.date: 03/12/2019 ---- - -# Manage databases in Azure SQL Database by using Azure Automation - -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -This guide will introduce you to the Azure Automation service, and how it can be used to simplify the management of databases in Azure SQL Database. - -## About Azure Automation - -[Azure Automation](https://azure.microsoft.com/services/automation/) is an Azure service for simplifying cloud management through process automation. Using Azure Automation, long-running, manual, error-prone, and frequently repeated tasks can be automated to increase reliability, efficiency, and time to value for your organization. For information on getting started, see [Azure Automation intro](../../automation/automation-intro.md) - -Azure Automation provides a workflow execution engine with high reliability and high availability, and that scales to meet your needs as your organization grows. In Azure Automation, processes can be kicked off manually, by third-party systems, or at scheduled intervals so that tasks happen exactly when needed. - -Lower operational overhead and free up IT / DevOps staff to focus on work that adds business value by moving your cloud management tasks to be run automatically by Azure Automation. - -## How Azure Automation can help manage your databases - -With Azure Automation, you can manage databases in Azure SQL Database by using [PowerShell cmdlets](/powershell/module/servicemanagement/azure.service/#sql) that are available in the [Azure PowerShell tools](/powershell/azure/). Azure Automation has these Azure SQL Database PowerShell cmdlets available out of the box, so that you can perform all of your SQL Database management tasks within the service. 
You can also pair these cmdlets in Azure Automation with the cmdlets for other Azure services, to automate complex tasks across Azure services and across third-party systems. - -Azure Automation also has the ability to communicate with SQL servers directly, by issuing SQL commands using PowerShell. - -The runbook and module galleries for [Azure Automation](../../automation/automation-runbook-gallery.md) offer a variety of runbooks from Microsoft and the community that you can import into Azure Automation. To use one, download a runbook from the gallery, or you can directly import runbooks from the gallery, or from your Automation account in the Azure portal. - ->[!NOTE] -> The Automation runbook may run from a range of IP addresses at any datacenter in an Azure region. To learn more, see [Automation region DNS records](../../automation/how-to/automation-region-dns-records.md). - -## Next steps - -Now that you've learned the basics of Azure Automation and how it can be used to manage Azure SQL Database, follow these links to learn more about Azure Automation. - -- [Azure Automation Overview](../../automation/automation-intro.md) -- [My first runbook](../../automation/learn/powershell-runbook-managed-identity.md) \ No newline at end of file diff --git a/articles/azure-sql/database/az-cli-script-samples-content-guide.md b/articles/azure-sql/database/az-cli-script-samples-content-guide.md deleted file mode 100644 index 67dfb4c2b49f6..0000000000000 --- a/articles/azure-sql/database/az-cli-script-samples-content-guide.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: Azure CLI samples for Azure SQL Database & Managed Instances | Microsoft Docs -titleSuffix: Azure SQL Database & SQL Managed Instance -description: Find Azure CLI script samples to create and manage Azure SQL Database and Azure SQL Managed Instance. 
-services: sql-database -ms.service: sql-db-mi -ms.subservice: deployment-configuration -ms.custom: overview-samples, mvc, sqldbrb=2, devx-track-azurecli, seo-azure-cli -ms.devlang: azurecli -ms.topic: sample -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 12/22/2021 -keywords: sql database, managed instance, azure cli samples, azure cli examples, azure cli code samples, azure cli script examples ---- - -# Azure CLI samples for Azure SQL Database and SQL Managed Instance - -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -You can configure Azure SQL Database and SQL Managed Instance by using the Azure CLI. - -[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] - -## Samples - -## [Azure SQL Database](#tab/single-database) - -The following table includes links to Azure CLI script examples to manage single and pooled databases in Azure SQL Database. - -|Area|Description| -|---|---| -|**Create databases**|| -| [Create a single database](scripts/create-and-configure-database-cli.md) | Creates an SQL Database and configures a server-level firewall rule. | -| [Create pooled databases](scripts/move-database-between-elastic-pools-cli.md) | Creates elastic pools, moves pooled databases, and changes compute sizes. | -|**Scale databases**|| -| [Scale a single database](scripts/monitor-and-scale-database-cli.md) | Scales single database. | -| [Scale pooled database](scripts/scale-pool-cli.md) | Scales a SQL elastic pool to a different compute size. | -|**Configure geo-replication**|| -| [Single database](scripts/setup-geodr-failover-database-cli.md)| Configures active geo-replication for a database in Azure SQL Database and fails it over to the secondary replica. 
| -| [Pooled database](scripts/setup-geodr-failover-pool-cli.md)| Configures active geo-replication for a database in an elastic pool, then fails it over to the secondary replica. | -|**Configure failover group**|| -| [Configure failover group](scripts/setup-geodr-failover-group-cli.md) | Configures a failover group for a group of databases and failover over databases to the secondary server. | -| [Single database](scripts/add-database-to-failover-group-cli.md)| Creates a database and a failover group, adds the database to the failover group, then tests failover to the secondary server. | -| [Pooled database](scripts/add-elastic-pool-to-failover-group-cli.md) | Creates a database, adds it to an elastic pool, adds the elastic pool to the failover group, then tests failover to the secondary server. | -| **Auditing and threat detection** | -| [Configure auditing and threat-detection](scripts/auditing-threat-detection-cli.md)| Configures auditing and threat detection policies for a database in Azure SQL Database. | -| **Back up, restore, copy, and import a database**|| -| [Back up a database](scripts/backup-database-cli.md)| Backs up a database in SQL Database to an Azure storage backup. | -| [Restore a database](scripts/restore-database-cli.md)| Restores a database in SQL Database to a specific point in time. | -| [Copy a database to a new server](scripts/copy-database-to-new-server-cli.md) | Creates a copy of an existing database in SQL Database in a new server. | -| [Import a database from a BACPAC file](scripts/import-from-bacpac-cli.md)| Imports a database to SQL Database from a BACPAC file. | - - -Learn more about the [single-database Azure CLI API](single-database-manage.md#azure-cli). - -## [Azure SQL Managed Instance](#tab/managed-instance) - -The following table includes links to Azure CLI script examples for Azure SQL Managed Instance. 
- -|Area|Description| -|---|---| -| [Create SQL Managed Instance](../managed-instance/scripts/create-configure-managed-instance-cli.md)| Creates a SQL Managed Instance. | -| [Configure Transparent Data Encryption (TDE)](../managed-instance/scripts/transparent-data-encryption-byok-sql-managed-instance-cli.md)| Configures Transparent Data Encryption (TDE) in SQL Managed Instance by using Azure Key Vault with various key scenarios. | -| [Restore geo-backup](../managed-instance/scripts/restore-geo-backup-cli.md) | Performs a geo-restore between two instances of SQL Managed Instance to a specific point in time. | - - -For additional SQL Managed Instance examples, see the [create](/archive/blogs/sqlserverstorageengine/create-azure-sql-managed-instance-using-azure-cli), [update](/archive/blogs/sqlserverstorageengine/modify-azure-sql-database-managed-instance-using-azure-cli), [move a database](/archive/blogs/sqlserverstorageengine/cross-instance-point-in-time-restore-in-azure-sql-database-managed-instance), and [working with](https://medium.com/azure-sqldb-managed-instance/working-with-sql-managed-instance-using-azure-cli-611795fe0b44) scripts. - -Learn more about the [SQL Managed Instance Azure CLI API](../managed-instance/api-references-create-manage-instance.md#azure-cli-create-and-configure-managed-instances). - ---- diff --git a/articles/azure-sql/database/azure-defender-for-sql.md b/articles/azure-sql/database/azure-defender-for-sql.md deleted file mode 100644 index 5403028f1c23e..0000000000000 --- a/articles/azure-sql/database/azure-defender-for-sql.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: Microsoft Defender for SQL -description: Learn about functionality for managing your database vulnerabilities and detecting anomalous activities that could indicate a threat to your database in Azure SQL Database, Azure SQL Managed Instance, or Azure Synapse.
-ms.service: sql-db-mi -ms.subservice: security -ms.devlang: -ms.custom: sqldbrb=2 -ms.topic: conceptual -ms.author: memildin -manager: rkarlin -author: memildin -ms.date: 06/07/2021 ---- -# Microsoft Defender for SQL - -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -Microsoft Defender for SQL is a unified package for advanced SQL security capabilities. Microsoft Defender for Cloud is available for Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics. It includes functionality for surfacing and mitigating potential database vulnerabilities, and detecting anomalous activities that could indicate a threat to your database. It provides a single go-to location for enabling and managing these capabilities. - -## What are the benefits of Microsoft Defender for SQL? - -Microsoft Defender for Cloud provides a set of advanced SQL security capabilities, including SQL Vulnerability Assessment and Advanced Threat Protection. -- [Vulnerability Assessment](sql-vulnerability-assessment.md) is an easy-to-configure service that can discover, track, and help you remediate potential database vulnerabilities. It provides visibility into your security state, and it includes actionable steps to resolve security issues and enhance your database fortifications. -- [Advanced Threat Protection](threat-detection-overview.md) detects anomalous activities indicating unusual and potentially harmful attempts to access or exploit your database. It continuously monitors your database for suspicious activities, and it provides immediate security alerts on potential vulnerabilities, Azure SQL injection attacks, and anomalous database access patterns. Advanced Threat Protection alerts provide details of the suspicious activity and recommend action on how to investigate and mitigate the threat. - -Enable Microsoft Defender for SQL once to enable all these included features. 
With one click, you can enable Microsoft Defender for all databases on your [server](logical-servers.md) in Azure or in your SQL Managed Instance. Enabling or managing Microsoft Defender for Cloud settings requires belonging to the [SQL security manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role, or one of the database or server admin roles. - -For more information about Microsoft Defender for SQL pricing, see the [Microsoft Defender for Cloud pricing page](https://azure.microsoft.com/pricing/details/security-center/). - -## Enable Microsoft Defender for Cloud - -There are multiple ways to enable Microsoft Defender plans. You can enable it at the subscription level (**recommended**) from: - -- [Microsoft Defender for Cloud](#enable-microsoft-defender-for-azure-sql-database-at-the-subscription-level-from-microsoft-defender-for-cloud) -- [Enable Defender plans programmatically with the REST API, Azure CLI, PowerShell, or Azure Policy](#enable-microsoft-defender-plans-programatically) - -Alternatively, you can enable it at the resource level as described in [Enable Microsoft Defender for Azure SQL Database at the resource level](#enable-microsoft-defender-for-azure-sql-database-at-the-resource-level) - -### Enable Microsoft Defender for Azure SQL Database at the subscription level from Microsoft Defender for Cloud -To enable Microsoft Defender for Azure SQL Database at the subscription level from within Microsoft Defender for Cloud: - -1. From the [Azure portal](https://portal.azure.com), open **Defender for Cloud**. -1. From Defender for Cloud's menu, select **Pricing and settings**. -1. Select the relevant subscription. -1. Change the plan setting to **On**. - - :::image type="content" source="media/azure-defender-for-sql/enable-azure-defender-sql-subscription-level.png" alt-text="Enabling Microsoft Defender for Azure SQL Database at the subscription level."::: - -1. Select **Save**. 
- - -### Enable Microsoft Defender plans programatically - -The flexibility of Azure allows for a number of programmatic methods for enabling Microsoft Defender plans. - -Use any of the following tools to enable Microsoft Defender for your subscription: - -| Method | Instructions | -|--------------|----------------------| -| REST API | [Pricings API](/rest/api/securitycenter/pricings) | -| Azure CLI | [az security pricing](/cli/azure/security/pricing) | -| PowerShell | [Set-AzSecurityPricing](/powershell/module/az.security/set-azsecuritypricing) | -| Azure Policy | [Bundle Pricings](https://github.com/Azure/Azure-Security-Center/blob/master/Pricing%20%26%20Settings/ARM%20Templates/Set-ASC-Bundle-Pricing.json) | - - -### Enable Microsoft Defender for Azure SQL Database at the resource level - -We recommend enabling Microsoft Defender plans at the subscription level and this can help the creation of protected resources. However, if you have an organizational reason to enable Microsoft Defender for Cloud at the server level, use the following steps: - -1. From the [Azure portal](https://portal.azure.com), open your server or managed instance. -1. Under the **Security** heading, select **Defender for Cloud**. -1. Select **Enable Microsoft Defender for SQL**. - - :::image type="content" source="media/azure-defender-for-sql/enable-azure-defender.png" alt-text="Enable Microsoft Defender for SQL from within Azure SQL databases."::: - -> [!NOTE] -> A storage account is automatically created and configured to store your **Vulnerability Assessment** scan results. If you've already enabled Microsoft Defender for another server in the same resource group and region, then the existing storage account is used. -> -> The cost of Microsoft Defender for SQL is aligned with Microsoft Defender for Cloud standard tier pricing per node, where a node is the entire server or managed instance. 
You are thus paying only once for protecting all databases on the server or managed instance with Microsoft Defender for Cloud. You can evaluate Microsoft Defender for Cloud via a free trial. - -## Manage Microsoft Defender for Cloud settings - -To view and manage Microsoft Defender for Cloud settings: - -1. From the **Security** area of your server or managed instance, select **Defender for Cloud**. - - On this page, you'll see the status of Microsoft Defender for SQL: - - :::image type="content" source="media/azure-defender-for-sql/status-of-defender-for-sql.png" alt-text="Checking the status of Microsoft Defender for SQL inside Azure SQL databases."::: - -1. If Microsoft Defender for SQL is enabled, you'll see a **Configure** link as shown in the previous graphic. To edit the settings for Microsoft Defender for SQL, select **Configure**. - - :::image type="content" source="media/azure-defender-for-sql/security-server-settings.png" alt-text="Settings for Microsoft Defender for SQL."::: - -1. Make the necessary changes and select **Save**. 
- - -## Next steps - -- Learn more about [Vulnerability Assessment](sql-vulnerability-assessment.md) -- Learn more about [Advanced Threat Protection](threat-detection-configure.md) -- Learn more about [Microsoft Defender for Cloud](../../security-center/security-center-introduction.md) diff --git a/articles/azure-sql/database/block-crud-tsql.md b/articles/azure-sql/database/block-crud-tsql.md deleted file mode 100644 index 3ce6eb5ef2a29..0000000000000 --- a/articles/azure-sql/database/block-crud-tsql.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Block T-SQL commands to create or modify Azure SQL resources -titleSuffix: Block T-SQL commands to create or modify Azure SQL resources -description: This article details a feature allowing Azure administrators to block T-SQL commands to create or modify Azure SQL resources -ms.service: sql-database -ms.subservice: security -ms.custom: -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.topic: article -ms.date: 03/31/2021 -ms.reviewer: kendralittle, mathoma -ROBOTS: NOINDEX ---- - -# What is Block T-SQL CRUD feature? -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb-sqlmi.md)] - - -This feature allows Azure administrators to block the creation or modification of Azure SQL resources through T-SQL. This is enforced at the subscription level to block T-SQL commands from affecting SQL resources in any Azure SQL database or managed instance. - -## Overview - -To block creation or modification of resources through T-SQL and enforce resource management through an Azure Resource Manager template (ARM template) for a given subscription, the subscription level preview features in Azure portal can be used. This is particularly useful when you are using [Azure Policies](../../governance/policy/overview.md) to enforce organizational standards through ARM templates. Since T-SQL does not adhere to the Azure Policies, a block on T-SQL create or modify operations can be applied. 
The syntax blocked includes CRUD (create, update, delete) statements for databases in Azure SQL, specifically `CREATE DATABASE`, `ALTER DATABASE`, and `DROP DATABASE` statements. - -T-SQL CRUD operations can be blocked via Azure portal, [PowerShell](/powershell/module/az.resources/register-azproviderfeature), or [Azure CLI](/cli/azure/feature#az-feature-register). - -## Permissions - -In order to register or remove this feature, the Azure user must be a member of the Owner or Contributor role of the subscription. - -## Examples - -The following section describes how you can register or unregister a preview feature with Microsoft.Sql resource provider in Azure portal: - -### Register Block T-SQL CRUD - -1. Go to your subscription on Azure portal. -2. Select on **Preview Features** tab. -3. Select **Block T-SQL CRUD**. -4. After you select on **Block T-SQL CRUD**, a new window will open, select **Register**, to register this block with Microsoft.Sql resource provider. - -![Select "Block T-SQL CRUD" in the list of Preview Features](./media/block-tsql-crud/block-tsql-crud.png) - -![With "Block T-SQL CRUD" checked, select Register](./media/block-tsql-crud/block-tsql-crud-register.png) - - -### Re-register Microsoft.sql resource provider -After you register the block of T-SQL CRUD with Microsoft.Sql resource provider, you must re-register the Microsoft.Sql resource provider for the changes to take effect. To re-register the Microsoft.Sql resource provider: - -1. Go to your subscription on Azure portal. -2. Select on **Resource Providers** tab. -3. Search and select **Microsoft.Sql** resource provider. -4. Select **Re-register**. - -> [!NOTE] -> The re-registration step is mandatory for the T-SQL block to be applied to your subscription. 
- -![Re-register the Microsoft.Sql resource provider](./media/block-tsql-crud/block-tsql-crud-re-register.png) - -### Removing Block T-SQL CRUD -To remove the block on T-SQL create or modify operations from your subscription, first unregister the previously registered T-SQL block. Then, re-register the Microsoft.Sql resource provider as shown above for the removal of T-SQL block to take effect. - - -## Next steps - -- [An overview of Azure SQL Database security capabilities](security-overview.md) -- [Azure SQL Database security best practices](security-best-practice.md) \ No newline at end of file diff --git a/articles/azure-sql/database/business-continuity-high-availability-disaster-recover-hadr-overview.md b/articles/azure-sql/database/business-continuity-high-availability-disaster-recover-hadr-overview.md deleted file mode 100644 index 33a8c6cafc728..0000000000000 --- a/articles/azure-sql/database/business-continuity-high-availability-disaster-recover-hadr-overview.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: Cloud business continuity - database recovery -titleSuffix: Azure SQL Database & SQL Managed Instance -description: Learn how Azure SQL Database and SQL Managed Instance support cloud business continuity and database recovery and help keep mission-critical cloud applications running. 
-keywords: business continuity,cloud business continuity,database disaster recovery,database recovery -services: sql-database -ms.service: sql-db-mi -ms.subservice: high-availability -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: conceptual -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 10/18/2021 ---- -# Overview of business continuity with Azure SQL Database & Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -**Business continuity** in Azure SQL Database and SQL Managed Instance refers to the mechanisms, policies, and procedures that enable your business to continue operating in the face of disruption, particularly to its computing infrastructure. In the most of the cases, SQL Database and SQL Managed Instance will handle the disruptive events that might happen in the cloud environment and keep your applications and business processes running. However, there are some disruptive events that cannot be handled by SQL Database automatically such as: - -- User accidentally deleted or updated a row in a table. -- Malicious attacker succeeded to delete data or drop a database. -- Earthquake caused a power outage and temporary disabled datacenter. - -This overview describes the capabilities that SQL Database and SQL Managed Instance provide for business continuity and disaster recovery. Learn about options, recommendations, and tutorials for recovering from disruptive events that could cause data loss or cause your database and application to become unavailable. Learn what to do when a user or application error affects data integrity, an Azure region has an outage, or your application requires maintenance. - -## SQL Database features that you can use to provide business continuity - -From a database perspective, there are four major potential disruption scenarios: - -- Local hardware or software failures affecting the database node such as a disk-drive failure. 
-- Data corruption or deletion typically caused by an application bug or human error. Such failures are application-specific and typically cannot be detected by the database service. -- Datacenter outage, possibly caused by a natural disaster. This scenario requires some level of geo-redundancy with application failover to an alternate datacenter. -- Upgrade or maintenance errors, unanticipated issues that occur during planned infrastructure maintenance or upgrades may require rapid rollback to a prior database state. - -To mitigate the local hardware and software failures, SQL Database includes a [high availability architecture](high-availability-sla.md), which guarantees automatic recovery from these failures with up to 99.995% availability SLA. - -To protect your business from data loss, SQL Database and SQL Managed Instance automatically create full database backups weekly, differential database backups every 12 hours, and transaction log backups every 5 - 10 minutes. The backups are stored in RA-GRS storage for at least seven days for all service tiers. All service tiers except Basic support configurable backup retention period for point-in-time restore, up to 35 days. - -SQL Database and SQL Managed Instance also provide several business continuity features that you can use to mitigate various unplanned scenarios. - -- [Temporal tables](../temporal-tables.md) enable you to restore row versions from any point in time. -- [Built-in automated backups](automated-backups-overview.md) and [Point in Time Restore](recovery-using-backups.md#point-in-time-restore) enables you to restore complete database to some point in time within the configured retention period up to 35 days. -- You can [restore a deleted database](recovery-using-backups.md#deleted-database-restore) to the point at which it was deleted if the **server has not been deleted**. -- [Long-term backup retention](long-term-retention-overview.md) enables you to keep the backups up to 10 years. 
-- [Active geo-replication](active-geo-replication-overview.md) enables you to create readable replicas and manually failover to any replica in case of a datacenter outage or application upgrade. -- [Auto-failover group](auto-failover-group-overview.md#terminology-and-capabilities) allows the application to automatically recover in case of a datacenter outage. - -## Recover a database within the same Azure region - -You can use automatic database backups to restore a database to a point in time in the past. This way you can recover from data corruptions caused by human errors. The point-in-time restore allows you to create a new database in the same server that represents the state of data prior to the corrupting event. For most databases the restore operations takes less than 12 hours. It may take longer to recover a very large or very active database. For more information about recovery time, see [database recovery time](recovery-using-backups.md#recovery-time). - -If the maximum supported backup retention period for point-in-time restore (PITR) is not sufficient for your application, you can extend it by configuring a long-term retention (LTR) policy for the database(s). For more information, see [Long-term backup retention](long-term-retention-overview.md). 
- -## Compare geo-replication with failover groups - -[Auto-failover groups](auto-failover-group-overview.md#terminology-and-capabilities) simplify the deployment and usage of [geo-replication](active-geo-replication-overview.md) and add the additional capabilities as described in the following table: - -| | Geo-replication | Failover groups | -|:---------------------------------------------| :-------------- | :----------------| -| **Automatic failover** | No | Yes | -| **Fail over multiple databases simultaneously** | No | Yes | -| **User must update connection string after failover** | Yes | No | -| **SQL Managed Instance support** | No | Yes | -| **Can be in same region as primary** | Yes | No | -| **Multiple replicas** | Yes | No | -| **Supports read-scale** | Yes | Yes | - - -## Recover a database to the existing server - -Although rare, an Azure datacenter can have an outage. When an outage occurs, it causes a business disruption that might only last a few minutes or might last for hours. - -- One option is to wait for your database to come back online when the datacenter outage is over. This works for applications that can afford to have the database offline. For example, a development project or free trial you don't need to work on constantly. When a datacenter has an outage, you do not know how long the outage might last, so this option only works if you don't need your database for a while. -- Another option is to restore a database on any server in any Azure region using [geo-redundant database backups](recovery-using-backups.md#geo-restore) (geo-restore). Geo-restore uses a geo-redundant backup as its source and can be used to recover a database even if the database or datacenter is inaccessible due to an outage. 
-- Finally, you can quickly recover from an outage if you have configured either geo-secondary using [active geo-replication](active-geo-replication-overview.md) or an [auto-failover group](auto-failover-group-overview.md) for your database or databases. Depending on your choice of these technologies, you can use either manual or automatic failover. While failover itself takes only a few seconds, the service will take at least 1 hour to activate it. This is necessary to ensure that the failover is justified by the scale of the outage. Also, the failover may result in small data loss due to the nature of asynchronous replication. - -As you develop your business continuity plan, you need to understand the maximum acceptable time before the application fully recovers after the disruptive event. The time required for application to fully recover is known as Recovery time objective (RTO). You also need to understand the maximum period of recent data updates (time interval) the application can tolerate losing when recovering from an unplanned disruptive event. The potential data loss is known as Recovery point objective (RPO). - -Different recovery methods offer different levels of RPO and RTO. You can choose a specific recovery method, or use a combination of methods to achieve full application recovery. The following table compares RPO and RTO of each recovery option. Auto-failover groups simplify the deployment and usage of geo-replication, and add the additional capabilities as described in the following table: - -| **Recovery method** | **RTO** | **RPO** | -| --- | --- | --- | -| Geo-restore from geo-replicated backups | 12 h | 1 h | -| Auto-failover groups | 1 h | 5 s | -| Manual database failover | 30 s | 5 s | - -> [!NOTE] -> *Manual database failover* refers to failover of a single database to its geo-replicated secondary using the [unplanned mode](active-geo-replication-overview.md#active-geo-replication-terminology-and-capabilities). 
-See the table earlier in this article for details of the auto-failover RTO and RPO. - -Use auto-failover groups if your application meets any of these criteria: - -- Is mission critical. -- Has a service level agreement (SLA) that does not allow for 12 hours or more of downtime. -- Downtime may result in financial liability. -- Has a high rate of data change and 1 hour of data loss is not acceptable. -- The additional cost of active geo-replication is lower than the potential financial liability and associated loss of business. - -You may choose to use a combination of database backups and active geo-replication depending upon your application requirements. For a discussion of design considerations for stand-alone databases and for elastic pools using these business continuity features, see [Design an application for cloud disaster recovery](designing-cloud-solutions-for-disaster-recovery.md) and [Elastic pool disaster recovery strategies](disaster-recovery-strategies-for-applications-with-elastic-pool.md). - -The following sections provide an overview of the steps to recover using either database backups or active geo-replication. For detailed steps including planning requirements, post recovery steps, and information about how to simulate an outage to perform a disaster recovery drill, see [Recover a database in SQL Database from an outage](disaster-recovery-guidance.md). - -### Prepare for an outage - -Regardless of the business continuity feature you use, you must: - -- Identify and prepare the target server, including server-level IP firewall rules, logins, and `master` database level permissions. -- Determine how to redirect clients and client applications to the new server -- Document other dependencies, such as auditing settings and alerts - -If you do not prepare properly, bringing your applications online after a failover or a database recovery takes additional time and likely also require troubleshooting at a time of stress - a bad combination. 
- -### Fail over to a geo-replicated secondary database - -If you are using active geo-replication or auto-failover groups as your recovery mechanism, you can configure an automatic failover policy or use [manual unplanned failover](active-geo-replication-configure-portal.md#initiate-a-failover). Once initiated, the failover causes the secondary to become the new primary and ready to record new transactions and respond to queries - with minimal data loss for the data not yet replicated. For information on designing the failover process, see [Design an application for cloud disaster recovery](designing-cloud-solutions-for-disaster-recovery.md). - -> [!NOTE] -> When the datacenter comes back online the old primaries automatically reconnect to the new primary and become secondary databases. If you need to relocate the primary back to the original region, you can initiate a planned failover manually (failback). - -### Perform a geo-restore - -If you are using the automated backups with geo-redundant storage (enabled by default), you can recover the database using [geo-restore](disaster-recovery-guidance.md#recover-using-geo-restore). Recovery usually takes place within 12 hours - with data loss of up to one hour determined by when the last log backup was taken and replicated. Until the recovery completes, the database is unable to record any transactions or respond to any queries. Note, geo-restore only restores the database to the last available point in time. - -> [!NOTE] -> If the datacenter comes back online before you switch your application over to the recovered database, you can cancel the recovery. - -### Perform post failover / recovery tasks - -After recovery from either recovery mechanism, you must perform the following additional tasks before your users and applications are back up and running: - -- Redirect clients and client applications to the new server and restored database. 
-- Ensure appropriate server-level IP firewall rules are in place for users to connect or use [database-level firewalls](firewall-configure.md#use-the-azure-portal-to-manage-server-level-ip-firewall-rules) to enable appropriate rules. -- Ensure appropriate logins and master database level permissions are in place (or use [contained users](/sql/relational-databases/security/contained-database-users-making-your-database-portable)). -- Configure auditing, as appropriate. -- Configure alerts, as appropriate. - -> [!NOTE] -> If you are using a failover group and connect to the databases using the read-write listener, the redirection after failover will happen automatically and transparently to the application. - -## Upgrade an application with minimal downtime - -Sometimes an application must be taken offline because of planned maintenance such as an application upgrade. [Manage application upgrades](manage-application-rolling-upgrade.md) describes how to use active geo-replication to enable rolling upgrades of your cloud application to minimize downtime during upgrades and provide a recovery path if something goes wrong. - -## Next steps - -For a discussion of application design considerations for single databases and for elastic pools, see [Design an application for cloud disaster recovery](designing-cloud-solutions-for-disaster-recovery.md) and [Elastic pool disaster recovery strategies](disaster-recovery-strategies-for-applications-with-elastic-pool.md). diff --git a/articles/azure-sql/database/conditional-access-configure.md b/articles/azure-sql/database/conditional-access-configure.md deleted file mode 100644 index d1dd1f52c85b3..0000000000000 --- a/articles/azure-sql/database/conditional-access-configure.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: Conditional Access -description: Learn how to configure Conditional Access for Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics. 
-titleSuffix: Azure SQL Database & SQL Managed Instance & Azure Synapse Analytics -services: sql-database -ms.service: sql-db-mi -ms.subservice: security -ms.topic: how-to -author: GithubMirek -ms.author: mireks -ms.reviewer: kendralittle, vanto, mathoma -ms.custom: sqldbrb=1 -ms.date: 04/28/2020 -tag: azure-synpase ---- - -# Conditional Access with Azure SQL Database and Azure Synapse Analytics - -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -[Azure SQL Database](sql-database-paas-overview.md), [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md), and [Azure Synapse Analytics](../../synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-what-is.md) support Microsoft Conditional Access. - -The following steps show how to configure Azure SQL Database, SQL Managed Instance, or Azure Synapse to enforce a Conditional Access policy. - -## Prerequisites - -- You must configure Azure SQL Database, Azure SQL Managed Instance, or dedicated SQL pool in Azure Synapse to support Azure Active Directory (Azure AD) authentication. For specific steps, see [Configure and manage Azure Active Directory authentication with SQL Database or Azure Synapse](authentication-aad-configure.md). -- When Multi-Factor Authentication is enabled, you must connect with a supported tool, such as the latest SQL Server Management Studio (SSMS). For more information, see [Configure Azure SQL Database multi-factor authentication for SQL Server Management Studio](authentication-mfa-ssms-configure.md). - -## Configure conditional access - -> [!NOTE] -> The below example uses Azure SQL Database, but you should select the appropriate product that you want to configure conditional access. - -1. Sign in to the Azure portal, select **Azure Active Directory**, and then select **Conditional Access**. 
For more information, see [Azure Active Directory Conditional Access technical reference](../../active-directory/conditional-access/concept-conditional-access-conditions.md). - ![Conditional Access blade](./media/conditional-access-configure/conditional-access-blade.png) - -2. In the **Conditional Access-Policies** blade, click **New policy**, provide a name, and then click **Configure rules**. -3. Under **Assignments**, select **Users and groups**, check **Select users and groups**, and then select the user or group for Conditional Access. Click **Select**, and then click **Done** to accept your selection. - ![select users and groups](./media/conditional-access-configure/select-users-and-groups.png) - -4. Select **Cloud apps**, click **Select apps**. You see all apps available for Conditional Access. Select **Azure SQL Database**, at the bottom click **Select**, and then click **Done**. - ![select SQL Database](./media/conditional-access-configure/select-sql-database.png) - If you can't find **Azure SQL Database** listed in the following third screenshot, complete the following steps: - - Connect to your database in Azure SQL Database by using SSMS with an Azure AD admin account. - - Execute `CREATE USER [user@yourtenant.com] FROM EXTERNAL PROVIDER`. - - Sign into Azure AD and verify that Azure SQL Database, SQL Managed Instance, or Azure Synapse are listed in the applications in your Azure AD instance. - -5. Select **Access controls**, select **Grant**, and then check the policy you want to apply. For this example, we select **Require multi-factor authentication**. - ![select grant access](./media/conditional-access-configure/grant-access.png) - -## Summary - -The selected application (Azure SQL Database) using Azure AD Premium, now enforces the selected Conditional Access policy, **Required multi-factor authentication.** - -For questions about Azure SQL Database and Azure Synapse regarding multi-factor authentication, contact . 
- -## Next steps - -For a tutorial, see [Secure your database in SQL Database](secure-database-tutorial.md). \ No newline at end of file diff --git a/articles/azure-sql/database/configure-max-degree-of-parallelism.md b/articles/azure-sql/database/configure-max-degree-of-parallelism.md deleted file mode 100644 index 400f4cdbb5fb5..0000000000000 --- a/articles/azure-sql/database/configure-max-degree-of-parallelism.md +++ /dev/null @@ -1,210 +0,0 @@ ---- -title: "Configure the max degree of parallelism (MAXDOP)" -titleSuffix: Azure SQL Database -description: Learn about the max degree of parallelism (MAXDOP). -ms.date: "04/06/2022" -services: sql-database -dev_langs: - - "TSQL" -ms.service: sql-database -ms.subservice: performance -ms.custom: -ms.devlang: tsql -ms.topic: conceptual -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, mathoma ---- -# Configure the max degree of parallelism (MAXDOP) in Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - - This article describes the **max degree of parallelism (MAXDOP)** configuration setting in Azure SQL Database. - -> [!NOTE] -> **This content is focused on Azure SQL Database.** Azure SQL Database is based on the latest stable version of the Microsoft SQL Server database engine, so much of the content is similar though troubleshooting and configuration options differ. For more on MAXDOP in SQL Server, see [Configure the max degree of parallelism Server Configuration Option](/sql/database-engine/configure-windows/configure-the-max-degree-of-parallelism-server-configuration-option). - -## Overview - MAXDOP controls intra-query parallelism in the database engine. Higher MAXDOP values generally result in more parallel threads per query, and faster query execution. - - In Azure SQL Database, the default MAXDOP setting for each new single database and elastic pool database is 8. 
This default prevents unnecessary resource utilization, while still allowing the database engine to execute queries faster using parallel threads. It is not typically necessary to further configure MAXDOP in Azure SQL Database workloads, though it may provide benefits as an advanced performance tuning exercise. - -> [!Note] -> In September 2020, based on years of telemetry in the Azure SQL Database service MAXDOP 8 was made the [default for new databases](https://techcommunity.microsoft.com/t5/azure-sql/changing-default-maxdop-in-azure-sql-database-and-azure-sql/ba-p/1538528), as the optimal value for the widest variety of customer workloads. This default helped prevent performance problems due to excessive parallelism. Prior to that, the default setting for new databases was MAXDOP 0. MAXDOP was not automatically changed for existing databases created prior to September 2020. - - In general, if the database engine chooses to execute a query using parallelism, execution time is faster. However, excess parallelism can consume additional processor resources without improving query performance. At scale, excess parallelism can negatively affect query performance for all queries executing on the same database engine instance. Traditionally, setting an upper bound for parallelism has been a common performance tuning exercise in SQL Server workloads. - - The following table describes database engine behavior when executing queries with different MAXDOP values: - -| MAXDOP | Behavior | -|--|--| -| = 1 | The database engine uses a single serial thread to execute queries. Parallel threads are not used. | -| > 1 | The database engine sets the number of additional [schedulers](/sql/relational-databases/thread-and-task-architecture-guide#sql-server-task-scheduling) to be used by parallel threads to the MAXDOP value, or the total number of logical processors, whichever is smaller. 
| -| = 0 | The database engine sets the number of additional [schedulers](/sql/relational-databases/thread-and-task-architecture-guide#sql-server-task-scheduling) to be used by parallel threads to the total number of logical processors or 64, whichever is smaller. | - -> [!Note] -> Each query executes with at least one scheduler, and one worker thread on that scheduler. -> -> A query executing with parallelism uses additional schedulers, and additional parallel threads. Because multiple parallel threads may execute on the same scheduler, the total number of threads used to execute a query may be higher than specified MAXDOP value or the total number of logical processors. For more information, see [Scheduling parallel tasks](/sql/relational-databases/thread-and-task-architecture-guide#scheduling-parallel-tasks). - -## Considerations - -- In Azure SQL Database, you can change the default MAXDOP value: - - At the query level, using the **MAXDOP** [query hint](/sql/t-sql/queries/hints-transact-sql-query). - - At the database level, using the **MAXDOP** [database scoped configuration](/sql/t-sql/statements/alter-database-scoped-configuration-transact-sql). - -- Long-standing SQL Server MAXDOP considerations and [recommendations](/sql/database-engine/configure-windows/configure-the-max-degree-of-parallelism-server-configuration-option#Guidelines) are applicable to Azure SQL Database. - -- Index operations that create or rebuild an index, or that drop a clustered index, can be resource intensive. You can override the database MAXDOP value for index operations by specifying the MAXDOP index option in the `CREATE INDEX` or `ALTER INDEX` statement. The MAXDOP value is applied to the statement at execution time and is not stored in the index metadata. For more information, see [Configure Parallel Index Operations](/sql/relational-databases/indexes/configure-parallel-index-operations). 
- -- In addition to queries and index operations, the database scoped configuration option for MAXDOP also controls parallelism of other statements that may use parallel execution, such as DBCC CHECKTABLE, DBCC CHECKDB, and DBCC CHECKFILEGROUP. - -## Recommendations - - Changing MAXDOP for the database can have major impact on query performance and resource utilization, both positive and negative. However, there is no single MAXDOP value that is optimal for all workloads. The [recommendations](/sql/database-engine/configure-windows/configure-the-max-degree-of-parallelism-server-configuration-option#Guidelines) for setting MAXDOP are nuanced, and depend on many factors. - - Some peak concurrent workloads may operate better with a different MAXDOP than others. A properly configured MAXDOP should reduce the risk of performance and availability incidents, and in some cases may reduce costs by being able to avoid unnecessary resource utilization, and thus scale down to a lower service objective. - -### Excessive parallelism - - A higher MAXDOP often reduces duration for CPU-intensive queries. However, excessive parallelism can worsen other concurrent workload performance by starving other queries of CPU and worker thread resources. In extreme cases, excessive parallelism can consume all database or elastic pool resources, causing query timeouts, errors, and application outages. - -> [!Tip] -> We recommend that customers avoid setting MAXDOP to 0 even if it does not appear to cause problems currently. - - Excessive parallelism becomes most problematic when there are more concurrent requests than can be supported by the CPU and worker thread resources provided by the service objective. Avoid MAXDOP 0 to reduce the risk of potential future problems due to excessive parallelism if a database is scaled up, or if future hardware configurations in Azure SQL Database provide more cores for the same database service objective. 
- -### Modifying MAXDOP - - If you determine that a MAXDOP setting different from the default is optimal for your Azure SQL Database workload, you can use the `ALTER DATABASE SCOPED CONFIGURATION` T-SQL statement. For examples, see the [Examples using Transact-SQL](#examples) section below. To change MAXDOP to a non-default value for each new database you create, add this step to your database deployment process. - - If non-default MAXDOP benefits only a small subset of queries in the workload, you can override MAXDOP at the query level by adding the OPTION (MAXDOP) hint. For examples, see the [Examples using Transact-SQL](#examples) section below. - - Thoroughly test your MAXDOP configuration changes with load testing involving realistic concurrent query loads. - - MAXDOP for the primary and secondary replicas can be configured independently if different MAXDOP settings are optimal for your read-write and read-only workloads. This applies to Azure SQL Database [read scale-out](read-scale-out.md), [geo-replication](active-geo-replication-overview.md), and [Hyperscale](service-tier-hyperscale.md) secondary replicas. By default, all secondary replicas inherit the MAXDOP configuration of the primary replica. - -## Security - -### Permissions - The `ALTER DATABASE SCOPED CONFIGURATION` statement must be executed as the server admin, as a member of the database role `db_owner`, or a user that has been granted the `ALTER ANY DATABASE SCOPED CONFIGURATION` permission. - -## Examples - - These examples use the latest **AdventureWorksLT** sample database when the `SAMPLE` option is chosen for a new single database of Azure SQL Database. - -### PowerShell - -#### MAXDOP database scoped configuration - - This example shows how to use [ALTER DATABASE SCOPED CONFIGURATION](/sql/t-sql/statements/alter-database-scoped-configuration-transact-sql) statement to set the `MAXDOP` configuration to `8`. The setting takes effect immediately for new queries. 
The PowerShell cmdlet [Invoke-SqlCmd](/powershell/module/sqlserver/invoke-sqlcmd) executes the T-SQL queries to set and then return the MAXDOP database scoped configuration. - -```powershell -$dbName = "sample" -$serverName = -$serveradminLogin = -$serveradminPassword = -$desiredMAXDOP = 8 - -$params = @{ - 'database' = $dbName - 'serverInstance' = $serverName - 'username' = $serveradminLogin - 'password' = $serveradminPassword - 'outputSqlErrors' = $true - 'query' = 'ALTER DATABASE SCOPED CONFIGURATION SET MAXDOP = ' + $desiredMAXDOP + '; - SELECT [value] FROM sys.database_scoped_configurations WHERE [name] = ''MAXDOP'';' - } - Invoke-SqlCmd @params -``` - -This example is for use with Azure SQL Databases with [read scale-out replicas enabled](read-scale-out.md), [geo-replication](active-geo-replication-overview.md), and [Azure SQL Database hyperscale secondary replicas](service-tier-hyperscale.md). As an example, the primary replica is set to a different default MAXDOP than the secondary replica, anticipating that there may be differences between a read-write and a read-only workload. 
- -```powershell -$dbName = "sample" -$serverName = -$serveradminLogin = -$serveradminPassword = -$desiredMAXDOP_primary = 8 -$desiredMAXDOP_secondary_readonly = 1 - -$params = @{ - 'database' = $dbName - 'serverInstance' = $serverName - 'username' = $serveradminLogin - 'password' = $serveradminPassword - 'outputSqlErrors' = $true - 'query' = 'ALTER DATABASE SCOPED CONFIGURATION SET MAXDOP = ' + $desiredMAXDOP_primary + '; - ALTER DATABASE SCOPED CONFIGURATION FOR SECONDARY SET MAXDOP = ' + $desiredMAXDOP_secondary_readonly + '; - SELECT [value], value_for_secondary FROM sys.database_scoped_configurations WHERE [name] = ''MAXDOP'';' - } - Invoke-SqlCmd @params -``` - -### Transact-SQL - - You can use the [Azure portal query editor](connect-query-portal.md), [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms), or [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio) to execute T-SQL queries against your Azure SQL Database. - -1. Open a new query window. - -2. Connect to the database where you want to change MAXDOP. You cannot change database scoped configurations in the master database. - -3. Copy and paste the following example into the query window and select **Execute**. - -#### MAXDOP database scoped configuration - - This example shows how to determine the current database MAXDOP database scoped configuration using the [sys.database_scoped_configurations](/sql/relational-databases/system-catalog-views/sys-database-scoped-configurations-transact-sql) system catalog view. - -```sql -SELECT [value] FROM sys.database_scoped_configurations WHERE [name] = 'MAXDOP'; -``` - - This example shows how to use [ALTER DATABASE SCOPED CONFIGURATION](/sql/t-sql/statements/alter-database-scoped-configuration-transact-sql) statement to set the `MAXDOP` configuration to `8`. The setting takes effect immediately. 
- -```sql -ALTER DATABASE SCOPED CONFIGURATION SET MAXDOP = 8; -``` - -This example is for use with Azure SQL Databases with [read scale-out replicas enabled](read-scale-out.md), [geo-replication](active-geo-replication-overview.md), and [Hyperscale](service-tier-hyperscale.md) secondary replicas. As an example, the primary replica is set to a different MAXDOP than the secondary replica, anticipating that there may be differences between the read-write and read-only workloads. All statements are executed on the primary replica. The `value_for_secondary` column of the `sys.database_scoped_configurations` contains settings for the secondary replica. - -```sql -ALTER DATABASE SCOPED CONFIGURATION SET MAXDOP = 8; -ALTER DATABASE SCOPED CONFIGURATION FOR SECONDARY SET MAXDOP = 1; -SELECT [value], value_for_secondary FROM sys.database_scoped_configurations WHERE [name] = 'MAXDOP'; -``` - -#### MAXDOP query hint - - This example shows how to execute a query using the query hint to force the `max degree of parallelism` to `2`. - -```sql -SELECT ProductID, OrderQty, SUM(LineTotal) AS Total -FROM SalesLT.SalesOrderDetail -WHERE UnitPrice < 5 -GROUP BY ProductID, OrderQty -ORDER BY ProductID, OrderQty -OPTION (MAXDOP 2); -GO -``` -#### MAXDOP index option - - This example shows how to rebuild an index using the index option to force the `max degree of parallelism` to `12`. 
- -```sql -ALTER INDEX ALL ON SalesLT.SalesOrderDetail -REBUILD WITH - ( MAXDOP = 12 - , SORT_IN_TEMPDB = ON - , ONLINE = ON); -``` - -## See also -* [ALTER DATABASE SCOPED CONFIGURATION (Transact-SQL)](/sql/t-sql/statements/alter-database-scoped-configuration-transact-sql) -* [sys.database_scoped_configurations (Transact-SQL)](/sql/relational-databases/system-catalog-views/sys-database-scoped-configurations-transact-sql) -* [Configure Parallel Index Operations](/sql/relational-databases/indexes/configure-parallel-index-operations) -* [Query Hints (Transact-SQL)](/sql/t-sql/queries/hints-transact-sql-query) -* [Set Index Options](/sql/relational-databases/indexes/set-index-options) -* [Understand and resolve Azure SQL Database blocking problems](understand-resolve-blocking.md) - -## Next steps - -* [Monitor and Tune for Performance](/sql/relational-databases/performance/monitor-and-tune-for-performance) diff --git a/articles/azure-sql/database/connect-excel.md b/articles/azure-sql/database/connect-excel.md deleted file mode 100644 index a8c224b102a52..0000000000000 --- a/articles/azure-sql/database/connect-excel.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: Connect with Excel -description: Learn how to connect Microsoft Excel to a database in Azure SQL Database or Azure SQL Managed Instance. Import data into Excel for reporting and data exploration. -titleSuffix: Azure SQL Database & SQL Managed Instance -services: sql-database -ms.service: sql-database -ms.subservice: connect -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: VanMSFT -ms.author: vanto -ms.reviewer: kendralittle, mathoma -ms.date: 05/29/2020 ---- - -# Connect Excel to a database in Azure SQL Database or Azure SQL Managed Instance, and create a report -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -You can connect Excel to a database and then import data and create tables and charts based on values in the database. 
In this tutorial you will set up the connection between Excel and a database table, save the file that stores data and the connection information for Excel, and then create a pivot chart from the database values. - -You'll need to create a database before you get started. If you don't have one, see [Create a database in Azure SQL Database](single-database-create-quickstart.md) and [Create server-level IP firewall](firewall-create-server-level-portal-quickstart.md) to get a database with sample data up and running in a few minutes. - -In this article, you'll import sample data into Excel from that article, but you can follow similar steps with your own data. - -You'll also need a copy of Excel. This article uses [Microsoft Excel 2016](https://products.office.com/). - -## Connect Excel and load data - -1. To connect Excel to a database in SQL Database, open Excel and then create a new workbook or open an existing Excel workbook. -2. In the menu bar at the top of the page, select the **Data** tab, select **Get Data**, select From Azure, and then select **From Azure SQL Database**. - - ![Select data source: Connect Excel to SQL Database.](./media/connect-excel/excel_data_source.png) - -3. In the **SQL Server database** dialog box, type the **Server name** you want to connect to in the form <*servername*>**.database.windows.net**. For example, **msftestserver.database.windows.net**. Optionally, enter in the name of your database. Select **OK** to open the credentials window. - - ![Connect to Database Server Dialog box](./media/connect-excel/server-name.png) - -4. In the **SQL Server database** dialog box, select **Database** on the left side, and then enter in your **User Name** and **Password** for the server you want to connect to. Select **Connect** to open the **Navigator**. 
- - ![Type the server name and login credentials](./media/connect-excel/connect-to-server.png) - - > [!TIP] - > Depending on your network environment, you may not be able to connect or you may lose the connection if the server doesn't allow traffic from your client IP address. Go to the [Azure portal](https://portal.azure.com/), click SQL servers, click your server, click firewall under settings and add your client IP address. See [How to configure firewall settings](firewall-configure.md) for details. - -5. In the **Navigator**, select the database you want to work with from the list, select the tables or views you want to work with (we chose **vGetAllCategories**), and then select **Load** to move the data from your database to your Excel spreadsheet. - - ![Select a database and table.](./media/connect-excel/select-database-and-table.png) - -## Import the data into Excel and create a pivot chart - -Now that you've established the connection, you have several different options with how to load the data. For example, the following steps create a pivot chart based on the data found in your database in SQL Database. - -1. Follow the steps in the previous section, but this time, instead of selecting **Load**, select **Load to** from the **Load** drop-down. -2. Next, select how you want to view this data in your workbook. We chose **PivotChart**. You can also choose to create a **New worksheet** or to **Add this data to a Data Model**. For more information on Data Models, see [Create a data model in Excel](https://support.office.com/article/Create-a-Data-Model-in-Excel-87E7A54C-87DC-488E-9410-5C75DBCB0F7B). - - ![Choosing the format for data in Excel](./media/connect-excel/import-data.png) - - The worksheet now has an empty pivot table and chart. -3. Under **PivotTable Fields**, select all the check-boxes for the fields you want to view. 
- - ![Configure database report.](./media/connect-excel/power-pivot-results.png) - -> [!TIP] -> If you want to connect other Excel workbooks and worksheets to the database, select the **Data** tab, and select **Recent Sources** to launch the **Recent Sources** dialog box. From there, choose the connection you created from the list, and then click **Open**. -> ![Recent Sources dialog box](./media/connect-excel/recent-connections.png) - -## Create a permanent connection using .odc file - -To save the connection details permanently, you can create an .odc file and make this connection a selectable option within the **Existing Connections** dialog box. - -1. In the menu bar at the top of the page, select the **Data** tab, and then select **Existing Connections** to launch the **Existing Connections** dialog box. - 1. Select **Browse for more** to open the **Select Data Source** dialog box. - 2. Select the **+NewSqlServerConnection.odc** file and then select **Open** to open the **Data Connection Wizard**. - - ![New Connection dialog box](./media/connect-excel/new-connection.png) - -2. In the **Data Connection Wizard**, type in your server name and your SQL Database credentials. Select **Next**. - 1. Select the database that contains your data from the drop-down. - 2. Select the table or view you're interested in. We chose vGetAllCategories. - 3. Select **Next**. - - ![Data Connection Wizard](./media/connect-excel/data-connection-wizard.png) - -3. Select the location of your file, the **File Name**, and the **Friendly Name** in the next screen of the Data Connection Wizard. You can also choose to save the password in the file, though this can potentially expose your data to unwanted access. Select **Finish** when ready. - - ![Save Data Connection](./media/connect-excel/save-data-connection.png) - -4. Select how you want to import your data. We chose to do a PivotTable. You can also modify the properties of the connection by select **Properties**. 
Select **OK** when ready. If you did not choose to save the password with the file, then you will be prompted to enter your credentials. - - ![Import Data](./media/connect-excel/import-data2.png) - -5. Verify that your new connection has been saved by expanding the **Data** tab, and selecting **Existing Connections**. - - ![Existing Connection](./media/connect-excel/existing-connection.png) - -## Next steps - -* Learn how to [Connect and query with SQL Server Management Studio](connect-query-ssms.md) for advanced querying and analysis. -* Learn about the benefits of [elastic pools](elastic-pool-overview.md). -* Learn how to [create a web application that connects to Azure SQL Database on the back-end](../../app-service/app-service-web-tutorial-dotnet-sqldatabase.md). diff --git a/articles/azure-sql/database/connect-github-actions-sql-db.md b/articles/azure-sql/database/connect-github-actions-sql-db.md deleted file mode 100644 index 42b1602c400c0..0000000000000 --- a/articles/azure-sql/database/connect-github-actions-sql-db.md +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: 'Quickstart: Connect to Azure SQL Database with GitHub Actions' -description: Use Azure SQL from a GitHub Actions workflow -author: juliakm -services: sql-database -ms.service: sql-database -ms.subservice: connect -ms.topic: quickstart -ms.author: jukullam -ms.date: 05/05/2021 -ms.custom: github-actions-azure, mode-other -ms.reviewer: kendralittle, mathoma ---- - -# Use GitHub Actions to connect to Azure SQL Database - -Get started with [GitHub Actions](https://docs.github.com/en/actions) by using a workflow to deploy database updates to [Azure SQL Database](../azure-sql-iaas-vs-paas-what-is-overview.md). - -## Prerequisites - -You will need: -- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). -- A GitHub repository with a dacpac package (`Database.dacpac`). 
If you don't have a GitHub account, [sign up for free](https://github.com/join). -- An Azure SQL Database. - - [Quickstart: Create an Azure SQL Database single database](single-database-create-quickstart.md) - - [How to create a dacpac package from your existing SQL Server Database](/sql/relational-databases/data-tier-applications/export-a-data-tier-application) - -## Workflow file overview - -A GitHub Actions workflow is defined by a YAML (.yml) file in the `/.github/workflows/` path in your repository. This definition contains the various steps and parameters that make up the workflow. - -The file has two sections: - -|Section |Tasks | -|---------|---------| -|**Authentication** | 1. Define a service principal.
    2. Create a GitHub secret. | -|**Deploy** | 1. Deploy the database. | - -## Generate deployment credentials - -You can create a [service principal](../../active-directory/develop/app-objects-and-service-principals.md) with the [az ad sp create-for-rbac](/cli/azure/ad/sp#az-ad-sp-create-for-rbac) command in the [Azure CLI](/cli/azure/). Run this command with [Azure Cloud Shell](https://shell.azure.com/) in the Azure portal or by selecting the **Try it** button. - -Replace the placeholders `server-name` with the name of your SQL server hosted on Azure. Replace the `subscription-id` and `resource-group` with the subscription ID and resource group connected to your SQL server. - -```azurecli-interactive - az ad sp create-for-rbac --name {server-name} --role contributor - --scopes /subscriptions/{subscription-id}/resourceGroups/{resource-group} - --sdk-auth -``` - -The output is a JSON object with the role assignment credentials that provide access to your database similar to this example. Copy your output JSON object for later. - -```output - { - "clientId": "", - "clientSecret": "", - "subscriptionId": "", - "tenantId": "", - (...) - } -``` - -> [!IMPORTANT] -> It is always a good practice to grant minimum access. The scope in the previous example is limited to the specific server and not the entire resource group. - -## Copy the SQL connection string - -In the Azure portal, go to your Azure SQL Database and open **Settings** > **Connection strings**. Copy the **ADO.NET** connection string. Replace the placeholder values for `your_database` and `your_password`. The connection string will look similar to this output. - -```output - Server=tcp:my-sql-server.database.windows.net,1433;Initial Catalog={your-database};Persist Security Info=False;User ID={admin-name};Password={your-password};MultipleActiveResultSets=False;Encrypt=True;TrustServerCertificate=False;Connection Timeout=30; -``` - -You'll use the connection string as a GitHub secret. 
- -## Configure the GitHub secrets - -1. In [GitHub](https://github.com/), browse your repository. - -1. Select **Settings > Secrets > New secret**. - -1. Paste the entire JSON output from the Azure CLI command into the secret's value field. Give the secret the name `AZURE_CREDENTIALS`. - - When you configure the workflow file later, you use the secret for the input `creds` of the Azure Login action. For example: - - ```yaml - - uses: azure/login@v1 - with: - creds: ${{ secrets.AZURE_CREDENTIALS }} - ``` - -1. Select **New secret** again. - -1. Paste the connection string value into the secret's value field. Give the secret the name `AZURE_SQL_CONNECTION_STRING`. - - -## Add your workflow - -1. Go to **Actions** for your GitHub repository. - -2. Select **Set up your workflow yourself**. - -2. Delete everything after the `on:` section of your workflow file. For example, your remaining workflow may look like this. - - ```yaml - name: CI - - on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - ``` - -1. Rename your workflow `SQL for GitHub Actions` and add the checkout and login actions. These actions will checkout your site code and authenticate with Azure using the `AZURE_CREDENTIALS` GitHub secret you created earlier. - - ```yaml - name: SQL for GitHub Actions - - on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - - jobs: - build: - runs-on: windows-latest - steps: - - uses: actions/checkout@v1 - - uses: azure/login@v1 - with: - creds: ${{ secrets.AZURE_CREDENTIALS }} - ``` - -1. Use the Azure SQL Deploy action to connect to your SQL instance. Replace `SQL_SERVER_NAME` with the name of your server. You should have a dacpac package (`Database.dacpac`) at the root level of your repository. - - ```yaml - - uses: azure/sql-action@v1 - with: - server-name: SQL_SERVER_NAME - connection-string: ${{ secrets.AZURE_SQL_CONNECTION_STRING }} - dacpac-package: './Database.dacpac' - ``` - -1. 
Complete your workflow by adding an action to logout of Azure. Here is the completed workflow. The file will appear in the `.github/workflows` folder of your repository. - - ```yaml - name: SQL for GitHub Actions - - on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - - - jobs: - build: - runs-on: windows-latest - steps: - - uses: actions/checkout@v1 - - uses: azure/login@v1 - with: - creds: ${{ secrets.AZURE_CREDENTIALS }} - - - uses: azure/sql-action@v1 - with: - server-name: SQL_SERVER_NAME - connection-string: ${{ secrets.AZURE_SQL_CONNECTION_STRING }} - dacpac-package: './Database.dacpac' - - # Azure logout - - name: logout - run: | - az logout - ``` - -## Review your deployment - -1. Go to **Actions** for your GitHub repository. - -1. Open the first result to see detailed logs of your workflow's run. - - :::image type="content" source="media/quickstart-sql-github-actions/github-actions-run-sql.png" alt-text="Log of GitHub actions run"::: - -## Clean up resources - -When your Azure SQL database and repository are no longer needed, clean up the resources you deployed by deleting the resource group and your GitHub repository. - -## Next steps - -> [!div class="nextstepaction"] -> [Learn about Azure and GitHub integration](/azure/developer/github/) diff --git a/articles/azure-sql/database/connect-query-content-reference-guide.md b/articles/azure-sql/database/connect-query-content-reference-guide.md deleted file mode 100644 index ae76b537ed33f..0000000000000 --- a/articles/azure-sql/database/connect-query-content-reference-guide.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -title: Connect and query -description: Links to Azure SQL Database quickstarts showing how to connect to and query Azure SQL Database, and Azure SQL Managed Instance. 
-titleSuffix: Azure SQL Database & SQL Managed Instance -services: sql-database -ms.service: sql-database -ms.subservice: connect -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: guide -author: dzsquared -ms.author: drskwier -ms.reviewer: kendralittle, mathoma -ms.date: 03/17/2021 ---- -# Azure SQL Database and Azure SQL Managed Instance connect and query articles -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -The following document includes links to Azure examples showing how to connect and query Azure SQL Database and Azure SQL Managed Instance. For some related recommendations for Transport Level Security, see [TLS considerations for database connectivity](#tls-considerations-for-database-connectivity). - -## Quickstarts - -| Quickstart | Description | -|---|---| -|[SQL Server Management Studio](connect-query-ssms.md)|This quickstart demonstrates how to use SSMS to connect to a database, and then use Transact-SQL statements to query, insert, update, and delete data in the database.| -|[Azure Data Studio](/sql/azure-data-studio/quickstart-sql-database?toc=%2fazure%2fsql-database%2ftoc.json)|This quickstart demonstrates how to use Azure Data Studio to connect to a database, and then use Transact-SQL (T-SQL) statements to create the TutorialDB used in Azure Data Studio tutorials.| -|[Azure portal](connect-query-portal.md)|This quickstart demonstrates how to use the Query editor to connect to a database (Azure SQL Database only), and then use Transact-SQL statements to query, insert, update, and delete data in the database.| -|[Visual Studio Code](connect-query-vscode.md)|This quickstart demonstrates how to use Visual Studio Code to connect to a database, and then use Transact-SQL statements to query, insert, update, and delete data in the database.| -|[.NET with Visual Studio](connect-query-dotnet-visual-studio.md)|This quickstart demonstrates how to use the .NET framework to create a C# program with Visual Studio to connect to a database 
and use Transact-SQL statements to query data.| -|[.NET core](connect-query-dotnet-core.md)|This quickstart demonstrates how to use .NET Core on Windows/Linux/macOS to create a C# program to connect to a database and use Transact-SQL statements to query data.| -|[Go](connect-query-go.md)|This quickstart demonstrates how to use Go to connect to a database. Transact-SQL statements to query and modify data are also demonstrated.| -|[Java](connect-query-java.md)|This quickstart demonstrates how to use Java to connect to a database and then use Transact-SQL statements to query data.| -|[Node.js](connect-query-nodejs.md)|This quickstart demonstrates how to use Node.js to create a program to connect to a database and use Transact-SQL statements to query data.| -|[PHP](connect-query-php.md)|This quickstart demonstrates how to use PHP to create a program to connect to a database and use Transact-SQL statements to query data.| -|[Python](connect-query-python.md)|This quickstart demonstrates how to use Python to connect to a database and use Transact-SQL statements to query data. | -|[Ruby](connect-query-ruby.md)|This quickstart demonstrates how to use Ruby to create a program to connect to a database and use Transact-SQL statements to query data.| - -## Get server connection information - -Get the connection information you need to connect to the database in Azure SQL Database. You'll need the fully qualified server name or host name, database name, and login information for the upcoming procedures. - -1. Sign in to the [Azure portal](https://portal.azure.com/). - -2. Navigate to the **SQL Databases** or **SQL Managed Instances** page. - -3. On the **Overview** page, review the fully qualified server name next to **Server name** for the database in Azure SQL Database or the fully qualified server name (or IP address) next to **Host** for an Azure SQL Managed Instance or SQL Server on Azure VM. To copy the server name or host name, hover over it and select the **Copy** icon. 
- -> [!NOTE] -> For connection information for SQL Server on Azure VM, see [Connect to a SQL Server instance](../virtual-machines/windows/sql-vm-create-portal-quickstart.md#connect-to-sql-server). - -## Get ADO.NET connection information (optional - SQL Database only) - -1. Navigate to the database blade in the Azure portal and, under **Settings**, select **Connection strings**. - -2. Review the complete **ADO.NET** connection string. - - ![ADO.NET connection string](./media/connect-query-dotnet-core/adonet-connection-string2.png) - -3. Copy the **ADO.NET** connection string if you intend to use it. - -## TLS considerations for database connectivity - -Transport Layer Security (TLS) is used by all drivers that Microsoft supplies or supports for connecting to databases in Azure SQL Database or Azure SQL Managed Instance. No special configuration is necessary. For all connections to a SQL Server instance, a database in Azure SQL Database, or an instance of Azure SQL Managed Instance, we recommend that all applications set -the following configurations, or their equivalents: - -- **Encrypt = On** -- **TrustServerCertificate = Off** - -Some systems use different yet equivalent keywords for those configuration keywords. These configurations ensure that the client driver -verifies the identity of the TLS certificate received from the server. - -We also recommend that you disable TLS 1.1 and 1.0 on the client if you need to comply with Payment Card Industry - Data Security -Standard (PCI-DSS). - -Non-Microsoft drivers might not use TLS by default. This can be a factor when connecting to Azure SQL Database or Azure SQL Managed Instance. Applications with embedded drivers might not allow you to control these connection settings. We recommend that you examine the security of such drivers and applications before using them on systems that interact with sensitive data. 
- -## Drivers - -The following minimal versions of the tools and drivers are recommended if you want to connect to Azure SQL database: - -| Driver/tool | Version | -| --- | --- | -|.NET Framework | 4.6.1 (or .NET Core) | -|ODBC driver| v17 | -|PHP driver| 5.2.0 | -|JDBC driver| 6.4.0 | -|Node.js driver| 2.1.1 | -|OLEDB driver| 18.0.2.0 | -|[SMO](/sql/relational-databases/server-management-objects-smo/sql-server-management-objects-smo-programming-guide) | [150](https://www.nuget.org/packages/Microsoft.SqlServer.SqlManagementObjects) or higher | - -## Libraries - -You can use various libraries and frameworks to connect to Azure SQL Database or Azure SQL Managed Instance. Check out our [Get started tutorials](https://aka.ms/sqldev) to quickly get started with programming languages such as C#, Java, Node.js, PHP, and Python. Then build an app by using SQL Server on Linux or Windows or Docker on macOS. - -The following table lists connectivity libraries or *drivers* that client applications can use from a variety of languages to connect to and use SQL Server running on-premises or in the cloud. You can use them on Linux, Windows, or Docker and use them to connect to Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics. 
- -| Language | Platform | Additional resources | Download | Get started | -| :-- | :-- | :-- | :-- | :-- | -| C# | Windows, Linux, macOS | [Microsoft ADO.NET for SQL Server](/sql/connect/ado-net/microsoft-ado-net-sql-server) | [Download](https://dotnet.microsoft.com/download) | [Get started](https://www.microsoft.com/sql-server/developer-get-started/csharp/ubuntu) -| Java | Windows, Linux, macOS | [Microsoft JDBC driver for SQL Server](/sql/connect/jdbc/microsoft-jdbc-driver-for-sql-server/) | [Download](/sql/connect/jdbc/download-microsoft-jdbc-driver-for-sql-server) | [Get started](https://www.microsoft.com/sql-server/developer-get-started/java/ubuntu) -| PHP | Windows, Linux, macOS| [PHP SQL driver for SQL Server](/sql/connect/php/microsoft-php-driver-for-sql-server) | [Download](/sql/connect/php/download-drivers-php-sql-server) | [Get started](https://www.microsoft.com/sql-server/developer-get-started/php/ubuntu/) -| Node.js | Windows, Linux, macOS | [Node.js driver for SQL Server](/sql/connect/node-js/node-js-driver-for-sql-server/) | [Install](/sql/connect/node-js/step-1-configure-development-environment-for-node-js-development/) | [Get started](https://www.microsoft.com/sql-server/developer-get-started/node/ubuntu) -| Python | Windows, Linux, macOS | [Python SQL driver](/sql/connect/python/python-driver-for-sql-server/) | Install choices:
    \* [pymssql](/sql/connect/python/pymssql/step-1-configure-development-environment-for-pymssql-python-development/)
    \* [pyodbc](/sql/connect/python/pyodbc/step-1-configure-development-environment-for-pyodbc-python-development/) | [Get started](https://www.microsoft.com/sql-server/developer-get-started/python/ubuntu) -| Ruby | Windows, Linux, macOS | [Ruby driver for SQL Server](/sql/connect/ruby/ruby-driver-for-sql-server/) | [Install](/sql/connect/ruby/step-1-configure-development-environment-for-ruby-development/) | [Get started](https://www.microsoft.com/sql-server/developer-get-started/ruby/ubuntu) -| C++ | Windows, Linux, macOS | [Microsoft ODBC driver for SQL Server](/sql/connect/odbc/microsoft-odbc-driver-for-sql-server/) | [Download](/sql/connect/odbc/microsoft-odbc-driver-for-sql-server/) | - -### Data-access frameworks - -The following table lists examples of object-relational mapping (ORM) frameworks and web frameworks that client applications can use with SQL Server, Azure SQL Database, Azure SQL Managed Instance, or Azure Synapse Analytics. You can use the frameworks on Linux, Windows, or Docker. - -| Language | Platform | ORM(s) | -| :-- | :-- | :-- | -| C# | Windows, Linux, macOS | [Entity Framework](/ef)
    [Entity Framework Core](/ef/core/index) | -| Java | Windows, Linux, macOS |[Hibernate ORM](https://hibernate.org/orm)| -| PHP | Windows, Linux, macOS | [Laravel (Eloquent)](https://laravel.com/docs/eloquent)
    [Doctrine](https://www.doctrine-project.org/projects/orm.html) | -| Node.js | Windows, Linux, macOS | [Sequelize ORM](https://sequelize.org/) | -| Python | Windows, Linux, macOS |[Django](https://www.djangoproject.com/) | -| Ruby | Windows, Linux, macOS | [Ruby on Rails](https://rubyonrails.org/) | - -## Next steps - -- For connectivity architecture information, see [Azure SQL Database Connectivity Architecture](connectivity-architecture.md). -- Find [SQL Server drivers](/sql/connect/sql-connection-libraries/) that are used to connect from client applications. -- Connect to Azure SQL Database or Azure SQL Managed Instance: - - [Connect and query using .NET (C#)](connect-query-dotnet-core.md) - - [Connect and query using PHP](connect-query-php.md) - - [Connect and query using Node.js](connect-query-nodejs.md) - - [Connect and query using Java](connect-query-java.md) - - [Connect and query using Python](connect-query-python.md) - - [Connect and query using Ruby](connect-query-ruby.md) - - [Install sqlcmd and bcp the SQL Server command-line tools on Linux](/sql/linux/sql-server-linux-setup-tools) - For Linux users, try connecting to Azure SQL Database or Azure SQL Managed Instance using [sqlcmd](/sql/ssms/scripting/sqlcmd-use-the-utility). 
-- Retry logic code examples: - - [Connect resiliently with ADO.NET][step-4-connect-resiliently-to-sql-with-ado-net-a78n] - - [Connect resiliently with PHP][step-4-connect-resiliently-to-sql-with-php-p42h] - - - -[step-4-connect-resiliently-to-sql-with-ado-net-a78n]: /sql/connect/ado-net/step-4-connect-resiliently-sql-ado-net - -[step-4-connect-resiliently-to-sql-with-php-p42h]: /sql/connect/php/step-4-connect-resiliently-to-sql-with-php diff --git a/articles/azure-sql/database/connect-query-dotnet-core.md b/articles/azure-sql/database/connect-query-dotnet-core.md deleted file mode 100644 index 9f57d119e752a..0000000000000 --- a/articles/azure-sql/database/connect-query-dotnet-core.md +++ /dev/null @@ -1,146 +0,0 @@ ---- -title: Use .NET Core to connect and query a database -description: This topic shows you how to use .NET Core to create a program that connects to a database in Azure SQL Database, or Azure SQL Managed Instance, and queries it using Transact-SQL statements. -titleSuffix: Azure SQL Database & SQL Managed Instance -services: sql-database -ms.service: sql-database -ms.subservice: connect -ms.custom: sqldbrb=2, devx-track-csharp, mode-other -ms.devlang: csharp -ms.topic: quickstart -author: dzsquared -ms.author: drskwier -ms.reviewer: kendralittle, mathoma -ms.date: 05/29/2020 ---- -# Quickstart: Use .NET Core (C#) to query a database -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi-asa.md)] - -In this quickstart, you'll use [.NET Core](https://dotnet.microsoft.com) and C# code to connect to a database. You'll then run a Transact-SQL statement to query data. - -> [!TIP] -> The following Microsoft Learn module helps you learn for free how to [Develop and configure an ASP.NET application that queries a database in Azure SQL Database](/learn/modules/develop-app-that-queries-azure-sql/) - -## Prerequisites - -To complete this quickstart, you need: - -- An Azure account with an active subscription. 
[Create an account for free](https://azure.microsoft.com/free/?ref=microsoft.com&utm_source=microsoft.com&utm_medium=docs&utm_campaign=visualstudio). -- [.NET Core SDK for your operating system](https://dotnet.microsoft.com/download) installed. -- A database where you can run your query. - - [!INCLUDE[create-configure-database](../includes/create-configure-database.md)] - -## Create a new .NET Core project - -1. Open a command prompt and create a folder named **sqltest**. Navigate to this folder and run this command. - - ```cmd - dotnet new console - ``` - - This command creates new app project files, including an initial C# code file (**Program.cs**), an XML configuration file (**sqltest.csproj**), and needed binaries. - -2. In a text editor, open **sqltest.csproj** and paste the following XML between the `` tags. This XML adds `System.Data.SqlClient` as a dependency. - - ```xml - - - - ``` - -## Insert code to query the database in Azure SQL Database - -1. In a text editor, open **Program.cs**. - -2. Replace the contents with the following code and add the appropriate values for your server, database, username, and password. - -> [!NOTE] -> To use an ADO.NET connection string, replace the 4 lines in the code -> setting the server, database, username, and password with the line below. In -> the string, set your username and password. 
-> -> `builder.ConnectionString="";` - -```csharp -using System; -using System.Data.SqlClient; -using System.Text; - -namespace sqltest -{ - class Program - { - static void Main(string[] args) - { - try - { - SqlConnectionStringBuilder builder = new SqlConnectionStringBuilder(); - - builder.DataSource = ""; - builder.UserID = ""; - builder.Password = ""; - builder.InitialCatalog = ""; - - using (SqlConnection connection = new SqlConnection(builder.ConnectionString)) - { - Console.WriteLine("\nQuery data example:"); - Console.WriteLine("=========================================\n"); - - connection.Open(); - - String sql = "SELECT name, collation_name FROM sys.databases"; - - using (SqlCommand command = new SqlCommand(sql, connection)) - { - using (SqlDataReader reader = command.ExecuteReader()) - { - while (reader.Read()) - { - Console.WriteLine("{0} {1}", reader.GetString(0), reader.GetString(1)); - } - } - } - } - } - catch (SqlException e) - { - Console.WriteLine(e.ToString()); - } - Console.WriteLine("\nDone. Press enter."); - Console.ReadLine(); - } - } -} -``` - -## Run the code - -1. At the prompt, run the following commands. - - ```cmd - dotnet restore - dotnet run - ``` - -2. Verify that the rows are returned. - - ```text - Query data example: - ========================================= - - master SQL_Latin1_General_CP1_CI_AS - tempdb SQL_Latin1_General_CP1_CI_AS - WideWorldImporters Latin1_General_100_CI_AS - - Done. Press enter. - ``` - -3. Choose **Enter** to close the application window. - -## Next steps - -- [Getting started with .NET Core on Windows/Linux/macOS using the command line](/dotnet/core/tutorials/using-with-xplat-cli). -- Learn how to [connect and query Azure SQL Database or Azure SQL Managed Instance, by using the .NET Framework and Visual Studio](connect-query-dotnet-visual-studio.md). 
-- Learn how to [Design your first database with SSMS](design-first-database-tutorial.md) or [Design a database and connect with C# and ADO.NET](design-first-database-csharp-tutorial.md). -- For more information about .NET, see [.NET documentation](/dotnet/). diff --git a/articles/azure-sql/database/connect-query-dotnet-visual-studio.md b/articles/azure-sql/database/connect-query-dotnet-visual-studio.md deleted file mode 100644 index 2fa04d3d71f55..0000000000000 --- a/articles/azure-sql/database/connect-query-dotnet-visual-studio.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: "Use Visual Studio with .NET and C# to query" -description: "Use Visual Studio to create a C# app that connects to a database in Azure SQL Database or Azure SQL Managed Instance and runs queries." -titleSuffix: Azure SQL Database & SQL Managed Instance -services: sql-database -ms.service: sql-database -ms.subservice: connect -ms.custom: devx-track-csharp, sqldbrb=2, mode-ui -ms.devlang: csharp -ms.topic: quickstart -author: dzsquared -ms.author: drskwier -ms.reviewer: kendralittle, mathoma -ms.date: 08/10/2020 ---- -# Quickstart: Use .NET and C# in Visual Studio to connect to and query a database -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi-asa.md)] - -This quickstart shows how to use the [.NET Framework](https://dotnet.microsoft.com) and C# code in Visual Studio to query a database in Azure SQL or Synapse SQL with Transact-SQL statements. - -## Prerequisites - -To complete this quickstart, you need: - -- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?ref=microsoft.com&utm_source=microsoft.com&utm_medium=docs&utm_campaign=visualstudio). -- [Visual Studio 2019](https://www.visualstudio.com/downloads/) Community, Professional, or Enterprise edition. -- A database where you can run a query. 
- - [!INCLUDE[create-configure-database](../includes/create-configure-database.md)] - -## Create code to query the database in Azure SQL Database - -1. In Visual Studio, create a new project. - -1. In the **New Project** dialog, select the **Visual C#**, **Console App (.NET Framework)**. - -1. Enter *sqltest* for the project name, and then select **OK**. The new project is created. - -1. Select **Project** > **Manage NuGet Packages**. - -1. In **NuGet Package Manager**, select the **Browse** tab, then search for and select **Microsoft.Data.SqlClient**. - -1. On the **Microsoft.Data.SqlClient** page, select **Install**. - - If prompted, select **OK** to continue with the installation. - - If a **License Acceptance** window appears, select **I Accept**. - -1. When the install completes, you can close **NuGet Package Manager**. - -1. In the code editor, replace the **Program.cs** contents with the following code. Replace your values for ``, ``, ``, and ``. - - ```csharp - using System; - using Microsoft.Data.SqlClient; - using System.Text; - - namespace sqltest - { - class Program - { - static void Main(string[] args) - { - try - { - SqlConnectionStringBuilder builder = new SqlConnectionStringBuilder(); - builder.DataSource = ".database.windows.net"; - builder.UserID = ""; - builder.Password = ""; - builder.InitialCatalog = ""; - - using (SqlConnection connection = new SqlConnection(builder.ConnectionString)) - { - Console.WriteLine("\nQuery data example:"); - Console.WriteLine("=========================================\n"); - - String sql = "SELECT name, collation_name FROM sys.databases"; - - using (SqlCommand command = new SqlCommand(sql, connection)) - { - connection.Open(); - using (SqlDataReader reader = command.ExecuteReader()) - { - while (reader.Read()) - { - Console.WriteLine("{0} {1}", reader.GetString(0), reader.GetString(1)); - } - } - } - } - } - catch (SqlException e) - { - Console.WriteLine(e.ToString()); - } - Console.ReadLine(); - } - } - } - ``` - 
-## Run the code - -1. To run the app, select **Debug** > **Start Debugging**, or select **Start** on the toolbar, or press **F5**. -1. Verify that the database names and collations are returned, and then close the app window. - -## Next steps - -- Learn how to [connect and query a database in Azure SQL Database by using .NET Core](connect-query-dotnet-core.md) on Windows/Linux/macOS. -- Learn about [Getting started with .NET Core on Windows/Linux/macOS using the command line](/dotnet/core/tutorials/using-with-xplat-cli). -- Learn how to [Design your first database in Azure SQL Database by using SSMS](design-first-database-tutorial.md) or [Design your first database in Azure SQL Database by using .NET](design-first-database-csharp-tutorial.md). -- For more information about .NET, see [.NET documentation](/dotnet/). -- Retry logic example: [Connect resiliently to Azure SQL with ADO.NET][step-4-connect-resiliently-to-sql-with-ado-net-a78n]. - - - - -[step-4-connect-resiliently-to-sql-with-ado-net-a78n]: /sql/connect/ado-net/step-4-connect-resiliently-sql-ado-net diff --git a/articles/azure-sql/database/connect-query-go.md b/articles/azure-sql/database/connect-query-go.md deleted file mode 100644 index f8dfc33b3cca4..0000000000000 --- a/articles/azure-sql/database/connect-query-go.md +++ /dev/null @@ -1,333 +0,0 @@ ---- -title: Use Go to query -description: Use Go to create a program that connects to a database in Azure SQL Database or Azure SQL Managed Instance, and runs queries. 
-titleSuffix: Azure SQL Database & SQL Managed Instance -services: sql-database -ms.service: sql-database -ms.subservice: connect -ms.custom: sqldbrb=2, mode-api -ms.devlang: golang -ms.topic: quickstart -author: dzsquared -ms.author: drskwier -ms.reviewer: kendralittle, mathoma -ms.date: 04/14/2021 ---- -# Quickstart: Use Golang to query a database in Azure SQL Database or Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -In this quickstart, you'll use the [Golang](https://godoc.org/github.com/denisenkom/go-mssqldb) programming language to connect to a database in Azure SQL Database or Azure SQL Managed Instance. You'll then run Transact-SQL statements to query and modify data. [Golang](https://go.dev/) is an open-source programming language that makes it easy to build simple, reliable, and efficient software. - -## Prerequisites - -To complete this quickstart, you need: - -- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?ref=microsoft.com&utm_source=microsoft.com&utm_medium=docs&utm_campaign=visualstudio). -- A database in Azure SQL Database or Azure SQL Managed Instance. 
You can use one of these quickstarts to create a database: - - || SQL Database | SQL Managed Instance | SQL Server on Azure VM | - |:--- |:--- |:---|:---| - | **Create**| [Portal](single-database-create-quickstart.md) | [Portal](../managed-instance/instance-create-quickstart.md) | [Portal](../virtual-machines/windows/sql-vm-create-portal-quickstart.md) - | **Create** | [CLI](scripts/create-and-configure-database-cli.md) | [CLI](https://medium.com/azure-sqldb-managed-instance/working-with-sql-managed-instance-using-azure-cli-611795fe0b44) | - | **Create** | [PowerShell](scripts/create-and-configure-database-powershell.md) | [PowerShell](../managed-instance/scripts/create-configure-managed-instance-powershell.md) | [PowerShell](../virtual-machines/windows/sql-vm-create-powershell-quickstart.md) - | **Configure** | [Server-level IP firewall rule](firewall-create-server-level-portal-quickstart.md)| [Connectivity from a VM](../managed-instance/connect-vm-instance-configure.md)| - | **Configure** ||[Connectivity from on-premises](../managed-instance/point-to-site-p2s-configure.md) | [Connect to a SQL Server instance](../virtual-machines/windows/sql-vm-create-portal-quickstart.md) - |**Load data**|Adventure Works loaded per quickstart|[Restore Wide World Importers](../managed-instance/restore-sample-database-quickstart.md) | [Restore Wide World Importers](../managed-instance/restore-sample-database-quickstart.md) | - | **Load data** ||Restore or import Adventure Works from a [BACPAC](database-import.md) file from [GitHub](https://github.com/Microsoft/sql-server-samples/tree/master/samples/databases/adventure-works)| Restore or import Adventure Works from a [BACPAC](database-import.md) file from [GitHub](https://github.com/Microsoft/sql-server-samples/tree/master/samples/databases/adventure-works)| - - > [!IMPORTANT] - > The scripts in this article are written to use the Adventure Works database. 
With a SQL Managed Instance, you must either import the Adventure Works database into an instance database or modify the scripts in this article to use the Wide World Importers database. - -- Golang and related software for your operating system installed: - - - **macOS**: Install Homebrew and Golang. See [Step 1.2](https://www.microsoft.com/sql-server/developer-get-started/go/mac/). - - **Ubuntu**: Install Golang. See [Step 1.2](https://www.microsoft.com/sql-server/developer-get-started/go/ubuntu/). - - **Windows**: Install Golang. See [Step 1.2](https://www.microsoft.com/sql-server/developer-get-started/go/windows/). - -## Get server connection information - -Get the connection information you need to connect to the database. You'll need the fully qualified server name or host name, database name, and login information for the upcoming procedures. - -1. Sign in to the [Azure portal](https://portal.azure.com/). - -2. Navigate to the **SQL Databases** or **SQL Managed Instances** page. - -3. On the **Overview** page, review the fully qualified server name next to **Server name** for a database in Azure SQL Database or the fully qualified server name (or IP address) next to **Host** for an Azure SQL Managed Instance or SQL Server on Azure VM. To copy the server name or host name, hover over it and select the **Copy** icon. - -> [!NOTE] -> For connection information for SQL Server on Azure VM, see [Connect to a SQL Server instance](../virtual-machines/windows/sql-vm-create-portal-quickstart.md#connect-to-sql-server). - -## Create Golang project and dependencies - -1. From the terminal, create a new project folder called **SqlServerSample**. - - ```bash - mkdir SqlServerSample - ``` - -2. Navigate to **SqlServerSample** and install the SQL Server driver for Go. - - ```bash - cd SqlServerSample - go get github.com/denisenkom/go-mssqldb - ``` - -## Create sample data - -1. In a text editor, create a file called **CreateTestData.sql** in the **SqlServerSample** folder. 
In the file, paste this T-SQL code, which creates a schema, table, and inserts a few rows. - - ```sql - CREATE SCHEMA TestSchema; - GO - - CREATE TABLE TestSchema.Employees ( - Id INT IDENTITY(1,1) NOT NULL PRIMARY KEY, - Name NVARCHAR(50), - Location NVARCHAR(50) - ); - GO - - INSERT INTO TestSchema.Employees (Name, Location) VALUES - (N'Jared', N'Australia'), - (N'Nikita', N'India'), - (N'Tom', N'Germany'); - GO - - SELECT * FROM TestSchema.Employees; - GO - ``` - -2. Use `sqlcmd` to connect to the database and run your newly created Azure SQL script. Replace the appropriate values for your server, database, username, and password. - - ```bash - sqlcmd -S .database.windows.net -U -P -d -i ./CreateTestData.sql - ``` - -## Insert code to query the database - -1. Create a file named **sample.go** in the **SqlServerSample** folder. - -2. In the file, paste this code. Add the values for your server, database, username, and password. This example uses the Golang [context methods](https://go.dev/pkg/context/) to make sure there's an active connection. 
- - ```go - package main - - import ( - _ "github.com/denisenkom/go-mssqldb" - "database/sql" - "context" - "log" - "fmt" - "errors" - ) - - var db *sql.DB - - var server = "" - var port = 1433 - var user = "" - var password = "" - var database = "" - - func main() { - // Build connection string - connString := fmt.Sprintf("server=%s;user id=%s;password=%s;port=%d;database=%s;", - server, user, password, port, database) - - var err error - - // Create connection pool - db, err = sql.Open("sqlserver", connString) - if err != nil { - log.Fatal("Error creating connection pool: ", err.Error()) - } - ctx := context.Background() - err = db.PingContext(ctx) - if err != nil { - log.Fatal(err.Error()) - } - fmt.Printf("Connected!\n") - - // Create employee - createID, err := CreateEmployee("Jake", "United States") - if err != nil { - log.Fatal("Error creating Employee: ", err.Error()) - } - fmt.Printf("Inserted ID: %d successfully.\n", createID) - - // Read employees - count, err := ReadEmployees() - if err != nil { - log.Fatal("Error reading Employees: ", err.Error()) - } - fmt.Printf("Read %d row(s) successfully.\n", count) - - // Update from database - updatedRows, err := UpdateEmployee("Jake", "Poland") - if err != nil { - log.Fatal("Error updating Employee: ", err.Error()) - } - fmt.Printf("Updated %d row(s) successfully.\n", updatedRows) - - // Delete from database - deletedRows, err := DeleteEmployee("Jake") - if err != nil { - log.Fatal("Error deleting Employee: ", err.Error()) - } - fmt.Printf("Deleted %d row(s) successfully.\n", deletedRows) - } - - // CreateEmployee inserts an employee record - func CreateEmployee(name string, location string) (int64, error) { - ctx := context.Background() - var err error - - if db == nil { - err = errors.New("CreateEmployee: db is null") - return -1, err - } - - // Check if database is alive. 
- err = db.PingContext(ctx) - if err != nil { - return -1, err - } - - tsql := ` - INSERT INTO TestSchema.Employees (Name, Location) VALUES (@Name, @Location); - select isNull(SCOPE_IDENTITY(), -1); - ` - - stmt, err := db.Prepare(tsql) - if err != nil { - return -1, err - } - defer stmt.Close() - - row := stmt.QueryRowContext( - ctx, - sql.Named("Name", name), - sql.Named("Location", location)) - var newID int64 - err = row.Scan(&newID) - if err != nil { - return -1, err - } - - return newID, nil - } - - // ReadEmployees reads all employee records - func ReadEmployees() (int, error) { - ctx := context.Background() - - // Check if database is alive. - err := db.PingContext(ctx) - if err != nil { - return -1, err - } - - tsql := fmt.Sprintf("SELECT Id, Name, Location FROM TestSchema.Employees;") - - // Execute query - rows, err := db.QueryContext(ctx, tsql) - if err != nil { - return -1, err - } - - defer rows.Close() - - var count int - - // Iterate through the result set. - for rows.Next() { - var name, location string - var id int - - // Get values from row. - err := rows.Scan(&id, &name, &location) - if err != nil { - return -1, err - } - - fmt.Printf("ID: %d, Name: %s, Location: %s\n", id, name, location) - count++ - } - - return count, nil - } - - // UpdateEmployee updates an employee's information - func UpdateEmployee(name string, location string) (int64, error) { - ctx := context.Background() - - // Check if database is alive. 
- err := db.PingContext(ctx) - if err != nil { - return -1, err - } - - tsql := fmt.Sprintf("UPDATE TestSchema.Employees SET Location = @Location WHERE Name = @Name") - - // Execute non-query with named parameters - result, err := db.ExecContext( - ctx, - tsql, - sql.Named("Location", location), - sql.Named("Name", name)) - if err != nil { - return -1, err - } - - return result.RowsAffected() - } - - // DeleteEmployee deletes an employee from the database - func DeleteEmployee(name string) (int64, error) { - ctx := context.Background() - - // Check if database is alive. - err := db.PingContext(ctx) - if err != nil { - return -1, err - } - - tsql := fmt.Sprintf("DELETE FROM TestSchema.Employees WHERE Name = @Name;") - - // Execute non-query with named parameters - result, err := db.ExecContext(ctx, tsql, sql.Named("Name", name)) - if err != nil { - return -1, err - } - - return result.RowsAffected() - } - ``` - -## Run the code - -1. At the command prompt, run the following command. - - ```bash - go run sample.go - ``` - -2. Verify the output. - - ```text - Connected! - Inserted ID: 4 successfully. - ID: 1, Name: Jared, Location: Australia - ID: 2, Name: Nikita, Location: India - ID: 3, Name: Tom, Location: Germany - ID: 4, Name: Jake, Location: United States - Read 4 row(s) successfully. - Updated 1 row(s) successfully. - Deleted 1 row(s) successfully. 
- ``` - -## Next steps - -- [Design your first database in Azure SQL Database](design-first-database-tutorial.md) -- [Golang driver for SQL Server](https://github.com/denisenkom/go-mssqldb) -- [Report issues or ask questions](https://github.com/denisenkom/go-mssqldb/issues) diff --git a/articles/azure-sql/database/connect-query-java.md b/articles/azure-sql/database/connect-query-java.md deleted file mode 100644 index c922e00e1a75d..0000000000000 --- a/articles/azure-sql/database/connect-query-java.md +++ /dev/null @@ -1,496 +0,0 @@ ---- -title: Use Java and JDBC with Azure SQL Database -description: Learn how to use Java and JDBC with an Azure SQL Database. -services: sql-database -author: jdubois -ms.author: judubois -ms.service: sql-database -ms.subservice: development -ms.topic: quickstart -ms.devlang: java -ms.date: 06/26/2020 -ms.custom: devx-track-java, devx-track-azurecli, mode-api ---- - -# Use Java and JDBC with Azure SQL Database - -This topic demonstrates creating a sample application that uses Java and [JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity) to store and retrieve information in [Azure SQL Database](/azure/sql-database/). - -JDBC is the standard Java API to connect to traditional relational databases. - -## Prerequisites - -- An Azure account. If you don't have one, [get a free trial](https://azure.microsoft.com/free/). -- [Azure Cloud Shell](../../cloud-shell/quickstart.md) or [Azure CLI](/cli/azure/install-azure-cli). We recommend Azure Cloud Shell so you'll be logged in automatically and have access to all the tools you'll need. -- A supported [Java Development Kit](/azure/developer/java/fundamentals/java-support-on-azure), version 8 (included in Azure Cloud Shell). -- The [Apache Maven](https://maven.apache.org/) build tool. - -## Prepare the working environment - -We are going to use environment variables to limit typing mistakes, and to make it easier for you to customize the following configuration for your specific needs. 
- -Set up those environment variables by using the following commands: - -```bash -AZ_RESOURCE_GROUP=database-workshop -AZ_DATABASE_NAME= -AZ_LOCATION= -AZ_SQL_SERVER_USERNAME=demo -AZ_SQL_SERVER_PASSWORD= -AZ_LOCAL_IP_ADDRESS= -``` - -Replace the placeholders with the following values, which are used throughout this article: - -- ``: The name of your Azure SQL Database server. It should be unique across Azure. -- ``: The Azure region you'll use. You can use `eastus` by default, but we recommend that you configure a region closer to where you live. You can have the full list of available regions by entering `az account list-locations`. -- ``: The password of your Azure SQL Database server. That password should have a minimum of eight characters. The characters should be from three of the following categories: English uppercase letters, English lowercase letters, numbers (0-9), and non-alphanumeric characters (!, $, #, %, and so on). -- ``: The IP address of your local computer, from which you'll run your Java application. One convenient way to find it is to point your browser to [whatismyip.akamai.com](http://whatismyip.akamai.com/). - -Next, create a resource group using the following command: - -```azurecli -az group create \ - --name $AZ_RESOURCE_GROUP \ - --location $AZ_LOCATION \ - | jq -``` - -> [!NOTE] -> We use the `jq` utility to display JSON data and make it more readable. This utility is installed by default on [Azure Cloud Shell](https://shell.azure.com/). If you don't like that utility, you can safely remove the `| jq` part of all the commands we'll use. - -## Create an Azure SQL Database instance - -The first thing we'll create is a managed Azure SQL Database server. - -> [!NOTE] -> You can read more detailed information about creating Azure SQL Database servers in [Quickstart: Create an Azure SQL Database single database](./single-database-create-quickstart.md). 
- -In [Azure Cloud Shell](https://shell.azure.com/), run the following command: - -```azurecli -az sql server create \ - --resource-group $AZ_RESOURCE_GROUP \ - --name $AZ_DATABASE_NAME \ - --location $AZ_LOCATION \ - --admin-user $AZ_SQL_SERVER_USERNAME \ - --admin-password $AZ_SQL_SERVER_PASSWORD \ - | jq -``` - -This command creates an Azure SQL Database server. - -### Configure a firewall rule for your Azure SQL Database server - -Azure SQL Database instances are secured by default. They have a firewall that doesn't allow any incoming connection. To be able to use your database, you need to add a firewall rule that will allow the local IP address to access the database server. - -Because you configured our local IP address at the beginning of this article, you can open the server's firewall by running the following command: - -```azurecli -az sql server firewall-rule create \ - --resource-group $AZ_RESOURCE_GROUP \ - --name $AZ_DATABASE_NAME-database-allow-local-ip \ - --server $AZ_DATABASE_NAME \ - --start-ip-address $AZ_LOCAL_IP_ADDRESS \ - --end-ip-address $AZ_LOCAL_IP_ADDRESS \ - | jq -``` - -### Configure a Azure SQL database - -The Azure SQL Database server that you created earlier is empty. It doesn't have any database that you can use with the Java application. 
Create a new database called `demo` by running the following command: - -```azurecli -az sql db create \ - --resource-group $AZ_RESOURCE_GROUP \ - --name demo \ - --server $AZ_DATABASE_NAME \ - | jq -``` - -### Create a new Java project - -Using your favorite IDE, create a new Java project, and add a `pom.xml` file in its root directory: - -```xml - - - 4.0.0 - com.example - demo - 0.0.1-SNAPSHOT - demo - - - 1.8 - 1.8 - 1.8 - - - - - com.microsoft.sqlserver - mssql-jdbc - 7.4.1.jre8 - - - -``` - -This file is an [Apache Maven](https://maven.apache.org/) that configures our project to use: - -- Java 8 -- A recent SQL Server driver for Java - -### Prepare a configuration file to connect to Azure SQL database - -Create a *src/main/resources/application.properties* file, and add: - -```properties -url=jdbc:sqlserver://$AZ_DATABASE_NAME.database.windows.net:1433;database=demo;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30; -user=demo@$AZ_DATABASE_NAME -password=$AZ_SQL_SERVER_PASSWORD -``` - -- Replace the two `$AZ_DATABASE_NAME` variables with the value that you configured at the beginning of this article. -- Replace the `$AZ_SQL_SERVER_PASSWORD` variable with the value that you configured at the beginning of this article. - -### Create an SQL file to generate the database schema - -We will use a *src/main/resources/`schema.sql`* file in order to create a database schema. Create that file, with the following content: - -```sql -DROP TABLE IF EXISTS todo; -CREATE TABLE todo (id INT PRIMARY KEY, description VARCHAR(255), details VARCHAR(4096), done BIT); -``` - -## Code the application - -### Connect to the database - -Next, add the Java code that will use JDBC to store and retrieve data from your Azure SQL database. 
- -Create a *src/main/java/DemoApplication.java* file, that contains: - -```java -package com.example.demo; - -import java.sql.*; -import java.util.*; -import java.util.logging.Logger; - -public class DemoApplication { - - private static final Logger log; - - static { - System.setProperty("java.util.logging.SimpleFormatter.format", "[%4$-7s] %5$s %n"); - log =Logger.getLogger(DemoApplication.class.getName()); - } - - public static void main(String[] args) throws Exception { - log.info("Loading application properties"); - Properties properties = new Properties(); - properties.load(DemoApplication.class.getClassLoader().getResourceAsStream("application.properties")); - - log.info("Connecting to the database"); - Connection connection = DriverManager.getConnection(properties.getProperty("url"), properties); - log.info("Database connection test: " + connection.getCatalog()); - - log.info("Create database schema"); - Scanner scanner = new Scanner(DemoApplication.class.getClassLoader().getResourceAsStream("schema.sql")); - Statement statement = connection.createStatement(); - while (scanner.hasNextLine()) { - statement.execute(scanner.nextLine()); - } - - /* - Todo todo = new Todo(1L, "configuration", "congratulations, you have set up JDBC correctly!", true); - insertData(todo, connection); - todo = readData(connection); - todo.setDetails("congratulations, you have updated data!"); - updateData(todo, connection); - deleteData(todo, connection); - */ - - log.info("Closing database connection"); - connection.close(); - } -} -``` - -This Java code will use the *application.properties* and the *schema.sql* files that we created earlier, in order to connect to the SQL Server database and create a schema that will store our data. - -In this file, you can see that we commented methods to insert, read, update and delete data: we will code those methods in the rest of this article, and you will be able to uncomment them one after each other. 
- -> [!NOTE] -> The database credentials are stored in the *user* and *password* properties of the *application.properties* file. Those credentials are used when executing `DriverManager.getConnection(properties.getProperty("url"), properties);`, as the properties file is passed as an argument. - -You can now execute this main class with your favorite tool: - -- Using your IDE, you should be able to right-click on the *DemoApplication* class and execute it. -- Using Maven, you can run the application by executing: `mvn exec:java -Dexec.mainClass="com.example.demo.DemoApplication"`. - -The application should connect to the Azure SQL Database, create a database schema, and then close the connection, as you should see in the console logs: - -``` -[INFO ] Loading application properties -[INFO ] Connecting to the database -[INFO ] Database connection test: demo -[INFO ] Create database schema -[INFO ] Closing database connection -``` - -### Create a domain class - -Create a new `Todo` Java class, next to the `DemoApplication` class, and add the following code: - -```java -package com.example.demo; - -public class Todo { - - private Long id; - private String description; - private String details; - private boolean done; - - public Todo() { - } - - public Todo(Long id, String description, String details, boolean done) { - this.id = id; - this.description = description; - this.details = details; - this.done = done; - } - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public String getDetails() { - return details; - } - - public void setDetails(String details) { - this.details = details; - } - - public boolean isDone() { - return done; - } - - public void setDone(boolean done) { - this.done = done; - } - - @Override - public String toString() { - return "Todo{" + - "id=" + 
id + - ", description='" + description + '\'' + - ", details='" + details + '\'' + - ", done=" + done + - '}'; - } -} -``` - -This class is a domain model mapped on the `todo` table that you created when executing the *schema.sql* script. - -### Insert data into Azure SQL database - -In the *src/main/java/DemoApplication.java* file, after the main method, add the following method to insert data into the database: - -```java -private static void insertData(Todo todo, Connection connection) throws SQLException { - log.info("Insert data"); - PreparedStatement insertStatement = connection - .prepareStatement("INSERT INTO todo (id, description, details, done) VALUES (?, ?, ?, ?);"); - - insertStatement.setLong(1, todo.getId()); - insertStatement.setString(2, todo.getDescription()); - insertStatement.setString(3, todo.getDetails()); - insertStatement.setBoolean(4, todo.isDone()); - insertStatement.executeUpdate(); -} -``` - -You can now uncomment the two following lines in the `main` method: - -```java -Todo todo = new Todo(1L, "configuration", "congratulations, you have set up JDBC correctly!", true); -insertData(todo, connection); -``` - -Executing the main class should now produce the following output: - -``` -[INFO ] Loading application properties -[INFO ] Connecting to the database -[INFO ] Database connection test: demo -[INFO ] Create database schema -[INFO ] Insert data -[INFO ] Closing database connection -``` - -### Reading data from Azure SQL database - -Let's read the data previously inserted, to validate that our code works correctly. 
- -In the *src/main/java/DemoApplication.java* file, after the `insertData` method, add the following method to read data from the database: - -```java -private static Todo readData(Connection connection) throws SQLException { - log.info("Read data"); - PreparedStatement readStatement = connection.prepareStatement("SELECT * FROM todo;"); - ResultSet resultSet = readStatement.executeQuery(); - if (!resultSet.next()) { - log.info("There is no data in the database!"); - return null; - } - Todo todo = new Todo(); - todo.setId(resultSet.getLong("id")); - todo.setDescription(resultSet.getString("description")); - todo.setDetails(resultSet.getString("details")); - todo.setDone(resultSet.getBoolean("done")); - log.info("Data read from the database: " + todo.toString()); - return todo; -} -``` - -You can now uncomment the following line in the `main` method: - -```java -todo = readData(connection); -``` - -Executing the main class should now produce the following output: - -``` -[INFO ] Loading application properties -[INFO ] Connecting to the database -[INFO ] Database connection test: demo -[INFO ] Create database schema -[INFO ] Insert data -[INFO ] Read data -[INFO ] Data read from the database: Todo{id=1, description='configuration', details='congratulations, you have set up JDBC correctly!', done=true} -[INFO ] Closing database connection -``` - -### Updating data in Azure SQL Database - -Let's update the data we previously inserted. - -Still in the *src/main/java/DemoApplication.java* file, after the `readData` method, add the following method to update data inside the database: - -```java -private static void updateData(Todo todo, Connection connection) throws SQLException { - log.info("Update data"); - PreparedStatement updateStatement = connection - .prepareStatement("UPDATE todo SET description = ?, details = ?, done = ? 
WHERE id = ?;"); - - updateStatement.setString(1, todo.getDescription()); - updateStatement.setString(2, todo.getDetails()); - updateStatement.setBoolean(3, todo.isDone()); - updateStatement.setLong(4, todo.getId()); - updateStatement.executeUpdate(); - readData(connection); -} -``` - -You can now uncomment the two following lines in the `main` method: - -```java -todo.setDetails("congratulations, you have updated data!"); -updateData(todo, connection); -``` - -Executing the main class should now produce the following output: - -``` -[INFO ] Loading application properties -[INFO ] Connecting to the database -[INFO ] Database connection test: demo -[INFO ] Create database schema -[INFO ] Insert data -[INFO ] Read data -[INFO ] Data read from the database: Todo{id=1, description='configuration', details='congratulations, you have set up JDBC correctly!', done=true} -[INFO ] Update data -[INFO ] Read data -[INFO ] Data read from the database: Todo{id=1, description='configuration', details='congratulations, you have updated data!', done=true} -[INFO ] Closing database connection -``` - -### Deleting data in Azure SQL database - -Finally, let's delete the data we previously inserted. 
- -Still in the *src/main/java/DemoApplication.java* file, after the `updateData` method, add the following method to delete data inside the database: - -```java -private static void deleteData(Todo todo, Connection connection) throws SQLException { - log.info("Delete data"); - PreparedStatement deleteStatement = connection.prepareStatement("DELETE FROM todo WHERE id = ?;"); - deleteStatement.setLong(1, todo.getId()); - deleteStatement.executeUpdate(); - readData(connection); -} -``` - -You can now uncomment the following line in the `main` method: - -```java -deleteData(todo, connection); -``` - -Executing the main class should now produce the following output: - -``` -[INFO ] Loading application properties -[INFO ] Connecting to the database -[INFO ] Database connection test: demo -[INFO ] Create database schema -[INFO ] Insert data -[INFO ] Read data -[INFO ] Data read from the database: Todo{id=1, description='configuration', details='congratulations, you have set up JDBC correctly!', done=true} -[INFO ] Update data -[INFO ] Read data -[INFO ] Data read from the database: Todo{id=1, description='configuration', details='congratulations, you have updated data!', done=true} -[INFO ] Delete data -[INFO ] Read data -[INFO ] There is no data in the database! -[INFO ] Closing database connection -``` - -## Conclusion and resources clean up - -Congratulations! You've created a Java application that uses JDBC to store and retrieve data from Azure SQL database. 
- -To clean up all resources used during this quickstart, delete the resource group using the following command: - -```azurecli -az group delete \ - --name $AZ_RESOURCE_GROUP \ - --yes -``` - -## Next steps - -- [Design your first database in Azure SQL Database](design-first-database-tutorial.md) -- [Microsoft JDBC Driver for SQL Server](https://github.com/microsoft/mssql-jdbc) -- [Report issues/ask questions](https://github.com/microsoft/mssql-jdbc/issues) diff --git a/articles/azure-sql/database/connect-query-nodejs.md b/articles/azure-sql/database/connect-query-nodejs.md deleted file mode 100644 index 03f22b795f191..0000000000000 --- a/articles/azure-sql/database/connect-query-nodejs.md +++ /dev/null @@ -1,206 +0,0 @@ ---- -title: Use Node.js to query a database -description: How to use Node.js to create a program that connects to a database in Azure SQL Database or Azure SQL Managed Instance, and query it using T-SQL statements. -titleSuffix: Azure SQL Database & SQL Managed Instance -services: sql-database -ms.service: sql-database -ms.subservice: connect -ms.devlang: javascript -ms.topic: quickstart -author: dzsquared -ms.author: drskwier -ms.reviewer: kendralittle, mathoma, v-masebo -ms.date: 05/19/2021 -ms.custom: seo-javascript-september2019, seo-javascript-october2019, sqldbrb=2, devx-track-js, mode-api ---- -# Quickstart: Use Node.js to query a database in Azure SQL Database or Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -In this quickstart, you use Node.js to connect to a database and query data. - -## Prerequisites - -To complete this quickstart, you need: - -- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?ref=microsoft.com&utm_source=microsoft.com&utm_medium=docs&utm_campaign=visualstudio). 
- - | Action | SQL Database | SQL Managed Instance | SQL Server on Azure VM | - |:--- |:--- |:---|:---| - | Create| [Portal](single-database-create-quickstart.md) | [Portal](../managed-instance/instance-create-quickstart.md) | [Portal](../virtual-machines/windows/sql-vm-create-portal-quickstart.md) - || [CLI](scripts/create-and-configure-database-cli.md) | [CLI](https://medium.com/azure-sqldb-managed-instance/working-with-sql-managed-instance-using-azure-cli-611795fe0b44) | - || [PowerShell](scripts/create-and-configure-database-powershell.md) | [PowerShell](../managed-instance/scripts/create-configure-managed-instance-powershell.md) | [PowerShell](../virtual-machines/windows/sql-vm-create-powershell-quickstart.md) - | Configure | [Server-level IP firewall rule](firewall-create-server-level-portal-quickstart.md)| [Connectivity from a VM](../managed-instance/connect-vm-instance-configure.md)| - |||[Connectivity from on-premises](../managed-instance/point-to-site-p2s-configure.md) | [Connect to a SQL Server instance](../virtual-machines/windows/sql-vm-create-portal-quickstart.md) - |Load data|Adventure Works loaded per quickstart|[Restore Wide World Importers](../managed-instance/restore-sample-database-quickstart.md) | [Restore Wide World Importers](../managed-instance/restore-sample-database-quickstart.md) | - |||Restore or import Adventure Works from a [BACPAC](database-import.md) file from [GitHub](https://github.com/Microsoft/sql-server-samples/tree/master/samples/databases/adventure-works)| Restore or import Adventure Works from a [BACPAC](database-import.md) file from [GitHub](https://github.com/Microsoft/sql-server-samples/tree/master/samples/databases/adventure-works)| - - - -- [Node.js](https://nodejs.org)-related software - - # [macOS](#tab/macos) - - Install Homebrew and Node.js, and then install the ODBC driver and SQLCMD using steps **1.2** and **1.3** in [Create Node.js apps using SQL Server on 
macOS](https://www.microsoft.com/sql-server/developer-get-started/node/mac/). - - # [Ubuntu](#tab/ubuntu) - - Install Node.js, and then install the ODBC driver and SQLCMD using steps **1.2** and **1.3** in [Create Node.js apps using SQL Server on Ubuntu](https://www.microsoft.com/sql-server/developer-get-started/node/ubuntu/). - - # [Windows](#tab/windows) - - Install Chocolatey and Node.js, and then install the ODBC driver and SQLCMD using steps **1.2** and **1.3** in [Create Node.js apps using SQL Server on Windows](https://www.microsoft.com/sql-server/developer-get-started/node/windows/). - - --- - -> [!IMPORTANT] -> The scripts in this article are written to use the **Adventure Works** database. - -> [!NOTE] -> You can optionally choose to use an Azure SQL Managed Instance. -> -> To create and configure, use the [Azure portal](../managed-instance/instance-create-quickstart.md), [PowerShell](../managed-instance/scripts/create-configure-managed-instance-powershell.md), or [CLI](https://medium.com/azure-sqldb-managed-instance/working-with-sql-managed-instance-using-azure-cli-611795fe0b44), and then set up [on-premises](../managed-instance/point-to-site-p2s-configure.md) or [VM](../managed-instance/connect-vm-instance-configure.md) connectivity. -> -> To load data, see [restore with BACPAC](database-import.md) with the [Adventure Works](https://github.com/Microsoft/sql-server-samples/tree/master/samples/databases/adventure-works) file, or see [restore the Wide World Importers database](../managed-instance/restore-sample-database-quickstart.md). - -## Get server connection information - -Get the connection information you need to connect to the database in Azure SQL Database. You'll need the fully qualified server name or host name, database name, and login information for the upcoming procedures. - -1. Sign in to the [Azure portal](https://portal.azure.com/). - -2. Go to the **SQL Databases** or **SQL Managed Instances** page. - -3. 
On the **Overview** page, review the fully qualified server name next to **Server name** for a database in Azure SQL Database or the fully qualified server name (or IP address) next to **Host** for an Azure SQL Managed Instance or SQL Server on Azure VM. To copy the server name or host name, hover over it and select the **Copy** icon. - -> [!NOTE] -> For connection information for SQL Server on Azure VM, see [Connect to SQL Server](../virtual-machines/windows/sql-vm-create-portal-quickstart.md#connect-to-sql-server). - -## Create the project - -Open a command prompt and create a folder named *sqltest*. Open the folder you created and run the following command: - - ```bash - npm init -y - npm install tedious - ``` - -## Add code to query the database - -1. In your favorite text editor, create a new file, *sqltest.js*. - -1. Replace its contents with the following code. Then add the appropriate values for your server, database, user, and password. - - ```js - const { Connection, Request } = require("tedious"); - - // Create connection to database - const config = { - authentication: { - options: { - userName: "username", // update me - password: "password" // update me - }, - type: "default" - }, - server: "your_server.database.windows.net", // update me - options: { - database: "your_database", //update me - encrypt: true - } - }; - - /* - //Use Azure VM Managed Identity to connect to the SQL database - const config = { - server: process.env["db_server"], - authentication: { - type: 'azure-active-directory-msi-vm', - }, - options: { - database: process.env["db_database"], - encrypt: true, - port: 1433 - } - }; - - //Use Azure App Service Managed Identity to connect to the SQL database - const config = { - server: process.env["db_server"], - authentication: { - type: 'azure-active-directory-msi-app-service', - }, - options: { - database: process.env["db_database"], - encrypt: true, - port: 1433 - } - }); - - */ - - const connection = new Connection(config); - - // 
Attempt to connect and execute queries if connection goes through - connection.on("connect", err => { - if (err) { - console.error(err.message); - } else { - queryDatabase(); - } - }); - - connection.connect(); - - function queryDatabase() { - console.log("Reading rows from the Table..."); - - // Read all rows from table - const request = new Request( - `SELECT TOP 20 pc.Name as CategoryName, - p.name as ProductName - FROM [SalesLT].[ProductCategory] pc - JOIN [SalesLT].[Product] p ON pc.productcategoryid = p.productcategoryid`, - (err, rowCount) => { - if (err) { - console.error(err.message); - } else { - console.log(`${rowCount} row(s) returned`); - } - } - ); - - request.on("row", columns => { - columns.forEach(column => { - console.log("%s\t%s", column.metadata.colName, column.value); - }); - }); - - connection.execSql(request); - } - ``` - -> [!NOTE] -> For more information about using managed identity for authentication, complete the tutorial to [access data via managed identity](../../app-service/tutorial-connect-msi-sql-database.md). - -> [!NOTE] -> The code example uses the **AdventureWorksLT** sample database in Azure SQL Database. - -## Run the code - -1. At the command prompt, run the program. - - ```bash - node sqltest.js - ``` - -1. Verify the top 20 rows are returned and close the application window. 
- -## Next steps - -- [Microsoft Node.js Driver for SQL Server](/sql/connect/node-js/node-js-driver-for-sql-server) - -- Connect and query on Windows/Linux/macOS with [.NET core](connect-query-dotnet-core.md), [Visual Studio Code](connect-query-vscode.md), or [SSMS](connect-query-ssms.md) (Windows only) - -- [Get started with .NET Core on Windows/Linux/macOS using the command line](/dotnet/core/tutorials/using-with-xplat-cli) - -- Design your first database in Azure SQL Database using [.NET](design-first-database-csharp-tutorial.md) or [SSMS](design-first-database-tutorial.md) diff --git a/articles/azure-sql/database/connect-query-php.md b/articles/azure-sql/database/connect-query-php.md deleted file mode 100644 index 95ff0f26c6c36..0000000000000 --- a/articles/azure-sql/database/connect-query-php.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Use PHP to query -description: How to use PHP to create a program that connects to a database in Azure SQL Database or Azure SQL Managed Instance, and query it using T-SQL statements. -titleSuffix: Azure SQL Database & SQL Managed Instance -services: sql-database -ms.service: sql-database -ms.subservice: connect -ms.devlang: php -ms.topic: quickstart -author: dzsquared -ms.author: drskwier -ms.reviewer: kendralittle, mathoma, v-masebo -ms.date: 05/29/2020 -ms.custom: sqldbrb=2 , mode-other ---- -# Quickstart: Use PHP to query a database in Azure SQL Database -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -This article demonstrates how to use [PHP](https://php.net/manual/en/intro-whatis.php) to connect to a database in Azure SQL Database or Azure SQL Managed Instance. You can then use T-SQL statements to query data. - -## Prerequisites - -To complete this quickstart, you need: - -- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?ref=microsoft.com&utm_source=microsoft.com&utm_medium=docs&utm_campaign=visualstudio). 
-- A database in Azure SQL Database or Azure SQL Managed Instance. You can use one of these quickstarts to create and then configure a database: - - | Action | SQL Database | SQL Managed Instance | SQL Server on Azure VM | - |:--- |:--- |:---|:---| - | Create| [Portal](single-database-create-quickstart.md) | [Portal](../managed-instance/instance-create-quickstart.md) | [Portal](../virtual-machines/windows/sql-vm-create-portal-quickstart.md) - || [CLI](scripts/create-and-configure-database-cli.md) | [CLI](https://medium.com/azure-sqldb-managed-instance/working-with-sql-managed-instance-using-azure-cli-611795fe0b44) | - || [PowerShell](scripts/create-and-configure-database-powershell.md) | [PowerShell](../managed-instance/scripts/create-configure-managed-instance-powershell.md) | [PowerShell](../virtual-machines/windows/sql-vm-create-powershell-quickstart.md) - | Configure | [Server-level IP firewall rule](firewall-create-server-level-portal-quickstart.md)| [Connectivity from a VM](../managed-instance/connect-vm-instance-configure.md)| - |||[Connectivity from on-premises](../managed-instance/point-to-site-p2s-configure.md) | [Connect to a SQL Server instance](../virtual-machines/windows/sql-vm-create-portal-quickstart.md) - |Load data|Adventure Works loaded per quickstart|[Restore Wide World Importers](../managed-instance/restore-sample-database-quickstart.md) | [Restore Wide World Importers](../managed-instance/restore-sample-database-quickstart.md) | - |||Restore or import Adventure Works from a [BACPAC](database-import.md) file from [GitHub](https://github.com/Microsoft/sql-server-samples/tree/master/samples/databases/adventure-works)| Restore or import Adventure Works from a [BACPAC](database-import.md) file from [GitHub](https://github.com/Microsoft/sql-server-samples/tree/master/samples/databases/adventure-works)| - - - - - - > [!IMPORTANT] - > The scripts in this article are written to use the Adventure Works database. 
With a SQL Managed Instance, you must either import the Adventure Works database into an instance database or modify the scripts in this article to use the Wide World Importers database. - -- PHP-related software installed for your operating system: - - - **macOS**, install PHP, the ODBC driver, then install the PHP Driver for SQL Server. See [Step 1, 2, and 3](/sql/connect/php/installation-tutorial-linux-mac). - - - **Linux**, install PHP, the ODBC driver, then install the PHP Driver for SQL Server. See [Step 1, 2, and 3](/sql/connect/php/installation-tutorial-linux-mac). - - - **Windows**, install PHP and PHP Drivers, then install the ODBC driver and SQLCMD. See [Step 1.2 and 1.3](https://www.microsoft.com/sql-server/developer-get-started/php/windows/). - -## Get server connection information - -Get the connection information you need to connect to the database in Azure SQL Database. You'll need the fully qualified server name or host name, database name, and login information for the upcoming procedures. - -1. Sign in to the [Azure portal](https://portal.azure.com/). - -2. Navigate to the **SQL Databases** or **SQL Managed Instances** page. - -3. On the **Overview** page, review the fully qualified server name next to **Server name** for a database in Azure SQL Database or the fully qualified server name (or IP address) next to **Host** for an Azure SQL Managed Instance or SQL Server in an Azure VM. To copy the server name or host name, hover over it and select the **Copy** icon. - -> [!NOTE] -> For connection information for SQL Server on Azure VM, see [Connect to a SQL Server instance](../virtual-machines/windows/sql-vm-create-portal-quickstart.md#connect-to-sql-server). - -## Add code to query the database - -1. In your favorite text editor, create a new file, *sqltest.php*. - -1. Replace its contents with the following code. Then add the appropriate values for your server, database, user, and password. 
- - ```PHP - "your_database", // update me - "Uid" => "your_username", // update me - "PWD" => "your_password" // update me - ); - //Establishes the connection - $conn = sqlsrv_connect($serverName, $connectionOptions); - $tsql= "SELECT TOP 20 pc.Name as CategoryName, p.name as ProductName - FROM [SalesLT].[ProductCategory] pc - JOIN [SalesLT].[Product] p - ON pc.productcategoryid = p.productcategoryid"; - $getResults= sqlsrv_query($conn, $tsql); - echo ("Reading data from table" . PHP_EOL); - if ($getResults == FALSE) - echo (sqlsrv_errors()); - while ($row = sqlsrv_fetch_array($getResults, SQLSRV_FETCH_ASSOC)) { - echo ($row['CategoryName'] . " " . $row['ProductName'] . PHP_EOL); - } - sqlsrv_free_stmt($getResults); - ?> - ``` - -## Run the code - -1. At the command prompt, run the app. - - ```bash - php sqltest.php - ``` - -1. Verify the top 20 rows are returned and close the app window. - -## Next steps - -- [Design your first database in Azure SQL Database](design-first-database-tutorial.md) -- [Microsoft PHP Drivers for SQL Server](https://github.com/Microsoft/msphpsql/) -- [Report issues or ask questions](https://github.com/Microsoft/msphpsql/issues) -- [Retry logic example: Connect resiliently to Azure SQL with PHP](/sql/connect/php/step-4-connect-resiliently-to-sql-with-php) diff --git a/articles/azure-sql/database/connect-query-portal.md b/articles/azure-sql/database/connect-query-portal.md deleted file mode 100644 index 713db04e0eaa3..0000000000000 --- a/articles/azure-sql/database/connect-query-portal.md +++ /dev/null @@ -1,233 +0,0 @@ ---- -title: Query a SQL Database using the query editor in the Azure portal (preview) -description: Learn how to use the Query Editor to run Transact-SQL (T-SQL) queries against a database in Azure SQL Database. 
-titleSuffix: Azure SQL Database -keywords: connect to sql database,query sql database, azure portal, portal, query editor -services: sql-database -ms.service: sql-database -ms.subservice: development -ms.custom: sqldbrb=1, contperf-fy21q3-portal, mode-ui -ms.topic: quickstart -author: Ninarn -ms.author: ninarn -ms.reviewer: kendralittle, mathoma -ms.date: 02/18/2022 ---- -# Quickstart: Use the Azure portal's query editor (preview) to query an Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -The query editor is a tool in the Azure portal to run SQL queries against your database in Azure SQL Database or data warehouse in Azure Synapse Analytics. - -In this quickstart, you'll use the query editor to run Transact-SQL (T-SQL) queries against a database. - -## Prerequisites - -Completing this quickstart requires the AdventureWorksLT sample database. You may optionally wish to set an Azure Active Directory (Azure AD) admin for your [server](logical-servers.md). - -### Create a database with sample data - -If you don't have a working copy of the AdventureWorksLT sample database in SQL Database, the following quickstart helps you quickly create one: - -[Quickstart: Create a database in Azure SQL Database using the Azure portal, PowerShell, or Azure CLI](single-database-create-quickstart.md) - -### Set an Azure Active Directory admin for the server (optional) - -Configuring an Azure Active Directory (Azure AD) administrator enables you to use a single identity to sign in to the Azure portal and your database. If you would like to use Azure AD to connect to query editor, follow the below steps. - -This process is optional, you can instead use SQL authentication to connect to the query editor. - -> [!NOTE] -> * Email accounts (for example, outlook.com, gmail.com, yahoo.com, and so on) aren't yet supported as Azure AD admins. Make sure to choose a user created either natively in the Azure AD or federated into the Azure AD. 
-> * Azure AD admin sign in works with accounts that have 2-factor authentication enabled, but the query editor does not support 2-factor authentication. - -1. In the Azure portal, navigate to your SQL database server. - -2. On the **SQL server** menu, select **Active Directory admin**. - -3. On the SQL Server **Active Directory admin** page toolbar, select **Set admin**. - - ![select active directory](./media/connect-query-portal/select-active-directory.png) - -4. On the **Add admin** page, in the search box, enter a user or group to find, select it as an admin, and then choose the **Select** button. - -5. Back in the SQL Server **Active Directory admin** page toolbar, select **Save**. - -## Use the SQL Query Editor - -1. Sign in to the [Azure portal](https://portal.azure.com/) and select the database you want to query. - -2. In the **SQL database** menu, select **Query editor (preview)**. - - ![find query editor](./media/connect-query-portal/find-query-editor.PNG) - -### Establish a connection to the database - -Even though you're signed into the portal, you still need to provide credentials to access the database. You can connect using SQL authentication or Azure Active Directory to connect to your database. - -#### Connect using SQL Authentication - -1. In the **Login** page, under **SQL server authentication**, enter a **Login** and **Password** for a user that has access to the database. If you're not sure, use the login and password for the Server admin of the database's server. - - ![sign in](./media/connect-query-portal/login-menu.png) - -2. Select **OK**. - -#### Connect using Azure Active Directory - -In the **Query editor (preview)**, look at the **Login** page at the **Active Directory authentication** section. Authentication will happen automatically, so if you are an Azure AD admin to the database you will see a message appear saying you have been signed in. Then select the **Continue as** *\* button. 
If the page indicates that you have not successfully logged in, you may need to refresh the page. - -### Query a database in SQL Database - -The following example queries should run successfully against the AdventureWorksLT sample database. - -#### Run a SELECT query - -1. Paste the following query into the query editor: - - ```sql - SELECT TOP 20 pc.Name as CategoryName, p.name as ProductName - FROM SalesLT.ProductCategory pc - JOIN SalesLT.Product p - ON pc.productcategoryid = p.productcategoryid; - ``` - -2. Select **Run** and then review the output in the **Results** pane. - - ![query editor results](./media/connect-query-portal/query-editor-results.png) - -3. Optionally, you can save the query as a .sql file, or export the returned data as a .json, .csv, or .xml file. - -#### Run an INSERT query - -Run the following [INSERT](/sql/t-sql/statements/insert-transact-sql/) T-SQL statement to add a new product in the `SalesLT.Product` table. - -1. Replace the previous query with this one. - - ```sql - INSERT INTO [SalesLT].[Product] - ( [Name] - , [ProductNumber] - , [Color] - , [ProductCategoryID] - , [StandardCost] - , [ListPrice] - , [SellStartDate] - ) - VALUES - ('myNewProduct' - ,123456789 - ,'NewColor' - ,1 - ,100 - ,100 - ,GETDATE() ); - ``` - - -2. Select **Run** to insert a new row in the `Product` table. The **Messages** pane displays **Query succeeded: Affected rows: 1**. - - -#### Run an UPDATE query - -Run the following [UPDATE](/sql/t-sql/queries/update-transact-sql/) T-SQL statement to modify your new product. - -1. Replace the previous query with this one. - - ```sql - UPDATE [SalesLT].[Product] - SET [ListPrice] = 125 - WHERE Name = 'myNewProduct'; - ``` - -2. Select **Run** to update the specified row in the `Product` table. The **Messages** pane displays **Query succeeded: Affected rows: 1**. - -#### Run a DELETE query - -Run the following [DELETE](/sql/t-sql/statements/delete-transact-sql/) T-SQL statement to remove your new product. - -1. 
Replace the previous query with this one: - - ```sql - DELETE FROM [SalesLT].[Product] - WHERE Name = 'myNewProduct'; - ``` - -2. Select **Run** to delete the specified row in the `Product` table. The **Messages** pane displays **Query succeeded: Affected rows: 1**. - - -## Troubleshooting - -There are a few things to know when working with the query editor. - -### Configure local network settings - -If you get one of the following errors in the query editor: - - *Your local network settings might be preventing the Query Editor from issuing queries. Please click here for instructions on how to configure your network settings* - - *A connection to the server could not be established. This might indicate an issue with your local firewall configuration or your network proxy settings* - -These errors occur because the query editor uses ports 443 and 1443 to communicate. You will need to ensure you have enabled outbound HTTPS traffic on these ports. The instructions below will walk you through how to do this, depending on your Operating System. You might need to work with your corporate IT to grant approval to open this connection on your local network. - -#### Steps for Windows - -1. Open **Windows Defender Firewall**. -2. On the left-side menu, select **Advanced settings**. -3. In **Windows Defender Firewall with Advanced Security**, select **Outbound rules** on the left-side menu. -4. Select **New Rule...** on the right-side menu. - -In the **New outbound rule wizard** follow these steps: - -1. Select **port** as the type of rule you want to create. Select **Next**. -2. Select **TCP**. -3. Select **Specific remote ports** and enter "443, 1443". Then select **Next**. -4. Select "Allow the connection if it is secure". -5. Select **Next** then select **Next** again. -6. Keep "Domain", "Private", and "Public" all selected. -7. Give the rule a name, for example "Access Azure SQL query editor" and optionally a description. Then select **Finish**. - -#### Steps for Mac -1. 
Open **System Preferences** (Apple menu > System Preferences). -2. Select **Security & Privacy**. -3. Select **Firewall**. -4. If Firewall is off, select **Click the lock to make changes** at the bottom and select **Turn on Firewall**. -5. Select **Firewall Options**. -6. In the **Security & Privacy** window select this option: 'Automatically allow signed software to receive incoming connections'. - -#### Steps for Linux -Run these commands to update iptables: - ``` - sudo iptables -A OUTPUT -p tcp --dport 443 -j ACCEPT - sudo iptables -A OUTPUT -p tcp --dport 1443 -j ACCEPT - ``` - -### Connection considerations - -- For public connections to query editor, you need to [add your outbound IP address to the server's allowed firewall rules](firewall-create-server-level-portal-quickstart.md) to access your databases and data warehouses. -- If you have a Private Link connection set up on the server and you are connecting to query editor from an IP in the private Virtual Network, the Query Editor works without needing to add the Client IP address into the SQL database server firewall rules. -- The most basic role-based access control (RBAC) permissions needed to use the query editor are 'Read access to the server and database'. Anyone with this level of access can access the query editor feature. To limit access to particular users, you must prevent them from being able to sign in to the query editor with Azure Active Directory or SQL authentication credentials. If they cannot assign themselves as the AAD admin for the server or access/add a SQL administrator account, they should not be able to use query editor. -- If you see the error message "The X-CSRF-Signature header could not be validated", take the following action to resolve the issue: - - Verify that your computer's clock is set to the right time and time zone. 
You can also try to match your computer's time zone with Azure by searching for the time zone for the location of your instance, such as East US, Pacific, and so on. - - If you are on a proxy network, make sure that the request header “X-CSRF-Signature” is not being modified or dropped. - - -## Limitations - -- Query editor doesn't support connecting to the `master` database. To connect to the `master` database, explore one or more clients in [Next steps](#next-steps). -- Query editor cannot connect to a [replica database](read-scale-out.md) with `ApplicationIntent=ReadOnly`. To connect in this way from a rich client, you can connect using SQL Server Management Studio and specify `ApplicationIntent=ReadOnly` in the 'Additional Connection Parameters' [tab in connection options](/sql/database-engine/availability-groups/windows/listeners-client-connectivity-application-failover#ConnectToSecondary). -- Query editor has a 5-minute timeout for query execution. To run longer queries, explore one or more clients in [Next steps](#next-steps). -- Query editor only supports cylindrical projection for geography data types. -- Query editor does not support IntelliSense for database tables and views, but does support autocomplete on names that have already been typed. For IntelliSense support, explore one or more clients in [Next steps](#next-steps). -- Pressing **F5** refreshes the query editor page. Any query being worked on will be lost. - -## Next steps - -You can query a database in Azure SQL Database with a variety of clients, including: - -- [Use SSMS to connect to and query Azure SQL Database or Azure SQL Managed Instance](connect-query-ssms.md). -- [Use Visual Studio Code to connect and query](connect-query-vscode.md). -- [Use Azure Data Studio to connect and query Azure SQL database](/sql/azure-data-studio/quickstart-sql-database). 
- -Learn more about Azure SQL Database in the following articles: - -- [Learn more about the Transact-SQL (T-SQL) supported in Azure SQL Database](transact-sql-tsql-differences-sql-server.md). -- [Azure SQL glossary of terms](../glossary-terms.md). -- [What is Azure SQL?](../azure-sql-iaas-vs-paas-what-is-overview.md) \ No newline at end of file diff --git a/articles/azure-sql/database/connect-query-python.md b/articles/azure-sql/database/connect-query-python.md deleted file mode 100644 index 2855376fcdace..0000000000000 --- a/articles/azure-sql/database/connect-query-python.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Use Python to query a database -description: This topic shows you how to use Python to create a program that connects to a database in Azure SQL Database and query it using Transact-SQL statements. -titleSuffix: Azure SQL Database & SQL Managed Instance -services: sql-database -ms.service: sql-database -ms.subservice: connect -ms.custom: seo-python-october2019, sqldbrb=2, devx-track-python, mode-api -ms.devlang: python -ms.topic: quickstart -author: dzsquared -ms.author: drskwier -ms.reviewer: kendralittle, mathoma -ms.date: 12/19/2020 ---- -# Quickstart: Use Python to query a database - -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi-asa.md)] - -In this quickstart, you use Python to connect to Azure SQL Database, Azure SQL Managed Instance, or Synapse SQL database and use T-SQL statements to query data. - -## Prerequisites - -To complete this quickstart, you need: - -- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?ref=microsoft.com&utm_source=microsoft.com&utm_medium=docs&utm_campaign=visualstudio). - -- A database where you will run a query. 
- - [!INCLUDE[create-configure-database](../includes/create-configure-database.md)] - -- [Python](https://python.org/downloads) 3 and related software - - - |**Action**|**macOS**|**Ubuntu**|**Windows**| - |----------|-----------|------------|---------| - |Install the ODBC driver, SQLCMD, and the Python driver for SQL Server|Use steps **1.2**, **1.3**, and **2.1** in [create Python apps using SQL Server on macOS](https://www.microsoft.com/sql-server/developer-get-started/python/mac/). This will also install Homebrew and Python. |[Configure an environment for pyodbc Python development](/sql/connect/python/pyodbc/step-1-configure-development-environment-for-pyodbc-python-development#linux)|[Configure an environment for pyodbc Python development](/sql/connect/python/pyodbc/step-1-configure-development-environment-for-pyodbc-python-development#windows).| - |Install Python and other required packages| |Use `sudo apt-get install python python-pip gcc g++ build-essential`.| | - |Further information|[Microsoft ODBC driver on macOS](/sql/connect/odbc/linux-mac/installing-the-microsoft-odbc-driver-for-sql-server) |[Microsoft ODBC driver on Linux](/sql/connect/odbc/linux-mac/installing-the-microsoft-odbc-driver-for-sql-server)|[Microsoft ODBC driver on Linux](/sql/connect/odbc/linux-mac/installing-the-microsoft-odbc-driver-for-sql-server)| - - - -To further explore Python and the database in Azure SQL Database, see [Azure SQL Database libraries for Python](/python/api/overview/azure/sql), the [pyodbc repository](https://github.com/mkleehammer/pyodbc/wiki/), and a [pyodbc sample](https://github.com/mkleehammer/pyodbc/wiki/Getting-started). - -## Create code to query your database - -1. In a text editor, create a new file named *sqltest.py*. - -1. Add the following code. Get the connection information from the prerequisites section and substitute your own values for \<server>, \<database>, \<username>, and \<password>. 
- - ```python - import pyodbc - server = '.database.windows.net' - database = '' - username = '' - password = '{}' - driver= '{ODBC Driver 17 for SQL Server}' - - with pyodbc.connect('DRIVER='+driver+';SERVER=tcp:'+server+';PORT=1433;DATABASE='+database+';UID='+username+';PWD='+ password) as conn: - with conn.cursor() as cursor: - cursor.execute("SELECT TOP 3 name, collation_name FROM sys.databases") - row = cursor.fetchone() - while row: - print (str(row[0]) + " " + str(row[1])) - row = cursor.fetchone() - ``` - - -## Run the code - -1. At a command prompt, run the following command: - - ```cmd - python sqltest.py - ``` - -1. Verify that the databases and their collations are returned, and then close the command window. - -## Next steps - -- [Design your first database in Azure SQL Database](design-first-database-tutorial.md) -- [Microsoft Python drivers for SQL Server](/sql/connect/python/python-driver-for-sql-server/) -- [Python developer center](https://azure.microsoft.com/develop/python/?v=17.23h) diff --git a/articles/azure-sql/database/connect-query-ruby.md b/articles/azure-sql/database/connect-query-ruby.md deleted file mode 100644 index 993ca253e5b1c..0000000000000 --- a/articles/azure-sql/database/connect-query-ruby.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: Use Ruby to query -description: This topic shows you how to use Ruby to create a program that connects to a database and query it using Transact-SQL statements. 
-titleSuffix: Azure SQL Database & SQL Managed Instance -services: sql-database -ms.service: sql-database -ms.subservice: connect -ms.custom: sqldbrb=2, mode-other -ms.devlang: ruby -ms.topic: quickstart -author: dzsquared -ms.author: drskwier -ms.reviewer: kendralittle, mathoma -ms.date: 05/29/2020 ---- -# Quickstart: Use Ruby to query a database in Azure SQL Database or Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -This quickstart demonstrates how to use [Ruby](https://www.ruby-lang.org) to connect to a database and query data with Transact-SQL statements. - -## Prerequisites - -To complete this quickstart, you need the following prerequisites: - -- A database. You can use one of these quickstarts to create and then configure the database: - - | Action | SQL Database | SQL Managed Instance | SQL Server on Azure VM | - |:--- |:--- |:---|:---| - | Create| [Portal](single-database-create-quickstart.md) | [Portal](../managed-instance/instance-create-quickstart.md) | [Portal](../virtual-machines/windows/sql-vm-create-portal-quickstart.md) - || [CLI](scripts/create-and-configure-database-cli.md) | [CLI](https://medium.com/azure-sqldb-managed-instance/working-with-sql-managed-instance-using-azure-cli-611795fe0b44) | - || [PowerShell](scripts/create-and-configure-database-powershell.md) | [PowerShell](../managed-instance/scripts/create-configure-managed-instance-powershell.md) | [PowerShell](../virtual-machines/windows/sql-vm-create-powershell-quickstart.md) - | Configure | [Server-level IP firewall rule](firewall-create-server-level-portal-quickstart.md)| [Connectivity from a VM](../managed-instance/connect-vm-instance-configure.md)| - |||[Connectivity from on-premises](../managed-instance/point-to-site-p2s-configure.md) | [Connect to a SQL Server instance](../virtual-machines/windows/sql-vm-create-portal-quickstart.md) - |Load data|Adventure Works loaded per quickstart|[Restore Wide World 
Importers](../managed-instance/restore-sample-database-quickstart.md) | [Restore Wide World Importers](../managed-instance/restore-sample-database-quickstart.md) | - |||Restore or import Adventure Works from a [BACPAC](database-import.md) file from [GitHub](https://github.com/Microsoft/sql-server-samples/tree/master/samples/databases/adventure-works)| Restore or import Adventure Works from a [BACPAC](database-import.md) file from [GitHub](https://github.com/Microsoft/sql-server-samples/tree/master/samples/databases/adventure-works)| - - > [!IMPORTANT] - > The scripts in this article are written to use the Adventure Works database. With a SQL Managed Instance, you must either import the Adventure Works database into an instance database or modify the scripts in this article to use the Wide World Importers database. - -- Ruby and related software for your operating system: - - - **macOS**: Install Homebrew, rbenv and ruby-build, Ruby, FreeTDS, and TinyTDS. See Steps 1.2, 1.3, 1.4, 1.5, and 2.1 in [Create Ruby apps using SQL Server on macOS](https://www.microsoft.com/sql-server/developer-get-started/ruby/mac/). - - - **Ubuntu**: Install prerequisites for Ruby, rbenv and ruby-build, Ruby, FreeTDS, and TinyTDS. See Steps 1.2, 1.3, 1.4, 1.5, and 2.1 in [Create Ruby apps using SQL Server on Ubuntu](https://www.microsoft.com/sql-server/developer-get-started/ruby/ubuntu/). - - - **Windows**: Install Ruby, Ruby Devkit, and TinyTDS. See [Configure development environment for Ruby development](/sql/connect/ruby/step-1-configure-development-environment-for-ruby-development). - -## Get server connection information - -Get the connection information you need to connect to a database in Azure SQL Database. You'll need the fully qualified server name or host name, database name, and login information for the upcoming procedures. - -1. Sign in to the [Azure portal](https://portal.azure.com/). - -2. Navigate to the **SQL databases** or **SQL Managed Instances** page. - -3. 
On the **Overview** page, review the fully qualified server name next to **Server name** for a database in Azure SQL Database or the fully qualified server name (or IP address) next to **Host** for an Azure SQL Managed Instance or SQL Server on Azure VM. To copy the server name or host name, hover over it and select the **Copy** icon. - -> [!NOTE] -> For connection information for SQL Server on Azure VM, see [Connect to a SQL Server instance](../virtual-machines/windows/sql-vm-create-portal-quickstart.md#connect-to-sql-server). - -## Create code to query your database in Azure SQL Database - -1. In a text or code editor, create a new file named *sqltest.rb*. - -1. Add the following code. Substitute the values from your database in Azure SQL Database for ``, ``, ``, and ``. - - >[!IMPORTANT] - >The code in this example uses the sample AdventureWorksLT data, which you can choose as source when creating your database. If your database has different data, use tables from your own database in the SELECT query. - - ```ruby - require 'tiny_tds' - server = '.database.windows.net' - database = '' - username = '' - password = '' - client = TinyTds::Client.new username: username, password: password, - host: server, port: 1433, database: database, azure: true - - puts "Reading data from table" - tsql = "SELECT TOP 20 pc.Name as CategoryName, p.name as ProductName - FROM [SalesLT].[ProductCategory] pc - JOIN [SalesLT].[Product] p - ON pc.productcategoryid = p.productcategoryid" - result = client.execute(tsql) - result.each do |row| - puts row - end - ``` - -## Run the code - -1. At a command prompt, run the following command: - - ```bash - ruby sqltest.rb - ``` - -1. Verify that the top 20 Category/Product rows from your database are returned. 
- -## Next steps -- [Design your first database in Azure SQL Database](design-first-database-tutorial.md) -- [GitHub repository for TinyTDS](https://github.com/rails-sqlserver/tiny_tds) -- [Report issues or ask questions about TinyTDS](https://github.com/rails-sqlserver/tiny_tds/issues) -- [Ruby driver for SQL Server](/sql/connect/ruby/ruby-driver-for-sql-server/) diff --git a/articles/azure-sql/database/connect-query-ssms.md b/articles/azure-sql/database/connect-query-ssms.md deleted file mode 100644 index 4a78a157c97f6..0000000000000 --- a/articles/azure-sql/database/connect-query-ssms.md +++ /dev/null @@ -1,191 +0,0 @@ ---- -title: "SSMS: Connect and query data" -titleSuffix: Azure SQL Database & SQL Managed Instance -description: Learn how to connect to Azure SQL Database or SQL Managed Instance using SQL Server Management Studio (SSMS). Then run Transact-SQL (T-SQL) statements to query and edit data. -keywords: connect to sql database,sql server management studio -services: sql-database -ms.service: sql-database -ms.subservice: connect -ms.custom: sqldbrb=2, mode-other -ms.devlang: -ms.topic: quickstart -author: dzsquared -ms.author: drskwier -ms.reviewer: kendralittle, mathoma -ms.date: 09/28/2020 ---- -# Quickstart: Use SSMS to connect to and query Azure SQL Database or Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -In this quickstart, you'll learn how to use SQL Server Management Studio (SSMS) to connect to Azure SQL Database or Azure SQL Managed Instance and run some queries. - -## Prerequisites - -Completing this quickstart requires the following items: - -- [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms/). - -- A database in Azure SQL Database. 
You can use one of these quickstarts to create and then configure a database in Azure SQL Database: - - | Action | SQL Database | SQL Managed Instance | SQL Server on Azure VM | - |:--- |:--- |:---|:---| - | Create| [Portal](single-database-create-quickstart.md) | [Portal](../managed-instance/instance-create-quickstart.md) | [Portal](../virtual-machines/windows/sql-vm-create-portal-quickstart.md) - || [CLI](scripts/create-and-configure-database-cli.md) | [CLI](https://medium.com/azure-sqldb-managed-instance/working-with-sql-managed-instance-using-azure-cli-611795fe0b44) | - || [PowerShell](scripts/create-and-configure-database-powershell.md) | [PowerShell](../managed-instance/scripts/create-configure-managed-instance-powershell.md) | [PowerShell](../virtual-machines/windows/sql-vm-create-powershell-quickstart.md) - | Configure | [Server-level IP firewall rule](firewall-create-server-level-portal-quickstart.md)| [Connectivity from a VM](../managed-instance/connect-vm-instance-configure.md)| - |||[Connectivity from on-site](../managed-instance/point-to-site-p2s-configure.md) | [Connect to SQL Server](../virtual-machines/windows/sql-vm-create-portal-quickstart.md) - |Load data|Adventure Works loaded per quickstart|[Restore Wide World Importers](../managed-instance/restore-sample-database-quickstart.md) | [Restore Wide World Importers](../managed-instance/restore-sample-database-quickstart.md) | - |||Restore or import Adventure Works from [BACPAC](database-import.md) file from [GitHub](https://github.com/Microsoft/sql-server-samples/tree/master/samples/databases/adventure-works)| Restore or import Adventure Works from [BACPAC](database-import.md) file from [GitHub](https://github.com/Microsoft/sql-server-samples/tree/master/samples/databases/adventure-works)| - - - > [!IMPORTANT] - > The scripts in this article are written to use the Adventure Works database. 
With a managed instance, you must either import the Adventure Works database into an instance database or modify the scripts in this article to use the Wide World Importers database. - -If you simply want to run some ad-hoc queries without installing SSMS, see [Quickstart: Use the Azure portal's query editor to query a database in Azure SQL Database](connect-query-portal.md). - -## Get server connection information - -Get the connection information you need to connect to your database. You'll need the fully qualified [server](logical-servers.md) name or host name, database name, and login information to complete this quickstart. - -1. Sign in to the [Azure portal](https://portal.azure.com/). - -2. Navigate to the **database** or **managed instance** you want to query. - -3. On the **Overview** page, review the fully qualified server name next to **Server name** for your database in SQL Database or the fully qualified server name (or IP address) next to **Host** for your managed instance in SQL Managed Instance or your SQL Server instance on your VM. To copy the server name or host name, hover over it and select the **Copy** icon. - -> [!NOTE] -> For connection information for SQL Server on Azure VM, see [Connect to SQL Server](../virtual-machines/windows/sql-vm-create-portal-quickstart.md#connect-to-sql-server) - -## Connect to your database - -[!INCLUDE[ssms-connect-azure-ad](../includes/ssms-connect-azure-ad.md)] - -In SSMS, connect to your server. - -> [!IMPORTANT] -> A server listens on port 1433. To connect to a server from behind a corporate firewall, the firewall must have this port open. - -1. Open SSMS. - -2. The **Connect to Server** dialog box appears. Enter the following information: - - | Setting     | Suggested value | Description | - | ------------ | ------------------ | ----------- | - | **Server type** | Database engine | Required value. | - | **Server name** | The fully qualified server name | Something like: **servername.database.windows.net**. 
| - | **Authentication** | SQL Server Authentication | This tutorial uses SQL Authentication. | - | **Login** | Server admin account user ID | The user ID from the server admin account used to create the server. | - | **Password** | Server admin account password | The password from the server admin account used to create the server. | - - ![connect to server](./media/connect-query-ssms/connect.png) - -> [!NOTE] -> This tutorial utilizes SQL Server Authentication. - -3. Select **Options** in the **Connect to Server** dialog box. In the **Connect to database** drop-down menu, select **mySampleDatabase**. Completing the quickstart in the [Prerequisites section](#prerequisites) creates an AdventureWorksLT database named mySampleDatabase. If your working copy of the AdventureWorks database has a different name than mySampleDatabase, then select it instead. - - ![connect to db on server](./media/connect-query-ssms/options-connect-to-db.png) - -4. Select **Connect**. The Object Explorer window opens. - -5. To view the database's objects, expand **Databases** and then expand your database node. - - ![mySampleDatabase objects](./media/connect-query-ssms/connected.png) - -## Query data - -Run this [SELECT](/sql/t-sql/queries/select-transact-sql/) Transact-SQL code to query for the top 20 products by category. - -1. In Object Explorer, right-click **mySampleDatabase** and select **New Query**. A new query window connected to your database opens. - -2. In the query window, paste the following SQL query: - - ```sql - SELECT pc.Name as CategoryName, p.name as ProductName - FROM [SalesLT].[ProductCategory] pc - JOIN [SalesLT].[Product] p - ON pc.productcategoryid = p.productcategoryid; - ``` - -3. On the toolbar, select **Execute** to run the query and retrieve data from the `Product` and `ProductCategory` tables. 
- - ![query to retrieve data from table Product and ProductCategory](./media/connect-query-ssms/query2.png) - -### Insert data - -Run this [INSERT](/sql/t-sql/statements/insert-transact-sql/) Transact-SQL code to create a new product in the `SalesLT.Product` table. - -1. Replace the previous query with this one. - - ```sql - INSERT INTO [SalesLT].[Product] - ( [Name] - , [ProductNumber] - , [Color] - , [ProductCategoryID] - , [StandardCost] - , [ListPrice] - , [SellStartDate] ) - VALUES - ('myNewProduct' - ,123456789 - ,'NewColor' - ,1 - ,100 - ,100 - ,GETDATE() ); - ``` - -2. Select **Execute** to insert a new row in the `Product` table. The **Messages** pane displays **(1 row affected)**. - -#### View the result - -1. Replace the previous query with this one. - - ```sql - SELECT * FROM [SalesLT].[Product] - WHERE Name='myNewProduct' - ``` - -2. Select **Execute**. The following result appears. - - ![result of Product table query](./media/connect-query-ssms/result.png) - -### Update data - -Run this [UPDATE](/sql/t-sql/queries/update-transact-sql) Transact-SQL code to modify your new product. - -1. Replace the previous query with this one that returns the new record created previously: - - ```sql - UPDATE [SalesLT].[Product] - SET [ListPrice] = 125 - WHERE Name = 'myNewProduct'; - ``` - -2. Select **Execute** to update the specified row in the `Product` table. The **Messages** pane displays **(1 row affected)**. - -### Delete data - -Run this [DELETE](/sql/t-sql/statements/delete-transact-sql/) Transact-SQL code to remove your new product. - -1. Replace the previous query with this one. - - ```sql - DELETE FROM [SalesLT].[Product] - WHERE Name = 'myNewProduct'; - ``` - -2. Select **Execute** to delete the specified row in the `Product` table. The **Messages** pane displays **(1 row affected)**. - -## Next steps - -- For information about SSMS, see [SQL Server Management Studio](/sql/ssms/sql-server-management-studio-ssms/). 
-- To connect and query using the Azure portal, see [Connect and query with the Azure portal SQL Query editor](connect-query-portal.md). -- To connect and query using Visual Studio Code, see [Connect and query with Visual Studio Code](connect-query-vscode.md). -- To connect and query using .NET, see [Connect and query with .NET](connect-query-dotnet-visual-studio.md). -- To connect and query using PHP, see [Connect and query with PHP](connect-query-php.md). -- To connect and query using Node.js, see [Connect and query with Node.js](connect-query-nodejs.md). -- To connect and query using Java, see [Connect and query with Java](connect-query-java.md). -- To connect and query using Python, see [Connect and query with Python](connect-query-python.md). -- To connect and query using Ruby, see [Connect and query with Ruby](connect-query-ruby.md). diff --git a/articles/azure-sql/database/connect-query-vscode.md b/articles/azure-sql/database/connect-query-vscode.md deleted file mode 100644 index 4eefab41f8763..0000000000000 --- a/articles/azure-sql/database/connect-query-vscode.md +++ /dev/null @@ -1,192 +0,0 @@ ---- -title: Use Visual Studio Code to connect and query -titleSuffix: Azure SQL Database & SQL Managed Instance -description: Learn how to connect to Azure SQL Database or SQL Managed Instance on Azure by using Visual Studio Code. Then, run Transact-SQL (T-SQL) statements to query and edit data. -keywords: connect to sql database -services: sql-database -ms.service: sql-database -ms.subservice: connect -ms.custom: sqldbrb=2, mode-ui -ms.devlang: -ms.topic: quickstart -author: dzsquared -ms.author: drskwier -ms.reviewer: kendralittle, mathoma -ms.date: 05/29/2020 ---- -# Quickstart: Use Visual Studio Code to connect and query -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -[Visual Studio Code](https://code.visualstudio.com/docs) is a graphical code editor for Linux, macOS, and Windows. 
It supports extensions, including the [mssql extension](https://aka.ms/mssql-marketplace) for querying a SQL Server instance, Azure SQL Database, an Azure SQL Managed Instance, and a database in Azure Synapse Analytics. In this quickstart, you'll use Visual Studio Code to connect to Azure SQL Database or Azure SQL Managed Instance and then run Transact-SQL statements to query, insert, update, and delete data. - -## Prerequisites - -- A database in Azure SQL Database or Azure SQL Managed Instance. You can use one of these quickstarts to create and then configure a database in Azure SQL Database: - - | Action | Azure SQL Database | Azure SQL Managed Instance | - |:--- |:--- |:---| - | Create| [Portal](single-database-create-quickstart.md) | [Portal](../managed-instance/instance-create-quickstart.md) | - || [CLI](scripts/create-and-configure-database-cli.md) | [CLI](https://medium.com/azure-sqldb-managed-instance/working-with-sql-managed-instance-using-azure-cli-611795fe0b44) | - || [PowerShell](scripts/create-and-configure-database-powershell.md) | [PowerShell](../managed-instance/scripts/create-configure-managed-instance-powershell.md) | - | Configure | [Server-level IP firewall rule](firewall-create-server-level-portal-quickstart.md))| [Connectivity from a virtual machine (VM)](../managed-instance/connect-vm-instance-configure.md)| - |||[Connectivity from on-premises](../managed-instance/point-to-site-p2s-configure.md) - |Load data|Adventure Works loaded per quickstart|[Restore Wide World Importers](../managed-instance/restore-sample-database-quickstart.md) - |||Restore or import Adventure Works from a [BACPAC](database-import.md) file from [GitHub](https://github.com/Microsoft/sql-server-samples/tree/master/samples/databases/adventure-works)| - - > [!IMPORTANT] - > The scripts in this article are written to use the Adventure Works database. 
With a SQL Managed Instance, you must either import the Adventure Works database into an instance database or modify the scripts in this article to use the Wide World Importers database. - -## Install Visual Studio Code - -Make sure you have installed the latest [Visual Studio Code](https://code.visualstudio.com/Download) and loaded the [mssql extension](https://aka.ms/mssql-marketplace). For guidance on installing the mssql extension, see [Install Visual Studio Code](/sql/linux/sql-server-linux-develop-use-vscode#install-and-start-visual-studio-code) and [mssql for Visual Studio Code -](https://marketplace.visualstudio.com/items?itemName=ms-mssql.mssql). - -## Configure Visual Studio Code - -### **macOS** - -For macOS, you need to install OpenSSL, which is a prerequisite for .NET Core that mssql extension uses. Open your terminal and enter the following commands to install **brew** and **OpenSSL**. - -```bash -ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" -brew update -brew install openssl -mkdir -p /usr/local/lib -ln -s /usr/local/opt/openssl/lib/libcrypto.1.0.0.dylib /usr/local/lib/ -ln -s /usr/local/opt/openssl/lib/libssl.1.0.0.dylib /usr/local/lib/ -``` - -### **Linux (Ubuntu)** - -No special configuration needed. - -### **Windows** - -No special configuration needed. - -## Get server connection information - -Get the connection information you need to connect to Azure SQL Database. You'll need the fully qualified server name or host name, database name, and login information for the upcoming procedures. - -1. Sign in to the [Azure portal](https://portal.azure.com/). - -2. Navigate to the **SQL databases** or **SQL Managed Instances** page. - -3. On the **Overview** page, review the fully qualified server name next to **Server name** for SQL Database or the fully qualified server name next to **Host** for a SQL Managed Instance. To copy the server name or host name, hover over it and select the **Copy** icon. 
- -## Set language mode to SQL - -In Visual Studio Code, set the language mode to **SQL** to enable mssql commands and T-SQL IntelliSense. - -1. Open a new Visual Studio Code window. - -2. Press **Ctrl**+**N**. A new plain text file opens. - -3. Select **Plain Text** in the status bar's lower right-hand corner. - -4. In the **Select language mode** drop-down menu that opens, select **SQL**. - -## Connect to your database - -Use Visual Studio Code to establish a connection to your server. - -> [!IMPORTANT] -> Before continuing, make sure that you have your server and sign in information ready. Once you begin entering the connection profile information, if you change your focus from Visual Studio Code, you have to restart creating the profile. - -1. In Visual Studio Code, press **Ctrl+Shift+P** (or **F1**) to open the Command Palette. - -2. Select **MS SQL:Connect** and choose **Enter**. - -3. Select **Create Connection Profile**. - -4. Follow the prompts to specify the new profile's connection properties. After specifying each value, choose **Enter** to continue. - - | Property       | Suggested value | Description | - | ------------ | ------------------ | ------------------------------------------------- | - | **Server name** | The fully qualified server name | Something like: **mynewserver20170313.database.windows.net**. | - | **Database name** | mySampleDatabase | The database to connect to. | - | **Authentication** | SQL Login| This tutorial uses SQL Authentication. | - | **User name** | User name | The user name of the server admin account used to create the server. | - | **Password (SQL Login)** | Password | The password of the server admin account used to create the server. | - | **Save Password?** | Yes or No | Select **Yes** if you do not want to enter the password each time. | - | **Enter a name for this profile** | A profile name, such as **mySampleProfile** | A saved profile speeds your connection on subsequent logins. 
| - - If successful, a notification appears saying your profile is created and connected. - -## Query data - -Run the following [SELECT](/sql/t-sql/queries/select-transact-sql) Transact-SQL statement to query for the top 20 products by category. - -1. In the editor window, paste the following SQL query. - - ```sql - SELECT pc.Name as CategoryName, p.name as ProductName - FROM [SalesLT].[ProductCategory] pc - JOIN [SalesLT].[Product] p - ON pc.productcategoryid = p.productcategoryid; - ``` - -2. Press **Ctrl**+**Shift**+**E** to run the query and display results from the `Product` and `ProductCategory` tables. - - ![Query to retrieve data from 2 tables](./media/connect-query-vscode/query.png) - -## Insert data - -Run the following [INSERT](/sql/t-sql/statements/insert-transact-sql) Transact-SQL statement to add a new product into the `SalesLT.Product` table. - -1. Replace the previous query with this one. - - ```sql - INSERT INTO [SalesLT].[Product] - ( [Name] - , [ProductNumber] - , [Color] - , [ProductCategoryID] - , [StandardCost] - , [ListPrice] - , [SellStartDate] - ) - VALUES - ('myNewProduct' - ,123456789 - ,'NewColor' - ,1 - ,100 - ,100 - ,GETDATE() ); - ``` - -2. Press **Ctrl**+**Shift**+**E** to insert a new row in the `Product` table. - -## Update data - -Run the following [UPDATE](/sql/t-sql/queries/update-transact-sql) Transact-SQL statement to update the added product. - -1. Replace the previous query with this one: - - ```sql - UPDATE [SalesLT].[Product] - SET [ListPrice] = 125 - WHERE Name = 'myNewProduct'; - ``` - -2. Press **Ctrl**+**Shift**+**E** to update the specified row in the `Product` table. - -## Delete data - -Run the following [DELETE](/sql/t-sql/statements/delete-transact-sql) Transact-SQL statement to remove the new product. - -1. Replace the previous query with this one: - - ```sql - DELETE FROM [SalesLT].[Product] - WHERE Name = 'myNewProduct'; - ``` - -2. 
Press **Ctrl**+**Shift**+**E** to delete the specified row in the `Product` table. - -## Next steps - -- To connect and query using SQL Server Management Studio, see [Quickstart: Use SQL Server Management Studio to connect to a database in Azure SQL Database and query data](connect-query-ssms.md). -- To connect and query using the Azure portal, see [Quickstart: Use the SQL Query editor in the Azure portal to connect and query data](connect-query-portal.md). -- For an MSDN magazine article on using Visual Studio Code, see [Create a database IDE with MSSQL extension blog post](/archive/msdn-magazine/2017/june/data-points-visual-studio-code-create-a-database-ide-with-mssql-extension). diff --git a/articles/azure-sql/database/connectivity-architecture.md b/articles/azure-sql/database/connectivity-architecture.md deleted file mode 100644 index f8d4c474318f8..0000000000000 --- a/articles/azure-sql/database/connectivity-architecture.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: Azure SQL Database connectivity architecture -description: This article explains the Azure SQL Database connectivity architecture for database connections from within Azure or from outside of Azure. -services: sql-database -ms.service: sql-database -ms.subservice: connect -ms.custom: fasttrack-edit, sqldbrb=1 -titleSuffix: Azure SQL Database and Azure Synapse Analytics -ms.devlang: -ms.topic: conceptual -author: rohitnayakmsft -ms.author: rohitna -ms.reviewer: kendralittle, mathoma, vanto -ms.date: 03/18/2022 ---- -# Azure SQL Database and Azure Synapse Analytics connectivity architecture -[!INCLUDE[appliesto-sqldb-asa](../includes/appliesto-sqldb-asa.md)] - -This article explains architecture of various components that direct network traffic to a server in Azure SQL Database or Azure Synapse Analytics. It also explains different connection policies and how it impacts clients connecting from within Azure and clients connecting from outside of Azure. 
- -This article does *not* apply to **Azure SQL Managed Instance**. Refer to [Connectivity architecture for a managed instance](../managed-instance/connectivity-architecture-overview.md). - -## Connectivity architecture - -The following diagram provides a high-level overview of the connectivity architecture. - -![Diagram that shows a high-level overview of the connectivity architecture.](./media/connectivity-architecture/connectivity-overview.png) - -The following steps describe how a connection is established to Azure SQL Database: - -- Clients connect to the gateway, that has a public IP address and listens on port 1433. -- The gateway, depending on the effective connection policy, redirects or proxies the traffic to the right database cluster. -- Inside the database cluster, traffic is forwarded to the appropriate database. - -## Connection policy - -Servers in SQL Database and Azure Synapse support the following three options for the server's connection policy setting: - -- **Redirect (recommended):** Clients establish connections directly to the node hosting the database, leading to reduced latency and improved throughput. For connections to use this mode, clients need to: - - Allow outbound communication from the client to all Azure SQL IP addresses in the region on ports in the range of 11000 to 11999. Use the Service Tags for SQL to make this easier to manage. - - Allow outbound communication from the client to Azure SQL Database gateway IP addresses on port 1433. - -- **Proxy:** In this mode, all connections are proxied via the Azure SQL Database gateways, leading to increased latency and reduced throughput. For connections to use this mode, clients need to allow outbound communication from the client to Azure SQL Database gateway IP addresses on port 1433. - -- **Default:** This is the connection policy in effect on all servers after creation unless you explicitly alter the connection policy to either `Proxy` or `Redirect`. 
The default policy is `Redirect` for all client connections originating inside of Azure (for example, from an Azure Virtual Machine) and `Proxy` for all client connections originating outside (for example, connections from your local workstation). - -We highly recommend the `Redirect` connection policy over the `Proxy` connection policy for the lowest latency and highest throughput. However, you will need to meet the additional requirements for allowing network traffic as outlined above. If the client is an Azure Virtual Machine, you can accomplish this using Network Security Groups (NSG) with [service tags](../../virtual-network/network-security-groups-overview.md#service-tags). If the client is connecting from a workstation on-premises then you may need to work with your network admin to allow network traffic through your corporate firewall. - -## Connectivity from within Azure - -If you are connecting from within Azure your connections have a connection policy of `Redirect` by default. A policy of `Redirect` means that after the TCP session is established to Azure SQL Database, the client session is then redirected to the right database cluster with a change to the destination virtual IP from that of the Azure SQL Database gateway to that of the cluster. Thereafter, all subsequent packets flow directly to the cluster, bypassing the Azure SQL Database gateway. The following diagram illustrates this traffic flow. - -![architecture overview](./media/connectivity-architecture/connectivity-azure.png) - -## Connectivity from outside of Azure - -If you are connecting from outside Azure, your connections have a connection policy of `Proxy` by default. A policy of `Proxy` means that the TCP session is established via the Azure SQL Database gateway and all subsequent packets flow via the gateway. The following diagram illustrates this traffic flow. 
- -![Diagram that shows how the TCP session is established via the Azure SQL Database gateway and all subsequent packets flow via the gateway.](./media/connectivity-architecture/connectivity-onprem.png) - -> [!IMPORTANT] -> Additionally open TCP ports 1434 and 14000-14999 to enable [Connecting with DAC](/sql/database-engine/configure-windows/diagnostic-connection-for-database-administrators#connecting-with-dac) - -## Gateway IP addresses - -The table below lists the individual Gateway IP addresses and also Gateway IP address ranges per region. - -Periodically, we will retire Gateways using old hardware and migrate the traffic to new Gateways as per the process outlined at [Azure SQL Database traffic migration to newer Gateways](gateway-migration.md). We strongly encourage customers to use the **Gateway IP address subnets** in order to not be impacted by this activity in a region. - -> [!IMPORTANT] -> Logins for SQL Database or Azure Synapse can land on **any of the Gateways in a region**. For consistent connectivity to SQL Database or Azure Synapse, allow network traffic to and from **ALL** Gateway IP addresses and Gateway IP address subnets for the region. 
- -| Region name | Gateway IP addresses | Gateway IP address subnets | -| --- | --- | --- | -| Australia Central | 20.36.105.0, 20.36.104.6, 20.36.104.7 | 20.36.105.32/29 | -| Australia Central 2 | 20.36.113.0, 20.36.112.6 | 20.36.113.32/29 | -| Australia East | 13.75.149.87, 40.79.161.1, 13.70.112.9 | 13.70.112.32/29, 40.79.160.32/29, 40.79.168.32/29 | -| Australia Southeast | 191.239.192.109, 13.73.109.251, 13.77.48.10, 13.77.49.32 | 13.77.49.32/29 | -| Brazil South | 191.233.200.14, 191.234.144.16, 191.234.152.3 | 191.233.200.32/29, 191.234.144.32/29 | -| Canada Central | 40.85.224.249, 52.246.152.0, 20.38.144.1 | 13.71.168.32/29, 20.38.144.32/29, 52.246.152.32/29 | -| Canada East | 40.86.226.166, 52.242.30.154, 40.69.105.9 , 40.69.105.10 | 40.69.105.32/29| -| Central US | 13.67.215.62, 52.182.137.15, 104.208.21.1, 13.89.169.20 | 104.208.21.192/29, 13.89.168.192/29, 52.182.136.192/29 | -| China East | 139.219.130.35 | 52.130.112.136/29 | -| China East 2 | 40.73.82.1 | 52.130.120.88/29 | -| China North | 139.219.15.17 | 52.130.128.88/29 | -| China North 2 | 40.73.50.0 | 52.130.40.64/29 | -| East Asia | 52.175.33.150, 13.75.32.4, 13.75.32.14, 20.205.77.200, 20.205.83.224 | 13.75.32.192/29, 13.75.33.192/29 | -| East US | 40.121.158.30, 40.79.153.12, 40.78.225.32 | 20.42.65.64/29, 20.42.73.0/29, 52.168.116.64/29 | -| East US 2 | 40.79.84.180, 52.177.185.181, 52.167.104.0, 191.239.224.107, 104.208.150.3, 40.70.144.193 | 104.208.150.192/29, 40.70.144.192/29, 52.167.104.192/29 | -| France Central | 40.79.137.0, 40.79.129.1, 40.79.137.8, 40.79.145.12 | 40.79.136.32/29, 40.79.144.32/29 | -| France South | 40.79.177.0, 40.79.177.10 ,40.79.177.12 | 40.79.176.40/29, 40.79.177.32/29 | -| Germany West Central | 51.116.240.0, 51.116.248.0, 51.116.152.0 | 51.116.152.32/29, 51.116.240.32/29, 51.116.248.32/29 | -| Central India | 104.211.96.159, 104.211.86.30 , 104.211.86.31, 40.80.48.32, 20.192.96.32 | 104.211.86.32/29, 20.192.96.32/29 | -| South India | 104.211.224.146 | 
40.78.192.32/29, 40.78.193.32/29 | -| West India | 104.211.160.80, 104.211.144.4 | 104.211.144.32/29, 104.211.145.32/29 | -| Japan East | 13.78.61.196, 40.79.184.8, 13.78.106.224, 40.79.192.5, 13.78.104.32, 40.79.184.32 | 13.78.104.32/29, 40.79.184.32/29, 40.79.192.32/29 | -| Japan West | 104.214.148.156, 40.74.100.192, 40.74.97.10 | 40.74.96.32/29 | -| Korea Central | 52.231.32.42, 52.231.17.22 ,52.231.17.23, 20.44.24.32, 20.194.64.33 | 20.194.64.32/29,20.44.24.32/29, 52.231.16.32/29 | -| Korea South | 52.231.200.86, 52.231.151.96 | | -| North Central US | 23.96.178.199, 23.98.55.75, 52.162.104.33, 52.162.105.9 | 52.162.105.192/29 | -| North Europe | 40.113.93.91, 52.138.224.1, 13.74.104.113 | 13.69.233.136/29, 13.74.105.192/29, 52.138.229.72/29 | -| Norway East | 51.120.96.0, 51.120.96.33, 51.120.104.32, 51.120.208.32 | 51.120.96.32/29 | -| Norway West | 51.120.216.0 | 51.120.217.32/29 | -| South Africa North | 102.133.152.0, 102.133.120.2, 102.133.152.32 | 102.133.120.32/29, 102.133.152.32/29, 102.133.248.32/29| -| South Africa West | 102.133.24.0 | 102.133.25.32/29 | -| South Central US | 13.66.62.124, 104.214.16.32, 20.45.121.1, 20.49.88.1 | 20.45.121.32/29, 20.49.88.32/29, 20.49.89.32/29, 40.124.64.136/29 | -| South East Asia | 104.43.15.0, 40.78.232.3, 13.67.16.193 | 13.67.16.192/29, 23.98.80.192/29, 40.78.232.192/29| -| Switzerland North | 51.107.56.0, 51.107.57.0 | 51.107.56.32/29 | -| Switzerland West | 51.107.152.0, 51.107.153.0 | 51.107.153.32/29 | -| UAE Central | 20.37.72.64 | 20.37.72.96/29, 20.37.73.96/29 | -| UAE North | 65.52.248.0 | 40.120.72.32/29, 65.52.248.32/29 | -| UK South | 51.140.184.11, 51.105.64.0, 51.140.144.36, 51.105.72.32 | 51.105.64.32/29, 51.105.72.32/29, 51.140.144.32/29 | -| UK West | 51.141.8.11, 51.140.208.96, 51.140.208.97 | 51.140.208.96/29, 51.140.209.32/29 | -| West Central US | 13.78.145.25, 13.78.248.43, 13.71.193.32, 13.71.193.33 | 13.71.193.32/29 | -| West Europe | 40.68.37.158, 104.40.168.105, 52.236.184.163 | 
104.40.169.32/29, 13.69.112.168/29, 52.236.184.32/29 | -| West US | 104.42.238.205, 13.86.216.196 | 13.86.217.224/29 | -| West US 2 | 13.66.226.202, 40.78.240.8, 40.78.248.10 | 13.66.136.192/29, 40.78.240.192/29, 40.78.248.192/29 | -| West US 3 | 20.150.168.0, 20.150.184.2 | 20.150.168.32/29, 20.150.176.32/29, 20.150.184.32/29 | - -## Next steps - -- For information on how to change the Azure SQL Database connection policy for a server, see [conn-policy](/cli/azure/sql/server/conn-policy). -- For information about Azure SQL Database connection behavior for clients that use ADO.NET 4.5 or a later version, see [Ports beyond 1433 for ADO.NET 4.5](adonet-v12-develop-direct-route-ports.md). -- For general application development overview information, see [SQL Database Application Development Overview](develop-overview.md). diff --git a/articles/azure-sql/database/connectivity-settings.md b/articles/azure-sql/database/connectivity-settings.md deleted file mode 100644 index f9635a95d4417..0000000000000 --- a/articles/azure-sql/database/connectivity-settings.md +++ /dev/null @@ -1,216 +0,0 @@ ---- -title: Connectivity settings for Azure SQL Database and Azure Synapse Analytics -description: This article explains the Transport Layer Security (TLS) version choice and the Proxy versus Redirect settings for Azure SQL Database and Azure Synapse Analytics. 
-services: sql-database -ms.service: sql-database -ms.subservice: connect -titleSuffix: Azure SQL Database and Azure Synapse Analytics -ms.topic: how-to -author: rohitnayakmsft -ms.author: rohitna -ms.reviewer: kendralittle, mathoma, vanto -ms.date: 08/03/2021 -ms.custom: devx-track-azurepowershell, devx-track-azurecli -ms.devlang: azurecli ---- - -# Azure SQL connectivity settings -[!INCLUDE[appliesto-sqldb-asa](../includes/appliesto-sqldb-asa-formerly-sqldw.md)] - -This article introduces settings that control connectivity to the server for Azure SQL Database and [dedicated SQL pool (formerly SQL DW)](../../synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-what-is.md) in Azure Synapse Analytics. These settings apply to all SQL Database and dedicated SQL pool (formerly SQL DW) databases associated with the server. - -> [!IMPORTANT] -> This article doesn't apply to Azure SQL Managed Instance. This article also does not apply to dedicated SQL pools in Azure Synapse Analytics workspaces. See [Azure Synapse Analytics IP firewall rules](../../synapse-analytics/security/synapse-workspace-ip-firewall.md) for guidance on how to configure IP firewall rules for Azure Synapse Analytics with workspaces. - -The connectivity settings are accessible from the **Firewalls and virtual networks** screen as shown in the following screenshot: - -:::image type="content" source="media/single-database-create-quickstart/manage-connectivity-settings.png" alt-text="Screenshot of the Firewalls and virtual networks settings in Azure portal for SQL server"::: - -> [!NOTE] -> These settings take effect immediately after they're applied. Your customers might experience connection loss if they don't meet the requirements for each setting. 
 - -## Deny public network access - -The default for this setting is **No** so that customers can connect by using either public endpoints (with IP-based server-level firewall rules or with virtual-network firewall rules) or private endpoints (by using Azure Private Link), as outlined in the [network access overview](network-access-controls-overview.md). - -When **Deny public network access** is set to **Yes**, only connections via private endpoints are allowed. All connections via public endpoints will be denied with an error message similar to: - -```output -Error 47073 -An instance-specific error occurred while establishing a connection to SQL Server. -The public network interface on this server is not accessible. -To connect to this server, use the Private Endpoint from inside your virtual network. -``` - -When **Deny public network access** is set to **Yes**, any attempts to add, remove or edit any firewall rules will be denied with an error message similar to: - -```output -Error 42101 -Unable to create or modify firewall rules when public network interface for the server is disabled. -To manage server or database level firewall rules, please enable the public network interface. -``` -Ensure that **Deny public network access** is set to **No** to be able to add, remove or edit any firewall rules for Azure SQL. - -## Change public network access via PowerShell - -> [!IMPORTANT] -> Azure SQL Database still supports the PowerShell Azure Resource Manager module, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. The following script requires the [Azure PowerShell module](/powershell/azure/install-az-ps). 
- -The following PowerShell script shows how to `Get` and `Set` the **Public Network Access** property at the server level: - -```powershell -# Get the Public Network Access property -(Get-AzSqlServer -ServerName sql-server-name -ResourceGroupName sql-server-group).PublicNetworkAccess - -# Update Public Network Access to Disabled -$SecureString = ConvertTo-SecureString "password" -AsPlainText -Force - -Set-AzSqlServer -ServerName sql-server-name -ResourceGroupName sql-server-group -SqlAdministratorPassword $SecureString -PublicNetworkAccess "Disabled" -``` - -## Change public network access via CLI - -> [!IMPORTANT] -> All scripts in this section require the [Azure CLI](/cli/azure/install-azure-cli). - -### Azure CLI in a Bash shell - -The following CLI script shows how to change the **Public Network Access** setting in a Bash shell: - -```azurecli-interactive - -# Get current setting for Public Network Access -az sql server show -n sql-server-name -g sql-server-group --query "publicNetworkAccess" - -# Update setting for Public Network Access -az sql server update -n sql-server-name -g sql-server-group --set publicNetworkAccess="Disabled" -``` - -## Minimal TLS version - -The minimal [Transport Layer Security (TLS)](https://support.microsoft.com/help/3135244/tls-1-2-support-for-microsoft-sql-server) version setting allows customers to choose which version of TLS their SQL database uses. - -Currently, we support TLS 1.0, 1.1, and 1.2. Setting a minimal TLS version ensures that newer TLS versions are supported. For example, choosing a TLS version 1.1 means only connections with TLS 1.1 and 1.2 are accepted, and connections with TLS 1.0 are rejected. After you test to confirm that your applications support it, we recommend setting the minimal TLS version to 1.2. This version includes fixes for vulnerabilities in previous versions and is the highest version of TLS that's supported in Azure SQL Database. 
- -> [!IMPORTANT] -> The default for the minimal TLS version is to allow all versions. After you enforce a version of TLS, it's not possible to revert to the default. - -For customers with applications that rely on older versions of TLS, we recommend setting the minimal TLS version according to the requirements of your applications. For customers that rely on applications to connect by using an unencrypted connection, we recommend not setting any minimal TLS version. - -For more information, see [TLS considerations for SQL Database connectivity](connect-query-content-reference-guide.md#tls-considerations-for-database-connectivity). - -After you set the minimal TLS version, login attempts from customers who are using a TLS version lower than the minimal TLS version of the server will fail with the following error: - -```output -Error 47072 -Login failed with invalid TLS version -``` - -## Set the minimal TLS version in Azure portal - -In the [Azure portal](https://portal.azure.com), go to your **SQL server** resource. Under the **Security** settings, select **Firewalls and virtual networks**. Select the **Minimum TLS Version** desired for all SQL Databases associated with the server, and select **Save**. - -## Set the minimal TLS version via PowerShell - -> [!IMPORTANT] -> Azure SQL Database still supports the PowerShell Azure Resource Manager module, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. The following script requires the [Azure PowerShell module](/powershell/azure/install-az-ps). 
- -The following PowerShell script shows how to `Get` and `Set` the **Minimal TLS Version** property at the logical server level: - -```powershell -# Get the Minimal TLS Version property -(Get-AzSqlServer -ServerName sql-server-name -ResourceGroupName sql-server-group).MinimalTlsVersion - -# Update Minimal TLS Version to 1.2 -$SecureString = ConvertTo-SecureString "password" -AsPlainText -Force - -Set-AzSqlServer -ServerName sql-server-name -ResourceGroupName sql-server-group -SqlAdministratorPassword $SecureString -MinimalTlsVersion "1.2" -``` - -## Set the minimal TLS version via the Azure CLI - -> [!IMPORTANT] -> All scripts in this section require the [Azure CLI](/cli/azure/install-azure-cli). - -### Azure CLI in a Bash shell - -The following CLI script shows how to change the **Minimal TLS Version** setting in a Bash shell: - -```azurecli-interactive -# Get current setting for Minimal TLS Version -az sql server show -n sql-server-name -g sql-server-group --query "minimalTlsVersion" - -# Update setting for Minimal TLS Version -az sql server update -n sql-server-name -g sql-server-group --set minimalTlsVersion="1.2" -``` - -## Change the connection policy - -[Connection policy](connectivity-architecture.md#connection-policy) determines how customers connect to Azure SQL Database. - -## Change the connection policy via PowerShell - -> [!IMPORTANT] -> Azure SQL Database still supports the PowerShell Azure Resource Manager module, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. The following script requires the [Azure PowerShell module](/powershell/azure/install-az-ps). 
- -The following PowerShell script shows how to change the connection policy by using PowerShell: - -```powershell -# Get SQL Server ID -$sqlserverid=(Get-AzSqlServer -ServerName sql-server-name -ResourceGroupName sql-server-group).ResourceId - -# Set URI -$id="$sqlserverid/connectionPolicies/Default" - -# Get current connection policy -(Get-AzResource -ResourceId $id -ApiVersion 2014-04-01 -Verbose).Properties.ConnectionType - -# Update connection policy -Set-AzResource -ResourceId $id -Properties @{"connectionType" = "Proxy"} -f -``` - -## Change the connection policy via the Azure CLI - -> [!IMPORTANT] -> All scripts in this section require the [Azure CLI](/cli/azure/install-azure-cli). - -### Azure CLI in a Bash shell - -The following CLI script shows how to change the connection policy in a Bash shell: - -```azurecli-interactive -# Get SQL Server ID -sqlserverid=$(az sql server show -n sql-server-name -g sql-server-group --query 'id' -o tsv) - -# Set URI -ids="$sqlserverid/connectionPolicies/Default" - -# Get current connection policy -az resource show --ids $ids - -# Update connection policy -az resource update --ids $ids --set properties.connectionType=Proxy -``` - -### Azure CLI from a Windows command prompt - -The following CLI script shows how to change the connection policy from a Windows command prompt (with the Azure CLI installed): - -```azurecli -# Get SQL Server ID and set URI -FOR /F "tokens=*" %g IN ('az sql server show --resource-group myResourceGroup-571418053 --name server-538465606 --query "id" -o tsv') do (SET sqlserverid=%g/connectionPolicies/Default) - -# Get current connection policy -az resource show --ids %sqlserverid% - -# Update connection policy -az resource update --ids %sqlserverid% --set properties.connectionType=Proxy -``` - -## Next steps - -- For an overview of how connectivity works in Azure SQL Database, refer to [Connectivity architecture](connectivity-architecture.md). 
-- For information on how to change the connection policy for a server, see [conn-policy](/cli/azure/sql/server/conn-policy). - - -[2]: media/single-database-create-quickstart/manage-connectivity-flowchart.png diff --git a/articles/azure-sql/database/cost-management.md b/articles/azure-sql/database/cost-management.md deleted file mode 100644 index 7b9fdd545e543..0000000000000 --- a/articles/azure-sql/database/cost-management.md +++ /dev/null @@ -1,152 +0,0 @@ ---- -title: Plan and manage costs -description: Learn how to plan for and manage costs for Azure SQL Database by using cost analysis in the Azure portal. -author: LitKnd -ms.author: kendralittle -ms.custom: subject-cost-optimization -ms.service: sql-database -ms.subservice: service-overview -ms.reviewer: mathoma -ms.topic: how-to -ms.date: 06/30/2021 ---- - -# Plan and manage costs for Azure SQL Database - -This article describes how you plan for and manage costs for Azure SQL Database. - -First, you use the Azure pricing calculator to add Azure resources, and review the estimated costs. After you've started using Azure SQL Database resources, use Cost Management features to set budgets and monitor costs. You can also review forecasted costs and identify spending trends to identify areas where you might want to act. Costs for Azure SQL Database are only a portion of the monthly costs in your Azure bill. Although this article explains how to plan for and manage costs for Azure SQL Database, you're billed for all Azure services and resources used in your Azure subscription, including any third-party services. - -## Prerequisites - -Cost analysis supports most Azure account types, but not all of them. To view the full list of supported account types, see [Understand Cost Management data](../../cost-management-billing/costs/understand-cost-mgt-data.md?WT.mc_id=costmanagementcontent_docsacmhorizontal_-inproduct-learn). To view cost data, you need at least read access for an Azure account. 
- -For information about assigning access to Azure Cost Management data, see [Assign access to data](../../cost-management-billing/costs/assign-access-acm-data.md?WT.mc_id=costmanagementcontent_docsacmhorizontal_-inproduct-learn). - - -## SQL Database initial cost considerations - -When working with Azure SQL Database, there are several cost-saving features to consider: - -### vCore or DTU purchasing models - -Azure SQL Database supports two purchasing models: vCore and DTU. The way you get charged varies between the purchasing models so it's important to understand the model that works best for your workload when planning and considering costs. For information about vCore and DTU purchasing models, see [Choose between the vCore and DTU purchasing models](purchasing-models.md). - -### Provisioned or serverless - -In the vCore purchasing model, Azure SQL Database also supports two types of compute tiers: provisioned throughput and serverless. The way you get charged for each compute tier varies so it's important to understand what works best for your workload when planning and considering costs. For details, see [vCore model overview - compute tiers](service-tiers-sql-database-vcore.md#compute-tiers). - -In the provisioned compute tier of the vCore-based purchasing model, you can exchange your existing licenses for discounted rates. For details, see [Azure Hybrid Benefit (AHB)](../azure-hybrid-benefit.md). - -### Elastic pools - -For environments with multiple databases that have varying and unpredictable usage demands, elastic pools can provide cost savings compared to provisioning the same number of single databases. For details, see [Elastic pools](elastic-pool-overview.md). - -## Estimate Azure SQL Database costs - -Use the [Azure pricing calculator](https://azure.microsoft.com/pricing/calculator/) to estimate costs for different Azure SQL Database configurations. 
For more information, see [Azure SQL Database pricing](https://azure.microsoft.com/pricing/details/azure-sql-database/). - -The information and pricing in the following image are for example purposes only: - -:::image type="content" source="media/cost-management/pricing-calc.png" alt-text="Azure SQL Database pricing calculator example"::: - -You can also estimate how different Retention Policy options affect cost. The information and pricing in the following image are for example purposes only: - -:::image type="content" source="media/cost-management/backup-storage.png" alt-text="Azure SQL Database pricing calculator example for storage"::: - - -## Understand the full billing model for Azure SQL Database - -Azure SQL Database runs on Azure infrastructure that accrues costs along with Azure SQL Database when you deploy the new resource. It's important to understand that additional infrastructure might accrue cost. - -Azure SQL Database (except for serverless) is billed on a predictable, hourly rate. If the SQL database is active for less than one hour, you are billed for the highest service tier selected, provisioned storage, and IO that applied during that hour, regardless of usage or whether the database was active for less than an hour. - -Billing depends on the SKU of your product, the generation hardware of your SKU, and the meter category. Azure SQL Database has the following possible SKUs: - -- Basic (B) -- Standard (S) -- Premium (P) -- General purpose (GP) -- Business critical (BC) -- And for storage: geo-redundant storage (GRS), locally redundant storage (LRS), and zone-redundant storage (ZRS) -- It's also possible to have a deprecated SKU from deprecated resource offerings - -For more information, see [vCore-based purchasing model](service-tiers-vcore.md), [DTU-based purchasing model](service-tiers-dtu.md), or [compare purchasing models](purchasing-models.md). 
- - -The following table shows the most common billing meters and their possible SKUs for **single databases**: - -| Measurement| Possible SKU(s) | Description | -| :----|:----|:----| -| Backup\* | GP/BC/HS | Measures the consumption of storage used by backups, billed by the amount of storage utilized in GB per month. | -| Backup (LTR) | GRS/LRS/ZRS/GF | Measures the consumption of storage used by long-term backups configured via long-term retention, billed by the amount of storage utilized. | -| Compute | B/S/P/GP/BC | Measures the consumption of your compute resources per hour. | -| Compute (primary/named replica) | HS | Measures the consumption of your compute resources per hour of your primary HS replica. -| Compute (HA replica) | HS | Measures the consumption of your compute resources per hour of your secondary HS replica. | -| Compute (ZR add-on) | GP | Measures the consumption of your compute resources per minute of your zone redundant added-on replica. | -| Compute (serverless) | GP | Measures the consumption of your serverless compute resources per minute. | -| License | GP/BC/HS | The billing for your SQL Server license accrued per month. | -| Storage | B/S\*/P\*/G/BC/HS | Billed monthly, by the amount of data stored per hour. | - -\* In the DTU purchasing model, an initial set of storage for data and backups is provided at no additional cost. The size of the storage depends on the service tier selected. Extra data storage can be purchased in the standard and premium tiers. For more information, see [Azure SQL Database pricing](https://azure.microsoft.com/pricing/details/azure-sql-database/). - -The following table shows the most common billing meters and their possible SKUs for **elastic pools**: - -| Measurement| Possible SKU(s) | Description | -|:----|:----|:----| -| Backup\* | GP/BC | Measures the consumption of storage used by backups, billed per GB per hour on a monthly basis. 
| -| Compute | B/S/P/GP/BC | Measures the consumption of your compute resources per hour, such as vCores and memory or DTUs. | -| License | GP/BC | The billing for your SQL Server license accrued per month. | -| Storage | B/S\*/P\*/GP/HS | Billed monthly, both by the amount of data stored on the drive using storage space per hour, and the throughput of megabytes per second (MBPS). | - -\* In the DTU purchasing model, an initial set of storage for data and backups is provided at no additional cost. The size of the storage depends on the service tier selected. Extra data storage can be purchased in the standard and premium tiers. For more information, see [Azure SQL Database pricing](https://azure.microsoft.com/pricing/details/azure-sql-database/). - -### Using Monetary Credit with Azure SQL Database - -You can pay for Azure SQL Database charges with your Azure Prepayment (previously called monetary commitment) credit. However, you can't use Azure Prepayment credit to pay for charges for third-party products and services including those from the Azure Marketplace. - -## Review estimated costs in the Azure portal - -As you go through the process of creating an Azure SQL Database, you can see the estimated costs during configuration of the compute tier. - -To access this screen, select **Configure database** on the **Basics** tab of the **Create SQL Database** page. The information and pricing in the following image are for example purposes only: - - :::image type="content" source="media/cost-management/cost-estimate.png" alt-text="Example showing cost estimate in the Azure portal"::: - -If your Azure subscription has a spending limit, Azure prevents you from spending over your credit amount. As you create and use Azure resources, your credits are used. When you reach your credit limit, the resources that you deployed are disabled for the rest of that billing period. You can't change your credit limit, but you can remove it. 
For more information about spending limits, see [Azure spending limit](../../cost-management-billing/manage/spending-limit.md). - -## Monitor costs - -As you start using Azure SQL Database, you can see the estimated costs in the portal. Use the following steps to review the cost estimate: - -1. Sign in to the Azure portal and navigate to the resource group for your Azure SQL database. You can locate the resource group by navigating to your database and selecting **Resource group** in the **Overview** section. -1. In the menu, select **Cost analysis**. -1. View **Accumulated costs** and set the chart at the bottom to **Service name**. This chart shows an estimate of your current SQL Database costs. To narrow costs for the entire page to Azure SQL Database, select **Add filter** and then select **Azure SQL Database**. The information and pricing in the following image are for example purposes only: - - :::image type="content" source="media/cost-management/cost-analysis.png" alt-text="Example showing accumulated costs in the Azure portal"::: - -From here, you can explore costs on your own. For more information about the different cost analysis settings, see [Start analyzing costs](../../cost-management-billing/costs/cost-mgt-alerts-monitor-usage-spending.md?WT.mc_id=costmanagementcontent_docsacmhorizontal_-inproduct-learn). - -## Create budgets - -You can create [budgets](../../cost-management-billing/costs/tutorial-acm-create-budgets.md?WT.mc_id=costmanagementcontent_docsacmhorizontal_-inproduct-learn) to manage costs and create [alerts](../../cost-management-billing/costs/cost-mgt-alerts-monitor-usage-spending.md?WT.mc_id=costmanagementcontent_docsacmhorizontal_-inproduct-learn) that automatically notify stakeholders of spending anomalies and overspending risks. Alerts are based on spending compared to budget and cost thresholds.
Budgets and alerts are created for Azure subscriptions and resource groups, so they're useful as part of an overall cost monitoring strategy. - -Budgets can be created with filters for specific resources or services in Azure if you want more granularity present in your monitoring. Filters help ensure that you don't accidentally create new resources. For more about the filter options when you create a budget, see [Group and filter options](../../cost-management-billing/costs/group-filter.md?WT.mc_id=costmanagementcontent_docsacmhorizontal_-inproduct-learn). - -## Export cost data - -You can also [export your cost data](../../cost-management-billing/costs/tutorial-export-acm-data.md?WT.mc_id=costmanagementcontent_docsacmhorizontal_-inproduct-learn) to a storage account. This is helpful when you need to do further data analysis on cost. For example, a finance team can analyze the data using Excel or Power BI. You can export your costs on a daily, weekly, or monthly schedule and set a custom date range. Exporting cost data is the recommended way to retrieve cost datasets. - -## Other ways to manage and reduce costs for Azure SQL Database - -Azure SQL Database also enables you to scale resources up or down to control costs based on your application needs. For details, see [Dynamically scale database resources](scale-resources.md). - -Save money by committing to a reservation for compute resources for one to three years. For details, see [Save costs for resources with reserved capacity](reserved-capacity-overview.md). - - -## Next steps - -- Learn [how to optimize your cloud investment with Azure Cost Management](../../cost-management-billing/costs/cost-mgt-best-practices.md?WT.mc_id=costmanagementcontent_docsacmhorizontal_-inproduct-learn). -- Learn more about managing costs with [cost analysis](../../cost-management-billing/costs/quick-acm-cost-analysis.md?WT.mc_id=costmanagementcontent_docsacmhorizontal_-inproduct-learn). 
-- Learn about how to [prevent unexpected costs](../../cost-management-billing/cost-management-billing-overview.md?WT.mc_id=costmanagementcontent_docsacmhorizontal_-inproduct-learn). -- Take the [Cost Management](/learn/paths/control-spending-manage-bills?WT.mc_id=costmanagementcontent_docsacmhorizontal_-inproduct-learn) guided learning course. \ No newline at end of file diff --git a/articles/azure-sql/database/data-discovery-and-classification-overview.md b/articles/azure-sql/database/data-discovery-and-classification-overview.md deleted file mode 100644 index 1d155b27cdbff..0000000000000 --- a/articles/azure-sql/database/data-discovery-and-classification-overview.md +++ /dev/null @@ -1,232 +0,0 @@ ---- -title: Data Discovery & Classification -description: Data Discovery & Classification for Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics -services: sql-database -ms.service: sql-db-mi -ms.subservice: security -ms.custom: sqldbrb=1 -titleSuffix: Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse -ms.devlang: -ms.topic: conceptual -author: Madhumitatripathy -ms.author: matripathy -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 02/22/2022 -tags: azure-synapse ---- -# Data Discovery & Classification -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -Data Discovery & Classification is built into Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics. It provides basic capabilities for discovering, classifying, labeling, and reporting the sensitive data in your databases. - -Your most sensitive data might include business, financial, healthcare, or personal information. It can serve as infrastructure for: - -- Helping to meet standards for data privacy and requirements for regulatory compliance. -- Various security scenarios, such as monitoring (auditing) access to sensitive data. 
-- Controlling access to and hardening the security of databases that contain highly sensitive data. - -> [!NOTE] -> For information about SQL Server on-premises, see [SQL Data Discovery & Classification](/sql/relational-databases/security/sql-data-discovery-and-classification). - -## What is Data Discovery & Classification? - -Data Discovery & Classification currently supports the following capabilities: - -- **Discovery and recommendations:** The classification engine scans your database and identifies columns that contain potentially sensitive data. It then provides you with an easy way to review and apply recommended classification via the Azure portal. - -- **Labeling:** You can apply sensitivity-classification labels persistently to columns by using new metadata attributes that have been added to the SQL Server database engine. This metadata can then be used for sensitivity-based auditing scenarios. - -- **Query result-set sensitivity:** The sensitivity of a query result set is calculated in real time for auditing purposes. - -- **Visibility:** You can view the database-classification state in a detailed dashboard in the Azure portal. Also, you can download a report in Excel format to use for compliance and auditing purposes and other needs. - -## Discover, classify, and label sensitive columns - -This section describes the steps for: - -- Discovering, classifying, and labeling columns that contain sensitive data in your database. -- Viewing the current classification state of your database and exporting reports. - -The classification includes two metadata attributes: - -- **Labels**: The main classification attributes, used to define the sensitivity level of the data stored in the column. -- **Information types**: Attributes that provide more granular information about the type of data stored in the column. 
- -### Define and customize your classification taxonomy - -Data Discovery & Classification comes with a built-in set of sensitivity labels and a built-in set of information types and discovery logic. You can customize this taxonomy and define a set and ranking of classification constructs specifically for your environment. - -You define and customize your classification taxonomy in one central place for your entire Azure organization. That location is in [Microsoft Defender for Cloud](../../security-center/security-center-introduction.md), as part of your security policy. Only someone with administrative rights on the organization's root management group can do this task. - -As part of policy management, you can define custom labels, rank them, and associate them with a selected set of information types. You can also add your own custom information types and configure them with string patterns. The patterns are added to the discovery logic for identifying this type of data in your databases. - -For more information, see [Customize the SQL information protection policy in Microsoft Defender for Cloud (Preview)](../../security-center/security-center-info-protection-policy.md). - -After the organization-wide policy has been defined, you can continue classifying individual databases by using your customized policy. - -### Classify your database - -> [!NOTE] -> The following example uses Azure SQL Database, but you should select the appropriate product for which you want to configure Data Discovery & Classification. - -1. Go to the [Azure portal](https://portal.azure.com). - -1. Go to **Data Discovery & Classification** under the **Security** heading in your Azure SQL Database pane. The Overview tab includes a summary of the current classification state of the database. The summary includes a detailed list of all classified columns, which you can also filter to show only specific schema parts, information types, and labels.
If you haven’t classified any columns yet, [skip to step 4](#step-4). - - ![Overview](./media/data-discovery-and-classification-overview/data-discovery-and-classification.png) - -1. To download a report in Excel format, select **Export** in the top menu of the pane. - -1. To begin classifying your data, select the **Classification** tab on the **Data Discovery & Classification** page. - - The classification engine scans your database for columns containing potentially sensitive data and provides a list of recommended column classifications. - -1. View and apply classification recommendations: - - - To view the list of recommended column classifications, select the recommendations panel at the bottom of the pane. - - - To accept a recommendation for a specific column, select the check box in the left column of the relevant row. To mark all recommendations as accepted, select the leftmost check box in the recommendations table header. - - - To apply the selected recommendations, select **Accept selected recommendations**. - - ![Recommendations for classification](./media/data-discovery-and-classification-overview/recommendation.png) - -1. You can also classify columns manually, as an alternative or in addition to the recommendation-based classification: - - 1. Select **Add classification** in the top menu of the pane. - - 1. In the context window that opens, select the schema, table, and column that you want to classify, and the information type and sensitivity label. - - 1. Select **Add classification** at the bottom of the context window. - - ![Manually add classification](./media/data-discovery-and-classification-overview/manually-add-classification.png) - - -1. To complete your classification and persistently label (tag) the database columns with the new classification metadata, select **Save** in the **Classification** page. - -## Audit access to sensitive data - -An important aspect of the classification is the ability to monitor access to sensitive data. 
[Azure SQL Auditing](/azure/azure-sql/database/auditing-overview) has been enhanced to include a new field in the audit log called `data_sensitivity_information`. This field logs the sensitivity classifications (labels) of the data that was returned by a query. Here's an example: - -[![Audit log](./media/data-discovery-and-classification-overview/11_data_classification_audit_log.png)](./media/data-discovery-and-classification-overview/11_data_classification_audit_log.png#lightbox) - -These are the activities that are actually auditable with sensitivity information: -- ALTER TABLE ... DROP COLUMN -- BULK INSERT -- DELETE -- INSERT -- MERGE -- UPDATE -- UPDATETEXT -- WRITETEXT -- DROP TABLE -- BACKUP -- DBCC CloneDatabase -- SELECT INTO -- INSERT INTO EXEC -- TRUNCATE TABLE -- DBCC SHOW_STATISTICS -- sys.dm_db_stats_histogram - -Use [sys.fn_get_audit_file](/sql/relational-databases/system-functions/sys-fn-get-audit-file-transact-sql) to return information from an audit file stored in an Azure Storage account. - -## Permissions - -These built-in roles can read the data classification of a database: - -- Owner -- Reader -- Contributor -- SQL Security Manager -- User Access Administrator - -These are the required actions to read the data classification of a database: - -- Microsoft.Sql/servers/databases/currentSensitivityLabels/* -- Microsoft.Sql/servers/databases/recommendedSensitivityLabels/* -- Microsoft.Sql/servers/databases/schemas/tables/columns/sensitivityLabels/* - -These built-in roles can modify the data classification of a database: - -- Owner -- Contributor -- SQL Security Manager - -This is the required action to modify the data classification of a database: - -- Microsoft.Sql/servers/databases/schemas/tables/columns/sensitivityLabels/* - -Learn more about role-based permissions in [Azure RBAC](../../role-based-access-control/overview.md).
- -> [!NOTE] -> The Azure SQL built-in roles in this section apply to a dedicated SQL pool (formerly SQL DW) but are not available for dedicated SQL pools and other SQL resources within Azure Synapse workspaces. For SQL resources in Azure Synapse workspaces, use the available actions for data classification to create custom Azure roles as needed for labelling. For more information on the `Microsoft.Synapse/workspaces/sqlPools` provider operations, see [Microsoft.Synapse](../../role-based-access-control/resource-provider-operations.md#microsoftsynapse). - -## Manage classifications - -You can use T-SQL, a REST API, or PowerShell to manage classifications. - -### Use T-SQL - -You can use T-SQL to add or remove column classifications, and to retrieve all classifications for the entire database. - -> [!NOTE] -> When you use T-SQL to manage labels, there's no validation that labels that you add to a column exist in the organization's information-protection policy (the set of labels that appear in the portal recommendations). So, it's up to you to validate this. - -For information about using T-SQL for classifications, see the following references: - -- To add or update the classification of one or more columns: [ADD SENSITIVITY CLASSIFICATION](/sql/t-sql/statements/add-sensitivity-classification-transact-sql) -- To remove the classification from one or more columns: [DROP SENSITIVITY CLASSIFICATION](/sql/t-sql/statements/drop-sensitivity-classification-transact-sql) -- To view all classifications on the database: [sys.sensitivity_classifications](/sql/relational-databases/system-catalog-views/sys-sensitivity-classifications-transact-sql) - -### Use PowerShell cmdlets -Manage classifications and recommendations for Azure SQL Database and Azure SQL Managed Instance using PowerShell. 
- -#### PowerShell cmdlets for Azure SQL Database - -- [Get-AzSqlDatabaseSensitivityClassification](/powershell/module/az.sql/get-azsqldatabasesensitivityclassification) -- [Set-AzSqlDatabaseSensitivityClassification](/powershell/module/az.sql/set-azsqldatabasesensitivityclassification) -- [Remove-AzSqlDatabaseSensitivityClassification](/powershell/module/az.sql/remove-azsqldatabasesensitivityclassification) -- [Get-AzSqlDatabaseSensitivityRecommendation](/powershell/module/az.sql/get-azsqldatabasesensitivityrecommendation) -- [Enable-AzSqlDatabaseSensitivityRecommendation](/powershell/module/az.sql/enable-azsqldatabasesensitivityrecommendation) -- [Disable-AzSqlDatabaseSensitivityRecommendation](/powershell/module/az.sql/disable-azsqldatabasesensitivityrecommendation) - -#### PowerShell cmdlets for Azure SQL Managed Instance - -- [Get-AzSqlInstanceDatabaseSensitivityClassification](/powershell/module/az.sql/get-azsqlinstancedatabasesensitivityclassification) -- [Set-AzSqlInstanceDatabaseSensitivityClassification](/powershell/module/az.sql/set-azsqlinstancedatabasesensitivityclassification) -- [Remove-AzSqlInstanceDatabaseSensitivityClassification](/powershell/module/az.sql/remove-azsqlinstancedatabasesensitivityclassification) -- [Get-AzSqlInstanceDatabaseSensitivityRecommendation](/powershell/module/az.sql/get-azsqlinstancedatabasesensitivityrecommendation) -- [Enable-AzSqlInstanceDatabaseSensitivityRecommendation](/powershell/module/az.sql/enable-azsqlinstancedatabasesensitivityrecommendation) -- [Disable-AzSqlInstanceDatabaseSensitivityRecommendation](/powershell/module/az.sql/disable-azsqlinstancedatabasesensitivityrecommendation) - -### Use the REST API - -You can use the REST API to programmatically manage classifications and recommendations. The published REST API supports the following operations: - -- [Create Or Update](/rest/api/sql/sensitivitylabels/createorupdate): Creates or updates the sensitivity label of the specified column.
-- [Delete](/rest/api/sql/sensitivitylabels/delete): Deletes the sensitivity label of the specified column. -- [Disable Recommendation](/rest/api/sql/sensitivitylabels/disablerecommendation): Disables sensitivity recommendations on the specified column. -- [Enable Recommendation](/rest/api/sql/sensitivitylabels/enablerecommendation): Enables sensitivity recommendations on the specified column. (Recommendations are enabled by default on all columns.) -- [Get](/rest/api/sql/sensitivitylabels/get): Gets the sensitivity label of the specified column. -- [List Current By Database](/rest/api/sql/sensitivitylabels/listcurrentbydatabase): Gets the current sensitivity labels of the specified database. -- [List Recommended By Database](/rest/api/sql/sensitivitylabels/listrecommendedbydatabase): Gets the recommended sensitivity labels of the specified database. - -## Retrieve classifications metadata using SQL drivers - -You can use the following SQL drivers to retrieve classification metadata: - -- [ODBC Driver](/sql/connect/odbc/data-classification) -- [OLE DB Driver](/sql/connect/oledb/features/using-data-classification) -- [JDBC Driver](/sql/connect/jdbc/data-discovery-classification-sample) -- [Microsoft Drivers for PHP for SQL Server](/sql/connect/php/release-notes-php-sql-driver) - -## FAQ - Advanced classification capabilities - -**Question**: Will [Microsoft Purview](../../purview/overview.md) replace SQL Data Discovery & Classification or will SQL Data Discovery & Classification be retired soon? -**Answer**: We continue to support SQL Data Discovery & Classification and encourage you to adopt [Microsoft Purview](../../purview/overview.md) which has richer capabilities to drive advanced classification capabilities and data governance. If we decide to retire any service, feature, API or SKU, you will receive advance notice including a migration or transition path. Learn more about Microsoft Lifecycle policies here. 
- -## Next steps - -- Consider configuring [Azure SQL Auditing](/azure/azure-sql/database/auditing-overview) for monitoring and auditing access to your classified sensitive data. -- For a presentation that includes data Discovery & Classification, see [Discovering, classifying, labeling & protecting SQL data | Data Exposed](https://www.youtube.com/watch?v=itVi9bkJUNc). -- To classify your Azure SQL Databases and Azure Synapse Analytics with Microsoft Purview labels using T-SQL commands, see [Classify your Azure SQL data using Microsoft Purview labels](../../sql-database/scripts/sql-database-import-purview-labels.md). \ No newline at end of file diff --git a/articles/azure-sql/database/database-advisor-find-recommendations-portal.md b/articles/azure-sql/database/database-advisor-find-recommendations-portal.md deleted file mode 100644 index 2594bc3dd0ac1..0000000000000 --- a/articles/azure-sql/database/database-advisor-find-recommendations-portal.md +++ /dev/null @@ -1,169 +0,0 @@ ---- -title: Apply performance recommendations -description: Use the Azure portal to find performance recommendations that can optimize performance of your database. -services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: NikaKinska -ms.author: nnikolic -ms.reviewer: kendralittle, mathoma, wiassaf -ms.date: 12/19/2018 ---- -# Find and apply performance recommendations -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -You can use the Azure portal to find performance recommendations that can optimize performance of your database in Azure SQL Database or to correct some issue identified in your workload. The **Performance recommendation** page in the Azure portal enables you to find the top recommendations based on their potential impact. 
- -## Viewing recommendations - -To view and apply performance recommendations, you need the correct [Azure role-based access control (Azure RBAC)](../../role-based-access-control/overview.md) permissions in Azure. **Reader**, **SQL DB Contributor** permissions are required to view recommendations, and **Owner**, **SQL DB Contributor** permissions are required to execute any actions; create or drop indexes and cancel index creation. - -Use the following steps to find performance recommendations on the Azure portal: - -1. Sign in to the [Azure portal](https://portal.azure.com/). -2. Go to **All services** > **SQL databases**, and select your database. -3. Navigate to **Performance recommendation** to view available recommendations for the selected database. - -Performance recommendations are shown in the table similar to the one shown on the following figure: - -![Screenshot shows performance recommendations in a table with action and recommendation description.](./media/database-advisor-find-recommendations-portal/recommendations.png) - -Recommendations are sorted by their potential impact on performance into the following categories: - -| Impact | Description | -|:--- |:--- | -| High |High impact recommendations should provide the most significant performance impact. | -| Medium |Medium impact recommendations should improve performance, but not substantially. | -| Low |Low impact recommendations should provide better performance than without, but improvements might not be significant. | - -> [!NOTE] -> Azure SQL Database needs to monitor activities at least for a day in order to identify some recommendations. The Azure SQL Database can more easily optimize for consistent query patterns than it can for random spotty bursts of activity. If recommendations are not currently available, the **Performance recommendation** page provides a message explaining why. - -You can also view the status of the historical operations. 
Select a recommendation or status to see more information. - -Here is an example of the "Create index" recommendation in the Azure portal. - -![Create index](./media/database-advisor-find-recommendations-portal/sql-database-performance-recommendation.png) - -## Applying recommendations - -Azure SQL Database gives you full control over how recommendations are enabled using any of the following three options: - -* Apply individual recommendations one at a time. -* Enable the Automatic tuning to automatically apply recommendations. -* To implement a recommendation manually, run the recommended T-SQL script against your database. - -Select any recommendation to view its details and then click **View script** to review the exact details of how the recommendation is created. - -The database remains online while the recommendation is applied -- using performance recommendation or automatic tuning never takes a database offline. - -### Apply an individual recommendation - -You can review and accept recommendations one at a time. - -1. On the **Recommendations** page, select a recommendation. -2. On the **Details** page, click the **Apply** button. - - ![Apply recommendation](./media/database-advisor-find-recommendations-portal/apply.png) - -Selected recommendations are applied on the database. - -### Removing recommendations from the list - -If your list of recommendations contains items that you want to remove from the list, you can discard the recommendation: - -1. Select a recommendation in the list of **Recommendations** to open the details. -2. Click **Discard** on the **Details** page. - -If desired, you can add discarded items back to the **Recommendations** list: - -1. On the **Recommendations** page, click **View discarded**. -2. Select a discarded item from the list to view its details. -3. Optionally, click **Undo Discard** to add the index back to the main list of **Recommendations**. 
- -> [!NOTE] -> Please note that if SQL Database [Automatic tuning](automatic-tuning-overview.md) is enabled, and if you have manually discarded a recommendation from the list, such recommendation will never be applied automatically. Discarding a recommendation is a handy way for users to have Automatic tuning enabled in cases when requiring that a specific recommendation shouldn’t be applied. -> You can revert this behavior by adding discarded recommendations back to the Recommendations list by selecting the Undo Discard option. - -### Enable automatic tuning - -You can set your database to implement recommendations automatically. As recommendations become available, they are automatically applied. As with all recommendations managed by the service, if the performance impact is negative, the recommendation is reverted. - -1. On the **Recommendations** page, click **Automate**: - - ![Advisor settings](./media/database-advisor-find-recommendations-portal/settings.png) -2. Select actions to automate: - - ![Screenshot that shows where to select the actions to automate.](./media/database-advisor-find-recommendations-portal/server.png) - -> [!NOTE] -> Please note that **DROP_INDEX** option is currently not compatible with applications using partition switching and index hints. - -Once you have selected your desired configuration, click Apply. - -### Manually apply recommendations through T-SQL - -Select any recommendation and then click **View script**. Run this script against your database to manually apply the recommendation. - -*Indexes that are manually executed are not monitored and validated for performance impact by the service* so it is suggested that you monitor these indexes after creation to verify they provide performance gains and adjust or delete them if necessary. For details about creating indexes, see [CREATE INDEX (Transact-SQL)](/sql/t-sql/statements/create-index-transact-sql). 
In addition, manually applied recommendations will remain active and shown in the list of recommendations for 24-48 hrs. before the system automatically withdraws them. If you would like to remove a recommendation sooner, you can manually discard it. - -### Canceling recommendations - -Recommendations that are in a **Pending**, **Validating**, or **Success** status can be canceled. Recommendations with a status of **Executing** cannot be canceled. - -1. Select a recommendation in the **Tuning History** area to open the **recommendations details** page. -2. Click **Cancel** to abort the process of applying the recommendation. - -## Monitoring operations - -Applying a recommendation might not happen instantaneously. The portal provides details regarding the status of recommendation. The following are possible states that an index can be in: - -| Status | Description | -|:--- |:--- | -| Pending |Apply recommendation command has been received and is scheduled for execution. | -| Executing |The recommendation is being applied. | -| Validating |Recommendation was successfully applied and the service is measuring the benefits. | -| Success |Recommendation was successfully applied and benefits have been measured. | -| Error |An error occurred during the process of applying the recommendation. This can be a transient issue, or possibly a schema change to the table and the script is no longer valid. | -| Reverting |The recommendation was applied, but has been deemed non-performant and is being automatically reverted. | -| Reverted |The recommendation was reverted. 
| - -Click an in-process recommendation from the list to see more information: - -![Screenshot that shows the list of in-process recommendations.](./media/database-advisor-find-recommendations-portal/operations.png) - -### Reverting a recommendation - -If you used the performance recommendations to apply the recommendation (meaning you did not manually run the T-SQL script), it automatically reverts the change if it finds the performance impact to be negative. If for any reason you simply want to revert a recommendation, you can do the following: - -1. Select a successfully applied recommendation in the **Tuning history** area. -2. Click **Revert** on the **recommendation details** page. - -![Recommended Indexes](./media/database-advisor-find-recommendations-portal/details.png) - -## Monitoring performance impact of index recommendations - -After recommendations are successfully implemented (currently, index operations and parameterize queries recommendations only), you can click **Query Insights** on the recommendation details page to open [Query Performance Insights](query-performance-insight-use.md) and see the performance impact of your top queries. - -![Monitor performance impact](./media/database-advisor-find-recommendations-portal/query-insights.png) - -## Summary - -Azure SQL Database provides recommendations for improving database performance. By providing T-SQL scripts, you get assistance in optimizing your database and ultimately improving query performance. - -## Next steps - -Monitor your recommendations and continue to apply them to refine performance. Database workloads are dynamic and change continuously. Azure SQL Database continues to monitor and provide recommendations that can potentially improve your database's performance. - -* See [Automatic tuning](automatic-tuning-overview.md) to learn more about the automatic tuning in Azure SQL Database. 
-* See [Performance recommendations](database-advisor-implement-performance-recommendations.md) for an overview of Azure SQL Database performance recommendations. -* See [Query Performance Insights](query-performance-insight-use.md) to learn about viewing the performance impact of your top queries. - -## Additional resources - -* [Query Store](/sql/relational-databases/performance/monitoring-performance-by-using-the-query-store) -* [CREATE INDEX](/sql/t-sql/statements/create-index-transact-sql) -* [Azure role-based access control (Azure RBAC)](../../role-based-access-control/overview.md) \ No newline at end of file diff --git a/articles/azure-sql/database/database-advisor-implement-performance-recommendations.md b/articles/azure-sql/database/database-advisor-implement-performance-recommendations.md deleted file mode 100644 index 732388558ad8b..0000000000000 --- a/articles/azure-sql/database/database-advisor-implement-performance-recommendations.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: Database advisor performance recommendations for Azure SQL Database -description: Azure SQL Database provides recommendations for databases that can improve query performance in Azure SQL Database. -services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.custom: fasttrack-edit, sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: NikaKinska -ms.author: nnikolic -ms.reviewer: kendralittle, mathoma, wiassaf -ms.date: 03/10/2020 ---- -# Database Advisor performance recommendations for Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Azure SQL Database learns and adapts with your application. Azure SQL Database has a number of database advisors that provide customized recommendations that enable you to maximize performance. These database advisors continuously assess and analyze the usage history and provide recommendations based on workload patterns that help improve performance. 
- -## Performance overview - -Performance overview provides a summary of your database performance, and helps you with performance tuning and troubleshooting. - -![Performance overview for Azure SQL Database](./media/database-advisor-implement-performance-recommendations/performance-overview-annotated.png) - -- The **Recommendations** tile provides a breakdown of tuning recommendations for your database (top three recommendations are shown if there are more). Clicking this tile takes you to **[Performance recommendation options](database-advisor-find-recommendations-portal.md#viewing-recommendations)**. -- The **Tuning activity** tile provides a summary of the ongoing and completed tuning actions for your database, giving you a quick view into the history of tuning activity. Clicking this tile takes you to the full tuning history view for your database. -- The **Auto-tuning** tile shows the **[auto-tuning configuration](automatic-tuning-enable.md)** for your database (tuning options that are automatically applied to your database). Clicking this tile opens the automation configuration dialog. -- The **Database queries** tile shows the summary of the query performance for your database (overall DTU usage and top resource consuming queries). Clicking this tile takes you to **[Query Performance Insight](query-performance-insight-use.md)**. - -## Performance recommendation options - -Performance recommendation options available in Azure SQL Database are: - -| Performance recommendation | Single database and pooled database support | Instance database support | -| :----------------------------- | ----- | ----- | -| **Create index recommendations** - Recommends creation of indexes that may improve performance of your workload. | Yes | No | -| **Drop index recommendations** - Recommends removal of redundant and duplicate indexes daily, except for unique indexes, and indexes that were not used for a long time (>90 days). 
Please note that this option is not compatible with applications using partition switching and index hints. Dropping unused indexes is not supported for Premium and Business Critical service tiers. | Yes | No | -| **Parameterize queries recommendations (preview)** - Recommends forced parameterization in cases when you have one or more queries that are constantly being recompiled but end up with the same query execution plan. | Yes | No | -| **Fix schema issues recommendations (preview)** - Recommendations for schema correction appear when Azure SQL Database notices an anomaly in the number of schema-related SQL errors that are happening on your database. Microsoft is currently deprecating "Fix schema issue" recommendations. | Yes | No | - -![Performance recommendations for Azure SQL Database](./media/database-advisor-implement-performance-recommendations/performance-recommendations-annotated.png) - -To apply performance recommendations, see [applying recommendations](database-advisor-find-recommendations-portal.md#applying-recommendations). To view the status of recommendations, see [Monitoring operations](database-advisor-find-recommendations-portal.md#monitoring-operations). - -You can also find complete history of tuning actions that were applied in the past. - -## Create index recommendations - -Azure SQL Database continuously monitors the queries that are running and identifies the indexes that could improve performance. After there's enough confidence that a certain index is missing, a new **Create index** recommendation is created. - -Azure SQL Database builds confidence by estimating the performance gain the index would bring through time. Depending on the estimated performance gain, recommendations are categorized as high, medium, or low. - -Indexes that are created by using recommendations are always flagged as auto-created indexes. 
You can see which indexes are auto-created by looking at the [sys.indexes view](/sql/relational-databases/system-catalog-views/sys-indexes-transact-sql). Auto-created indexes don't block ALTER/RENAME commands. - -If you try to drop the column that has an auto-created index over it, the command passes. The auto-created index is dropped with the command as well. Regular indexes block the ALTER/RENAME command on columns that are indexed. - -After the create index recommendation is applied, Azure SQL Database compares the performance of the queries with the baseline performance. If the new index improved performance, the recommendation is flagged as successful and the impact report is available. If the index didn't improve performance, it's automatically reverted. Azure SQL Database uses this process to ensure that recommendations improve database performance. - -Any **create index** recommendation has a back-off policy that doesn't allow applying the recommendation if the resource usage of a database or pool is high. The back-off policy takes into account CPU, Data IO, Log IO, and available storage. - -If CPU, data IO, or log IO is higher than 80% in the previous 30 minutes, the create index recommendation is postponed. If the available storage will be below 10% after the index is created, the recommendation goes into an error state. If, after a couple of days, automatic tuning still believes that the index would be beneficial, the process starts again. - -This process repeats until there's enough available storage to create an index, or until the index isn't seen as beneficial anymore. - -## Drop index recommendations - -Besides detecting missing indexes, Azure SQL Database continuously analyzes the performance of existing indexes. If an index is not used, Azure SQL Database recommends dropping it. Dropping an index is recommended in two cases: - -- The index is a duplicate of another index (same indexed and included column, partition schema, and filters). 
-- The index hasn't been used for a prolonged period (93 days). - -Drop index recommendations also go through the verification after implementation. If the performance improves, the impact report is available. If performance degrades, the recommendation is reverted. - -## Parameterize queries recommendations (preview) - -*Parameterize queries* recommendations appear when you have one or more queries that are constantly being recompiled but end up with the same query execution plan. This condition creates an opportunity to apply forced parameterization. Forced parameterization, in turn, allows query plans to be cached and reused in the future, which improves performance and reduces resource usage. - -Every query initially needs to be compiled to generate an execution plan. Each generated plan is added to the plan cache. Subsequent executions of the same query can reuse this plan from the cache, which eliminates the need for additional compilation. - -Queries with non-parameterized values can lead to performance overhead because the execution plan is recompiled each time the non-parameterized values are different. In many cases, the same queries with different parameter values generate the same execution plans. These plans, however, are still separately added to the plan cache. - -The process of recompiling execution plans uses database resources, increases the query duration time, and overflows the plan cache. These events, in turn, cause plans to be evicted from the cache. This behavior can be altered by setting the forced parameterization option on the database. - -To help you estimate the impact of this recommendation, you are provided with a comparison between the actual CPU usage and the projected CPU usage (as if the recommendation were applied). This recommendation can help you gain CPU savings. It can also help you decrease query duration and overhead for the plan cache, which means that more of the plans can stay in the cache and be reused. 
You can apply this recommendation quickly by selecting the **Apply** command. - -After you apply this recommendation, it enables forced parameterization within minutes on your database. It starts the monitoring process, which lasts for approximately 24 hours. After this period, you can see the validation report. This report shows the CPU usage of your database 24 hours before and after the recommendation has been applied. Azure SQL Database Advisor has a safety mechanism that automatically reverts the applied recommendation if performance regression has been detected. - -## Fix schema issues recommendations (preview) - -> [!IMPORTANT] -> Microsoft is currently deprecating "Fix schema issue" recommendations. We recommend that you use [Intelligent Insights](intelligent-insights-overview.md) to monitor your database performance issues, including schema issues that the "Fix schema issue" recommendations previously covered. - -**Fix schema issues** recommendations appear when Azure SQL Database notices an anomaly in the number of schema-related SQL errors that are happening on your database. This recommendation typically appears when your database encounters multiple schema-related errors (invalid column name, invalid object name, and so on) within an hour. - -"Schema issues" are a class of syntax errors. They occur when the definition of the SQL query and the definition of the database schema aren't aligned. For example, one of the columns that's expected by the query might be missing in the target table or vice-versa. - -The "Fix schema issue" recommendation appears when Azure SQL Database notices an anomaly in the number of schema-related SQL errors that are happening on your database. The following table shows the errors that are related to schema issues: - -| SQL error code | Message | -| --- | --- | -| 201 |Procedure or function '*' expects parameter '*', which was not supplied. | -| 207 |Invalid column name '*'. | -| 208 |Invalid object name '*'. 
| -| 213 |Column name or number of supplied values does not match table definition. | -| 2812 |Could not find stored procedure '*'. | -| 8144 |Procedure or function * has too many arguments specified. | - -## Custom applications - -Developers might consider developing custom applications using performance recommendations for Azure SQL Database. All recommendations listed in the portal for a database can be accessed through [Get-AzSqlDatabaseRecommendedAction](/powershell/module/az.sql/get-azsqldatabaserecommendedaction) API. - -## Next steps - -- For more information about automatic tuning of database indexes and query execution plans, see [Azure SQL Database automatic tuning](automatic-tuning-overview.md). -- For more information about automatically monitoring database performance with automated diagnostics and root cause analysis of performance issues, see [Azure SQL Intelligent Insights](intelligent-insights-overview.md). -- See [Query Performance Insights](query-performance-insight-use.md) to learn about and view the performance impact of your top queries. \ No newline at end of file diff --git a/articles/azure-sql/database/database-copy.md b/articles/azure-sql/database/database-copy.md deleted file mode 100644 index 67ca7aef0c316..0000000000000 --- a/articles/azure-sql/database/database-copy.md +++ /dev/null @@ -1,263 +0,0 @@ ---- -title: Copy a database -description: Create a transactionally consistent copy of an existing database in Azure SQL Database on either the same server or a different server. 
-services: sql-database -ms.service: sql-database -ms.subservice: data-movement -ms.custom: sqldbrb=1, devx-track-azurepowershell, devx-track-azurecli -ms.topic: how-to -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 1/19/2022 ---- -# Copy a transactionally consistent copy of a database in Azure SQL Database - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Azure SQL Database provides several methods for creating a copy of an existing [database](single-database-overview.md) on either the same server or a different server. You can copy a database by using Azure portal, PowerShell, Azure CLI, or T-SQL. - -## Overview - -A database copy is a transactionally consistent snapshot of the source database as of a point in time after the copy request is initiated. You can select the same server or a different server for the copy. Also you can choose to keep the backup redundancy, service tier and compute size of the source database, or use a different backup storage redundancy and/or compute size within the same or a different service tier. After the copy is complete, it becomes a fully functional, independent database. The logins, users, and permissions in the copied database are managed independently from the source database. The copy is created using the geo-replication technology. Once replica seeding is complete, the geo-replication link is automatically terminated. All the requirements for using geo-replication apply to the database copy operation. See [Active geo-replication overview](active-geo-replication-overview.md) for details. - -## Database Copy for Azure SQL Hyperscale - -For Azure SQL Hyperscale the target database determines whether the copy will be a fast copy or a size of data copy. - -Fast copy: When the copy is done in the same region as the source, the copy will be created from the snapshots of blobs, this copy is a fast operation regardless of the database size. 
- -Size of data copy: When the target database is in a different region than the source or if the database backup storage redundancy (Local, Zonal, Geo) from the target differs from the source database, the copy operation will be a size of data operation. Copy time will not be directly proportional to size as page server blobs are copied in parallel. - -## Logins in the database copy - -When you copy a database to the same server, the same logins can be used on both databases. The security principal you use to copy the database becomes the database owner on the new database. - -When you copy a database to a different server, the security principal that initiated the copy operation on the target server becomes the owner of the new database. - -Regardless of the target server, all database users, their permissions, and their security identifiers (SIDs) are copied to the database copy. Using [contained database users](logins-create-manage.md) for data access ensures that the copied database has the same user credentials, so that after the copy is complete you can immediately access it with the same credentials. - -If you use server level logins for data access and copy the database to a different server, the login-based access might not work. This can happen because the logins do not exist on the target server, or because their passwords and security identifiers (SIDs) are different. To learn about managing logins when you copy a database to a different server, see [How to manage Azure SQL Database security after disaster recovery](active-geo-replication-security-configure.md). After the copy operation to a different server succeeds, and before other users are remapped, only the login associated with the database owner, or the server administrator can log in to the copied database. To resolve logins and establish data access after the copying operation is complete, see [Resolve logins](#resolve-logins). 
- -## Copy using the Azure portal - -To copy a database by using the Azure portal, open the page for your database, and then click **Copy**. - - ![Database copy](./media/database-copy/database-copy.png) - -## Copy using PowerShell or the Azure CLI - -To copy a database, use the following examples. - -# [PowerShell](#tab/azure-powershell) - -For PowerShell, use the [New-AzSqlDatabaseCopy](/powershell/module/az.sql/new-azsqldatabasecopy) cmdlet. - -> [!IMPORTANT] -> The PowerShell Azure Resource Manager (RM) module is still supported by Azure SQL Database, but all future development is for the Az.Sql module. The AzureRM module will continue to receive bug fixes until at least December 2020. The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. For more about their compatibility, see [Introducing the new Azure PowerShell Az module](/powershell/azure/new-azureps-module-az). - -```powershell -New-AzSqlDatabaseCopy -ResourceGroupName "" -ServerName $sourceserver -DatabaseName "" ` - -CopyResourceGroupName "myResourceGroup" -CopyServerName $targetserver -CopyDatabaseName "CopyOfMySampleDatabase" -``` - -The database copy is an asynchronous operation but the target database is created immediately after the request is accepted. If you need to cancel the copy operation while still in progress, drop the target database using the [Remove-AzSqlDatabase](/powershell/module/az.sql/remove-azsqldatabase) cmdlet. - -For a complete sample PowerShell script, see [Copy a database to a new server](scripts/copy-database-to-new-server-powershell.md). - -# [Azure CLI](#tab/azure-cli) - -```azurecli -az sql db copy --dest-name "CopyOfMySampleDatabase" --dest-resource-group "myResourceGroup" --dest-server $targetserver ` - --name "" --resource-group "" --server $sourceserver -``` - -The database copy is an asynchronous operation but the target database is created immediately after the request is accepted.
If you need to cancel the copy operation while still in progress, drop the target database using the [az sql db delete](/cli/azure/sql/db#az-sql-db-delete) command. - -* * * - -## Copy using Transact-SQL - -Log in to the master database with the server administrator login or the login that created the database you want to copy. For database copy to succeed, logins that are not the server administrator must be members of the `dbmanager` role. For more information about logins and connecting to the server, see [Manage logins](logins-create-manage.md). - -Start copying the source database with the [CREATE DATABASE ... AS COPY OF](/sql/t-sql/statements/create-database-transact-sql?view=azuresqldb-current&preserve-view=true#copy-a-database) statement. The T-SQL statement continues running until the database copy operation is complete. - -> [!NOTE] -> Terminating the T-SQL statement does not terminate the database copy operation. To terminate the operation, drop the target database. -> -> Database copy using T-SQL is not supported when connecting to the destination server over a [private endpoint](private-endpoint-overview.md). If a private endpoint is configured but public network access is allowed, database copy is supported when connected to the destination server from a public IP address. Once the copy operation completes, public access can be [denied](connectivity-settings.md#deny-public-network-access). - -> [!IMPORTANT] -> Selecting backup storage redundancy when using the T-SQL CREATE DATABASE ... AS COPY OF command is not supported yet. - -### Copy to the same server - -Log in to the master database with the server administrator login or the login that created the database you want to copy. For database copying to succeed, logins that are not the server administrator must be members of the `dbmanager` role. - -This command copies Database1 to a new database named Database2 on the same server.
Depending on the size of your database, the copying operation might take some time to complete. - - ```sql - -- Execute on the master database to start copying - CREATE DATABASE Database2 AS COPY OF Database1; - ``` - -### Copy to an elastic pool - -Log in to the master database with the server administrator login or the login that created the database you want to copy. For database copying to succeed, logins that are not the server administrator must be members of the `dbmanager` role. - -This command copies Database1 to a new database named Database2 in an elastic pool named pool1. Depending on the size of your database, the copying operation might take some time to complete. - -Database1 can be a single or pooled database. Copying between different tier pools is supported, but some cross-tier copies will not succeed. For example, you can copy a single or elastic standard db into a general purpose pool, but you can't copy a standard elastic db into a premium pool. - - ```sql - -- Execute on the master database to start copying - CREATE DATABASE "Database2" - AS COPY OF "Database1" - (SERVICE_OBJECTIVE = ELASTIC_POOL( name = "pool1" ) ); - ``` - -### Copy to a different server - -Log in to the master database of the target server where the new database is to be created. Use a login that has the same name and password as the database owner of the source database on the source server. The login on the target server must also be a member of the `dbmanager` role, or be the server administrator login. - -This command copies Database1 on server1 to a new database named Database2 on server2. Depending on the size of your database, the copying operation might take some time to complete. 
- -```sql --- Execute on the master database of the target server (server2) to start copying from Server1 to Server2 -CREATE DATABASE Database2 AS COPY OF server1.Database1; -``` - -> [!IMPORTANT] -> Both servers' firewalls must be configured to allow inbound connection from the IP of the client issuing the T-SQL CREATE DATABASE ... AS COPY OF command. To determine the source IP address of current connection, execute `SELECT client_net_address FROM sys.dm_exec_connections WHERE session_id = @@SPID;` - -### Copy to a different subscription - -You can use the steps in the [Copy a SQL Database to a different server](#copy-to-a-different-server) section to copy your database to a server in a different subscription using T-SQL. Make sure you use a login that has the same name and password as the database owner of the source database. Additionally, the login must be a member of the `dbmanager` role or a server administrator, on both source and target servers. - -```sql ---Step# 1 ---Create login and user in the master database of the source server. - -CREATE LOGIN loginname WITH PASSWORD = 'xxxxxxxxx' -GO -CREATE USER [loginname] FOR LOGIN [loginname] WITH DEFAULT_SCHEMA=[dbo]; -GO -ALTER ROLE dbmanager ADD MEMBER loginname; -GO - ---Step# 2 ---Create the user in the source database and grant dbowner permission to the database. - -CREATE USER [loginname] FOR LOGIN [loginname] WITH DEFAULT_SCHEMA=[dbo]; -GO -ALTER ROLE db_owner ADD MEMBER loginname; -GO - ---Step# 3 ---Capture the SID of the user "loginname" from master database - -SELECT [sid] FROM sysusers WHERE [name] = 'loginname'; - ---Step# 4 ---Connect to Destination server. ---Create login and user in the master database, same as of the source server. 
- -CREATE LOGIN loginname WITH PASSWORD = 'xxxxxxxxx', SID = [SID of loginname login on source server]; -GO -CREATE USER [loginname] FOR LOGIN [loginname] WITH DEFAULT_SCHEMA=[dbo]; -GO -ALTER ROLE dbmanager ADD MEMBER loginname; -GO - ---Step# 5 ---Execute the copy of database script from the destination server using the credentials created - -CREATE DATABASE new_database_name -AS COPY OF source_server_name.source_database_name; -``` - -> [!NOTE] -> The [Azure portal](https://portal.azure.com), PowerShell, and the Azure CLI do not support database copy to a different subscription. - -> [!TIP] -> Database copy using T-SQL supports copying a database from a subscription in a different Azure tenant. This is only supported when using a SQL authentication login to log in to the target server. -> Creating a database copy on a logical server in a different Azure tenant is not supported when [Azure Active Directory](https://techcommunity.microsoft.com/t5/azure-sql/support-for-azure-ad-user-creation-on-behalf-of-azure-ad/ba-p/2346849) auth is active (enabled) on either source or target logical server. - -## Monitor the progress of the copying operation - -Monitor the copying process by querying the [sys.databases](/sql/relational-databases/system-catalog-views/sys-databases-transact-sql), [sys.dm_database_copies](/sql/relational-databases/system-dynamic-management-views/sys-dm-database-copies-azure-sql-database), and [sys.dm_operation_status](/sql/relational-databases/system-dynamic-management-views/sys-dm-operation-status-azure-sql-database) views. While the copying is in progress, the **state_desc** column of the sys.databases view for the new database is set to **COPYING**. - -* If the copying fails, the **state_desc** column of the sys.databases view for the new database is set to **SUSPECT**. Execute the DROP statement on the new database, and try again later. 
-* If the copying succeeds, the **state_desc** column of the sys.databases view for the new database is set to **ONLINE**. The copying is complete, and the new database is a regular database that can be changed independent of the source database. - -> [!NOTE] -> If you decide to cancel the copying while it is in progress, execute the [DROP DATABASE](/sql/t-sql/statements/drop-database-transact-sql) statement on the new database. - -> [!IMPORTANT] -> If you need to create a copy with a substantially smaller service objective than the source, the target database may not have sufficient resources to complete the seeding process and it can cause the copy operation to fail. In this scenario use a geo-restore request to create a copy in a different server and/or a different region. See [Recover an Azure SQL Database using database backups](recovery-using-backups.md#geo-restore) for more information. - -## Azure RBAC roles and permissions to manage database copy - -To create a database copy, you will need to be in the following roles - -* Subscription Owner or -* SQL Server Contributor role or -* Custom role on the source and target databases with following permission: - - Microsoft.Sql/servers/databases/read - Microsoft.Sql/servers/databases/write - -To cancel a database copy, you will need to be in the following roles - -* Subscription Owner or -* SQL Server Contributor role or -* Custom role on the source and target databases with following permission: - - Microsoft.Sql/servers/databases/read - Microsoft.Sql/servers/databases/write - -To manage database copy using the Azure portal, you will also need the following permissions: - - Microsoft.Resources/subscriptions/resources/read - Microsoft.Resources/subscriptions/resources/write - Microsoft.Resources/deployments/read - Microsoft.Resources/deployments/write - Microsoft.Resources/deployments/operationstatuses/read - -If you want to see the operations under deployments in the resource group on the portal, operations 
across multiple resource providers including SQL operations, you will need these additional permissions: - - Microsoft.Resources/subscriptions/resourcegroups/deployments/operations/read - Microsoft.Resources/subscriptions/resourcegroups/deployments/operationstatuses/read - -## Resolve logins - -After the new database is online on the target server, use the [ALTER USER](/sql/t-sql/statements/alter-user-transact-sql?view=azuresqldb-current&preserve-view=true) statement to remap the users from the new database to logins on the target server. To resolve orphaned users, see [Troubleshoot Orphaned Users](/sql/sql-server/failover-clusters/troubleshoot-orphaned-users-sql-server). See also [How to manage Azure SQL Database security after disaster recovery](active-geo-replication-security-configure.md). - -All users in the new database retain the permissions that they had in the source database. The user who initiated the database copy becomes the database owner of the new database. After the copying succeeds and before other users are remapped, only the database owner can log in to the new database. - -To learn about managing users and logins when you copy a database to a different server, see [How to manage Azure SQL Database security after disaster recovery](active-geo-replication-security-configure.md). - -## Database copy errors - -The following errors can be encountered while copying a database in Azure SQL Database. For more information, see [Copy an Azure SQL Database](database-copy.md). - -| Error code | Severity | Description | -| ---:| ---:|:--- | -| 40635 |16 |Client with IP address '%.*ls' is temporarily disabled. | -| 40637 |16 |Create database copy is currently disabled. | -| 40561 |16 |Database copy failed. Either the source or target database does not exist. | -| 40562 |16 |Database copy failed. The source database has been dropped. | -| 40563 |16 |Database copy failed. The target database has been dropped. 
| -| 40564 |16 |Database copy failed due to an internal error. Please drop target database and try again. | -| 40565 |16 |Database copy failed. No more than 1 concurrent database copy from the same source is allowed. Please drop target database and try again later. | -| 40566 |16 |Database copy failed due to an internal error. Please drop target database and try again. | -| 40567 |16 |Database copy failed due to an internal error. Please drop target database and try again. | -| 40568 |16 |Database copy failed. Source database has become unavailable. Please drop target database and try again. | -| 40569 |16 |Database copy failed. Target database has become unavailable. Please drop target database and try again. | -| 40570 |16 |Database copy failed due to an internal error. Please drop target database and try again later. | -| 40571 |16 |Database copy failed due to an internal error. Please drop target database and try again later. | - -## Next steps - -* For information about logins, see [Manage logins](logins-create-manage.md) and [How to manage Azure SQL Database security after disaster recovery](active-geo-replication-security-configure.md). -* To export a database, see [Export the database to a BACPAC](database-export.md). 
diff --git a/articles/azure-sql/database/database-export.md b/articles/azure-sql/database/database-export.md deleted file mode 100644 index 30da482c7e2ed..0000000000000 --- a/articles/azure-sql/database/database-export.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: Export a database to a BACPAC file -titleSuffix: Azure SQL Database & Azure SQL Managed Instance -description: Export a database to a BACPAC file using the Azure portal or a CLI -services: sql-database -ms.service: sql-db-mi -ms.subservice: data-movement -author: LitKnd -ms.custom: sqldbrb=2 -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 12/10/2021 -ms.topic: how-to ---- -# Export to a BACPAC file - Azure SQL Database and Azure SQL Managed Instance - -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -When you need to export a database for archiving or for moving to another platform, you can export the database schema and data to a [BACPAC](/sql/relational-databases/data-tier-applications/data-tier-applications#bacpac) file. A BACPAC file is a ZIP file with an extension of BACPAC containing the metadata and data from the database. A BACPAC file can be stored in Azure Blob storage or in local storage in an on-premises location and later imported back into [Azure SQL Database](sql-database-paas-overview.md), [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md), or a [SQL Server instance](/sql/database-engine/sql-server-database-engine-overview). - -## Considerations - -- For an export to be transactionally consistent, you must ensure either that no write activity is occurring during the export, or that you are exporting from a [transactionally consistent copy](database-copy.md) of your database. -- If you are exporting to blob storage, the maximum size of a BACPAC file is 200 GB. To archive a larger BACPAC file, export to local storage. 
-- Exporting a BACPAC file to Azure premium storage using the methods discussed in this article is not supported. -- Storage behind a firewall is currently not supported. -- Immutable storage is currently not supported. -- Storage file name or the input value for StorageURI should be fewer than 128 characters long and cannot end with '.' and cannot contain special characters like a space character or '<,>,*,%,&,:,\,/,?'. -- If the export operation exceeds 20 hours, it may be canceled. To increase performance during export, you can: - - - Temporarily increase your compute size. - - Cease all read and write activity during the export. - - Use a [clustered index](/sql/relational-databases/indexes/clustered-and-nonclustered-indexes-described) with non-null values on all large tables. Without clustered indexes, an export may fail if it takes longer than 6-12 hours. This is because the export service needs to complete a table scan to try to export entire table. A good way to determine if your tables are optimized for export is to run **DBCC SHOW_STATISTICS** and make sure that the *RANGE_HI_KEY* is not null and its value has good distribution. For details, see [DBCC SHOW_STATISTICS](/sql/t-sql/database-console-commands/dbcc-show-statistics-transact-sql). -- [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md) does not currently support exporting a database to a BACPAC file using the Azure portal or Azure PowerShell. To export a managed instance into a BACPAC file, use SQL Server Management Studio (SSMS) or [SQLPackage](/sql/tools/sqlpackage). 
-- For databases in the [Hyperscale service tier](service-tier-hyperscale.md), BACPAC export/import from Azure portal, from PowerShell using [New-AzSqlDatabaseExport](/powershell/module/az.sql/new-azsqldatabaseexport) or [New-AzSqlDatabaseImport](/powershell/module/az.sql/new-azsqldatabaseimport), from Azure CLI using [az sql db export](/cli/azure/sql/db#az-sql-db-export) and [az sql db import](/cli/azure/sql/db#az-sql-db-import), and from [REST API](/rest/api/sql/) is not supported. BACPAC import/export for smaller Hyperscale databases (up to 200 GB) is supported using SSMS and [SQLPackage](/sql/tools/sqlpackage) version 18.4 and later. For larger databases, BACPAC export/import may take a long time, and may fail for various reasons. - -> [!NOTE] -> BACPACs are not intended to be used for backup and restore operations. Azure automatically creates backups for every user database. For details, see [business continuity overview](business-continuity-high-availability-disaster-recover-hadr-overview.md) and [SQL Database backups](automated-backups-overview.md). - -> [!NOTE] -> [Import and Export using Private Link](database-import-export-private-link.md) is in preview. - -## The Azure portal - -Exporting a BACPAC of a database from [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md) or from a database in the [Hyperscale service tier](service-tier-hyperscale.md) using the Azure portal is not currently supported. See [Considerations](#considerations). - -> [!NOTE] -> Machines processing import/export requests submitted through the Azure portal or PowerShell need to store the BACPAC file as well as temporary files generated by the Data-Tier Application Framework (DacFX). The disk space required varies significantly among databases with the same size and can require disk space up to three times the size of the database. Machines running the import/export request only have 450GB local disk space. 
As a result, some requests may fail with the error `There is not enough space on the disk`. In this case, the workaround is to run sqlpackage.exe on a machine with enough local disk space. We encourage using [SQLPackage](#sqlpackage-utility) to import/export databases larger than 150GB to avoid this issue. - -1. To export a database using the [Azure portal](https://portal.azure.com), open the page for your database and select **Export** on the toolbar. - - ![Screenshot that highlights the Export button.](./media/database-export/database-export1.png) - -2. Specify the BACPAC filename, select an existing Azure storage account and container for the export, and then provide the appropriate credentials for access to the source database. A SQL **Server admin login** is needed here even if you are the Azure admin, as being an Azure admin does not equate to having admin permissions in Azure SQL Database or Azure SQL Managed Instance. - - ![Database export](./media/database-export/database-export2.png) - -3. Select **OK**. - -4. To monitor the progress of the export operation, open the page for the server containing the database being exported. Under **Data management**, select **Import/Export history**. - -## SQLPackage utility - -We recommend the use of the SQLPackage utility for scale and performance in most production environments. You can run multiple sqlpackage.exe commands in parallel for subsets of tables to speed up import/export operations. - -To export a database in SQL Database using the [SQLPackage](/sql/tools/sqlpackage) command-line utility, see [Export parameters and properties](/sql/tools/sqlpackage#export-parameters-and-properties). 
The SQLPackage utility ships with the latest versions of [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) and [SQL Server Data Tools for Visual Studio](/sql/ssdt/download-sql-server-data-tools-ssdt), or you can download the latest version of [SQLPackage](/sql/tools/sqlpackage/sqlpackage-download) directly from the Microsoft download center. - -This example shows how to export a database using sqlpackage.exe with Active Directory Universal Authentication: - -```cmd -sqlpackage.exe /a:Export /tf:testExport.BACPAC /scs:"Data Source=apptestserver.database.windows.net;Initial Catalog=MyDB;" /ua:True /tid:"apptest.onmicrosoft.com" -``` - -## SQL Server Management Studio (SSMS) - -The newest versions of SQL Server Management Studio provide a wizard to export a database in Azure SQL Database or a SQL Managed Instance database to a BACPAC file. See the [Export a Data-tier Application](/sql/relational-databases/data-tier-applications/export-a-data-tier-application). - -## PowerShell - -Exporting a BACPAC of a database from [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md) or from a database in the [Hyperscale service tier](service-tier-hyperscale.md) using PowerShell is not currently supported. See [Considerations](#considerations). - -Use the [New-AzSqlDatabaseExport](/powershell/module/az.sql/new-azsqldatabaseexport) cmdlet to submit an export database request to the Azure SQL Database service. Depending on the size of your database, the export operation may take some time to complete. 
- -```powershell -$exportRequest = New-AzSqlDatabaseExport -ResourceGroupName $ResourceGroupName -ServerName $ServerName ` - -DatabaseName $DatabaseName -StorageKeytype $StorageKeytype -StorageKey $StorageKey -StorageUri $BacpacUri ` - -AdministratorLogin $creds.UserName -AdministratorLoginPassword $creds.Password -``` - -To check the status of the export request, use the [Get-AzSqlDatabaseImportExportStatus](/powershell/module/az.sql/get-azsqldatabaseimportexportstatus) cmdlet. Running this cmdlet immediately after the request usually returns **Status: InProgress**. When you see **Status: Succeeded** the export is complete. - -```powershell -$exportStatus = Get-AzSqlDatabaseImportExportStatus -OperationStatusLink $exportRequest.OperationStatusLink -[Console]::Write("Exporting") -while ($exportStatus.Status -eq "InProgress") -{ - Start-Sleep -s 10 - $exportStatus = Get-AzSqlDatabaseImportExportStatus -OperationStatusLink $exportRequest.OperationStatusLink - [Console]::Write(".") -} -[Console]::WriteLine("") -$exportStatus -``` -## Cancel the export request - -Use the [Database Operations - Cancel API](/rest/api/sql/databaseoperations/cancel) -or the PowerShell [Stop-AzSqlDatabaseActivity command](/powershell/module/az.sql/Stop-AzSqlDatabaseActivity) to cancel an export request. Here is an example PowerShell command: - -```cmd -Stop-AzSqlDatabaseActivity -ResourceGroupName $ResourceGroupName -ServerName $ServerName -DatabaseName $DatabaseName -OperationId $Operation.OperationId -``` - -## Next steps - -- To learn about long-term backup retention of a single database and pooled databases as an alternative to exporting a database for archive purposes, see [Long-term backup retention](long-term-retention-overview.md). You can use SQL Agent jobs to schedule [copy-only database backups](/sql/relational-databases/backup-restore/copy-only-backups-sql-server) as an alternative to long-term backup retention. 
-- To learn about importing a BACPAC to a SQL Server database, see [Import a BACPAC to a SQL Server database](/sql/relational-databases/data-tier-applications/import-a-BACPAC-file-to-create-a-new-user-database). -- To learn about exporting a BACPAC from a SQL Server database, see [Export a Data-tier Application](/sql/relational-databases/data-tier-applications/export-a-data-tier-application) -- To learn about using the Data Migration Service to migrate a database, see [Migrate from SQL Server to Azure SQL Database offline using DMS](../../dms/tutorial-sql-server-to-azure-sql.md). -- If you are exporting from SQL Server as a prelude to migration to Azure SQL Database, see [Migrate a SQL Server database to Azure SQL Database](migrate-to-database-from-sql-server.md). -- To learn how to manage and share storage keys and shared access signatures securely, see [Azure Storage Security Guide](../../storage/blobs/security-recommendations.md). diff --git a/articles/azure-sql/database/database-import-export-azure-services-off.md b/articles/azure-sql/database/database-import-export-azure-services-off.md deleted file mode 100644 index 793b9e0610a17..0000000000000 --- a/articles/azure-sql/database/database-import-export-azure-services-off.md +++ /dev/null @@ -1,153 +0,0 @@ ---- -title: Import or export an Azure SQL Database without allowing Azure services to access the server. -description: Import or export an Azure SQL Database without allowing Azure services to access the server. -services: sql-database -ms.service: sql-database -ms.subservice: migration -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 01/08/2020 ---- -# Import or export an Azure SQL Database without allowing Azure services to access the server -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article shows you how to import or export an Azure SQL Database when *Allow Azure Services* is set to *OFF* on the server. 
The workflow uses an Azure virtual machine to run SqlPackage to perform the import or export operation. - -## Sign in to the Azure portal - -Sign in to the [Azure portal](https://portal.azure.com/). - -## Create the Azure virtual machine - -Create an Azure virtual machine by selecting the **Deploy to Azure** button. - -This template allows you to deploy a simple Windows virtual machine using a few different options for the Windows version, using the latest patched version. This will deploy a A2 size VM in the resource group location and return the fully qualified domain name of the VM. -

    - -[![Image showing a button labeled "Deploy to Azure".](https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/1-CONTRIBUTION-GUIDE/images/deploytoazure.png)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-quickstart-templates%2Fmaster%2Fquickstarts%2Fmicrosoft.compute%2Fvm-simple-windows%2Fazuredeploy.json) - -For more information, see [Very simple deployment of a Windows VM](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.compute/vm-simple-windows). - -## Connect to the virtual machine - -The following steps show you how to connect to your virtual machine using a remote desktop connection. - -1. After deployment completes, go to the virtual machine resource. - - ![Screenshot shows a virtual machine Overview page with a Connect button.](./media/database-import-export-azure-services-off/vm.png) - -2. Select **Connect**. - - A Remote Desktop Protocol file (.rdp file) form appears with the public IP address and port number for the virtual machine. - - ![RDP form](./media/database-import-export-azure-services-off/rdp.png) - -3. Select **Download RDP File**. - - > [!NOTE] - > You can also use SSH to connect to your VM. - -4. Close the **Connect to virtual machine** form. -5. To connect to your VM, open the downloaded RDP file. -6. When prompted, select **Connect**. On a Mac, you need an RDP client such as this [Remote Desktop Client](https://apps.apple.com/app/microsoft-remote-desktop-10/id1295203466?mt=12) from the Mac App Store. - -7. Enter the username and password you specified when creating the virtual machine, then choose **OK**. - -8. You might receive a certificate warning during the sign-in process. Choose **Yes** or **Continue** to proceed with the connection. - -## Install SqlPackage - -[Download and install the latest version of SqlPackage](/sql/tools/sqlpackage-download). 
- -For additional information, see [SqlPackage.exe](/sql/tools/sqlpackage). - -## Create a firewall rule to allow the VM access to the database - -Add the virtual machine's public IP address to the server's firewall. - -The following steps create a server-level IP firewall rule for your virtual machine's public IP address and enables connectivity from the virtual machine. - -1. Select **SQL databases** from the left-hand menu and then select your database on the **SQL databases** page. The overview page for your database opens, showing you the fully qualified server name (such as **servername.database.windows.net**) and provides options for further configuration. - -2. Copy this fully qualified server name to use when connecting to your server and its databases. - - ![server name](./media/database-import-export-azure-services-off/server-name.png) - -3. Select **Set server firewall** on the toolbar. The **Firewall settings** page for the server opens. - - ![server-level IP firewall rule](./media/database-import-export-azure-services-off/server-firewall-rule.png) - -4. Choose **Add client IP** on the toolbar to add your virtual machine's public IP address to a new server-level IP firewall rule. A server-level IP firewall rule can open port 1433 for a single IP address or a range of IP addresses. - -5. Select **Save**. A server-level IP firewall rule is created for your virtual machine's public IP address opening port 1433 on the server. - -6. Close the **Firewall settings** page. - -## Export a database using SqlPackage - -To export an Azure SQL Database using the [SqlPackage](/sql/tools/sqlpackage) command-line utility, see [Export parameters and properties](/sql/tools/sqlpackage#export-parameters-and-properties). 
The SqlPackage utility ships with the latest versions of [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) and [SQL Server Data Tools](/sql/ssdt/download-sql-server-data-tools-ssdt), or you can download the latest version of [SqlPackage](/sql/tools/sqlpackage-download). - -We recommend the use of the SqlPackage utility for scale and performance in most production environments. For a SQL Server Customer Advisory Team blog about migrating using BACPAC files, see [Migrating from SQL Server to Azure SQL Database using BACPAC Files](/archive/blogs/sqlcat/migrating-from-sql-server-to-azure-sql-database-using-bacpac-files). - -This example shows how to export a database using SqlPackage.exe with Active Directory Universal Authentication. Replace with values that are specific to your environment. - -```cmd -SqlPackage.exe /a:Export /tf:testExport.bacpac /scs:"Data Source=.database.windows.net;Initial Catalog=MyDB;" /ua:True /tid:"apptest.onmicrosoft.com" -``` - -## Import a database using SqlPackage - -To import a SQL Server database using the [SqlPackage](/sql/tools/sqlpackage) command-line utility, see [import parameters and properties](/sql/tools/sqlpackage#import-parameters-and-properties). SqlPackage has the latest [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) and [SQL Server Data Tools](/sql/ssdt/download-sql-server-data-tools-ssdt). You can also download the latest version of [SqlPackage](/sql/tools/sqlpackage-download). - -For scale and performance, we recommend using SqlPackage in most production environments rather than using the Azure portal. For a SQL Server Customer Advisory Team blog about migrating using `BACPAC` files, see [migrating from SQL Server to Azure SQL Database using BACPAC Files](/archive/blogs/sqlcat/migrating-from-sql-server-to-azure-sql-database-using-bacpac-files). 
- -The following SqlPackage command imports the **AdventureWorks2017** database from local storage to an Azure SQL Database. It creates a new database called **myMigratedDatabase** with a **Premium** service tier and a **P6** Service Objective. Change these values as appropriate for your environment. - -```cmd -sqlpackage.exe /a:import /tcs:"Data Source=.database.windows.net;Initial Catalog=myMigratedDatabase>;User Id=;Password=" /sf:AdventureWorks2017.bacpac /p:DatabaseEdition=Premium /p:DatabaseServiceObjective=P6 -``` - -> [!IMPORTANT] -> To connect to tAzure SQL Database from behind a corporate firewall, the firewall must have port 1433 open. - -This example shows how to import a database using SqlPackage with Active Directory Universal Authentication. - -```cmd -sqlpackage.exe /a:Import /sf:testExport.bacpac /tdn:NewDacFX /tsn:apptestserver.database.windows.net /ua:True /tid:"apptest.onmicrosoft.com" -``` - -## Performance considerations - -Export speeds vary due to many factors (for example, data shape) so it's impossible to predict what speed should be expected. SqlPackage may take considerable time, particularly for large databases. - -To get the best performance you can try the following strategies: - -1. Make sure no other workload is running on the database. Create a copy before export may be the best solution to ensure no other workloads are running. -2. Increase database service level objective (SLO) to better handle the export workload (primarily read I/O). If the database is currently GP_Gen5_4, perhaps a Business Critical tier would help with read workload. -3. Make sure there are clustered indexes particularly for large tables. -4. Virtual machines (VMs) should be in the same region as the database to help avoid network constraints. -5. VMs should have SSD with adequate size for generating temp artifacts before uploading to blob storage. -6. VMs should have adequate core and memory configuration for the specific database. 
- -## Store the imported or exported .BACPAC file - -The .BACPAC file can be stored in [Azure Blobs](../../storage/blobs/storage-blobs-overview.md), or [Azure Files](../../storage/files/storage-files-introduction.md). - -To achieve the best performance, use Azure Files. SqlPackage operates with the filesystem so it can access Azure Files directly. - -To reduce cost, use Azure Blobs, which cost less than a premium Azure file share. However, it will require you to copy the [.BACPAC file](/sql/relational-databases/data-tier-applications/data-tier-applications#bacpac) between the the blob and the local file system before the import or export operation. As a result the process will take longer. - -To upload or download .BACPAC files, see [Transfer data with AzCopy and Blob storage](../../storage/common/storage-use-azcopy-v10.md#transfer-data), and [Transfer data with AzCopy and file storage](../../storage/common/storage-use-azcopy-files.md). - -Depending on your environment, you might need to [Configure Azure Storage firewalls and virtual networks](../../storage/common/storage-network-security.md). - -## Next steps - -- To learn how to connect to and query an imported SQL Database, see [Quickstart: Azure SQL Database: Use SQL Server Management Studio to connect and query data](connect-query-ssms.md). -- For a SQL Server Customer Advisory Team blog about migrating using BACPAC files, see [Migrating from SQL Server to Azure SQL Database using BACPAC Files](https://techcommunity.microsoft.com/t5/DataCAT/Migrating-from-SQL-Server-to-Azure-SQL-Database-using-Bacpac/ba-p/305407). -- For a discussion of the entire SQL Server database migration process, including performance recommendations, see [SQL Server database migration to Azure SQL Database](migrate-to-database-from-sql-server.md). -- To learn how to manage and share storage keys and shared access signatures securely, see [Azure Storage Security Guide](../../storage/blobs/security-recommendations.md). 
diff --git a/articles/azure-sql/database/database-import-export-hang.md b/articles/azure-sql/database/database-import-export-hang.md deleted file mode 100644 index 65e22246969bf..0000000000000 --- a/articles/azure-sql/database/database-import-export-hang.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Import and export of a database takes a long time -description: "Azure SQL Database and Azure SQL Managed Instance Import/Export service takes a long time to import or export a database" -ms.custom: seo-lt-2019, sqldbrb=1 -services: sql-database -ms.service: sql-db-mi -ms.subservice: data-movement -ms.topic: troubleshooting -author: v-miegge -ms.author: ramakoni -ms.reviewer: kendralittle, mathoma -ms.date: 09/27/2019 ---- - -# Azure SQL Database and Managed Instance Import/Export service takes a long time to import or export a database - -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -When you use the Import/Export service, the process might take longer than expected. This article describes the potential causes for this delay and alternative workaround methods. - -## Azure SQL Database Import/Export service - -The Azure SQL Database Import/Export service is a REST-based web service that runs in every Azure data center. This service is called when you use either the [Import database](database-import.md#using-azure-portal) or [Export](./database-import.md#using-azure-portal) option to move your database in the Azure portal. The service provides free request queuing and compute services to perform imports and exports between Azure SQL Database and Azure Blob storage. - -The import and export operations don't represent a traditional physical database backup but instead a logical backup of the database that uses a special BACPAC format. The BACPAC format lets you avoid having to use a physical format that might vary between versions of Microsoft SQL Server, Azure SQL Database, and Azure SQL Managed Instance. 
- -## What causes delays in the process? - -The Azure SQL Database Import/Export service provides a limited number of compute virtual machines (VMs) per region to process import and export operations. The compute VMs are hosted per region to make sure that the import or export avoids cross-region bandwidth delays and charges. If too many requests are made at the same time in the same region, significant delays can occur in processing the operations. The time that's required to complete requests can vary from a few seconds to many hours. - - -## Recommended solutions - -If your database exports are used only for recovery from accidental data deletion, all the Azure SQL Database editions provide self-service restoration capability from system-generated backups. But if you need these exports for other reasons, and if you require consistently faster or more predictable import/export performance, consider the following options: - -* [Export to a BACPAC file by using the SQLPackage utility](./database-export.md#sqlpackage-utility). -* [Export to a BACPAC file by using SQL Server Management Studio (SSMS)](./database-export.md#sql-server-management-studio-ssms). -* Run the BACPAC import or export directly in your code by using the Microsoft SQL Server Data-Tier Application Framework (DacFx) API. For additional information, see: - * [Export a data-tier application](/sql/relational-databases/data-tier-applications/export-a-data-tier-application) - * [Microsoft.SqlServer.Dac Namespace](/dotnet/api/microsoft.sqlserver.dac) - * [Download DACFx](https://www.microsoft.com/download/details.aspx?id=55713) - -## Things to consider when you export or import a database - -* All the methods discussed in this article use up the Database Transaction Unit (DTU) quota, which causes throttling by the Azure SQL Database service. You can [view the DTU stats for the database on the Azure portal](./monitor-tune-overview.md#azure-sql-database-and-azure-sql-managed-instance-resource-monitoring). 
If the database has reached its resource limits, [upgrade the service tier](./scale-resources.md) to add more resources. -* Ideally, you should run client applications (like the sqlpackage utility or your custom DAC application) from a VM in the same region as your database. Otherwise, you might experience performance issues related to network latency. -* Exporting large tables without clustered indexes can be very slow or even cause failure. This behavior occurs because the table can't be split up and exported in parallel. Instead, it must be exported in a single transaction, and that causes slow performance and potential failure during export, especially for large tables. - - -## Related documents - -[Considerations when exporting a database](./database-export.md#considerations) diff --git a/articles/azure-sql/database/database-import-export-private-link.md b/articles/azure-sql/database/database-import-export-private-link.md deleted file mode 100644 index ed49899d8acc7..0000000000000 --- a/articles/azure-sql/database/database-import-export-private-link.md +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: Import or export an Azure SQL Database using Private link -description: Import or export an Azure SQL Database using Private Link without allowing Azure services to access the server. -services: sql-database -ms.service: sql-database -ms.subservice: migration -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: SudhirRaparla -ms.author: nvraparl -ms.reviewer: -ms.date: 2/16/2022 ---- -# Import or export an Azure SQL Database using Private Link without allowing Azure services to access the server - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Running Import or Export via Azure PowerShell or Azure portal requires you to set [Allow Access to Azure Services](network-access-controls-overview.md) to ON, otherwise Import/Export operation fails with an error. 
Often, users want to perform Import or Export using a private end point without allowing access to all Azure services. - -## What is Import-Export Private Link? - -Import Export Private Link is a Service Managed Private Endpoint created by Microsoft and that is exclusively used by the Import-Export, database and Azure Storage services for all communications. The private end point has to be manually approved by user in the Azure portal for both server and storage. - -:::image type="content" source="./media/database-import-export-private-link/import-export-private-link.png" alt-text="Screenshot of Import Export Private link architecture"::: - -To use Private Link with Import-Export, user database and Azure Storage blob container must be hosted on the same type of Azure Cloud. For example, either both in Azure Commercial or both on Azure Gov. Hosting across cloud types isn't supported. - -This article explains how to import or export an Azure SQL Database using [Private Link](private-endpoint-overview.md) with *Allow Azure Services* is set to *OFF* on the Azure SQL server. - -> [!NOTE] -> Import Export using Private Link for Azure SQL Database is currently in preview - -> [!IMPORTANT] -> Import or Export of a database from [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md) or from a database in the [Hyperscale service tier](service-tier-hyperscale.md) using PowerShell isn't currently supported. - ---- - -## Configure Import-Export Private Link -Import-Export Private Link can be configured via Azure portal, PowerShell or using REST API. - -### Configure Import-Export Private link using Azure portal - -#### Create Import Private Link -1. Go to server into which you would like to import database. Select Import database from toolbar in Overview page. -2. 
In Import Database page, select Use Private Link option -:::image type="content" source="./media/database-import-export-private-link/import-database-private-link.png" alt-text="Screenshot that shows how to enable Import Private link" lightbox="media/database-import-export-private-link/import-database-private-link.png"::: -3. Enter the storage account, server credentials, Database details and select on Ok - -#### Create Export Private Link -1. Go to the database that you would like to export. Select Export database from toolbar in Overview page -2. In Export Database page, select Use Private Link option -:::image type="content" source="./media/database-import-export-private-link/export-database-private-link.png" alt-text="Screenshot that shows how to enable Export Private Link" lightbox="media/database-import-export-private-link/export-database-private-link.png"::: -3. Enter the storage account, server sign-in credentials, Database details and select Ok - -#### Approve Private End Points - -##### Approve Private Endpoints in Private Link Center -1. Go to Private Link Center -2. Navigate to Private endpoints section -3. Approve the private endpoints you created using Import/Export service - -##### Approve Private End Point connection on Azure SQL Database -1. Go to the server that hosts the database. -2. Open the ‘Private endpoint connections’ page in security section on the left. -3. Select the private endpoint you want to approve. -4. Select Approve to approve the connection. - -:::image type="content" source="media/database-import-export-private-link/approve-private-link.png" alt-text="Screenshot that shows how to approve Azure SQL Database Private Link"::: - -##### Approve Private End Point connection on Azure Storage -1. Go to the storage account that hosts the blob container that holds BACPAC file. -2. Open the ‘Private endpoint connections’ page in security section on the left. -3. Select the Import-Export private endpoints you want to approve. -4. 
Select Approve to approve the connection. - -:::image type="content" source="./media/database-import-export-private-link/approve-private-link-storage.png" alt-text="Screenshot that shows how to approve Azure Storage Private Link in Azure Storage"::: - -After the Private End points are approved both in Azure SQL Server and Storage account, Import or Export jobs will be kicked off. Until then, the jobs will be on hold. - -You can check the status of Import or Export jobs in Import-Export History page under Data Management section in Azure SQL Server page. -:::image type="content" source="./media/database-import-export-private-link/import-export-status.png" alt-text="Screenshot that shows how to check Import Export Jobs Status" lightbox="media/database-import-export-private-link/import-export-status.png"::: - ---- - -### Configure Import-Export Private Link using PowerShell - -#### Import a Database using Private link in PowerShell -Use the [New-AzSqlDatabaseImport](/PowerShell/module/az.sql/new-azsqldatabaseimport) cmdlet to submit an import database request to Azure. Depending on database size, the import may take some time to complete. The DTU based provisioning model supports select database max size values for each tier. When importing a database [use one of these supported values](/sql/t-sql/statements/create-database-transact-sql). 
- -```PowerShell -$importRequest = New-AzSqlDatabaseImport -ResourceGroupName "" ` - -ServerName "" -DatabaseName "" ` - -DatabaseMaxSizeBytes "" -StorageKeyType "StorageAccessKey" ` - -StorageKey $(Get-AzStorageAccountKey -ResourceGroupName $resourceGroupName ` - -StorageAccountName "").Value[0] - -StorageUri "https://myStorageAccount.blob.core.windows.net/importsample/sample.bacpac" ` - -Edition "Standard" -ServiceObjectiveName "P6" ` -UseNetworkIsolation $true ` - -StorageAccountResourceIdForPrivateLink "/subscriptions//resourcegroups//providers/Microsoft.Storage/storageAccounts/" ` - -SqlServerResourceIdForPrivateLink "/subscriptions//resourceGroups//providers/Microsoft.Sql/servers/" ` - -AdministratorLogin "" ` - -AdministratorLoginPassword $(ConvertTo-SecureString -String "" -AsPlainText -Force) - -``` - -#### Export a Database using Private Link in PowerShell -Use the [New-AzSqlDatabaseExport](/PowerShell/module/az.sql/new-azsqldatabaseexport) cmdlet to submit an export database request to the Azure SQL Database service. Depending on the size of your database, the export operation may take some time to complete. 
- -```PowerShell -$importRequest = New-AzSqlDatabaseExport -ResourceGroupName "" ` - -ServerName "" -DatabaseName "" ` - -DatabaseMaxSizeBytes "" -StorageKeyType "StorageAccessKey" ` - -StorageKey $(Get-AzStorageAccountKey -ResourceGroupName $resourceGroupName ` - -StorageAccountName "").Value[0] - -StorageUri "https://myStorageAccount.blob.core.windows.net/importsample/sample.bacpac" ` - -Edition "Standard" -ServiceObjectiveName "P6" ` -UseNetworkIsolation $true ` - -StorageAccountResourceIdForPrivateLink "/subscriptions//resourcegroups//providers/Microsoft.Storage/storageAccounts/" ` - -SqlServerResourceIdForPrivateLink "/subscriptions//resourceGroups//providers/Microsoft.Sql/servers/" ` - -AdministratorLogin "" ` - -AdministratorLoginPassword $(ConvertTo-SecureString -String "" -AsPlainText -Force) -``` - ---- - -### Create Import-Export Private link using REST API -Existing APIs to perform Import and Export jobs have been enhanced to support Private Link. Refer to [Import Database API](/rest/api/sql/2021-08-01-preview/servers/import-database) - -## Limitations - -- Import using Private Link does not support specifying a backup storage redundancy while creating a new database and creates with the default geo-redundant backup storage redundancy. As a work around, first create an empty database with desired backup storage redundancy using Azure portal or PowerShell and then import the BACPAC into this empty database. -- Import and Export operations are not supported in Azure SQL DB Hyperscale tier yet. -- Import using REST API with private link can only be done to existing database since the API uses database extensions. To workaround this create an empty database with desired name and call Import REST API with Private link. 
- - -## Next steps -- [Import or Export Azure SQL Database without allowing Azure services to access the server](database-import-export-azure-services-off.md) -- [Import a database from a BACPAC file](database-import.md) diff --git a/articles/azure-sql/database/database-import.md b/articles/azure-sql/database/database-import.md deleted file mode 100644 index 76e6719010efc..0000000000000 --- a/articles/azure-sql/database/database-import.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -title: Import a BACPAC file to create a database in Azure SQL Database -description: Create a new database in Azure SQL Database or Azure SQL Managed Instance from a BACPAC file. -services: sql-database -ms.service: sql-db-mi -ms.subservice: backup-restore -ms.custom: sqldbrb=1, devx-track-azurepowershell, mode-api -ms.devlang: -ms.topic: quickstart -author: SudhirRaparla -ms.author: nvraparl -ms.reviewer: kendralittle, mathoma -ms.date: 10/29/2020 ---- -# Quickstart: Import a BACPAC file to a database in Azure SQL Database or Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -You can import a SQL Server database into Azure SQL Database or SQL Managed Instance using a [BACPAC](/sql/relational-databases/data-tier-applications/data-tier-applications#bacpac) file. You can import the data from a BACPAC file stored in Azure Blob storage (standard storage only) or from local storage in an on-premises location. To maximize import speed by providing more and faster resources, scale your database to a higher service tier and compute size during the import process. You can then scale down after the import is successful. - -> [!NOTE] -> The imported database's compatibility level is based on the source database's compatibility level. - -> [!IMPORTANT] -> After importing your database, you can choose to operate the database at its current compatibility level (level 100 for the AdventureWorks2008R2 database) or at a higher level. 
For more information on the implications and options for operating a database at a specific compatibility level, see [ALTER DATABASE Compatibility Level](/sql/t-sql/statements/alter-database-transact-sql-compatibility-level). See also [ALTER DATABASE SCOPED CONFIGURATION](/sql/t-sql/statements/alter-database-scoped-configuration-transact-sql) for information about additional database-level settings related to compatibility levels. - -> [!NOTE] -> [Import and Export using Private Link](database-import-export-private-link.md) is in preview. - -## Using Azure portal - -Watch this video to see how to import from a BACPAC file in the Azure portal or continue reading below: - -> [!VIDEO https://docs.microsoft.com/Shows/Data-Exposed/Its-just-SQL-Restoring-a-database-to-Azure-SQL-DB-from-backup/player?WT.mc_id=dataexposed-c9-niner] - -The [Azure portal](https://portal.azure.com) *only* supports creating a single database in Azure SQL Database and *only* from a BACPAC file stored in Azure Blob storage. - -To migrate a database into an [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md) from a BACPAC file, use SQL Server Management Studio or SQLPackage, using the Azure portal or Azure PowerShell is not currently supported. - -> [!NOTE] -> Machines processing import/export requests submitted through the Azure portal or PowerShell need to store the BACPAC file as well as temporary files generated by the Data-Tier Application Framework (DacFX). The disk space required varies significantly among databases with the same size and can require disk space up to 3 times the size of the database. Machines running the import/export request only have 450GB local disk space. As a result, some requests may fail with the error `There is not enough space on the disk`. In this case, the workaround is to run sqlpackage.exe on a machine with enough local disk space. 
We encourage using SqlPackage to import/export databases larger than 150GB to avoid this issue. - -1. To import from a BACPAC file into a new single database using the Azure portal, open the appropriate server page and then, on the toolbar, select **Import database**. - - ![Database import1](./media/database-import/sql-server-import-database.png) - -1. Select the storage account and the container for the BACPAC file and then select the BACPAC file from which to import. - -1. Specify the new database size (usually the same as origin) and provide the destination SQL Server credentials. For a list of possible values for a new database in Azure SQL Database, see [Create Database](/sql/t-sql/statements/create-database-transact-sql?view=azuresqldb-current&preserve-view=true). - - ![Database import2](./media/database-import/sql-server-import-database-settings.png) - -1. Click **OK**. - -1. To monitor an import's progress, open the database's server page, and, under **Settings**, select **Import/Export history**. When successful, the import has a **Completed** status. - - ![Database import status](./media/database-import/sql-server-import-database-history.png) - -1. To verify the database is live on the server, select **SQL databases** and verify the new database is **Online**. - -## Using SqlPackage - -To import a SQL Server database using the [SqlPackage](/sql/tools/sqlpackage) command-line utility, see [import parameters and properties](/sql/tools/sqlpackage#import-parameters-and-properties). [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) and [SQL Server Data Tools for Visual Studio](/sql/ssdt/download-sql-server-data-tools-ssdt) include SqlPackage. You can also download the latest [SqlPackage](https://www.microsoft.com/download/details.aspx?id=53876) from the Microsoft download center. - -For scale and performance, we recommend using SqlPackage in most production environments rather than using the Azure portal. 
For a SQL Server Customer Advisory Team blog about migrating using `BACPAC` files, see [migrating from SQL Server to Azure SQL Database using BACPAC Files](/archive/blogs/sqlcat/migrating-from-sql-server-to-azure-sql-database-using-bacpac-files). - -The DTU based provisioning model supports select database max size values for each tier. When importing a database [use one of these supported values](/sql/t-sql/statements/create-database-transact-sql). - -The following SqlPackage command imports the **AdventureWorks2008R2** database from local storage to a logical SQL server named **mynewserver20170403**. It creates a new database called **myMigratedDatabase** with a **Premium** service tier and a **P6** Service Objective. Change these values as appropriate for your environment. - -```cmd -sqlpackage.exe /a:import /tcs:"Data Source=.database.windows.net;Initial Catalog=;User Id=;Password=" /sf:AdventureWorks2008R2.bacpac /p:DatabaseEdition=Premium /p:DatabaseServiceObjective=P6 -``` - -> [!IMPORTANT] -> To connect to Azure SQL Database from behind a corporate firewall, the firewall must have port 1433 open. To connect to SQL Managed Instance, you must have a [point-to-site connection](../managed-instance/point-to-site-p2s-configure.md) or an express route connection. - -This example shows how to import a database using SqlPackage with Active Directory Universal Authentication. - -```cmd -sqlpackage.exe /a:Import /sf:testExport.bacpac /tdn:NewDacFX /tsn:apptestserver.database.windows.net /ua:True /tid:"apptest.onmicrosoft.com" -``` - -## Using PowerShell - -> [!NOTE] -> [A SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md) does not currently support migrating a database into an instance database from a BACPAC file using Azure PowerShell. To import into a SQL Managed Instance, use SQL Server Management Studio or SQLPackage. 
- -> [!NOTE] -> The machines processing import/export requests submitted through portal or PowerShell need to store the bacpac file as well as temporary files generated by Data-Tier Application Framework (DacFX). The disk space required varies significantly among DBs with same size and can take up to 3 times of the database size. Machines running the import/export request only have 450GB local disk space. As result, some requests may fail with "There is not enough space on the disk" error. In this case, the workaround is to run sqlpackage.exe on a machine with enough local disk space. When importing/exporting databases larger than 150GB, use SqlPackage to avoid this issue. - -# [PowerShell](#tab/azure-powershell) - -> [!IMPORTANT] -> The PowerShell Azure Resource Manager (RM) module is still supported, but all future development is for the Az.Sql module. The AzureRM module will continue to receive bug fixes until at least December 2020. The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. For more about their compatibility, see [Introducing the new Azure PowerShell Az module](/powershell/azure/new-azureps-module-az). - -Use the [New-AzSqlDatabaseImport](/powershell/module/az.sql/new-azsqldatabaseimport) cmdlet to submit an import database request to Azure. Depending on database size, the import may take some time to complete. The DTU based provisioning model supports select database max size values for each tier. When importing a database [use one of these supported values](/sql/t-sql/statements/create-database-transact-sql). 
- -```powershell -$importRequest = New-AzSqlDatabaseImport -ResourceGroupName "" ` - -ServerName "" -DatabaseName "" ` - -DatabaseMaxSizeBytes "" -StorageKeyType "StorageAccessKey" ` - -StorageKey $(Get-AzStorageAccountKey ` - -ResourceGroupName "" -StorageAccountName "").Value[0] ` - -StorageUri "https://myStorageAccount.blob.core.windows.net/importsample/sample.bacpac" ` - -Edition "Standard" -ServiceObjectiveName "P6" ` - -AdministratorLogin "" ` - -AdministratorLoginPassword $(ConvertTo-SecureString -String "" -AsPlainText -Force) -``` - -You can use the [Get-AzSqlDatabaseImportExportStatus](/powershell/module/az.sql/get-azsqldatabaseimportexportstatus) cmdlet to check the import's progress. Running the cmdlet immediately after the request usually returns `Status: InProgress`. The import is complete when you see `Status: Succeeded`. - -```powershell -$importStatus = Get-AzSqlDatabaseImportExportStatus -OperationStatusLink $importRequest.OperationStatusLink - -[Console]::Write("Importing") -while ($importStatus.Status -eq "InProgress") { - $importStatus = Get-AzSqlDatabaseImportExportStatus -OperationStatusLink $importRequest.OperationStatusLink - [Console]::Write(".") - Start-Sleep -s 10 -} - -[Console]::WriteLine("") -$importStatus -``` - -# [Azure CLI](#tab/azure-cli) - -Use the [az-sql-db-import](/cli/azure/sql/db#az-sql-db-import) command to submit an import database request to Azure. Depending on database size, the import may take some time to complete. The DTU based provisioning model supports select database max size values for each tier. When importing a database [use one of these supported values](/sql/t-sql/statements/create-database-transact-sql). 
- -```azurecli -# get the storage account key -az storage account keys list --resource-group "" --account-name "" - -az sql db import --resource-group "" --server "" --name "" ` - --storage-key-type "StorageAccessKey" --storage-key "" ` - --storage-uri "https://myStorageAccount.blob.core.windows.net/importsample/sample.bacpac" ` - -u "" -p "" -``` - -* * * - -> [!TIP] -> For another script example, see [Import a database from a BACPAC file](scripts/import-from-bacpac-powershell.md). - -## Cancel the import request - -Use the [Database Operations - Cancel API](/rest/api/sql/databaseoperations/cancel) -or the PowerShell [Stop-AzSqlDatabaseActivity command](/powershell/module/az.sql/Stop-AzSqlDatabaseActivity), here an example of powershell command. - -```cmd -Stop-AzSqlDatabaseActivity -ResourceGroupName $ResourceGroupName -ServerName $ServerName -DatabaseName $DatabaseName -OperationId $Operation.OperationId -``` - - -## Limitations - -- Importing to a database in elastic pool isn't supported. You can import data into a single database and then move the database to an elastic pool. -- Import Export Service does not work when Allow access to Azure services is set to OFF. However you can work around the problem by manually running sqlpackage.exe from an Azure VM or performing the export directly in your code by using the DACFx API. -- Import does not support specifying a backup storage redundancy while creating a new database and creates with the default geo-redundant backup storage redundancy. To workaround, first create an empty database with desired backup storage redundancy using Azure portal or PowerShell and then import the BACPAC into this empty database. -- Storage behind a firewall is currently not supported. - - -## Import using wizards - -You can also use these wizards. 
- -- [Import Data-tier Application Wizard in SQL Server Management Studio](/sql/relational-databases/data-tier-applications/import-a-bacpac-file-to-create-a-new-user-database#using-the-import-data-tier-application-wizard). -- [SQL Server Import and Export Wizard](/sql/integration-services/import-export-data/start-the-sql-server-import-and-export-wizard). - -## Next steps - -- To learn how to connect to and query a database in Azure SQL Database, see [Quickstart: Azure SQL Database: Use SQL Server Management Studio to connect to and query data](connect-query-ssms.md). -- For a SQL Server Customer Advisory Team blog about migrating using BACPAC files, see [Migrating from SQL Server to Azure SQL Database using BACPAC Files](https://techcommunity.microsoft.com/t5/DataCAT/Migrating-from-SQL-Server-to-Azure-SQL-Database-using-Bacpac/ba-p/305407). -- For a discussion of the entire SQL Server database migration process, including performance recommendations, see [SQL Server database migration to Azure SQL Database](migrate-to-database-from-sql-server.md). -- To learn how to manage and share storage keys and shared access signatures securely, see [Azure Storage Security Guide](../../storage/blobs/security-recommendations.md). diff --git a/articles/azure-sql/database/design-first-database-csharp-tutorial.md b/articles/azure-sql/database/design-first-database-csharp-tutorial.md deleted file mode 100644 index 1f4a033b47895..0000000000000 --- a/articles/azure-sql/database/design-first-database-csharp-tutorial.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: Design your first relational database C# -description: Learn to design your first relational database in Azure SQL Database with C# using ADO.NET. 
-services: sql-database -ms.service: sql-database -ms.subservice: development -ms.custom: "seo-lt-2019, sqldbrb=1, devx-track-csharp" -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 07/29/2019 ---- -# Tutorial: Design a relational database in Azure SQL Database C# and ADO.NET -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Azure SQL Database is a relational database-as-a-service (DBaaS) in the Microsoft Cloud (Azure). In this tutorial, you learn how to use the Azure portal and ADO.NET with Visual Studio to: - -> [!div class="checklist"] -> -> * Create a database using the Azure portal -> * Set up a server-level IP firewall rule using the Azure portal -> * Connect to the database with ADO.NET and Visual Studio -> * Create tables with ADO.NET -> * Insert, update, and delete data with ADO.NET -> * Query data ADO.NET - -*If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/) before you begin. - -> [!TIP] -> The following Microsoft Learn module helps you learn for free how to [Develop and configure an ASP.NET application that queries an Azure SQL Database](/learn/modules/develop-app-that-queries-azure-sql/), including the creation of a simple database. - -## Prerequisites - -An installation of [Visual Studio 2019](https://www.visualstudio.com/downloads/) or later. - -## Create a blank database in Azure SQL Database - -A database in Azure SQL Database is created with a defined set of compute and storage resources. The database is created within an [Azure resource group](../../active-directory-b2c/overview.md) and is managed using an [logical SQL server](logical-servers.md). - -Follow these steps to create a blank database. - -1. Click **Create a resource** in the upper left-hand corner of the Azure portal. -2. On the **New** page, select **Databases** in the Azure Marketplace section, and then click **SQL Database** in the **Featured** section. 
- - ![create empty-database](./media/design-first-database-csharp-tutorial/create-empty-database.png) - -3. Fill out the **SQL Database** form with the following information, as shown on the preceding image: - - | Setting       | Suggested value | Description | - | ------------ | ------------------ | ------------------------------------------------- | - | **Database name** | *yourDatabase* | For valid database names, see [Database identifiers](/sql/relational-databases/databases/database-identifiers). | - | **Subscription** | *yourSubscription* | For details about your subscriptions, see [Subscriptions](https://account.windowsazure.com/Subscriptions). | - | **Resource group** | *yourResourceGroup* | For valid resource group names, see [Naming rules and restrictions](/azure/architecture/best-practices/resource-naming). | - | **Select source** | Blank database | Specifies that a blank database should be created. | - -4. Click **Server** to use an existing server or create and configure a new server. Either select an existing server or click **Create a new server** and fill out the **New server** form with the following information: - - | Setting       | Suggested value | Description | - | ------------ | ------------------ | ------------------------------------------------- | - | **Server name** | Any globally unique name | For valid server names, see [Naming rules and restrictions](/azure/architecture/best-practices/resource-naming). | - | **Server admin login** | Any valid name | For valid login names, see [Database identifiers](/sql/relational-databases/databases/database-identifiers). | - | **Password** | Any valid password | Your password must have at least eight characters and must use characters from three of the following categories: uppercase characters, lowercase characters, numbers, and non-alphanumeric characters. | - | **Location** | Any valid location | For information about regions, see [Azure Regions](https://azure.microsoft.com/regions/). 
| - - ![create database-server](./media/design-first-database-csharp-tutorial/create-database-server.png) - -5. Click **Select**. -6. Click **Pricing tier** to specify the service tier, the number of DTUs or vCores, and the amount of storage. You may explore the options for the number of DTUs/vCores and storage that is available to you for each service tier. - - After selecting the service tier, the number of DTUs or vCores, and the amount of storage, click **Apply**. - -7. Enter a **Collation** for the blank database (for this tutorial, use the default value). For more information about collations, see [Collations](/sql/t-sql/statements/collations) - -8. Now that you've completed the **SQL Database** form, click **Create** to provision the database. This step may take a few minutes. - -9. On the toolbar, click **Notifications** to monitor the deployment process. - - ![Screenshot shows Notifications in the Azure portal with Deployment in progress.](./media/design-first-database-csharp-tutorial/notification.png) - -## Create a server-level IP firewall rule - -SQL Database creates an IP firewall at the server-level. This firewall prevents external applications and tools from connecting to the server and any databases on the server unless a firewall rule allows their IP through the firewall. To enable external connectivity to your database, you must first add an IP firewall rule for your IP address (or IP address range). Follow these steps to create a [server-level IP firewall rule](firewall-configure.md). - -> [!IMPORTANT] -> SQL Database communicates over port 1433. If you are trying to connect to this service from within a corporate network, outbound traffic over port 1433 may not be allowed by your network's firewall. If so, you cannot connect to your database unless your administrator opens port 1433. - -1. After the deployment is complete, click **SQL databases** from the left-hand menu and then click *yourDatabase* on the **SQL databases** page. 
The overview page for your database opens, showing you the fully qualified **Server name** (such as *yourserver.database.windows.net*) and provides options for further configuration. - -2. Copy this fully qualified server name for use to connect to your server and databases from SQL Server Management Studio. - - ![server name](./media/design-first-database-csharp-tutorial/server-name.png) - -3. Click **Set server firewall** on the toolbar. The **Firewall settings** page for the server opens. - - ![server-level IP firewall rule](./media/design-first-database-csharp-tutorial/server-firewall-rule.png) - -4. Click **Add client IP** on the toolbar to add your current IP address to a new IP firewall rule. An IP firewall rule can open port 1433 for a single IP address or a range of IP addresses. - -5. Click **Save**. A server-level IP firewall rule is created for your current IP address opening port 1433 on the server. - -6. Click **OK** and then close the **Firewall settings** page. - -Your IP address can now pass through the IP firewall. You can now connect to your database using SQL Server Management Studio or another tool of your choice. Be sure to use the server admin account you created previously. - -> [!IMPORTANT] -> By default, access through the SQL Database IP firewall is enabled for all Azure services. Click **OFF** on this page to disable access for all Azure services. - -[!INCLUDE [sql-database-csharp-adonet-create-query-2](../../../includes/sql-database-csharp-adonet-create-query-2.md)] - -## Next steps - -In this tutorial, you learned basic database tasks such as create a database and tables, connect to the database, load data, and run queries. 
You learned how to: - -> [!div class="checklist"] -> -> * Create a database using the Azure portal -> * Set up a server-level IP firewall rule using the Azure portal -> * Connect to the database with ADO.NET and Visual Studio -> * Create tables with ADO.NET -> * Insert, update, and delete data with ADO.NET -> * Query data ADO.NET - -Advance to the next tutorial to learn about data migration. - -> [!div class="nextstepaction"] -> [Migrate SQL Server to Azure SQL Database offline using DMS](../../dms/tutorial-sql-server-to-azure-sql.md) \ No newline at end of file diff --git a/articles/azure-sql/database/design-first-database-tutorial.md b/articles/azure-sql/database/design-first-database-tutorial.md deleted file mode 100644 index a7bafdc2a8db1..0000000000000 --- a/articles/azure-sql/database/design-first-database-tutorial.md +++ /dev/null @@ -1,285 +0,0 @@ ---- -title: "Tutorial: Design your first relational database using SSMS" -description: Learn to design your first relational database in Azure SQL Database using SQL Server Management Studio. -services: sql-database -ms.service: sql-database -ms.subservice: development -ms.topic: tutorial -author: dzsquared -ms.author: drskwier -ms.reviewer: kendralittle, mathoma, v-masebo -ms.date: 07/29/2019 -ms.custom: sqldbrb=1 ---- -# Tutorial: Design a relational database in Azure SQL Database using SSMS -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - - -Azure SQL Database is a relational database-as-a-service (DBaaS) in the Microsoft Cloud (Azure). 
In this tutorial, you learn how to use the Azure portal and [SQL Server Management Studio](/sql/ssms/sql-server-management-studio-ssms) (SSMS) to: - -> [!div class="checklist"] -> -> - Create a database using the Azure portal* -> - Set up a server-level IP firewall rule using the Azure portal -> - Connect to the database with SSMS -> - Create tables with SSMS -> - Bulk load data with BCP -> - Query data with SSMS - -*If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/) before you begin. - -> [!TIP] -> The following Microsoft Learn module helps you learn for free how to [Develop and configure an ASP.NET application that queries an Azure SQL Database](/learn/modules/develop-app-that-queries-azure-sql/), including the creation of a simple database. -> [!NOTE] -> For the purpose of this tutorial, we are using Azure SQL Database. You could also use a pooled database in an elastic pool or a SQL Managed Instance. For connectivity to a SQL Managed Instance, see these SQL Managed Instance quickstarts: [Quickstart: Configure Azure VM to connect to an Azure SQL Managed Instance](../managed-instance/connect-vm-instance-configure.md) and [Quickstart: Configure a point-to-site connection to an Azure SQL Managed Instance from on-premises](../managed-instance/point-to-site-p2s-configure.md). - -## Prerequisites - -To complete this tutorial, make sure you've installed: - -- [SQL Server Management Studio](/sql/ssms/sql-server-management-studio-ssms) (latest version) -- [BCP and SQLCMD](https://www.microsoft.com/download/details.aspx?id=36433) (latest version) - -## Sign in to the Azure portal - -Sign in to the [Azure portal](https://portal.azure.com/). - -## Create a blank database in Azure SQL Database - -A database in Azure SQL Database is created with a defined set of compute and storage resources. 
The database is created within an [Azure resource group](../../active-directory-b2c/overview.md) and is managed using an [logical SQL server](logical-servers.md). - -Follow these steps to create a blank database. - -1. On the Azure portal menu or from the **Home** page, select **Create a resource**. -2. On the **New** page, select **Databases** in the Azure Marketplace section, and then click **SQL Database** in the **Featured** section. - - ![create empty-database](./media/design-first-database-tutorial/create-empty-database.png) - -3. Fill out the **SQL Database** form with the following information, as shown on the preceding image: - - | Setting       | Suggested value | Description | - | ------------ | ------------------ | ------------------------------------------------- | - | **Database name** | *yourDatabase* | For valid database names, see [Database identifiers](/sql/relational-databases/databases/database-identifiers). | - | **Subscription** | *yourSubscription* | For details about your subscriptions, see [Subscriptions](https://account.windowsazure.com/Subscriptions). | - | **Resource group** | *yourResourceGroup* | For valid resource group names, see [Naming rules and restrictions](/azure/architecture/best-practices/resource-naming). | - | **Select source** | Blank database | Specifies that a blank database should be created. | - -4. Click **Server** to use an existing server or create and configure a new server. Either select an existing server or click **Create a new server** and fill out the **New server** form with the following information: - - | Setting       | Suggested value | Description | - | ------------ | ------------------ | ------------------------------------------------- | - | **Server name** | Any globally unique name | For valid server names, see [Naming rules and restrictions](/azure/architecture/best-practices/resource-naming). 
| - | **Server admin login** | Any valid name | For valid login names, see [Database identifiers](/sql/relational-databases/databases/database-identifiers). | - | **Password** | Any valid password | Your password must have at least eight characters and must use characters from three of the following categories: upper case characters, lower case characters, numbers, and non-alphanumeric characters. | - | **Location** | Any valid location | For information about regions, see [Azure Regions](https://azure.microsoft.com/regions/). | - - ![create database-server](./media/design-first-database-tutorial/create-database-server.png) - -5. Click **Select**. -6. Click **Pricing tier** to specify the service tier, the number of DTUs or vCores, and the amount of storage. You may explore the options for the number of DTUs/vCores and storage that is available to you for each service tier. - - After selecting the service tier, the number of DTUs or vCores, and the amount of storage, click **Apply**. - -7. Enter a **Collation** for the blank database (for this tutorial, use the default value). For more information about collations, see [Collations](/sql/t-sql/statements/collations) - -8. Now that you've completed the **SQL Database** form, click **Create** to provision the database. This step may take a few minutes. - -9. On the toolbar, click **Notifications** to monitor the deployment process. - - ![Screenshot shows the Notifications menu with Deployment in progress.](./media/design-first-database-tutorial/notification.png) - -## Create a server-level IP firewall rule - -Azure SQL Database creates an IP firewall at the server-level. This firewall prevents external applications and tools from connecting to the server and any databases on the server unless a firewall rule allows their IP through the firewall. To enable external connectivity to your database, you must first add an IP firewall rule for your IP address (or IP address range). 
Follow these steps to create a [server-level IP firewall rule](firewall-configure.md). - -> [!IMPORTANT] -> Azure SQL Database communicates over port 1433. If you are trying to connect to this service from within a corporate network, outbound traffic over port 1433 may not be allowed by your network's firewall. If so, you cannot connect to your database unless your administrator opens port 1433. - -1. After the deployment completes, select **SQL databases** from the Azure portal menu or search for and select *SQL databases* from any page. - -1. Select *yourDatabase* on the **SQL databases** page. The overview page for your database opens, showing you the fully qualified **Server name** (such as `contosodatabaseserver01.database.windows.net`) and provides options for further configuration. - - ![server name](./media/design-first-database-tutorial/server-name.png) - -1. Copy this fully qualified server name for use to connect to your server and databases from SQL Server Management Studio. - -1. Click **Set server firewall** on the toolbar. The **Firewall settings** page for the server opens. - - ![server-level IP firewall rule](./media/design-first-database-tutorial/server-firewall-rule.png) - -1. Click **Add client IP** on the toolbar to add your current IP address to a new IP firewall rule. An IP firewall rule can open port 1433 for a single IP address or a range of IP addresses. - -1. Click **Save**. A server-level IP firewall rule is created for your current IP address opening port 1433 on the server. - -1. Click **OK** and then close the **Firewall settings** page. - -Your IP address can now pass through the IP firewall. You can now connect to your database using SQL Server Management Studio or another tool of your choice. Be sure to use the server admin account you created previously. - -> [!IMPORTANT] -> By default, access through the SQL Database IP firewall is enabled for all Azure services. Click **OFF** on this page to disable for all Azure services. 
- -## Connect to the database - -Use [SQL Server Management Studio](/sql/ssms/sql-server-management-studio-ssms) to establish a connection to your database. - -1. Open SQL Server Management Studio. -2. In the **Connect to Server** dialog box, enter the following information: - - | Setting       | Suggested value | Description | - | ------------ | ------------------ | ------------------------------------------------- | - | **Server type** | Database engine | This value is required. | - | **Server name** | The fully qualified server name | For example, *yourserver.database.windows.net*. | - | **Authentication** | SQL Server Authentication | SQL Authentication is the only authentication type that we've configured in this tutorial. | - | **Login** | The server admin account | The account that you specified when you created the server. | - | **Password** | The password for your server admin account | The password that you specified when you created the server. | - - ![connect to server](./media/design-first-database-tutorial/connect.png) - -3. Click **Options** in the **Connect to server** dialog box. In the **Connect to database** section, enter *yourDatabase* to connect to this database. - - ![connect to db on server](./media/design-first-database-tutorial/options-connect-to-db.png) - -4. Click **Connect**. The **Object Explorer** window opens in SSMS. - -5. In **Object Explorer**, expand **Databases** and then expand *yourDatabase* to view the objects in the sample database. - - ![database objects](./media/design-first-database-tutorial/connected.png) - -## Create tables in your database - -Create a database schema with four tables that model a student management system for universities using [Transact-SQL](/sql/t-sql/language-reference): - -- Person -- Course -- Student -- Credit - -The following diagram shows how these tables are related to each other. Some of these tables reference columns in other tables. 
For example, the *Student* table references the *PersonId* column of the *Person* table. Study the diagram to understand how the tables in this tutorial are related to one another. For an in-depth look at how to create effective database tables, see [Create effective database tables](/previous-versions/tn-archive/cc505842(v=technet.10)). For information about choosing data types, see [Data types](/sql/t-sql/data-types/data-types-transact-sql). - -> [!NOTE] -> You can also use the [table designer in SQL Server Management Studio](/sql/ssms/visual-db-tools/design-database-diagrams-visual-database-tools) to create and design your tables. - -![Table relationships](./media/design-first-database-tutorial/tutorial-database-tables.png) - -1. In **Object Explorer**, right-click *yourDatabase* and select **New Query**. A blank query window opens that is connected to your database. - -2. In the query window, execute the following query to create four tables in your database: - - ```sql - -- Create Person table - CREATE TABLE Person - ( - PersonId INT IDENTITY PRIMARY KEY, - FirstName NVARCHAR(128) NOT NULL, - MiddelInitial NVARCHAR(10), - LastName NVARCHAR(128) NOT NULL, - DateOfBirth DATE NOT NULL - ) - - -- Create Student table - CREATE TABLE Student - ( - StudentId INT IDENTITY PRIMARY KEY, - PersonId INT REFERENCES Person (PersonId), - Email NVARCHAR(256) - ) - - -- Create Course table - CREATE TABLE Course - ( - CourseId INT IDENTITY PRIMARY KEY, - Name NVARCHAR(50) NOT NULL, - Teacher NVARCHAR(256) NOT NULL - ) - - -- Create Credit table - CREATE TABLE Credit - ( - StudentId INT REFERENCES Student (StudentId), - CourseId INT REFERENCES Course (CourseId), - Grade DECIMAL(5,2) CHECK (Grade <= 100.00), - Attempt TINYINT, - CONSTRAINT [UQ_studentgrades] UNIQUE CLUSTERED - ( - StudentId, CourseId, Grade, Attempt - ) - ) - ``` - - ![Create tables](./media/design-first-database-tutorial/create-tables.png) - -3. 
Expand the **Tables** node under *yourDatabase* in the **Object Explorer** to see the tables you created. - - ![ssms tables-created](./media/design-first-database-tutorial/ssms-tables-created.png) - -## Load data into the tables - -1. Create a folder called *sampleData* in your Downloads folder to store sample data for your database. - -2. Right-click the following links and save them into the *sampleData* folder. - - - [SampleCourseData](https://sqldbtutorial.blob.core.windows.net/tutorials/SampleCourseData) - - [SamplePersonData](https://sqldbtutorial.blob.core.windows.net/tutorials/SamplePersonData) - - [SampleStudentData](https://sqldbtutorial.blob.core.windows.net/tutorials/SampleStudentData) - - [SampleCreditData](https://sqldbtutorial.blob.core.windows.net/tutorials/SampleCreditData) - -3. Open a command prompt window and navigate to the *sampleData* folder. - -4. Execute the following commands to insert sample data into the tables replacing the values for *server*, *database*, *user*, and *password* with the values for your environment. - - ```cmd - bcp Course in SampleCourseData -S <server>.database.windows.net -d <database> -U <user> -P <password> -q -c -t "," - bcp Person in SamplePersonData -S <server>.database.windows.net -d <database> -U <user> -P <password> -q -c -t "," - bcp Student in SampleStudentData -S <server>.database.windows.net -d <database> -U <user> -P <password> -q -c -t "," - bcp Credit in SampleCreditData -S <server>.database.windows.net -d <database> -U <user> -P <password> -q -c -t "," - ``` - -You have now loaded sample data into the tables you created earlier. - -## Query data - -Execute the following queries to retrieve information from the database tables. See [Write SQL queries](/previous-versions/sql/sql-server-2005/express-administrator/bb264565(v=sql.90)) to learn more about writing SQL queries. The first query joins all four tables to find the students taught by 'Dominick Pope' who have a grade higher than 75%. The second query joins all four tables and finds the courses in which 'Noe Coleman' has ever enrolled. - -1. 
In a SQL Server Management Studio query window, execute the following query: - - ```sql - -- Find the students taught by Dominick Pope who have a grade higher than 75% - SELECT person.FirstName, person.LastName, course.Name, credit.Grade - FROM Person AS person - INNER JOIN Student AS student ON person.PersonId = student.PersonId - INNER JOIN Credit AS credit ON student.StudentId = credit.StudentId - INNER JOIN Course AS course ON credit.CourseId = course.CourseId - WHERE course.Teacher = 'Dominick Pope' - AND Grade > 75 - ``` - -2. In a query window, execute the following query: - - ```sql - -- Find all the courses in which Noe Coleman has ever enrolled - SELECT course.Name, course.Teacher, credit.Grade - FROM Course AS course - INNER JOIN Credit AS credit ON credit.CourseId = course.CourseId - INNER JOIN Student AS student ON student.StudentId = credit.StudentId - INNER JOIN Person AS person ON person.PersonId = student.PersonId - WHERE person.FirstName = 'Noe' - AND person.LastName = 'Coleman' - ``` - -## Next steps - -In this tutorial, you learned many basic database tasks. You learned how to: - -> [!div class="checklist"] -> -> - Create a database using the Azure portal -> - Set up a server-level IP firewall rule using the Azure portal -> - Connect to the database with SSMS -> - Create tables with SSMS -> - Bulk load data with BCP -> - Query data with SSMS - -Advance to the next tutorial to learn about designing a database using Visual Studio and C#. 
- -> [!div class="nextstepaction"] -> [Design a relational database within Azure SQL Database C# and ADO.NET](design-first-database-csharp-tutorial.md) \ No newline at end of file diff --git a/articles/azure-sql/database/designing-cloud-solutions-for-disaster-recovery.md b/articles/azure-sql/database/designing-cloud-solutions-for-disaster-recovery.md deleted file mode 100644 index d86f330c7d1dd..0000000000000 --- a/articles/azure-sql/database/designing-cloud-solutions-for-disaster-recovery.md +++ /dev/null @@ -1,169 +0,0 @@ ---- -title: Design globally available services -description: Learn about application design for highly available services using Azure SQL Database. -keywords: cloud disaster recovery,disaster recovery solutions,app data backup,geo-replication,business continuity planning -services: sql-database -ms.service: sql-database -ms.subservice: development -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 07/28/2020 ---- -# Designing globally available services using Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -When building and deploying cloud services with Azure SQL Database, you use [active geo-replication](active-geo-replication-overview.md) or [auto-failover groups](auto-failover-group-overview.md) to provide resilience to regional outages and catastrophic failures. The same feature allows you to create globally distributed applications optimized for local access to the data. This article discusses common application patterns, including the benefits and trade-offs of each option. - -> [!NOTE] -> If you are using Premium or Business Critical databases and elastic pools, you can make them resilient to regional outages by converting them to zone redundant deployment configuration. See [Zone-redundant databases](high-availability-sla.md). 
- -## Scenario 1: Using two Azure regions for business continuity with minimal downtime - -In this scenario, the applications have the following characteristics: - -* Application is active in one Azure region -* All database sessions require read and write access (RW) to data -* Web tier and data tier must be collocated to reduce latency and traffic cost -* Fundamentally, downtime is a higher business risk for these applications than data loss - -In this case, the application deployment topology is optimized for handling regional disasters when all application components need to fail over together. The diagram below shows this topology. For geographic redundancy, the application’s resources are deployed to Region A and B. However, the resources in Region B are not utilized until Region A fails. A failover group is configured between the two regions to manage database connectivity, replication and failover. The web service in both regions is configured to access the database via the read-write listener **<failover-group-name>.database.windows.net** (1). Azure Traffic Manager is set up to use [priority routing method](../../traffic-manager/traffic-manager-configure-priority-routing-method.md) (2).   - -> [!NOTE] -> [Azure Traffic Manager](../../traffic-manager/traffic-manager-overview.md) is used throughout this article for illustration purposes only. You can use any load-balancing solution that supports priority routing method. - -The following diagram shows this configuration before an outage: - -![Scenario 1. Configuration before the outage.](./media/designing-cloud-solutions-for-disaster-recovery/scenario1-a.png) - -After an outage in the primary region, SQL Database detects that the primary database is not accessible and triggers failover to the secondary region based on the parameters of the automatic failover policy (1). 
Depending on your application SLA, you can configure a grace period that controls the time between the detection of the outage and the failover itself. It is possible that Azure Traffic Manager initiates the endpoint failover before the failover group triggers the failover of the database. In that case the web application cannot immediately reconnect to the database. But the reconnections will automatically succeed as soon as the database failover completes. When the failed region is restored and back online, the old primary automatically reconnects as a new secondary. The diagram below illustrates the configuration after failover. - -> [!NOTE] -> All transactions committed after the failover are lost during the reconnection. After the failover is completed, the application in region B is able to reconnect and restart processing the user requests. Both the web application and the primary database are now in region B and remain co-located. - -![Scenario 1. Configuration after failover](./media/designing-cloud-solutions-for-disaster-recovery/scenario1-b.png) - -If an outage happens in region B, the replication process between the primary and the secondary database gets suspended but the link between the two remains intact (1). Traffic Manager detects that connectivity to Region B is broken and marks the endpoint web app 2 as Degraded (2). The application's performance is not impacted in this case, but the database becomes exposed and therefore at higher risk of data loss in case region A fails in succession. - -> [!NOTE] -> For disaster recovery, we recommend the configuration with application deployment limited to two regions. This is because most of the Azure geographies have only two regions. This configuration does not protect your application from a simultaneous catastrophic failure of both regions. 
In the unlikely event of such a failure, you can recover your databases in a third region using [geo-restore operation](disaster-recovery-guidance.md#recover-using-geo-restore). -> - - Once the outage is mitigated, the secondary database automatically resynchronizes with the primary. During synchronization, performance of the primary can be impacted. The specific impact depends on the amount of data the new primary acquired since the failover. - -> [!NOTE] -> After the outage is mitigated, Traffic Manager will start routing the connections to the application in Region A as a higher priority end-point. If you intend to keep the primary in Region B for a while, you should change the priority table in the Traffic Manager profile accordingly. -> - - The following diagram illustrates an outage in the secondary region: - -![Scenario 1. Configuration after an outage in the secondary region.](./media/designing-cloud-solutions-for-disaster-recovery/scenario1-c.png) - -The key **advantages** of this design pattern are: - -* The same web application is deployed to both regions without any region-specific configuration and doesn’t require additional logic to manage failover. -* Application performance is not impacted by failover as the web application and the database are always co-located. - -The main **tradeoff** is that the application resources in Region B are underutilized most of the time. - -## Scenario 2: Azure regions for business continuity with maximum data preservation - -This option is best suited for applications with the following characteristics: - -* Any data loss is high business risk. The database failover can only be used as a last resort if the outage is caused by a catastrophic failure. -* The application supports read-only and read-write modes of operations and can operate in "read-only mode" for a period of time. - -In this pattern, the application switches to read-only mode when the read-write connections start getting time-out errors. 
The web application is deployed to both regions and includes a connection to the read-write listener endpoint and different connection to the read-only listener endpoint (1). The Traffic Manager profile should use [priority routing](../../traffic-manager/traffic-manager-configure-priority-routing-method.md). [End point monitoring](../../traffic-manager/traffic-manager-monitoring.md) should be enabled for the application endpoint in each region (2). - -The following diagram illustrates this configuration before an outage: - -![Scenario 2. Configuration before the outage.](./media/designing-cloud-solutions-for-disaster-recovery/scenario2-a.png) - -When Traffic Manager detects a connectivity failure to region A, it automatically switches user traffic to the application instance in region B. With this pattern, it is important that you set the grace period with data loss to a sufficiently high value, for example 24 hours. It ensures that data loss is prevented if the outage is mitigated within that time. When the web application in region B is activated the read-write operations start failing. At that point, it should switch to the read-only mode (1). In this mode the requests are automatically routed to the secondary database. If the outage is caused by a catastrophic failure, most likely it cannot be mitigated within the grace period. When it expires the failover group triggers the failover. After that the read-write listener becomes available and the connections to it stop failing (2). The following diagram illustrates the two stages of the recovery process. - -> [!NOTE] -> If the outage in the primary region is mitigated within the grace period, Traffic Manager detects the restoration of connectivity in the primary region and switches user traffic back to the application instance in region A. That application instance resumes and operates in read-write mode using the primary database in region A as illustrated by the previous diagram. - -![Scenario 2. 
Disaster recovery stages.](./media/designing-cloud-solutions-for-disaster-recovery/scenario2-b.png) - -If an outage happens in region B, Traffic Manager detects the failure of the end point web-app-2 in region B and marks it degraded (1). In the meantime, the failover group switches the read-only listener to region A (2). This outage does not impact the end-user experience but the primary database is exposed during the outage. The following diagram illustrates a failure in the secondary region: - -![Scenario 2. Outage of the secondary region.](./media/designing-cloud-solutions-for-disaster-recovery/scenario2-c.png) - -Once the outage is mitigated, the secondary database is immediately synchronized with the primary and the read-only listener is switched back to the secondary database in region B. During synchronization performance of the primary could be slightly impacted depending on the amount of data that needs to be synchronized. - -This design pattern has several **advantages**: - -* It avoids data loss during the temporary outages. -* Downtime depends only on how quickly Traffic Manager detects the connectivity failure, which is configurable. - -The **tradeoff** is that the application must be able to operate in read-only mode. - -## Scenario 3: Application relocation to a different geography without data loss and near zero downtime - -In this scenario the application has the following characteristics: - -* The end users access the application from different geographies -* The application includes read-only workloads that do not depend on full synchronization with the latest updates -* Write access to data should be supported in the same geography for majority of the users -* Read latency is critical for the end-user experience - -In order to meet these requirements you need to guarantee that the user device **always** connects to the application deployed in the same geography for the read-only operations, such as browsing data, analytics etc. 
Whereas the OLTP operations are processed in the same geography **most of the time**. For example, during the day time OLTP operations are processed in the same geography, but during the off hours they could be processed in a different geography. If the end-user activity mostly happens during the working hours, you can guarantee the optimal performance for most of the users most of the time. The following diagram shows this topology. - -The application’s resources should be deployed in each geography where you have substantial usage demand. For example, if your application is actively used in the United States, European Union and South East Asia the application should be deployed to all of these geographies. The primary database should be dynamically switched from one geography to the next at the end of the working hours. This method is called “follow the sun”. The OLTP workload always connects to the database via the read-write listener **<failover-group-name>.database.windows.net** (1). The read-only workload connects to the local database directly using the database server endpoint **<server-name>.database.windows.net** (2). Traffic Manager is configured with the [performance routing method](../../traffic-manager/traffic-manager-configure-performance-routing-method.md). It ensures that the end-user’s device is connected to the web service in the closest region. Traffic Manager should be set up with end point monitoring enabled for each web service end point (3). - -> [!NOTE] -> The failover group configuration defines which region is used for failover. Because the new primary is in a different geography the failover results in longer latency for both OLTP and read-only workloads until the impacted region is back online. - -![Scenario 3. 
Configuration with primary in East US.](./media/designing-cloud-solutions-for-disaster-recovery/scenario3-a.png) - -At the end of the day, for example at 11 PM local time, the active databases should be switched to the next region (North Europe). This task can be fully automated by using [Azure Logic Apps](../../logic-apps/logic-apps-overview.md). The task involves the following steps: - -* Switch primary server in the failover group to North Europe using friendly failover (1) -* Remove the failover group between East US and North Europe -* Create a new failover group with the same name but between North Europe and East Asia (2). -* Add the primary in North Europe and secondary in East Asia to this failover group (3). - -The following diagram illustrates the new configuration after the planned failover: - -![Scenario 3. Transitioning the primary to North Europe.](./media/designing-cloud-solutions-for-disaster-recovery/scenario3-b.png) - -If an outage happens in North Europe for example, the automatic database failover is initiated by the failover group, which effectively results in moving the application to the next region ahead of schedule (1). In that case the US East is the only remaining secondary region until North Europe is back online. The remaining two regions serve the customers in all three geographies by switching roles. Azure Logic Apps has to be adjusted accordingly. Because the remaining regions get additional user traffic from Europe, the application's performance is impacted not only by additional latency but also by an increased number of end-user connections. Once the outage is mitigated in North Europe, the secondary database there is immediately synchronized with the current primary. The following diagram illustrates an outage in North Europe: - -![Scenario 3. 
Outage in North Europe.](./media/designing-cloud-solutions-for-disaster-recovery/scenario3-c.png) - -> [!NOTE] -> You can reduce the time when the end user’s experience in Europe is degraded by the long latency. To do that you should proactively deploy an application copy and create the secondary database(s) in another local region (West Europe) as a replacement of the offline application instance in North Europe. When the latter is back online you can decide whether to continue using West Europe or to remove the copy of the application there and switch back to using North Europe. - -The key **benefits** of this design are: - -* The read-only application workload accesses data in the closest region at all times. -* The read-write application workload accesses data in the closest region during the period of the highest activity in each geography. -* Because the application is deployed to multiple regions, it can survive a loss of one of the regions without any significant downtime. - -But there are some **tradeoffs**: - -* A regional outage results in the geography being impacted by longer latency. Both read-write and read-only workloads are served by the application in a different geography. -* The read-only workloads must connect to a different end point in each region. - -## Business continuity planning: Choose an application design for cloud disaster recovery - -Your specific cloud disaster recovery strategy can combine or extend these design patterns to best meet the needs of your application. As mentioned earlier, the strategy you choose is based on the SLA you want to offer to your customers and the application deployment topology. To help guide your decision, the following table compares the choices based on recovery point objective (RPO) and estimated recovery time (ERT). 
- -| Pattern | RPO | ERT | -|:--- |:--- |:--- | -| Active-passive deployment for disaster recovery with co-located database access |Read-write access < 5 sec |Failure detection time + DNS TTL | -| Active-active deployment for application load balancing |Read-write access < 5 sec |Failure detection time + DNS TTL | -| Active-passive deployment for data preservation |Read-only access < 5 sec | Read-only access = 0 | -||Read-write access = zero | Read-write access = Failure detection time + grace period with data loss | - - -## Next steps - -* For a business continuity overview and scenarios, see [Business continuity overview](business-continuity-high-availability-disaster-recover-hadr-overview.md) -* To learn about active geo-replication, see [Active geo-replication](active-geo-replication-overview.md). -* To learn about auto-failover groups, see [Auto-failover groups](auto-failover-group-overview.md). -* For information about active geo-replication with elastic pools, see [Elastic pool disaster recovery strategies](disaster-recovery-strategies-for-applications-with-elastic-pool.md). diff --git a/articles/azure-sql/database/develop-cplusplus-simple.md b/articles/azure-sql/database/develop-cplusplus-simple.md deleted file mode 100644 index 5d97957841f7b..0000000000000 --- a/articles/azure-sql/database/develop-cplusplus-simple.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -title: Connect to SQL Database using C and C++ -description: Use the sample code in this quick start to build a modern application with C++ and backed by a powerful relational database in the cloud with Azure SQL Database. 
-services: sql-database -ms.service: sql-database -ms.subservice: development -ms.custom: sqldbrb=1 -ms.devlang: cpp -ms.topic: how-to -author: dzsquared -ms.author: drskwier -ms.reviewer: kendralittle, mathoma -ms.date: 12/12/2018 ---- -# Connect to SQL Database using C and C++ -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This post is aimed at C and C++ developers trying to connect to Azure SQL Database. It is broken down into sections so you can jump to the section that best captures your interest. - -## Prerequisites for the C/C++ tutorial - -Make sure you have the following items: - -* An active Azure account. If you don't have one, you can sign up for a [Free Azure Trial](https://azure.microsoft.com/pricing/free-trial/). -* [Visual Studio](https://www.visualstudio.com/downloads/). You must install the C++ language components to build and run this sample. -* [Visual Studio Linux Development](/cpp/linux/). If you are developing on Linux, you must also install the Visual Studio Linux extension. - -## Azure SQL Database and SQL Server on virtual machines - -Azure SQL Database is built on Microsoft SQL Server and is designed to provide a high-availability, performant, and scalable service. There are many benefits to using Azure SQL over your proprietary database running on premises. With Azure SQL you don't have to install, set up, maintain, or manage your database but only the content and the structure of your database. Typical things that we worry about with databases like fault tolerance and redundancy are all built in. - -Azure currently has two options for hosting SQL server workloads: Azure SQL Database, database as a service and SQL server on Virtual Machines (VM). We will not get into detail about the differences between these two except that Azure SQL Database is your best bet for new cloud-based applications to take advantage of the cost savings and performance optimization that cloud services provide. 
If you are considering migrating or extending your on-premises applications to the cloud, SQL server on Azure virtual machine might work out better for you. To keep things simple for this article, let's create an Azure SQL Database. - -## Data access technologies: ODBC and OLE DB - -Connecting to Azure SQL Database is no different and currently there are two ways to connect to databases: ODBC (Open Database connectivity) and OLE DB (Object Linking and Embedding database). In recent years, Microsoft has aligned with [ODBC for native relational data access](/archive/blogs/sqlnativeclient/microsoft-is-aligning-with-odbc-for-native-relational-data-access). ODBC is relatively simple, and also much faster than OLE DB. The only caveat here is that ODBC does use an old C-style API. - -## Step 1: Creating your Azure SQL Database - -See the [getting started page](single-database-create-quickstart.md) to learn how to create a sample database. Alternatively, you can follow this [short two-minute video](https://azure.microsoft.com/documentation/videos/azure-sql-database-create-dbs-in-seconds/) to create an Azure SQL Database using the Azure portal. - -## Step 2: Get connection string - -After your Azure SQL Database has been provisioned, you need to carry out the following steps to determine connection information and add your client IP for firewall access. - -In [Azure portal](https://portal.azure.com/), go to your Azure SQL Database ODBC connection string by using the **Show database connection strings** listed as a part of the overview section for your database: - -![ODBCConnectionString](./media/develop-cplusplus-simple/azureportal.png) - -![ODBCConnectionStringProps](./media/develop-cplusplus-simple/dbconnection.png) - -Copy the contents of the **ODBC (Includes Node.js) [SQL authentication]** string. We use this string later to connect from our C++ ODBC command-line interpreter. 
This string provides details such as the driver, server, and other database connection parameters. - -## Step 3: Add your IP to the firewall - -Go to the firewall section for your server and add your [client IP to the firewall using these steps](firewall-configure.md) to make sure we can establish a successful connection: - -![AddyourIPWindow](./media/develop-cplusplus-simple/ip.png) - -At this point, you have configured your Azure SQL Database and are ready to connect from your C++ code. - -## Step 4: Connecting from a Windows C/C++ application - -You can easily connect to your [Azure SQL Database using ODBC on Windows using this sample](https://github.com/Microsoft/VCSamples/tree/master/VC2015Samples/ODBC%20database%20sample%20%28windows%29) that builds with Visual Studio. The sample implements an ODBC command-line interpreter that can be used to connect to our Azure SQL Database. This sample takes either a Database source name file (DSN) file as a command-line argument or the verbose connection string that we copied earlier from the Azure portal. Bring up the property page for this project and paste the connection string as a command argument as shown here: - -![DSN Propsfile](./media/develop-cplusplus-simple/props.png) - -Make sure you provide the right authentication details for your database as a part of that database connection string. - -Launch the application to build it. You should see the following window validating a successful connection. You can even run some basic SQL commands like **create table** to validate your database connectivity: - -![SQL Commands](./media/develop-cplusplus-simple/sqlcommands.png) - -Alternatively, you could create a DSN file using the wizard that is launched when no command arguments are provided. We recommend that you try this option as well. You can use this DSN file for automation and protecting your authentication settings: - -![Create DSN File](./media/develop-cplusplus-simple/datasource.png) - -Congratulations! 
You have now successfully connected to Azure SQL using C++ and ODBC on Windows. You can continue reading to do the same for the Linux platform as well. - -## Step 5: Connecting from a Linux C/C++ application - -In case you haven't heard the news yet, Visual Studio now allows you to develop C++ Linux applications as well. You can read about this new scenario in the [Visual C++ for Linux Development](https://devblogs.microsoft.com/cppblog/visual-c-for-linux-development/) blog. To build for Linux, you need a remote machine where your Linux distro is running. If you don't have one available, you can set one up quickly using [Linux Azure Virtual machines](../../virtual-machines/linux/quick-create-cli.md?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). - -For this tutorial, let us assume that you have an Ubuntu 16.04 Linux distribution set up. The steps here should also apply to Ubuntu 15.10, Red Hat 6, and Red Hat 7. - -The following steps install the libraries needed for SQL and ODBC for your distro: - -```console - sudo su - sh -c 'echo "deb [arch=amd64] https://apt-mo.trafficmanager.net/repos/mssql-ubuntu-test/ xenial main" > /etc/apt/sources.list.d/mssqlpreview.list' - sudo apt-key adv --keyserver apt-mo.trafficmanager.net --recv-keys 417A0893 - apt-get update - apt-get install msodbcsql - apt-get install unixodbc-dev-utf16 #this step is optional but recommended* -``` - -Launch Visual Studio. Under Tools -> Options -> Cross Platform -> Connection Manager, add a connection to your Linux box: - -![Tools Options](./media/develop-cplusplus-simple/tools.png) - -After connection over SSH is established, create an Empty project (Linux) template: - -![New project template](./media/develop-cplusplus-simple/template.png) - -You can then add a [new C source file and replace it with this content](https://github.com/Microsoft/VCSamples/blob/master/VC2015Samples/ODBC%20database%20sample%20%28linux%29/odbcconnector/odbcconnector.c). 
Using the ODBC APIs SQLAllocHandle, SQLSetConnectAttr, and SQLDriverConnect, you should be able to initialize and establish a connection to your database. -Like with the Windows ODBC sample, you need to replace the SQLDriverConnect call with the details from your database connection string parameters copied from the Azure portal previously. - -```c - retcode = SQLDriverConnect( - hdbc, NULL, "Driver=ODBC Driver 13 for SQL" - "Server;Server=<yourserver>;Uid=<yourusername>;Pwd=<" - "yourpassword>;database=<yourdatabase>", - SQL_NTS, outstr, sizeof(outstr), &outstrlen, SQL_DRIVER_NOPROMPT); -``` - -The last thing to do before compiling is to add **odbc** as a library dependency: - -![Adding ODBC as an input library](./media/develop-cplusplus-simple/lib.png) - -To launch your application, bring up the Linux Console from the **Debug** menu: - -![Linux Console](./media/develop-cplusplus-simple/linuxconsole.png) - -If your connection was successful, you should now see the current database name printed in the Linux Console: - -![Linux Console Window Output](./media/develop-cplusplus-simple/linuxconsolewindow.png) - -Congratulations! You have successfully completed the tutorial and can now connect to your Azure SQL Database from C++ on Windows and Linux platforms. 
- -## Get the complete C/C++ tutorial solution - -You can find the GetStarted solution that contains all the samples in this article at GitHub: - -* [ODBC C++ Windows sample](https://github.com/Microsoft/VCSamples/tree/master/VC2015Samples/ODBC%20database%20sample%20%28windows%29), Download the Windows C++ ODBC Sample to connect to Azure SQL -* [ODBC C++ Linux sample](https://github.com/Microsoft/VCSamples/tree/master/VC2015Samples/ODBC%20database%20sample%20%28linux%29), Download the Linux C++ ODBC Sample to connect to Azure SQL - -## Next steps - -* Review the [SQL Database Development Overview](develop-overview.md) -* More information on the [ODBC API Reference](/sql/odbc/reference/syntax/odbc-api-reference/) - -## Additional resources - -* [Design Patterns for Multi-tenant SaaS Applications with Azure SQL Database](saas-tenancy-app-design-patterns.md) -* Explore all the [capabilities of SQL Database](https://azure.microsoft.com/services/sql-database/) \ No newline at end of file diff --git a/articles/azure-sql/database/develop-overview.md b/articles/azure-sql/database/develop-overview.md deleted file mode 100644 index 939260f517fb3..0000000000000 --- a/articles/azure-sql/database/develop-overview.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Application Development Overview -description: Learn about available connectivity libraries and best practices for applications connecting to SQL Database. -services: sql-database -ms.service: sql-database -ms.subservice: development -ms.topic: conceptual -author: dzsquared -ms.author: drskwier -ms.reviewer: kendralittle, mathoma -ms.date: 11/14/2019 -ms.custom: sqldbrb=2 ---- -# Application development overview - SQL Database & SQL Managed Instance - -[!INCLUDE[appliesto-sqldb-asa](../includes/appliesto-sqldb-asa.md)] - -This article walks through the basic considerations that a developer should be aware of when writing code to connect to your database in Azure. 
This article applies to Azure SQL Database and Azure SQL Managed Instance. - -## Language and platform - -You can use various [programming languages and platforms](connect-query-content-reference-guide.md) to connect and query Azure SQL Database. You can find [sample applications](https://azure.microsoft.com/resources/samples/?service=sql-database&sort=0) that you can use to connect to the database. - -You can leverage open-source tools like [cheetah](https://github.com/wunderlist/cheetah), [sql-cli](https://www.npmjs.com/package/sql-cli), [VS Code](https://code.visualstudio.com/). Additionally, Azure SQL Database works with Microsoft tools like [Visual Studio](https://www.visualstudio.com/downloads/) and [SQL Server Management Studio](/sql/ssms/sql-server-management-studio-ssms). You can also use the Azure portal, PowerShell, and REST APIs to help you gain additional productivity. - -## Authentication - -Access to Azure SQL Database is protected with logins and firewalls. Azure SQL Database supports both SQL Server and [Azure Active Directory authentication](authentication-aad-overview.md) users and logins. Azure Active Directory logins are available only in SQL Managed Instance. - -Learn more about [managing database access and login](logins-create-manage.md). - -## Connections - -In your client connection logic, override the default timeout to be 30 seconds. The default of 15 seconds is too short for connections that depend on the internet. - -If you are using a [connection pool](/dotnet/framework/data/adonet/sql-server-connection-pooling), be sure to close the connection the instant your program is not actively using it, and is not preparing to reuse it. - -Avoid long-running transactions because any infrastructure or connection failure might roll back the transaction. If possible, split the transaction into multiple smaller transactions and use [batching to improve performance](../performance-improve-use-batching.md). 
 - -## Resiliency - -Azure SQL Database is a cloud service where you might expect transient errors that happen in the underlying infrastructure or in the communication between cloud entities. Although Azure SQL Database is resilient to transient infrastructure failures, these failures might affect your connectivity. When a transient error occurs while connecting to SQL Database, your code should [retry the call](troubleshoot-common-connectivity-issues.md). We recommend that retry logic use backoff logic, so that it does not overwhelm the service with multiple clients retrying simultaneously. Retry logic depends on the [error messages for SQL Database client programs](troubleshoot-common-errors-issues.md). - -For more information about how to prepare for planned maintenance events on your Azure SQL Database, see [planning for Azure maintenance events in Azure SQL Database](planned-maintenance.md). - -## Network considerations - -- On the computer that hosts your client program, ensure the firewall allows outgoing TCP communication on port 1433. More information: [Configure an Azure SQL Database firewall](firewall-configure.md). -- If your client program connects to SQL Database while your client runs on an Azure virtual machine (VM), you must open certain port ranges on the VM. More information: [Ports beyond 1433 for ADO.NET 4.5 and SQL Database](adonet-v12-develop-direct-route-ports.md). -- Client connections to Azure SQL Database sometimes bypass the proxy and interact directly with the database. Ports other than 1433 become important. For more information, see [Azure SQL Database connectivity architecture](connectivity-architecture.md) and [Ports beyond 1433 for ADO.NET 4.5 and SQL Database](adonet-v12-develop-direct-route-ports.md). -- For networking configuration for an instance of SQL Managed Instance, see [network configuration for SQL Managed Instance](../managed-instance/how-to-content-reference-guide.md#network-configuration). 
- -## Next steps - -Explore all the capabilities of [SQL Database](sql-database-paas-overview.md) and [SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md). - -To get started, see the guides for [Azure SQL Database](quickstart-content-reference-guide.md) and [Azure SQL Managed Instances](../managed-instance/quickstart-content-reference-guide.md). \ No newline at end of file diff --git a/articles/azure-sql/database/disaster-recovery-drills.md b/articles/azure-sql/database/disaster-recovery-drills.md deleted file mode 100644 index d23301a6d2349..0000000000000 --- a/articles/azure-sql/database/disaster-recovery-drills.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Disaster recovery drills -description: Learn guidance and best practices for using Azure SQL Database to perform disaster recovery drills. -services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: -ms.devlang: -ms.topic: conceptual -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 12/18/2018 ---- -# Performing disaster recovery drills -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -It is recommended that validation of application readiness for recovery workflow is performed periodically. Verifying the application behavior and implications of data loss and/or the disruption that failover involves is a good engineering practice. It is also a requirement by most industry standards as part of business continuity certification. - -Performing a disaster recovery drill consists of: - -* Simulating data tier outage -* Recovering -* Validate application integrity post recovery - -Depending on how you [designed your application for business continuity](business-continuity-high-availability-disaster-recover-hadr-overview.md), the workflow to execute the drill can vary. This article describes the best practices for conducting a disaster recovery drill in the context of Azure SQL Database. 
- -## Geo-restore - -To prevent the potential data loss when conducting a disaster recovery drill, perform the drill using a test environment by creating a copy of the production environment and using it to verify the application’s failover workflow. - -### Outage simulation - -To simulate the outage, you can rename the source database. This name change causes application connectivity failures. - -### Recovery - -* Perform the geo-restore of the database into a different server as described [here](disaster-recovery-guidance.md). -* Change the application configuration to connect to the recovered database and follow the [Configure a database after recovery](disaster-recovery-guidance.md) guide to complete the recovery. - -### Validation - -Complete the drill by verifying the application integrity post recovery (including connection strings, logins, basic functionality testing, or other validations part of standard application signoffs procedures). - -## Failover groups - -For a database that is protected using failover groups, the drill exercise involves planned failover to the secondary server. The planned failover ensures that the primary and the secondary databases in the failover group remain in sync when the roles are switched. Unlike the unplanned failover, this operation does not result in data loss, so the drill can be performed in the production environment. - -### Outage simulation - -To simulate the outage, you can disable the web application or virtual machine connected to the database. This outage simulation results in the connectivity failures for the web clients. - -### Recovery - -* Make sure the application configuration in the DR region points to the former secondary, which becomes the fully accessible new primary. -* Initiate [planned failover](scripts/setup-geodr-and-failover-database-powershell.md) of the failover group from the secondary server. 
-* Follow the [Configure a database after recovery](disaster-recovery-guidance.md) guide to complete the recovery. - -### Validation - -Complete the drill by verifying the application integrity post recovery (including connectivity, basic functionality testing, or other validations required for the drill signoffs). - -## Next steps - -* To learn about business continuity scenarios, see [Continuity scenarios](business-continuity-high-availability-disaster-recover-hadr-overview.md). -* To learn about Azure SQL Database automated backups, see [SQL Database automated backups](automated-backups-overview.md) -* To learn about using automated backups for recovery, see [restore a database from the service-initiated backups](recovery-using-backups.md). -* To learn about faster recovery options, see [Active geo-replication](active-geo-replication-overview.md) and [Auto-failover groups](auto-failover-group-overview.md). diff --git a/articles/azure-sql/database/disaster-recovery-guidance.md b/articles/azure-sql/database/disaster-recovery-guidance.md deleted file mode 100644 index c7b3a0a0066df..0000000000000 --- a/articles/azure-sql/database/disaster-recovery-guidance.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Disaster recovery -description: Learn how to recover a database from a regional data center outage or failure with the Azure SQL Database active geo-replication, and geo-restore capabilities. 
-services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 06/21/2019 ---- -# Restore your Azure SQL Database or failover to a secondary -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Azure SQL Database offers the following capabilities for recovering from an outage: - -- [Active geo-replication](active-geo-replication-overview.md) -- [Auto-failover groups](auto-failover-group-overview.md) -- [Geo-restore](recovery-using-backups.md#point-in-time-restore) -- [Zone-redundant databases](high-availability-sla.md) - -To learn about business continuity scenarios and the features supporting these scenarios, see [Business continuity](business-continuity-high-availability-disaster-recover-hadr-overview.md). - -> [!NOTE] -> If you are using zone-redundant Premium or Business Critical databases or pools, the recovery process is automated and the rest of this material does not apply. -> -> Both primary and secondary databases are required to have the same service tier. It is also strongly recommended that the secondary database is created with the same compute size (DTUs or vCores) as the primary. For more information, see [Upgrading or downgrading as primary database](active-geo-replication-overview.md#upgrading-or-downgrading-primary-database). -> -> Use one or several failover groups to manage failover of multiple databases. -> If you add an existing geo-replication relationship to the failover group, make sure the geo-secondary is configured with the same service tier and compute size as the primary. For more information, see [Use auto-failover groups to enable transparent and coordinated failover of multiple databases](auto-failover-group-overview.md). 
 - -## Prepare for the event of an outage - -For success with recovery to another data region using either failover groups or geo-redundant backups, you need to prepare a server in another data center to become the new primary server should the need arise as well as have well-defined steps documented and tested to ensure a smooth recovery. These preparation steps include: - -- Identify the server in another region to become the new primary server. For geo-restore, this is generally a server in the [paired region](../../availability-zones/cross-region-replication-azure.md) for the region in which your database is located. This eliminates the additional traffic cost during the geo-restoring operations. -- Identify, and optionally define, the server-level IP firewall rules needed for users to access the new primary database. -- Determine how you are going to redirect users to the new primary server, such as by changing connection strings or by changing DNS entries. -- Identify, and optionally create, the logins that must be present in the master database on the new primary server, and ensure these logins have appropriate permissions in the master database, if any. For more information, see [SQL Database security after disaster recovery](active-geo-replication-security-configure.md) -- Identify alert rules that need to be updated to map to the new primary database. -- Document the auditing configuration on the current primary database -- Perform a [disaster recovery drill](disaster-recovery-drills.md). To simulate an outage for geo-restore, you can delete or rename the source database to cause application connectivity failure. To simulate an outage using failover groups, you can disable the web application or virtual machine connected to the database or failover the database to cause application connectivity failures. - -## When to initiate recovery - -The recovery operation impacts the application. 
It requires changing the SQL connection string or redirection using DNS and could result in permanent data loss. Therefore, it should be done only when the outage is likely to last longer than your application's recovery time objective. When the application is deployed to production you should perform regular monitoring of the application health and use the following data points to assert that the recovery is warranted: - -1. Permanent connectivity failure from the application tier to the database. -2. The Azure portal shows an alert about an incident in the region with broad impact. - -> [!NOTE] -> If you are using failover groups and chose automatic failover, the recovery process is automated and transparent to the application. - -Depending on your application tolerance to downtime and possible business liability you can consider the following recovery options. - -Use the [Get Recoverable Database](/previous-versions/azure/reference/dn800985(v=azure.100)) (*LastAvailableBackupDate*) to get the latest Geo-replicated restore point. - -## Wait for service recovery - -The Azure teams work diligently to restore service availability as quickly as possible but depending on the root cause it can take hours or days. If your application can tolerate significant downtime you can simply wait for the recovery to complete. In this case, no action on your part is required. You can see the current service status on our [Azure Service Health Dashboard](https://azure.microsoft.com/status/). After the recovery of the region, your application's availability is restored. - -## Fail over to geo-replicated secondary server in the failover group - -If your application's downtime can result in business liability, you should be using failover groups. It enables the application to quickly restore availability in a different region in case of an outage. For a tutorial, see [Implement a geo-distributed database](geo-distributed-application-configure-tutorial.md). 
- -To restore availability of the database(s) you need to initiate the failover to the secondary server using one of the supported methods. - -Use one of the following guides to fail over to a geo-replicated secondary database: - -- [Fail over to a geo-replicated secondary server using the Azure portal](active-geo-replication-configure-portal.md) -- [Fail over to the secondary server using PowerShell](scripts/setup-geodr-and-failover-database-powershell.md) -- [Fail over to a secondary server using Transact-SQL (T-SQL)](/sql/t-sql/statements/alter-database-transact-sql?view=azuresqldb-current&preserve-view=true#e-failover-to-a-geo-replication-secondary) - -## Recover using geo-restore - -If your application's downtime does not result in business liability you can use [geo-restore](recovery-using-backups.md) as a method to recover your application database(s). It creates a copy of the database from its latest geo-redundant backup. - -## Configure your database after recovery - -If you are using geo-restore to recover from an outage, you must make sure that the connectivity to the new databases is properly configured so that the normal application function can be resumed. This is a checklist of tasks to get your recovered database production ready. - -### Update connection strings - -Because your recovered database resides in a different server, you need to update your application's connection string to point to that server. - -For more information about changing connection strings, see the appropriate development language for your [connection library](connect-query-content-reference-guide.md#libraries). - -### Configure firewall rules - -You need to make sure that the firewall rules configured on server and on the database match those that were configured on the primary server and primary database. For more information, see [How to: Configure Firewall Settings (Azure SQL Database)](firewall-configure.md). 
- -### Configure logins and database users - -You need to make sure that all the logins used by your application exist on the server which is hosting your recovered database. For more information, see [Security Configuration for geo-replication](active-geo-replication-security-configure.md). - -> [!NOTE] -> You should configure and test your server firewall rules and logins (and their permissions) during a disaster recovery drill. These server-level objects and their configuration may not be available during the outage. - -### Setup telemetry alerts - -You need to make sure your existing alert rule settings are updated to map to the recovered database and the different server. - -For more information about database alert rules, see [Receive Alert Notifications](../../azure-monitor/alerts/alerts-overview.md) and [Track Service Health](../../service-health/service-notifications.md). - -### Enable auditing - -If auditing is required to access your database, you need to enable Auditing after the database recovery. For more information, see [Database auditing](/azure/azure-sql/database/auditing-overview). 
- -## Next steps - -- To learn about Azure SQL Database automated backups, see [SQL Database automated backups](automated-backups-overview.md) -- To learn about business continuity design and recovery scenarios, see [Continuity scenarios](business-continuity-high-availability-disaster-recover-hadr-overview.md) -- To learn about using automated backups for recovery, see [restore a database from the service-initiated backups](recovery-using-backups.md) \ No newline at end of file diff --git a/articles/azure-sql/database/disaster-recovery-strategies-for-applications-with-elastic-pool.md b/articles/azure-sql/database/disaster-recovery-strategies-for-applications-with-elastic-pool.md deleted file mode 100644 index 967a0c8e761c6..0000000000000 --- a/articles/azure-sql/database/disaster-recovery-strategies-for-applications-with-elastic-pool.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Design disaster recovery solutions -description: Learn how to design your cloud solution for disaster recovery by choosing the right failover pattern. -services: sql-database -ms.service: sql-database -ms.subservice: elastic-pools -ms.custom: sqldbrb-1 -ms.devlang: -ms.topic: conceptual -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 01/25/2019 ---- -# Disaster recovery strategies for applications using Azure SQL Database elastic pools -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Azure SQL Database provides several capabilities to provide for the business continuity of your application when catastrophic incidents occur. [Elastic pools](elastic-pool-overview.md) and single databases support the same kind of disaster recovery (DR) capabilities. This article describes several DR strategies for elastic pools that leverage these Azure SQL Database business continuity features. - -This article uses the following canonical SaaS ISV application pattern: - -A modern cloud-based web application provisions one database for each end user. 
The ISV has many customers and therefore uses many databases, known as tenant databases. Because the tenant databases typically have unpredictable activity patterns, the ISV uses an elastic pool to make the database cost very predictable over extended periods of time. The elastic pool also simplifies the performance management when the user activity spikes. In addition to the tenant databases the application also uses several databases to manage user profiles, security, collect usage patterns etc. Availability of the individual tenants does not impact the application’s availability as whole. However, the availability and performance of management databases is critical for the application’s function and if the management databases are offline the entire application is offline. - -This article discusses DR strategies covering a range of scenarios from cost sensitive startup applications to ones with stringent availability requirements. - -> [!NOTE] -> If you are using Premium or Business Critical databases and elastic pools, you can make them resilient to regional outages by converting them to zone redundant deployment configuration. See [Zone-redundant databases](high-availability-sla.md). - -## Scenario 1. Cost sensitive startup - -I am a startup business and am extremely cost sensitive. I want to simplify deployment and management of the application and I can have a limited SLA for individual customers. But I want to ensure the application as a whole is never offline. - -To satisfy the simplicity requirement, deploy all tenant databases into one elastic pool in the Azure region of your choice and deploy management databases as geo-replicated single databases. For the disaster recovery of tenants, use geo-restore, which comes at no additional cost. To ensure the availability of the management databases, geo-replicate them to another region using an auto-failover group (step 1). 
The ongoing cost of the disaster recovery configuration in this scenario is equal to the total cost of the secondary databases. This configuration is illustrated on the next diagram. - -![Figure 1](./media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-1.png) - -If an outage occurs in the primary region, the recovery steps to bring your application online are illustrated by the next diagram. - -* The failover group initiates automatic failover of the management database to the DR region. The application is automatically reconnected to the new primary and all new accounts and tenant databases are created in the DR region. The existing customers see their data temporarily unavailable. -* Create the elastic pool with the same configuration as the original pool (2). -* Use geo-restore to create copies of the tenant databases (3). You can consider triggering the individual restores by the end-user connections or use some other application-specific priority scheme. - -At this point your application is back online in the DR region, but some customers experience delay when accessing their data. - -![Figure 2](./media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-2.png) - -If the outage was temporary, it is possible that the primary region is recovered by Azure before all the database restores are complete in the DR region. In this case, orchestrate moving the application back to the primary region. The process takes the steps illustrated on the next diagram. - -* Cancel all outstanding geo-restore requests. -* Fail over the management databases to the primary region (5). After the region’s recovery, the old primaries have automatically become secondaries. Now they switch roles again. -* Change the application's connection string to point back to the primary region. Now all new accounts and tenant databases are created in the primary region. Some existing customers see their data temporarily unavailable. 
-* Set all databases in the DR pool to read-only to ensure they cannot be modified in the DR region (6). -* For each database in the DR pool that has changed since the recovery, rename or delete the corresponding databases in the primary pool (7). -* Copy the updated databases from the DR pool to the primary pool (8). -* Delete the DR pool (9) - -At this point your application is online in the primary region with all tenant databases available in the primary pool. - -![Figure 3](./media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-3.png) - -### Benefit - -The key benefit of this strategy is low ongoing cost for data tier redundancy. Azure SQL Database automatically backs up databases with no application rewrite at no additional cost. The cost is incurred only when the elastic databases are restored. - -### Trade-off - -The trade-off is that the complete recovery of all tenant databases takes significant time. The length of time depends on the total number of restores you initiate in the DR region and overall size of the tenant databases. Even if you prioritize some tenants' restores over others, you are competing with all the other restores that are initiated in the same region as the service arbitrates and throttles to minimize the overall impact on the existing customers' databases. In addition, the recovery of the tenant databases cannot start until the new elastic pool in the DR region is created. - -## Scenario 2. Mature application with tiered service - -I am a mature SaaS application with tiered service offers and different SLAs for trial customers and for paying customers. For the trial customers, I have to reduce the cost as much as possible. Trial customers can take downtime but I want to reduce its likelihood. For the paying customers, any downtime is a flight risk. So I want to make sure that paying customers are always able to access their data. 
- -To support this scenario, separate the trial tenants from paid tenants by putting them into separate elastic pools. The trial customers have lower eDTU or vCores per tenant and lower SLA with a longer recovery time. The paying customers are in a pool with higher eDTU or vCores per tenant and a higher SLA. To guarantee the lowest recovery time, the paying customers' tenant databases are geo-replicated. This configuration is illustrated on the next diagram. - -![Diagram shows a primary region and a D R region which employ geo-replication between the management database and paid customers primary pool and secondary pool with no replication for the trial customers pool.](./media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-4.png) - -As in the first scenario, the management databases are quite active so you use a single geo-replicated database for it (1). This ensures the predictable performance for new customer subscriptions, profile updates, and other management operations. The region in which the primaries of the management databases reside is the primary region and the region in which the secondaries of the management databases reside is the DR region. - -The paying customers’ tenant databases have active databases in the “paid” pool provisioned in the primary region. Provision a secondary pool with the same name in the DR region. Each tenant is geo-replicated to the secondary pool (2). This enables quick recovery of all tenant databases using failover. - -If an outage occurs in the primary region, the recovery steps to bring your application online are illustrated in the next diagram: - -![Diagram shows an outage for the primary region, with failover to the management database, paid customer secondary pool, and creation and restore for trial customers.](./media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-5.png) - -* Immediately fail over the management databases to the DR region (3). 
-* Change the application’s connection string to point to the DR region. Now all new accounts and tenant databases are created in the DR region. The existing trial customers see their data temporarily unavailable. -* Fail over the paid tenant's databases to the pool in the DR region to immediately restore their availability (4). Since the failover is a quick metadata level change, consider an optimization where the individual failovers are triggered on demand by the end-user connections. -* If your secondary pool eDTU size or vCore value was lower than the primary because the secondary databases only required the capacity to process the change logs while they were secondaries, immediately increase the pool capacity now to accommodate the full workload of all tenants (5). -* Create the new elastic pool with the same name and the same configuration in the DR region for the trial customers' databases (6). -* Once the trial customers’ pool is created, use geo-restore to restore the individual trial tenant databases into the new pool (7). Consider triggering the individual restores by the end-user connections or use some other application-specific priority scheme. - -At this point your application is back online in the DR region. All paying customers have access to their data while the trial customers experience delay when accessing their data. - -When the primary region is recovered by Azure *after* you have restored the application in the DR region you can continue running the application in that region or you can decide to fail back to the primary region. If the primary region is recovered *before* the failover process is completed, consider failing back right away. The failback takes the steps illustrated in the next diagram: - -![Diagram shows failback steps to implement after restoring the primary region.](./media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-6.png) - -* Cancel all outstanding geo-restore requests. 
-* Fail over the management databases (8). After the region’s recovery, the old primary automatically become the secondary. Now it becomes the primary again. -* Fail over the paid tenant databases (9). Similarly, after the region’s recovery, the old primaries automatically become the secondaries. Now they become the primaries again. -* Set the restored trial databases that have changed in the DR region to read-only (10). -* For each database in the trial customers DR pool that changed since the recovery, rename or delete the corresponding database in the trial customers primary pool (11). -* Copy the updated databases from the DR pool to the primary pool (12). -* Delete the DR pool (13). - -> [!NOTE] -> The failover operation is asynchronous. To minimize the recovery time it is important that you execute the tenant databases' failover command in batches of at least 20 databases. - -### Benefit - -The key benefit of this strategy is that it provides the highest SLA for the paying customers. It also guarantees that the new trials are unblocked as soon as the trial DR pool is created. - -### Trade-off - -The trade-off is that this setup increases the total cost of the tenant databases by the cost of the secondary DR pool for paid customers. In addition, if the secondary pool has a different size, the paying customers experience lower performance after failover until the pool upgrade in the DR region is completed. - -## Scenario 3. Geographically distributed application with tiered service - -I have a mature SaaS application with tiered service offers. I want to offer a very aggressive SLA to my paid customers and minimize the risk of impact when outages occur because even brief interruption can cause customer dissatisfaction. It is critical that the paying customers can always access their data. The trials are free and an SLA is not offered during the trial period. - -To support this scenario, use three separate elastic pools. 
Provision two equal size pools with high eDTUs or vCores per database in two different regions to contain the paid customers' tenant databases. The third pool containing the trial tenants can have lower eDTUs or vCores per database and be provisioned in one of the two regions. - -To guarantee the lowest recovery time during outages, the paying customers' tenant databases are geo-replicated with 50% of the primary databases in each of the two regions. Similarly, each region has 50% of the secondary databases. This way, if a region is offline, only 50% of the paid customers' databases are impacted and have to fail over. The other databases remain intact. This configuration is illustrated in the following diagram: - -![Diagram shows a primary region called Region A and secondary region called Region B which employ geo-replication between the management database and paid customers primary pool and secondary pool with no replication for the trial customers pool.](./media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-7.png) - -As in the previous scenarios, the management databases are quite active so configure them as single geo-replicated databases (1). This ensures the predictable performance of the new customer subscriptions, profile updates and other management operations. Region A is the primary region for the management databases and the region B is used for recovery of the management databases. - -The paying customers’ tenant databases are also geo-replicated but with primaries and secondaries split between region A and region B (2). This way, the tenant primary databases impacted by the outage can fail over to the other region and become available. The other half of the tenant databases are not be impacted at all. - -The next diagram illustrates the recovery steps to take if an outage occurs in region A. 
- -![Diagram shows an outage for the primary region, with failover to the management database, paid customer secondary pool, and creation and restore for trial customers to region B.](./media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-8.png) - -* Immediately fail over the management databases to region B (3). -* Change the application’s connection string to point to the management databases in region B. Modify the management databases to make sure the new accounts and tenant databases are created in region B and the existing tenant databases are found there as well. The existing trial customers see their data temporarily unavailable. -* Fail over the paid tenant's databases to pool 2 in region B to immediately restore their availability (4). Since the failover is a quick metadata level change, you may consider an optimization where the individual failovers are triggered on demand by the end-user connections. -* Since now pool 2 contains only primary databases, the total workload in the pool increases and can immediately increase its eDTU size (5) or number of vCores. -* Create the new elastic pool with the same name and the same configuration in the region B for the trial customers' databases (6). -* Once the pool is created use geo-restore to restore the individual trial tenant database into the pool (7). You can consider triggering the individual restores by the end-user connections or use some other application-specific priority scheme. - -> [!NOTE] -> The failover operation is asynchronous. To minimize the recovery time, it is important that you execute the tenant databases' failover command in batches of at least 20 databases. - -At this point your application is back online in region B. All paying customers have access to their data while the trial customers experience delay when accessing their data. 
- -When region A is recovered you need to decide if you want to use region B for trial customers or failback to using the trial customers pool in region A. One criteria could be the % of trial tenant databases modified since the recovery. Regardless of that decision, you need to re-balance the paid tenants between two pools. the next diagram illustrates the process when the trial tenant databases fail back to region A. - -![Diagram shows failback steps to implement after restoring Region A.](./media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-9.png) - -* Cancel all outstanding geo-restore requests to trial DR pool. -* Fail over the management database (8). After the region’s recovery, the old primary automatically became the secondary. Now it becomes the primary again. -* Select which paid tenant databases fail back to pool 1 and initiate failover to their secondaries (9). After the region’s recovery, all databases in pool 1 automatically became secondaries. Now 50% of them become primaries again. -* Reduce the size of pool 2 to the original eDTU (10) or number of vCores. -* Set all restored trial databases in the region B to read-only (11). -* For each database in the trial DR pool that has changed since the recovery, rename or delete the corresponding database in the trial primary pool (12). -* Copy the updated databases from the DR pool to the primary pool (13). -* Delete the DR pool (14). - -### Benefit - -The key benefits of this strategy are: - -* It supports the most aggressive SLA for the paying customers because it ensures that an outage cannot impact more than 50% of the tenant databases. -* It guarantees that the new trials are unblocked as soon as the trail DR pool is created during the recovery. -* It allows more efficient use of the pool capacity as 50% of secondary databases in pool 1 and pool 2 are guaranteed to be less active than the primary databases. 
- -### Trade-offs - -The main trade-offs are: - -* The CRUD operations against the management databases have lower latency for the end users connected to region A than for the end users connected to region B as they are executed against the primary of the management databases. -* It requires more complex design of the management database. For example, each tenant record has a location tag that needs to be changed during failover and failback. -* The paying customers may experience lower performance than usual until the pool upgrade in region B is completed. - -## Summary - -This article focuses on the disaster recovery strategies for the database tier used by a SaaS ISV multi-tenant application. The strategy you choose is based on the needs of the application, such as the business model, the SLA you want to offer to your customers, budget constraint etc. Each described strategy outlines the benefits and trade-off so you could make an informed decision. Also, your specific application likely includes other Azure components. So you review their business continuity guidance and orchestrate the recovery of the database tier with them. To learn more about managing recovery of database applications in Azure, refer to [Designing cloud solutions for disaster recovery](designing-cloud-solutions-for-disaster-recovery.md). - -## Next steps - -* To learn about Azure SQL Database automated backups, see [Azure SQL Database automated backups](automated-backups-overview.md). -* For a business continuity overview and scenarios, see [Business continuity overview](business-continuity-high-availability-disaster-recover-hadr-overview.md). -* To learn about using automated backups for recovery, see [restore a database from the service-initiated backups](recovery-using-backups.md). -* To learn about faster recovery options, see [Active geo-replication](active-geo-replication-overview.md) and [Auto-failover groups](auto-failover-group-overview.md). 
-* To learn about using automated backups for archiving, see [database copy](database-copy.md). diff --git a/articles/azure-sql/database/dns-alias-overview.md b/articles/azure-sql/database/dns-alias-overview.md deleted file mode 100644 index 954887f0f5b19..0000000000000 --- a/articles/azure-sql/database/dns-alias-overview.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: DNS alias -description: Your applications can connect to an alias for the name of the server for Azure SQL Database. Meanwhile, you can change the SQL Database the alias points to anytime, to facilitate testing and so on. -services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: seo-lt-2019 sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma, vanto -ms.date: 06/26/2019 ---- -# DNS alias for Azure SQL Database -[!INCLUDE[appliesto-sqldb-asa](../includes/appliesto-sqldb-asa.md)] - -Azure SQL Database has a Domain Name System (DNS) server. PowerShell and REST APIs accept [calls to create and manage DNS aliases](#anchor-powershell-code-62x) for your [logical SQL server](logical-servers.md) name. - -A *DNS alias* can be used in place of the server name. Client programs can use the alias in their connection strings. The DNS alias provides a translation layer that can redirect your client programs to different servers. This layer spares you the difficulties of having to find and edit all the clients and their connection strings. - -Common uses for a DNS alias include the following cases: - -- Create an easy to remember name for a server. -- During initial development, your alias can refer to a test server. When the application goes live, you can modify the alias to refer to the production server. The transition from test to production does not require any modification to the configurations several clients that connect to the server. 
-- Suppose the only database in your application is moved to another server. You can modify the alias without having to modify the configurations of several clients. -- During a regional outage you use geo-restore to recover your database in a different server and region. You can modify your existing alias to point to the new server so that the existing client application could re-connect to it. - -## Domain Name System (DNS) of the Internet - -The Internet relies on the DNS. The DNS translates your friendly names into the name of your server. - -## Scenarios with one DNS alias - -Suppose you need to switch your system to a new server. In the past you needed to find and update every connection string in every client program. But now, if the connection strings use a DNS alias, only an alias property must be updated. - -The DNS alias feature of Azure SQL Database can help in the following scenarios: - -### Test to production - -When you start developing the client programs, have them use a DNS alias in their connection strings. You make the properties of the alias point to a test version of your server. - -Later when the new system goes live in production, you can update the properties of the alias to point to the production server. No change to the client programs is necessary. - -### Cross-region support - -A disaster recovery might shift your server to a different geographic region. For a system that was using a DNS alias, the need to find and update all the connection strings for all clients can be avoided. Instead, you can update an alias to refer to the new server that now hosts your Azure SQL Database. - -## Properties of a DNS alias - -The following properties apply to each DNS alias for your server: - -- *Unique name:* Each alias name you create is unique across all servers, just as server names are. -- *Server is required:* A DNS alias cannot be created unless it references exactly one server, and the server must already exist. 
An updated alias must always reference exactly one existing server. - - When you drop a server, the Azure system also drops all DNS aliases that refer to the server. -- *Not bound to any region:* DNS aliases are not bound to a region. Any DNS aliases can be updated to refer to a server that resides in any geographic region. - - However, when updating an alias to refer to another server, both servers must exist in the same Azure *subscription*. -- *Permissions:* To manage a DNS alias, the user must have *Server Contributor* permissions, or higher. For more information, see [Get started with Azure role-based access control in the Azure portal](../../role-based-access-control/overview.md). - -## Manage your DNS aliases - -Both REST APIs and PowerShell cmdlets are available to enable you to programmatically manage your DNS aliases. - -### REST APIs for managing your DNS aliases - -The documentation for the REST APIs is available near the following web location: - -- [Azure SQL Database REST API](/rest/api/sql/) - -Also, the REST APIs can be seen in GitHub at: - -- [Azure SQL Database DNS alias REST APIs](https://github.com/Azure/azure-rest-api-specs/blob/master/specification/sql/resource-manager/Microsoft.Sql/preview/2017-03-01-preview/serverDnsAliases.json) - - - -### PowerShell for managing your DNS aliases - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. - -PowerShell cmdlets are available that call the REST APIs. 
- -A code example of PowerShell cmdlets being used to manage DNS aliases is documented at: - -- [PowerShell for DNS Alias to Azure SQL Database](dns-alias-powershell-create.md) - -The cmdlets used in the code example are the following: - -- [New-AzSqlServerDnsAlias](/powershell/module/az.Sql/New-azSqlServerDnsAlias): Creates a new DNS alias in the Azure SQL Database service system. The alias refers to server 1. -- [Get-AzSqlServerDnsAlias](/powershell/module/az.Sql/Get-azSqlServerDnsAlias): Get and list all the DNS aliases that are assigned to server 1. -- [Set-AzSqlServerDnsAlias](/powershell/module/az.Sql/Set-azSqlServerDnsAlias): Modifies the server name that the alias is configured to refer to, from server 1 to server 2. -- [Remove-AzSqlServerDnsAlias](/powershell/module/az.Sql/Remove-azSqlServerDnsAlias): Remove the DNS alias from server 2, by using the name of the alias. - -## Limitations - -Presently, a DNS alias has the following limitations: - -- *Delay of up to 2 minutes:* It takes up to 2 minutes for a DNS alias to be updated or removed. - - Regardless of any brief delay, the alias immediately stops referring client connections to the legacy server. -- *DNS lookup:* For now, the only authoritative way to check what server a given DNS alias refers to is by performing a [DNS lookup](/windows-server/administration/windows-commands/nslookup). -- _Table auditing is not supported:_ You cannot use a DNS alias on a server that has *table auditing* enabled on a database. - - Table auditing is deprecated. - - We recommend that you move to [Blob Auditing](/azure/azure-sql/database/auditing-overview). -- DNS alias is subject to [naming restrictions](/azure/azure-resource-manager/management/resource-name-rules). - -## Related resources - -- [Overview of business continuity with Azure SQL Database](business-continuity-high-availability-disaster-recover-hadr-overview.md), including disaster recovery. 
-- [Azure REST API reference](/rest/api/azure/) -- [Server Dns Aliases API](/rest/api/sql/2020-11-01-preview/serverdnsaliases) - -## Next steps - -- [PowerShell for DNS Alias to Azure SQL Database](dns-alias-powershell-create.md) diff --git a/articles/azure-sql/database/dns-alias-powershell-create.md b/articles/azure-sql/database/dns-alias-powershell-create.md deleted file mode 100644 index 1e92b32835705..0000000000000 --- a/articles/azure-sql/database/dns-alias-powershell-create.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -title: DNS Alias (PowerShell & Azure CLI) -description: PowerShell and Azure CLI cmdlets enable you to redirect new client connections to a different SQL server in Azure, without having to touch any client configuration. -keywords: dns sql database -ms.custom: seo-lt-2019 sqldbrb=1, devx-track-azurecli, devx-track-azurepowershell -services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.devlang: PowerShell -ms.topic: how-to -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma, amagarwa, maboja, vanto -ms.date: 05/14/2019 ---- -# PowerShell for DNS Alias to Azure SQL Database -[!INCLUDE[appliesto-sqldb-asa](../includes/appliesto-sqldb-asa.md)] - -This article provides a PowerShell script that demonstrates how you can manage a DNS alias for the [SQL server](logical-servers.md) hosting your Azure SQL Database. - -> [!NOTE] -> This article has been updated to use either the Azure PowerShell Az module or Azure CLI. You can still use the AzureRM module, which will continue to receive bug fixes until at least December 2020. -> -> To learn more about the Az module and AzureRM compatibility, see [Introducing the Azure PowerShell Az module](/powershell/azure/new-azureps-module-az). For installation instructions, see [Install Azure PowerShell](/powershell/azure/install-az-ps) or [Install Azure CLI](/cli/azure/install-azure-cli). 
- -## DNS alias in connection string - -To connect a [logical SQL server](logical-servers.md), a client such as SQL Server Management Studio (SSMS) can provide the DNS alias name instead of the true server name. In the following example server string, the alias *any-unique-alias-name* replaces the first dot-delimited node in the four node server string: - - `.database.windows.net` - -## Prerequisites - -If you want to run the demo PowerShell script given in this article, the following prerequisites apply: - -- An Azure subscription and account, for free trial, see [Azure trials](https://azure.microsoft.com/free/) -- Two servers - -## Example - -The following code example starts by assigning literal values to several variables. - -To run the code, edit the placeholder values to match real values in your system. - -# [PowerShell](#tab/azure-powershell) - -The cmdlets used are the following: - -- [New-AzSqlServerDNSAlias](/powershell/module/az.Sql/New-azSqlServerDnsAlias): Creates a DNS alias in the Azure SQL Database service system. The alias refers to server 1. -- [Get-AzSqlServerDNSAlias](/powershell/module/az.Sql/Get-azSqlServerDnsAlias): Get and list all the aliases assigned to server 1. -- [Set-AzSqlServerDNSAlias](/powershell/module/az.Sql/Set-azSqlServerDnsAlias): Modifies the server name that the alias is configured to refer to, from server 1 to server 2. -- [Remove-AzSqlServerDNSAlias](/powershell/module/az.Sql/Remove-azSqlServerDnsAlias): Remove the alias from server 2, by using the name of the alias. - -To install or upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). - -Use `Get-Module -ListAvailable Az` in *powershell\_ise.exe*, to find the version. - -```powershell -$subscriptionName = ''; -$sqlServerDnsAliasName = ''; -$resourceGroupName = ''; -$sqlServerName = ''; -$resourceGroupName2 = ''; # can be same or different than $resourceGroupName -$sqlServerName2 = ''; # must be different from $sqlServerName. 
- -# login to Azure -Connect-AzAccount -SubscriptionName $subscriptionName; -$subscriptionId = Get-AzSubscription -SubscriptionName $subscriptionName; - -Write-Host 'Assign an alias to server 1...'; -New-AzSqlServerDnsAlias –ResourceGroupName $resourceGroupName -ServerName $sqlServerName ` - -Name $sqlServerDnsAliasName; - -Write-Host 'Get the aliases assigned to server 1...'; -Get-AzSqlServerDnsAlias –ResourceGroupName $resourceGroupName -ServerName $sqlServerName; - -Write-Host 'Move the alias from server 1 to server 2...'; -Set-AzSqlServerDnsAlias –ResourceGroupName $resourceGroupName2 -TargetServerName $sqlServerName2 ` - -Name $sqlServerDnsAliasName ` - -SourceServerResourceGroup $resourceGroupName -SourceServerName $sqlServerName ` - -SourceServerSubscriptionId $subscriptionId.Id; - -Write-Host 'Get the aliases assigned to server 2...'; -Get-AzSqlServerDnsAlias –ResourceGroupName $resourceGroupName2 -ServerName $sqlServerName2; - -Write-Host 'Remove the alias from server 2...'; -Remove-AzSqlServerDnsAlias –ResourceGroupName $resourceGroupName2 -ServerName $sqlServerName2 ` - -Name $sqlServerDnsAliasName; -``` - -# [Azure CLI](#tab/azure-cli) - -The commands used are the following: - -- [az sql server dns-alias create](/powershell/module/az.Sql/New-azSqlServerDnsAlias): Creates a DNS alias for a server. The alias refers to server 1. -- [az sql server dns-alias show](/powershell/module/az.Sql/Get-azSqlServerDnsAlias): Get and list all the aliases assigned to server 1. -- [az sql server dns-alias set](/powershell/module/az.Sql/Set-azSqlServerDnsAlias): Modifies the server name that the alias is configured to refer to, from server 1 to server 2. -- [az sql server dns-alias delete](/powershell/module/az.Sql/Remove-azSqlServerDnsAlias): Remove the alias from server 2, by using the name of the alias. - -To install or upgrade, see [Install Azure CLI](/cli/azure/install-azure-cli). 
- -```azurecli-interactive -$subscriptionName = ''; -$sqlServerDnsAliasName = ''; -$resourceGroupName = ''; -$sqlServerName = ''; -$resourceGroupName2 = ''; # can be same or different than $resourceGroupName -$sqlServerName2 = ''; # must be different from $sqlServerName. - -# login to Azure -az login -SubscriptionName $subscriptionName; -$subscriptionId = az account list[0].i -SubscriptionName $subscriptionName; - -Write-Host 'Assign an alias to server 1...'; -az sql server dns-alias create –-resource-group $resourceGroupName --server $sqlServerName ` - --name $sqlServerDnsAliasName; - -Write-Host 'Get the aliases assigned to server 1...'; -az sql server dns-alias show –-resource-group $resourceGroupName --server $sqlServerName; - -Write-Host 'Move the alias from server 1 to server 2...'; -az sql server dns-alias set –-resource-group $resourceGroupName2 --server $sqlServerName2 ` - --name $sqlServerDnsAliasName ` - --original-resource-group $resourceGroupName --original-server $sqlServerName ` - --original-subscription-id $subscriptionId.Id; - -Write-Host 'Get the aliases assigned to server 2...'; -az sql server dns-alias show –-resource-group $resourceGroupName2 --server $sqlServerName2; - -Write-Host 'Remove the alias from server 2...'; -az sql server dns-alias delete –-resource-group $resourceGroupName2 --server $sqlServerName2 ` - --name $sqlServerDnsAliasName; -``` - -* * * - -## Next steps - -For a full explanation of the DNS alias feature for SQL Database, see [DNS alias for Azure SQL Database](./dns-alias-overview.md). diff --git a/articles/azure-sql/database/doc-changes-updates-release-notes-whats-new.md b/articles/azure-sql/database/doc-changes-updates-release-notes-whats-new.md deleted file mode 100644 index a6095646a74e3..0000000000000 --- a/articles/azure-sql/database/doc-changes-updates-release-notes-whats-new.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: What's new? 
-titleSuffix: Azure SQL Database -description: Learn about the new features and documentation improvements for Azure SQL Database. -services: sql-database -author: LitKnd -ms.author: kendralittle -ms.service: sql-database -ms.subservice: service-overview -ms.custom: sqldbrb=2, references_regions, ignite-fall-2021 -ms.devlang: -ms.topic: conceptual -ms.date: 03/07/2022 ---- -# What's new in Azure SQL Database? -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!div class="op_single_selector"] -> * [Azure SQL Database](doc-changes-updates-release-notes-whats-new.md) -> * [Azure SQL Managed Instance](../managed-instance/doc-changes-updates-release-notes-whats-new.md) - -This article summarizes the documentation changes associated with new features and improvements in the recent releases of [Azure SQL Database](https://azure.microsoft.com/products/azure-sql/database/). To learn more about Azure SQL Database, see the [overview](sql-database-paas-overview.md). - - -## Preview - -The following table lists the features of Azure SQL Database that are currently in preview: - -| Feature | Details | -| ---| --- | -| [Change data capture](/sql/relational-databases/track-changes/about-change-data-capture-sql-server) | Change data capture (CDC) lets you track all the changes that occur on a database. Though this feature has been available for SQL Server for quite some time, using it with Azure SQL Database is currently in preview. | -| [Elastic jobs](elastic-jobs-overview.md) | The elastic jobs feature is the SQL Server Agent replacement for Azure SQL Database as a PaaS offering. | -| [Elastic queries](elastic-query-overview.md) | The elastic queries feature allows for cross-database queries in Azure SQL Database. | -| [Elastic transactions](elastic-transactions-overview.md) | Elastic transactions allow you to execute transactions distributed among cloud databases in Azure SQL Database. 
| -| [Ledger](ledger-overview.md) | The Azure SQL Database ledger feature allows you to cryptographically attest to other parties, such as auditors or other business parties, that your data hasn't been tampered with. | -| [Maintenance window advance notifications](../database/advance-notifications.md)| Advance notifications are available for databases configured to use a non-default [maintenance window](maintenance-window.md). Advance notifications for maintenance windows are in public preview for Azure SQL Database. | -| [Query editor in the Azure portal](connect-query-portal.md) | The query editor in the portal allows you to run queries against your Azure SQL Database directly from the [Azure portal](https://portal.azure.com).| -| [Query Store hints](/sql/relational-databases/performance/query-store-hints?view=azuresqldb-current&preserve-view=true) | Use query hints to optimize your query execution via the OPTION clause. | -| [Reverse migrate from Hyperscale](manage-hyperscale-database.md#reverse-migrate-from-hyperscale) | Reverse migration to the General Purpose service tier allows customers who have recently migrated an existing database in Azure SQL Database to the Hyperscale service tier to move back in an emergency, should Hyperscale not meet their needs. While reverse migration is initiated by a service tier change, it's essentially a size-of-data move between different architectures. | -| [SQL Analytics](../../azure-monitor/insights/azure-sql.md)|Azure SQL Analytics is an advanced cloud monitoring solution for monitoring performance of all of your Azure SQL databases at scale and across multiple subscriptions in a single view. Azure SQL Analytics collects and visualizes key performance metrics with built-in intelligence for performance troubleshooting.| -| [SQL Insights (preview)](../../azure-monitor/insights/sql-insights-overview.md) | SQL Insights (preview) is a comprehensive solution for monitoring any product in the Azure SQL family. 
SQL Insights (preview) uses dynamic management views to expose the data you need to monitor health, diagnose problems, and tune performance.| -| [Zone redundant configuration for Hyperscale databases](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview) | The zone redundant configuration feature utilizes [Azure Availability Zones](../../availability-zones/az-overview.md#availability-zones) to replicate databases across multiple physical locations within an Azure region. By selecting [zone redundancy](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview), you can make your Hyperscale databases resilient to a much larger set of failures, including catastrophic datacenter outages, without any changes to the application logic.| -||| - -## General availability (GA) - -The following table lists the features of Azure SQL Database that have transitioned from preview to general availability (GA) within the last 12 months: - -| Feature | GA Month | Details | -| ---| --- |--- | -| [Zone redundant configuration for General Purpose tier](high-availability-sla.md#general-purpose-service-tier-zone-redundant-availability) | April 2022 | The zone redundant configuration feature utilizes [Azure Availability Zones](../../availability-zones/az-overview.md#availability-zones) to replicate databases across multiple physical locations within an Azure region. By selecting [zone redundancy](high-availability-sla.md#general-purpose-service-tier-zone-redundant-availability), you can make your provisioned and serverless General Purpose databases and elastic pools resilient to a much larger set of failures, including catastrophic datacenter outages, without any changes to the application logic.| -| [Maintenance window](../database/maintenance-window.md)| March 2022 | The maintenance window feature allows you to configure maintenance schedule for your Azure SQL Database. 
[Maintenance window advance notifications](../database/advance-notifications.md), however, are in preview.| -| [Storage redundancy for Hyperscale databases](automated-backups-overview.md#configure-backup-storage-redundancy) | March 2022 | When creating a Hyperscale database, you can choose your preferred storage type: read-access geo-redundant storage (RA-GRS), zone-redundant storage (ZRS), or locally redundant storage (LRS) Azure standard storage. The selected storage redundancy option will be used for the lifetime of the database for both data storage redundancy and backup storage redundancy. | -| [Azure Active Directory-only authentication](authentication-azure-ad-only-authentication.md) | November 2021 | It's possible to configure your Azure SQL Database to allow authentication only from Azure Active Directory. | -| [Azure AD service principal](authentication-aad-service-principal.md) | September 2021 | Azure Active Directory (Azure AD) supports user creation in Azure SQL Database on behalf of Azure AD applications (service principals).| -| [Audit management operations](../database/auditing-overview.md#auditing-of-microsoft-support-operations) | March 2021 | Azure SQL audit capabilities enable you to audit operations done by Microsoft support engineers when they need to access your SQL assets during a support request, enabling more transparency in your workforce. | - - - -## Documentation changes - -Learn about significant changes to the Azure SQL Database documentation. - -### April 2022 - -| Changes | Details | -| --- | --- | -| **General Purpose tier Zone redundancy GA** | Enabling zone redundancy for your provisioned and serverless General Purpose databases and elastic pools is now generally available in select regions. To learn more, including region availability see [General Purpose zone redundancy](high-availability-sla.md#general-purpose-service-tier-zone-redundant-availability). 
| - -### March 2022 - -| Changes | Details | -| --- | --- | -| **GA for maintenance window** | The [maintenance window](maintenance-window.md) feature allows you to configure a maintenance schedule for your Azure SQL Database and receive advance notifications of maintenance windows. [Maintenance window advance notifications](../database/advance-notifications.md) are in public preview for databases configured to use a non-default [maintenance window](maintenance-window.md).| -| **Hyperscale zone redundant configuration preview** | It's now possible to create new Hyperscale databases with zone redundancy to make your databases resilient to a much larger set of failures. This feature is currently in preview for the Hyperscale service tier. To learn more, see [Hyperscale zone redundancy](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview). | -| **Hyperscale storage redundancy GA** | Choosing your storage redundancy for your databases in the Hyperscale service tier is now generally available. See [Configure backup storage redundancy](automated-backups-overview.md#configure-backup-storage-redundancy) to learn more. | - - -### February 2022 - -| Changes | Details | -| --- | --- | -| **Hyperscale reverse migration** | Reverse migration is now in preview. Reverse migration to the General Purpose service tier allows customers who have recently migrated an existing database in Azure SQL Database to the Hyperscale service tier to move back in an emergency, should Hyperscale not meet their needs. While reverse migration is initiated by a service tier change, it's essentially a size-of-data move between different architectures. Learn about [reverse migration from Hyperscale](manage-hyperscale-database.md#reverse-migrate-from-hyperscale). | -| **New Hyperscale articles** | We have reorganized some existing content into new articles and added new content for Hyperscale. 
Learn about [Hyperscale distributed functions architecture](hyperscale-architecture.md), [how to manage a Hyperscale database](manage-hyperscale-database.md), and how to [create a Hyperscale database](hyperscale-database-create-quickstart.md). | -| **Free Azure SQL Database** | Try Azure SQL Database for free using the Azure free account. To learn more, review [Try SQL Database for free](free-sql-db-free-account-how-to-deploy.md).| - -### 2021 - -| Changes | Details | -| --- | --- | -| **Azure AD-only authentication** | Restricting authentication to your Azure SQL Database only to Azure Active Directory users is now generally available. To learn more, see [Azure AD-only authentication](../database/authentication-azure-ad-only-authentication.md). | -|**Split what's new** | The previously combined **What's new** article has been split by product - [What's new in SQL Database](doc-changes-updates-release-notes-whats-new.md) and [What's new in SQL Managed Instance](../managed-instance/doc-changes-updates-release-notes-whats-new.md), making it easier to identify what features are currently in preview, generally available, and significant documentation changes. Additionally, the [Known Issues in SQL Managed Instance](../managed-instance/doc-changes-updates-known-issues.md) content has moved to its own page. | -| **Maintenance Window support for availability zones** | You can now use the [Maintenance Window feature](maintenance-window.md) if your Azure SQL Database is deployed to an availability zone. This feature is currently in preview. | -| **Azure AD-only authentication** | It's now possible to restrict authentication to your Azure SQL Database to Azure Active Directory users only. This feature is currently in preview. To learn more, see [Azure AD-only authentication](authentication-azure-ad-only-authentication.md). | -| **Query store hints** | It's now possible to use query hints to optimize your query execution via the OPTION clause. 
This feature is currently in preview. To learn more, see [Query store hints](/sql/relational-databases/performance/query-store-hints?view=azuresqldb-current&preserve-view=true). | -| **Change data capture** | Using change data capture (CDC) with Azure SQL Database is now in preview. To learn more, see [Change data capture](/sql/relational-databases/track-changes/about-change-data-capture-sql-server). | -| **SQL Database ledger** | SQL Database ledger is in preview, and introduces the ability to cryptographically attest to other parties, such as auditors or other business parties, that your data hasn't been tampered with. To learn more, see [Ledger](ledger-overview.md). | -| **Maintenance window** | The maintenance window feature allows you to configure a maintenance schedule for your Azure SQL Database, currently in preview. To learn more, see [maintenance window](maintenance-window.md).| -| **SQL insights** | SQL insights is a comprehensive solution for monitoring any product in the Azure SQL family. SQL insights uses dynamic management views to expose the data you need to monitor health, diagnose problems, and tune performance. To learn more, see [SQL insights](../../azure-monitor/insights/sql-insights-overview.md). | - -## Contribute to content - -To contribute to the Azure SQL documentation, see the [Docs contributor guide](/contribute/). diff --git a/articles/azure-sql/database/dtu-benchmark.md b/articles/azure-sql/database/dtu-benchmark.md deleted file mode 100644 index d710bfd23c920..0000000000000 --- a/articles/azure-sql/database/dtu-benchmark.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: DTU benchmark -description: Learn about the benchmark for the DTU-based purchasing model for Azure SQL Database. 
-services: sql-database -ms.service: sql-database -ms.subservice: service-overview -ms.custom: references_regions -ms.devlang: -ms.topic: conceptual -author: dimitri-furman -ms.author: dfurman -ms.reviewer: kendralittle, mathoma -ms.date: 03/29/2022 ---- -# DTU benchmark -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -A database transaction unit (DTU) is a unit of measure representing a blended measure of CPU, memory, reads, and writes. Physical characteristics (CPU, memory, IO) associated with each DTU measure are calibrated using a benchmark that simulates real-world database workload. This article summarizes the DTU benchmark and shares information about the schema, transaction types used, workload mix, users and pacing, scaling rules, and metrics associated with the benchmark. - -For general information about the DTU-based purchasing model, see the [DTU-based purchasing model overview](service-tiers-dtu.md). - -## Benchmark summary - -The DTU benchmark measures the performance of a mix of basic database operations that occur most frequently in online transaction processing (OLTP) workloads. Although the benchmark is designed with cloud computing in mind, the database schema, data population, and transactions have been designed to be broadly representative of the basic elements most commonly used in OLTP workloads. - -## Correlating benchmark results to real world database performance - -It's important to understand that all benchmarks are representative and indicative only. The transaction rates achieved with the benchmark application will not be the same as those that might be achieved with other applications. The benchmark comprises a collection of different transaction types run against a schema containing a range of tables and data types. While the benchmark exercises the same basic operations that are common to all OLTP workloads, it doesn't represent any specific class of database or application. 
The goal of the benchmark is to provide a reasonable guide to the relative performance of a database that might be expected when scaling up or down between compute sizes. - -In reality, databases are of different sizes and complexity, encounter different mixes of workloads, and will respond in different ways. For example, an IO-intensive application may hit IO thresholds sooner, or a CPU-intensive application may hit CPU limits sooner. There is no guarantee that any particular database will scale in the same way as the benchmark under increasing load. - -The benchmark and its methodology are described in more detail in this article. - -## Schema - -The schema is designed to have enough variety and complexity to support a broad range of operations. The benchmark runs against a database comprised of six tables. The tables fall into three categories: fixed-size, scaling, and growing. There are two fixed-size tables; three scaling tables; and one growing table. Fixed-size tables have a constant number of rows. Scaling tables have a cardinality that is proportional to database performance, but doesn’t change during the benchmark. The growing table is sized like a scaling table on initial load, but then the cardinality changes in the course of running the benchmark as rows are inserted and deleted. - -The schema includes a mix of data types, including integer, numeric, character, and date/time. The schema includes primary and secondary keys, but not any foreign keys - that is, there are no referential integrity constraints between tables. - -A data generation program generates the data for the initial database. Integer and numeric data is generated with various strategies. In some cases, values are distributed randomly over a range. In other cases, a set of values is randomly permuted to ensure that a specific distribution is maintained. Text fields are generated from a weighted list of words to produce realistic looking data. 
- -The database is sized based on a “scale factor.” The scale factor (abbreviated as SF) determines the cardinality of the scaling and growing tables. As described below in the section Users and Pacing, the database size, number of users, and maximum performance all scale in proportion to each other. - -## Transactions - -The workload consists of nine transaction types, as shown in the table below. Each transaction is designed to highlight a particular set of system characteristics in the database engine and system hardware, with high contrast from the other transactions. This approach makes it easier to assess the impact of different components to overall performance. For example, the transaction “Read Heavy” produces a significant number of read operations from disk. - -| Transaction Type | Description | -| --- | --- | -| Read Lite |SELECT; in-memory; read-only | -| Read Medium |SELECT; mostly in-memory; read-only | -| Read Heavy |SELECT; mostly not in-memory; read-only | -| Update Lite |UPDATE; in-memory; read-write | -| Update Heavy |UPDATE; mostly not in-memory; read-write | -| Insert Lite |INSERT; in-memory; read-write | -| Insert Heavy |INSERT; mostly not in-memory; read-write | -| Delete |DELETE; mix of in-memory and not in-memory; read-write | -| CPU Heavy |SELECT; in-memory; relatively heavy CPU load; read-only | - -## Workload mix - -Transactions are selected at random from a weighted distribution with the following overall mix. The overall mix has a read/write ratio of approximately 2:1. - -| Transaction Type | % of Mix | -| --- | --- | -| Read Lite |35 | -| Read Medium |20 | -| Read Heavy |5 | -| Update Lite |20 | -| Update Heavy |3 | -| Insert Lite |3 | -| Insert Heavy |2 | -| Delete |2 | -| CPU Heavy |10 | - -## Users and pacing - -The benchmark workload is driven from a tool that submits transactions across a set of connections to simulate the behavior of a number of concurrent users. 
Although all of the connections and transactions are machine generated, for simplicity we refer to these connections as “users.” Although each user operates independently of all other users, all users perform the same cycle of steps shown below: - -1. Establish a database connection. -2. Repeat until signaled to exit: - - Select a transaction at random (from a weighted distribution). - - Perform the selected transaction and measure the response time. - - Wait for a pacing delay. -3. Close the database connection. -4. Exit. - -The pacing delay (in step 2c) is selected at random, but with a distribution that has an average of 1.0 second. Thus each user can, on average, generate at most one transaction per second. - -## Scaling rules - -The number of users is determined by the database size (in scale-factor units). There is one user for every five scale-factor units. Because of the pacing delay, one user can generate at most one transaction per second, on average. - -For example, a scale-factor of 500 (SF=500) database will have 100 users and can achieve a maximum rate of 100 TPS. To drive a higher TPS rate requires more users and a larger database. - -## Measurement duration - -A valid benchmark run requires a steady-state measurement duration of at least one hour. - -## Metrics - -The key metrics in the benchmark are throughput and response time. - -- Throughput is the essential performance measure in the benchmark. Throughput is reported in transactions per unit-of-time, counting all transaction types. -- Response time is a measure of performance predictability. The response time constraint varies with class of service, with higher classes of service having a more stringent response time requirement, as shown below. 
- -| Class of Service | Throughput Measure | Response Time Requirement | -| --- | --- | --- | -| [Premium](service-tiers-dtu.md#compare-service-tiers) |Transactions per second |95th percentile at 0.5 seconds | -| [Standard](service-tiers-dtu.md#compare-service-tiers) |Transactions per minute |90th percentile at 1.0 seconds | -| [Basic](service-tiers-dtu.md#compare-service-tiers) |Transactions per hour |80th percentile at 2.0 seconds | - -> [!NOTE] -> Response time metrics are specific to the [DTU Benchmark](#dtu-benchmark). Response times for other workloads are workload-dependent and will differ. - -## Next steps - -Learn more about purchasing models and related concepts in the following articles: - -- [DTU-based purchasing model overview](service-tiers-dtu.md) -- [vCore purchasing model - Azure SQL Database](service-tiers-sql-database-vcore.md) -- [Compare vCore and DTU-based purchasing models of Azure SQL Database](purchasing-models.md) -- [Migrate Azure SQL Database from the DTU-based model to the vCore-based model](migrate-dtu-to-vcore.md) -- [Resource limits for single databases using the DTU purchasing model - Azure SQL Database](resource-limits-dtu-single-databases.md) -- [Resources limits for elastic pools using the DTU purchasing model](resource-limits-dtu-elastic-pools.md) \ No newline at end of file diff --git a/articles/azure-sql/database/dynamic-data-masking-configure-portal.md b/articles/azure-sql/database/dynamic-data-masking-configure-portal.md deleted file mode 100644 index 84e933afb90c0..0000000000000 --- a/articles/azure-sql/database/dynamic-data-masking-configure-portal.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: "Azure portal: Dynamic data masking" -description: How to get started with Azure SQL Database dynamic data masking in the Azure portal -services: sql-database -ms.service: sql-database -ms.subservice: security -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: Madhumitatripathy -ms.author: matripathy -ms.reviewer: 
kendralittle, vanto, mathoma -ms.date: 04/05/2022 ---- -# Get started with SQL Database dynamic data masking with the Azure portal -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article shows you how to implement [dynamic data masking](dynamic-data-masking-overview.md) with the Azure portal. You can also implement dynamic data masking using [Azure SQL Database cmdlets](/powershell/module/az.sql/) or the [REST API](/rest/api/sql/). - -> [!NOTE] -> This feature cannot be set using portal for SQL Managed Instance (use PowerShell or REST API). For more information, see [Dynamic Data Masking](/sql/relational-databases/security/dynamic-data-masking). - -## Enable dynamic data masking - -1. Launch the Azure portal at [https://portal.azure.com](https://portal.azure.com). -2. Go to your database resource in the Azure portal. -3. Select the **Dynamic Data Masking** blade under the **Security** section. - - ![Screenshot that shows the Security section with Dynamic Data Masking highlighted.](./media/dynamic-data-masking-configure-portal/dynamic-data-masking-in-portal.png) - -4. In the **Dynamic Data Masking** configuration page, you may see some database columns that the recommendations engine has flagged for masking. In order to accept the recommendations, just click **Add Mask** for one or more columns and a mask is created based on the default type for this column. You can change the masking function by clicking on the masking rule and editing the masking field format to a different format of your choice. Be sure to click **Save** to save your settings. - - ![Screenshot that shows the Dynamic Data Masking configuration page.](./media/dynamic-data-masking-configure-portal/5_ddm_recommendations.png) - -5. To add a mask for any column in your database, at the top of the **Dynamic Data Masking** configuration page, click **Add Mask** to open the **Add Masking Rule** configuration page. 
- - ![Screenshot that shows the Add Masking Rule configuration page.](./media/dynamic-data-masking-configure-portal/6_ddm_add_mask.png) - -6. Select the **Schema**, **Table** and **Column** to define the designated field for masking. -7. **Select how to mask** from the list of sensitive data masking categories. - - ![Screenshot that shows the sensitive data masking categories under the Select how to mask section.](./media/dynamic-data-masking-configure-portal/7_ddm_mask_field_format.png) - -8. Click **Add** in the data masking rule page to update the set of masking rules in the dynamic data masking policy. -9. Type the SQL users or Azure Active Directory (Azure AD) identities that should be excluded from masking, and have access to the unmasked sensitive data. This should be a semicolon-separated list of users. Users with administrator privileges always have access to the original unmasked data. - - ![Navigation pane](./media/dynamic-data-masking-configure-portal/8_ddm_excluded_users.png) - - > [!TIP] - > To make it so the application layer can display sensitive data for application privileged users, add the SQL user or Azure AD identity the application uses to query the database. It is highly recommended that this list contain a minimal number of privileged users to minimize exposure of the sensitive data. - -10. Click **Save** in the data masking configuration page to save the new or updated masking policy. - -## Next steps - -- For an overview of dynamic data masking, see [dynamic data masking](dynamic-data-masking-overview.md). -- You can also implement dynamic data masking using [Azure SQL Database cmdlets](/powershell/module/az.sql/) or the [REST API](/rest/api/sql/). 
diff --git a/articles/azure-sql/database/dynamic-data-masking-overview.md b/articles/azure-sql/database/dynamic-data-masking-overview.md deleted file mode 100644 index 3349b20bb45b5..0000000000000 --- a/articles/azure-sql/database/dynamic-data-masking-overview.md +++ /dev/null @@ -1,265 +0,0 @@ ---- -title: Dynamic data masking -description: Dynamic data masking limits sensitive data exposure by masking it to non-privileged users for Azure SQL Database, Azure SQL Managed Instance and Azure Synapse Analytics -services: sql-database -ms.service: sql-db-mi -ms.subservice: security -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: Madhumitatripathy -ms.author: matripathy -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 04/05/2022 -tags: azure-synpase ---- -# Dynamic data masking -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics support dynamic data masking. Dynamic data masking limits sensitive data exposure by masking it to non-privileged users. - -Dynamic data masking helps prevent unauthorized access to sensitive data by enabling customers to designate how much of the sensitive data to reveal with minimal impact on the application layer. It’s a policy-based security feature that hides the sensitive data in the result set of a query over designated database fields, while the data in the database is not changed. - -For example, a service representative at a call center might identify a caller by confirming several characters of their email address, but the complete email address shouldn't be revealed to the service representative. A masking rule can be defined that masks all the email address in the result set of any query. As another example, an appropriate data mask can be defined to protect personal data, so that a developer can query production environments for troubleshooting purposes without violating compliance regulations. 
- -## Dynamic data masking basics - -You set up a dynamic data masking policy in the Azure portal by selecting the **Dynamic Data Masking** blade under **Security** in your SQL Database configuration pane. This feature cannot be set using portal for SQL Managed Instance. For more information, see [Dynamic Data Masking](/sql/relational-databases/security/dynamic-data-masking). - -### Dynamic data masking policy - -* **SQL users excluded from masking** - A set of SQL users or Azure AD identities that get unmasked data in the SQL query results. Users with administrator privileges are always excluded from masking, and see the original data without any mask. -* **Masking rules** - A set of rules that define the designated fields to be masked and the masking function that is used. The designated fields can be defined using a database schema name, table name, and column name. -* **Masking functions** - A set of methods that control the exposure of data for different scenarios. - -| Masking function | Masking logic | -| --- | --- | -| **Default** |**Full masking according to the data types of the designated fields**

    • Use XXXX or fewer Xs if the size of the field is less than 4 characters for string data types (nchar, ntext, nvarchar).
    • Use a zero value for numeric data types (bigint, bit, decimal, int, money, numeric, smallint, smallmoney, tinyint, float, real).
    • Use 01-01-1900 for date/time data types (date, datetime2, datetime, datetimeoffset, smalldatetime, time).
    • For SQL variant, the default value of the current type is used.
    • For XML the document \ is used.
    • Use an empty value for special data types (timestamp table, hierarchyid, GUID, binary, image, varbinary spatial types). | -| **Credit card** |**Masking method, which exposes the last four digits of the designated fields** and adds a constant string as a prefix in the form of a credit card.

    XXXX-XXXX-XXXX-1234 | -| **Email** |**Masking method, which exposes the first letter and replaces the domain with XXX.com** using a constant string prefix in the form of an email address.

    aXX@XXXX.com | -| **Random number** |**Masking method, which generates a random number** according to the selected boundaries and actual data types. If the designated boundaries are equal, then the masking function is a constant number.

    ![Screenshot that shows the masking method for generating a random number.](./media/dynamic-data-masking-overview/1_DDM_Random_number.png) | -| **Custom text** |**Masking method, which exposes the first and last characters** and adds a custom padding string in the middle. If the original string is shorter than the exposed prefix and suffix, only the padding string is used.
    prefix[padding]suffix

    ![Navigation pane](./media/dynamic-data-masking-overview/2_DDM_Custom_text.png) | - - - -### Recommended fields to mask - -The DDM recommendations engine, flags certain fields from your database as potentially sensitive fields, which may be good candidates for masking. In the Dynamic Data Masking blade in the portal, you will see the recommended columns for your database. All you need to do is click **Add Mask** for one or more columns and then **Save** to apply a mask for these fields. - -## Manage dynamic data masking using T-SQL - -- To create a dynamic data mask, see [Creating a Dynamic Data Mask](/sql/relational-databases/security/dynamic-data-masking#creating-a-dynamic-data-mask). -- To add or edit a mask on an existing column, see [Adding or Editing a Mask on an Existing Column](/sql/relational-databases/security/dynamic-data-masking#adding-or-editing-a-mask-on-an-existing-column). -- To grant permissions to view unmasked data, see [Granting Permissions to View Unmasked Data](/sql/relational-databases/security/dynamic-data-masking#granting-permissions-to-view-unmasked-data). -- To drop a dynamic data mask, see [Dropping a Dynamic Data Mask](/sql/relational-databases/security/dynamic-data-masking#dropping-a-dynamic-data-mask). 
- -## Set up dynamic data masking for your database using PowerShell cmdlets - -### Data masking policies - -- [Get-AzSqlDatabaseDataMaskingPolicy](/powershell/module/az.sql/Get-AzSqlDatabaseDataMaskingPolicy) -- [Set-AzSqlDatabaseDataMaskingPolicy](/powershell/module/az.sql/Set-AzSqlDatabaseDataMaskingPolicy) - -### Data masking rules - -- [Get-AzSqlDatabaseDataMaskingRule](/powershell/module/az.sql/Get-AzSqlDatabaseDataMaskingRule) -- [New-AzSqlDatabaseDataMaskingRule](/powershell/module/az.sql/New-AzSqlDatabaseDataMaskingRule) -- [Remove-AzSqlDatabaseDataMaskingRule](/powershell/module/az.sql/Remove-AzSqlDatabaseDataMaskingRule) -- [Set-AzSqlDatabaseDataMaskingRule](/powershell/module/az.sql/Set-AzSqlDatabaseDataMaskingRule) - -## Set up dynamic data masking for your database using the REST API - -You can use the REST API to programmatically manage data masking policy and rules. The published REST API supports the following operations: - -### Data masking policies - -- [Create Or Update](/rest/api/sql/2014-04-01/datamaskingpolicies/createorupdate): Creates or updates a database data masking policy. -- [Get](/rest/api/sql/2014-04-01/datamaskingpolicies/get): Gets a database data masking policy. - -### Data masking rules - -- [Create Or Update](/rest/api/sql/2014-04-01/datamaskingrules/createorupdate): Creates or updates a database data masking rule. -- [List By Database](/rest/api/sql/2014-04-01/datamaskingrules/listbydatabase): Gets a list of database data masking rules. 
- -## Permissions - -These are the built-in roles to configure dynamic data masking is: -- [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) -- [SQL DB Contributor](../../role-based-access-control/built-in-roles.md#sql-db-contributor) -- [SQL Server Contributor](../../role-based-access-control/built-in-roles.md#sql-server-contributor) - -These are the required actions to use dynamic data masking: - -Read/Write: -- Microsoft.Sql/servers/databases/dataMaskingPolicies/* -Read: -- Microsoft.Sql/servers/databases/dataMaskingPolicies/read -Write: -- Microsoft.Sql/servers/databases/dataMaskingPolicies/write - -To learn more about permissions when using dynamic data masking with T-SQL command, see [Permissions](/sql/relational-databases/security/dynamic-data-masking#permissions) - -## Granular permission example - -Prevent unauthorized access to sensitive data and gain control by masking it to an unauthorized user at different levels of the database. You can grant or revoke UNMASK permission at the database-level, schema-level, table-level or at the column-level to a user. Using UNMASK permission provides a more granular way to control and limit unauthorized access to data stored in the database and improve data security management. - -1. Create schema to contain user tables - - ```sql - CREATE SCHEMA Data; - GO - ``` - -1. Create table with masked columns - - ```sql - CREATE TABLE Data.Membership ( - MemberID int IDENTITY(1,1) NOT NULL PRIMARY KEY CLUSTERED, - FirstName varchar(100) MASKED WITH (FUNCTION = 'partial(1, "xxxxx", 1)') NULL, - LastName varchar(100) NOT NULL, - Phone varchar(12) MASKED WITH (FUNCTION = 'default()') NULL, - Email varchar(100) MASKED WITH (FUNCTION = 'email()') NOT NULL, - DiscountCode smallint MASKED WITH (FUNCTION = 'random(1, 100)') NULL, - BirthDay datetime MASKED WITH (FUNCTION = 'default()') NULL - ); - ``` - -1. 
Insert sample data - - ```sql - INSERT INTO Data.Membership (FirstName, LastName, Phone, Email, DiscountCode, BirthDay) - VALUES - ('Roberto', 'Tamburello', '555.123.4567', 'RTamburello@contoso.com', 10, '1985-01-25 03:25:05'), - ('Janice', 'Galvin', '555.123.4568', 'JGalvin@contoso.com.co', 5,'1990-05-14 11:30:00'), - ('Shakti', 'Menon', '555.123.4570', 'SMenon@contoso.net', 50,'2004-02-29 14:20:10'), - ('Zheng', 'Mu', '555.123.4569', 'ZMu@contoso.net', 40,'1990-03-01 06:00:00'); - ``` - -1. Create schema to contain service tables - - ```sql - CREATE SCHEMA Service; - GO - ``` - -1. Create service table with masked columns - - ```sql - CREATE TABLE Service.Feedback ( - MemberID int IDENTITY(1,1) NOT NULL PRIMARY KEY CLUSTERED, - Feedback varchar(100) MASKED WITH (FUNCTION = 'default()') NULL, - Rating int MASKED WITH (FUNCTION='default()'), - Received_On datetime) - ); - ``` - -1. Insert sample data - - ```sql - INSERT INTO Service.Feedback(Feedback,Rating,Received_On) - VALUES - ('Good',4,'2022-01-25 11:25:05'), - ('Excellent', 5, '2021-12-22 08:10:07'), - ('Average', 3, '2021-09-15 09:00:00'); - ``` - -1. Create different users in the database - - ```sql - CREATE USER ServiceAttendant WITHOUT LOGIN; - GO - - CREATE USER ServiceLead WITHOUT LOGIN; - GO - - CREATE USER ServiceManager WITHOUT LOGIN; - GO - - CREATE USER ServiceHead WITHOUT LOGIN; - GO - ``` - -1. Grant read permissions to the users in the database - - ```sql - ALTER ROLE db_datareader ADD MEMBER ServiceAttendant; - - ALTER ROLE db_datareader ADD MEMBER ServiceLead; - - ALTER ROLE db_datareader ADD MEMBER ServiceManager; - - ALTER ROLE db_datareader ADD MEMBER ServiceHead; - ``` - -1. 
Grant different UNMASK permissions to users - - ```sql - --Grant column level UNMASK permission to ServiceAttendant - GRANT UNMASK ON Data.Membership(FirstName) TO ServiceAttendant; - - -- Grant table level UNMASK permission to ServiceLead - GRANT UNMASK ON Data.Membership TO ServiceLead; - - -- Grant schema level UNMASK permission to ServiceManager - GRANT UNMASK ON SCHEMA::Data TO ServiceManager; - GRANT UNMASK ON SCHEMA::Service TO ServiceManager; - - --Grant database level UNMASK permission to ServiceHead; - GRANT UNMASK TO ServiceHead; - ``` - -1. Query the data under the context of user `ServiceAttendant` - - ```sql - EXECUTE AS USER='ServiceAttendant'; - SELECT MemberID,FirstName,LastName,Phone,Email,BirthDay FROM Data. Membership; - SELECT MemberID,Feedback,Rating FROM Service.Feedback; - REVERT; - ``` - -1. Query the data under the context of user `ServiceLead` - - ```sql - EXECUTE AS USER='ServiceLead'; - SELECT MemberID,FirstName,LastName,Phone,Email,BirthDay FROM Data. Membership; - SELECT MemberID,Feedback,Rating FROM Service.Feedback; - REVERT; - ``` - -1. Query the data under the context of user `ServiceManager` - - ```sql - EXECUTE AS USER='ServiceManager'; - SELECT MemberID,FirstName,LastName,Phone,Email FROM Data.Membership; - SELECT MemberID,Feedback,Rating FROM Service.Feedback; - REVERT; - ``` - -1. Query the data under the context of user `ServiceHead` - - ```sql - EXECUTE AS USER='ServiceHead'; - SELECT MemberID,FirstName,LastName,Phone,Email,BirthDay FROM Data.Membership; - SELECT MemberID,Feedback,Rating FROM Service.Feedback; - REVERT; - ``` - - -1. 
To revoke UNMASK permissions, use the following T-SQL statements: - - ```sql - REVOKE UNMASK ON Data.Membership(FirstName) FROM ServiceAttendant; - - REVOKE UNMASK ON Data.Membership FROM ServiceLead; - - REVOKE UNMASK ON SCHEMA::Data FROM ServiceManager; - - REVOKE UNMASK ON SCHEMA::Service FROM ServiceManager; - - REVOKE UNMASK FROM ServiceHead; - ``` - -## See also - -- [Dynamic Data Masking](/sql/relational-databases/security/dynamic-data-masking) for SQL Server. -- Data Exposed episode about [Granular Permissions for Azure SQL Dynamic Data Masking](/Shows/Data-Exposed/Granular-Permissions-for-Azure-SQL-Dynamic-Data-Masking) on Channel 9. diff --git a/articles/azure-sql/database/elastic-convert-to-use-elastic-tools.md b/articles/azure-sql/database/elastic-convert-to-use-elastic-tools.md deleted file mode 100644 index f85f32ef099f1..0000000000000 --- a/articles/azure-sql/database/elastic-convert-to-use-elastic-tools.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -title: Migrate existing databases to scale out -description: Convert sharded databases to use Elastic Database tools by creating a shard map manager -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 01/25/2019 - ---- -# Migrate existing databases to scale out -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Easily manage your existing scaled-out sharded databases using tools (such as the [Elastic Database client library](elastic-database-client-library.md)). First convert an existing set of databases to use the [shard map manager](elastic-scale-shard-map-management.md). - -## Overview - -To migrate an existing sharded database: - -1. Prepare the [shard map manager database](elastic-scale-shard-map-management.md). -2. Create the shard map. -3. Prepare the individual shards. -4. Add mappings to the shard map. 
- -These techniques can be implemented using either the [.NET Framework client library](https://www.nuget.org/packages/Microsoft.Azure.SqlDatabase.ElasticScale.Client/), or the PowerShell scripts found at [Azure SQL Database - Elastic Database tools scripts](https://github.com/Azure/elastic-db-tools/tree/master/Samples/PowerShell). The examples here use the PowerShell scripts. - -For more information about the ShardMapManager, see [Shard map management](elastic-scale-shard-map-management.md). For an overview of the Elastic Database tools, see [Elastic Database features overview](elastic-scale-introduction.md). - -## Prepare the shard map manager database - -The shard map manager is a special database that contains the data to manage scaled-out databases. You can use an existing database, or create a new database. A database acting as shard map manager should not be the same database as a shard. The PowerShell script does not create the database for you. - -## Step 1: Create a shard map manager - -```powershell -# Create a shard map manager -New-ShardMapManager -UserName '' -Password '' -SqlServerName '' -SqlDatabaseName '' -# and are the server name and database name -# for the new or existing database that should be used for storing -# tenant-database mapping information. -``` - -### To retrieve the shard map manager - -After creation, you can retrieve the shard map manager with this cmdlet. This step is needed every time you need to use the ShardMapManager object. - -```powershell -# Try to get a reference to the Shard Map Manager -$ShardMapManager = Get-ShardMapManager -UserName '' -Password '' -SqlServerName '' -SqlDatabaseName '' -``` - -## Step 2: Create the shard map - -Select the type of shard map to create. The choice depends on the database architecture: - -1. Single tenant per database (For terms, see the [glossary](elastic-scale-glossary.md).) -2. Multiple tenants per database (two types): - 1. List mapping - 2. 
Range mapping - -For a single-tenant model, create a **list mapping** shard map. The single-tenant model assigns one database per tenant. This is an effective model for SaaS developers as it simplifies management. - -![List mapping][1] - -The multi-tenant model assigns several tenants to an individual database (and you can distribute groups of tenants across multiple databases). Use this model when you expect each tenant to have small data needs. In this model, assign a range of tenants to a database using **range mapping**. - -![Range mapping][2] - -Or you can implement a multi-tenant database model using a *list mapping* to assign multiple tenants to an individual database. For example, DB1 is used to store information about tenant ID 1 and 5, and DB2 stores data for tenant 7 and tenant 10. - -![Multiple tenants on single DB][3] - -**Based on your choice, choose one of these options:** - -### Option 1: Create a shard map for a list mapping - -Create a shard map using the ShardMapManager object. - -```powershell -# $ShardMapManager is the shard map manager object -$ShardMap = New-ListShardMap -KeyType $([int]) -ListShardMapName 'ListShardMap' -ShardMapManager $ShardMapManager -``` - -### Option 2: Create a shard map for a range mapping - -To utilize this mapping pattern, tenant ID values need to be continuous ranges, and it is acceptable to have gaps in the ranges by skipping the range when creating the databases. - -```powershell -# $ShardMapManager is the shard map manager object -# 'RangeShardMap' is the unique identifier for the range shard map. -$ShardMap = New-RangeShardMap -KeyType $([int]) -RangeShardMapName 'RangeShardMap' -ShardMapManager $ShardMapManager -``` - -### Option 3: List mappings on an individual database - -Setting up this pattern also requires creation of a list map as shown in step 2, option 1. - -## Step 3: Prepare individual shards - -Add each shard (database) to the shard map manager. 
This prepares the individual databases for storing mapping information. Execute this method on each shard. - -```powershell -Add-Shard -ShardMap $ShardMap -SqlServerName '' -SqlDatabaseName '' -# The $ShardMap is the shard map created in step 2. -``` - -## Step 4: Add mappings - -The addition of mappings depends on the kind of shard map you created. If you created a list map, you add list mappings. If you created a range map, you add range mappings. - -### Option 1: Map the data for a list mapping - -Map the data by adding a list mapping for each tenant. - -```powershell -# Create the mappings and associate it with the new shards -Add-ListMapping -KeyType $([int]) -ListPoint '' -ListShardMap $ShardMap -SqlServerName '' -SqlDatabaseName '' -``` - -### Option 2: Map the data for a range mapping - -Add the range mappings for all the tenant ID range - database associations: - -```powershell -# Create the mappings and associate it with the new shards -Add-RangeMapping -KeyType $([int]) -RangeHigh '5' -RangeLow '1' -RangeShardMap $ShardMap -SqlServerName '' -SqlDatabaseName '' -``` - -### Step 4 option 3: Map the data for multiple tenants on an individual database - -For each tenant, run the Add-ListMapping (option 1). - -## Checking the mappings - -Information about the existing shards and the mappings associated with them can be queried using following commands: - -```powershell -# List the shards and mappings -Get-Shards -ShardMap $ShardMap -Get-Mappings -ShardMap $ShardMap -``` - -## Summary - -Once you have completed the setup, you can begin to use the Elastic Database client library. You can also use [data-dependent routing](elastic-scale-data-dependent-routing.md) and [multi-shard query](elastic-scale-multishard-querying.md). - -## Next steps - -Get the PowerShell scripts from [Azure Elastic Database tools scripts](https://github.com/Azure/elastic-db-tools/tree/master/Samples/PowerShell). 
- -The Elastic database tools client library is available on GitHub: [Azure/elastic-db-tools](https://github.com/Azure/elastic-db-tools). - -Use the split-merge tool to move data to or from a multi-tenant model to a single tenant model. See [Split merge tool](elastic-scale-configure-deploy-split-and-merge.md). - -## Additional resources - -For information on common data architecture patterns of multi-tenant software-as-a-service (SaaS) database applications, see [Design Patterns for Multi-tenant SaaS Applications with Azure SQL Database](saas-tenancy-app-design-patterns.md). - -## Questions and feature requests - -For questions, use the [Microsoft Q&A question page for SQL Database](/answers/topics/azure-sql-database.html) and for feature requests, add them to the [SQL Database feedback forum](https://feedback.azure.com/d365community/forum/04fe6ee0-3b25-ec11-b6e6-000d3a4f0da0). - - -[1]: ./media/elastic-convert-to-use-elastic-tools/listmapping.png -[2]: ./media/elastic-convert-to-use-elastic-tools/rangemapping.png -[3]: ./media/elastic-convert-to-use-elastic-tools/multipleonsingledb.png diff --git a/articles/azure-sql/database/elastic-database-client-library.md b/articles/azure-sql/database/elastic-database-client-library.md deleted file mode 100644 index 245d5b381c7fe..0000000000000 --- a/articles/azure-sql/database/elastic-database-client-library.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: Building scalable cloud databases -description: Build scalable .NET database apps with the Elastic Database client library. -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 09/25/2018 ---- -# Building scalable cloud databases -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Scaling out databases can be easily accomplished using scalable tools and features for Azure SQL Database. 
In particular, you can use the **Elastic Database client library** to create and manage scaled-out databases. This feature lets you easily develop sharded applications using hundreds—or even thousands—of databases in Azure SQL Database. - -To download: - -* The Java version of the library, see [Maven Central Repository](https://search.maven.org/#search%7Cga%7C1%7Celastic-db-tools). -* The .NET version of the library, see [NuGet](https://www.nuget.org/packages/Microsoft.Azure.SqlDatabase.ElasticScale.Client/). - -## Documentation - -1. [Get started with Elastic Database tools](elastic-scale-get-started.md) -2. [Elastic Database features](elastic-scale-introduction.md) -3. [Shard map management](elastic-scale-shard-map-management.md) -4. [Migrate existing databases to scale out](elastic-convert-to-use-elastic-tools.md) -5. [Data dependent routing](elastic-scale-data-dependent-routing.md) -6. [Multi-shard queries](elastic-scale-multishard-querying.md) -7. [Adding a shard using Elastic Database tools](elastic-scale-add-a-shard.md) -8. [Multi-tenant applications with Elastic Database tools and row-level security](saas-tenancy-elastic-tools-multi-tenant-row-level-security.md) -9. [Upgrade client library apps](elastic-scale-upgrade-client-library.md) -10. [Elastic queries overview](elastic-query-overview.md) -11. [Elastic Database tools glossary](elastic-scale-glossary.md) -12. [Elastic Database client library with Entity Framework](elastic-scale-use-entity-framework-applications-visual-studio.md) -13. [Elastic Database client library with Dapper](elastic-scale-working-with-dapper.md) -14. [Split-merge tool](elastic-scale-overview-split-and-merge.md) -15. [Performance counters for shard map manager](elastic-database-client-library.md) -16. [FAQ for Elastic Database tools](elastic-scale-faq.yml) - -## Client capabilities - -Scaling out applications using *sharding* presents challenges for both the developer as well as the administrator. 
The client library simplifies the management tasks by providing tools that let both developers and administrators manage scaled-out databases. In a typical example, there are many databases, known as "shards," to manage. Customers are co-located in the same database, and there is one database per customer (a single-tenant scheme). The client library includes these features: - -- **Shard map management**: A special database called the "shard map manager" is created. Shard map management is the ability for an application to manage metadata about its shards. Developers can use this functionality to register databases as shards, describe mappings of individual sharding keys or key ranges to those databases, and maintain this metadata as the number and composition of databases evolves to reflect capacity changes. Without the Elastic Database client library, you would need to spend a lot of time writing the management code when implementing sharding. For details, see [Shard map management](elastic-scale-shard-map-management.md). - -- **Data dependent routing**: Imagine a request coming into the application. Based on the sharding key value of the request, the application needs to determine the correct database based on the key value. It then opens a connection to the database to process the request. Data dependent routing provides the ability to open connections with a single easy call into the shard map of the application. Data dependent routing was another area of infrastructure code that is now covered by functionality in the Elastic Database client library. For details, see [Data dependent routing](elastic-scale-data-dependent-routing.md). -- **Multi-shard queries (MSQ)**: Multi-shard querying works when a request involves several (or all) shards. A multi-shard query executes the same T-SQL code on all shards or a set of shards. The results from the participating shards are merged into an overall result set using UNION ALL semantics. 
The functionality as exposed through the client library handles many tasks, including: connection management, thread management, fault handling, and intermediate results processing. MSQ can query up to hundreds of shards. For details, see [Multi-shard querying](elastic-scale-multishard-querying.md). - -In general, customers using Elastic Database tools can expect to get full T-SQL functionality when submitting shard-local operations as opposed to cross-shard operations that have their own semantics. - - - -## Next steps - -- Elastic Database client library ([Java](https://search.maven.org/#search%7Cga%7C1%7Ca%3A%22azure-elasticdb-tools%22), [.NET](https://www.nuget.org/packages/Microsoft.Azure.SqlDatabase.ElasticScale.Client/)) - to **download** the library. - -- [Get started with Elastic Database tools](elastic-scale-get-started.md) - to try the **sample app** that demonstrates client functions. - -- GitHub ([Java](https://github.com/Microsoft/elastic-db-tools-for-java/blob/master/README.md), [.NET](https://github.com/Azure/elastic-db-tools)) - to make contributions to the code. -- [Azure SQL Database elastic query overview](elastic-query-overview.md) - to use elastic queries. - -- [Moving data between scaled-out cloud databases](elastic-scale-overview-split-and-merge.md) - for instructions on using the **split-merge tool**. 
- - - - - -[!INCLUDE [elastic-scale-include](../../../includes/elastic-scale-include.md)] - - - - - -[1]: ./media/sql-database-elastic-database-client-library/glossary.png - diff --git a/articles/azure-sql/database/elastic-database-perf-counters.md b/articles/azure-sql/database/elastic-database-perf-counters.md deleted file mode 100644 index 9c6a41e4858f2..0000000000000 --- a/articles/azure-sql/database/elastic-database-perf-counters.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Performance counters to track shard map manager -description: ShardMapManager class and data dependent routing performance counters -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: seoapril2019, seo-lt-2019, sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 02/07/2019 ---- -# Create performance counters to track performance of shard map manager -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Performance counters are used to track the performance of [data dependent routing](elastic-scale-data-dependent-routing.md) operations. These counters are accessible in the Performance Monitor, under the "Elastic Database: Shard Management" category. - -You can capture the performance of a [shard map manager](elastic-scale-shard-map-management.md), especially when using [data dependent routing](elastic-scale-data-dependent-routing.md). Counters are created with methods of the Microsoft.Azure.SqlDatabase.ElasticScale.Client class. - - -**For the latest version:** Go to [Microsoft.Azure.SqlDatabase.ElasticScale.Client](https://www.nuget.org/packages/Microsoft.Azure.SqlDatabase.ElasticScale.Client/). See also [Upgrade an app to use the latest elastic database client library](elastic-scale-upgrade-client-library.md). 
- -## Prerequisites - -* To create the performance category and counters, the user must be a part of the local **Administrators** group on the machine hosting the application. -* To create a performance counter instance and update the counters, the user must be a member of either the **Administrators** or **Performance Monitor Users** group. - -## Create performance category and counters - -To create the counters, call the CreatePerformanceCategoryAndCounters method of the [ShardMapManagementFactory class](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanagerfactory). Only an administrator can execute the method: - -`ShardMapManagerFactory.CreatePerformanceCategoryAndCounters()` - -You can also use [this](https://gallery.technet.microsoft.com/scriptcenter/Elastic-DB-Tools-for-Azure-17e3d283) PowerShell script to execute the method. -The method creates the following performance counters: - -* **Cached mappings**: Number of mappings cached for the shard map. -* **DDR operations/sec**: Rate of data dependent routing operations for the shard map. This counter is updated when a call to [OpenConnectionForKey()](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmap.openconnectionforkey) results in a successful connection to the destination shard. -* **Mapping lookup cache hits/sec**: Rate of successful cache lookup operations for mappings in the shard map. -* **Mapping lookup cache misses/sec**: Rate of failed cache lookup operations for mappings in the shard map. -* **Mappings added or updated in cache/sec**: Rate at which mappings are being added or updated in cache for the shard map. -* **Mappings removed from cache/sec**: Rate at which mappings are being removed from cache for the shard map. - -Performance counters are created for each cached shard map per process. 
- -## Notes - -The following events trigger the creation of the performance counters: - -* Initialization of the [ShardMapManager](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanager) with [eager loading](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanagerloadpolicy), if the ShardMapManager contains any shard maps. These include the [GetSqlShardMapManager](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanagerfactory.getsqlshardmapmanager) and the [TryGetSqlShardMapManager](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanagerfactory.trygetsqlshardmapmanager) methods. -* Successful lookup of a shard map (using [GetShardMap()](/previous-versions/azure/dn824215(v=azure.100)), [GetListShardMap()](/previous-versions/azure/dn824212(v=azure.100)) or [GetRangeShardMap()](/previous-versions/azure/dn824173(v=azure.100))). -* Successful creation of shard map using CreateShardMap(). - -The performance counters will be updated by all cache operations performed on the shard map and mappings. Successful removal of the shard map using DeleteShardMap() results in deletion of the performance counters instance. - -## Best practices - -* Creation of the performance category and counters should be performed only once before the creation of ShardMapManager object. Every execution of the command CreatePerformanceCategoryAndCounters() clears the previous counters (losing data reported by all instances) and creates new ones. -* Performance counter instances are created per process. Any application crash or removal of a shard map from the cache will result in deletion of the performance counters instances. 
- -### See also - -[Elastic Database features overview](elastic-scale-introduction.md) - -[!INCLUDE [elastic-scale-include](../../../includes/elastic-scale-include.md)] - - - \ No newline at end of file diff --git a/articles/azure-sql/database/elastic-database-recovery-manager.md b/articles/azure-sql/database/elastic-database-recovery-manager.md deleted file mode 100644 index a2066386a3d9a..0000000000000 --- a/articles/azure-sql/database/elastic-database-recovery-manager.md +++ /dev/null @@ -1,165 +0,0 @@ ---- -title: Recovery Manager to fix shard map problems -description: Use the RecoveryManager class to solve problems with shard maps -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: seo-lt-2019, sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 01/03/2019 ---- -# Using the RecoveryManager class to fix shard map problems -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -The [RecoveryManager](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.recovery.recoverymanager) class provides ADO.NET applications the ability to easily detect and correct any inconsistencies between the global shard map (GSM) and the local shard map (LSM) in a sharded database environment. - -The GSM and LSM track the mapping of each database in a sharded environment. Occasionally, a break occurs between the GSM and the LSM. In that case, use the RecoveryManager class to detect and repair the break. - -The RecoveryManager class is part of the [Elastic Database client library](elastic-database-client-library.md). - -![Shard map][1] - -For term definitions, see [Elastic Database tools glossary](elastic-scale-glossary.md). To understand how the **ShardMapManager** is used to manage data in a sharded solution, see [Shard map management](elastic-scale-shard-map-management.md). 
- -## Why use the recovery manager - -In a sharded database environment, there is one tenant per database, and many databases per server. There can also be many servers in the environment. Each database is mapped in the shard map, so calls can be routed to the correct server and database. Databases are tracked according to a **sharding key**, and each shard is assigned a **range of key values**. For example, a sharding key may represent the customer names from "D" to "F." The mapping of all shards (also known as databases) and their mapping ranges are contained in the **global shard map (GSM)**. Each database also contains a map of the ranges contained on the shard that is known as the **local shard map (LSM)**. When an app connects to a shard, the mapping is cached with the app for quick retrieval. The LSM is used to validate cached data. - -The GSM and LSM may become out of sync for the following reasons: - -1. The deletion of a shard whose range is believed to no longer be in use, or renaming of a shard. Deleting a shard results in an **orphaned shard mapping**. Similarly, a renamed database can cause an orphaned shard mapping. Depending on the intent of the change, the shard may need to be removed or the shard location needs to be updated. To recover a deleted database, see [Restore a deleted database](recovery-using-backups.md). -2. A geo-failover event occurs. To continue, one must update the server name, and database name of shard map manager in the application and then update the shard-mapping details for all shards in a shard map. If there is a geo-failover, such recovery logic should be automated within the failover workflow. Automating recovery actions enables a frictionless manageability for geo-enabled databases and avoids manual human actions. 
To learn about options to recover a database if there is a data center outage, see [Business Continuity](business-continuity-high-availability-disaster-recover-hadr-overview.md) and [Disaster Recovery](disaster-recovery-guidance.md). -3. Either a shard or the ShardMapManager database is restored to an earlier point-in time. To learn about point in time recovery using backups, see [Recovery using backups](recovery-using-backups.md). - -For more information about Azure SQL Database Elastic Database tools, geo-replication and Restore, see the following: - -* [Overview: Cloud business continuity and database disaster recovery with SQL Database](business-continuity-high-availability-disaster-recover-hadr-overview.md) -* [Get started with elastic database tools](elastic-scale-get-started.md) -* [ShardMap Management](elastic-scale-shard-map-management.md) - -## Retrieving RecoveryManager from a ShardMapManager - -The first step is to create a RecoveryManager instance. The [GetRecoveryManager method](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanager.getrecoverymanager) returns the recovery manager for the current [ShardMapManager](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanager) instance. To address any inconsistencies in the shard map, you must first retrieve the RecoveryManager for the particular shard map. - - ```java - ShardMapManager smm = ShardMapManagerFactory.GetSqlShardMapManager(smmConnectionString, - ShardMapManagerLoadPolicy.Lazy); - RecoveryManager rm = smm.GetRecoveryManager(); - ``` - -In this example, the RecoveryManager is initialized from the ShardMapManager. The ShardMapManager containing a ShardMap is also already initialized. 
Since this application code manipulates the shard map itself, the credentials used in the factory method (in the preceding example, smmConnectionString) should be credentials that have read-write permissions on the GSM database referenced by the connection string. These credentials are typically different from credentials used to open connections for data-dependent routing. For more information, see [Using credentials in the elastic database client](elastic-scale-manage-credentials.md). - -## Removing a shard from the ShardMap after a shard is deleted - -The [DetachShard method](/previous-versions/azure/dn842083(v=azure.100)) detaches the given shard from the shard map and deletes mappings associated with the shard. - -* The location parameter is the shard location, specifically server name and database name, of the shard being detached. -* The shardMapName parameter is the shard map name. This is only required when multiple shard maps are managed by the same shard map manager. Optional. - -> [!IMPORTANT] -> Use this technique only if you are certain that the range for the updated mapping is empty. The methods above do not check data for the range being moved, so it is best to include checks in your code. - -This example removes shards from the shard map. - - ```java - rm.DetachShard(s.Location, customerMap); - ``` - -The shard map reflects the shard location in the GSM before the deletion of the shard. Because the shard was deleted, it is assumed this was intentional, and the sharding key range is no longer in use. If not, you can execute a point-in-time restore to recover the shard from an earlier point in time. (In that case, review the following section to detect shard inconsistencies.) To recover, see [Point in time recovery](recovery-using-backups.md). - -Since it is assumed the database deletion was intentional, the final administrative cleanup action is to delete the entry to the shard in the shard map manager. 
This prevents the application from inadvertently writing information to a range that is not expected. - -## To detect mapping differences - -The [DetectMappingDifferences method](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.recovery.recoverymanager.detectmappingdifferences) selects and returns one of the shard maps (either local or global) as the source of truth and reconciles mappings on both shard maps (GSM and LSM). - - ```java - rm.DetectMappingDifferences(location, shardMapName); - ``` - -* The *location* specifies the server name and database name. -* The *shardMapName* parameter is the shard map name. This is only required if multiple shard maps are managed by the same shard map manager. Optional. - -## To resolve mapping differences - -The [ResolveMappingDifferences method](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.recovery.recoverymanager.resolvemappingdifferences) selects one of the shard maps (either local or global) as the source of truth and reconciles mappings on both shard maps (GSM and LSM). - - ```java - ResolveMappingDifferences (RecoveryToken, MappingDifferenceResolution.KeepShardMapping); - ``` - -* The *RecoveryToken* parameter enumerates the differences in the mappings between the GSM and the LSM for the specific shard. -* The [MappingDifferenceResolution enumeration](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.recovery.mappingdifferenceresolution) is used to indicate the method for resolving the difference between the shard mappings. -* **MappingDifferenceResolution.KeepShardMapping** is recommended when the LSM contains the accurate mapping and therefore the mapping in the shard should be used. This is typically the case if there is a failover: the shard now resides on a new server. Since the shard must first be removed from the GSM (using the RecoveryManager.DetachShard method), a mapping no longer exists on the GSM. 
Therefore, the LSM must be used to re-establish the shard mapping. - -## Attach a shard to the ShardMap after a shard is restored - -The [AttachShard method](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.recovery.recoverymanager.attachshard) attaches the given shard to the shard map. It then detects any shard map inconsistencies and updates the mappings to match the shard at the point of the shard restoration. It is assumed that the database is also renamed to reflect the original database name (before the shard was restored), since the point-in time restoration defaults to a new database appended with the timestamp. - - ```java - rm.AttachShard(location, shardMapName) - ``` - -* The *location* parameter is the server name and database name, of the shard being attached. -* The *shardMapName* parameter is the shard map name. This is only required when multiple shard maps are managed by the same shard map manager. Optional. - -This example adds a shard to the shard map that has been recently restored from an earlier point-in time. Since the shard (namely the mapping for the shard in the LSM) has been restored, it is potentially inconsistent with the shard entry in the GSM. Outside of this example code, the shard was restored and renamed to the original name of the database. Since it was restored, it is assumed the mapping in the LSM is the trusted mapping. - - ```java - rm.AttachShard(s.Location, customerMap); - var gs = rm.DetectMappingDifferences(s.Location); - foreach (RecoveryToken g in gs) - { - rm.ResolveMappingDifferences(g, MappingDifferenceResolution.KeepShardMapping); - } - ``` - -## Updating shard locations after a geo-failover (restore) of the shards - -If there is a geo-failover, the secondary database is made write accessible and becomes the new primary database. The name of the server, and potentially the database (depending on your configuration), may be different from the original primary. 
Therefore the mapping entries for the shard in the GSM and LSM must be fixed. Similarly, if the database is restored to a different name or location, or to an earlier point in time, this might cause inconsistencies in the shard maps. The Shard Map Manager handles the distribution of open connections to the correct database. Distribution is based on the data in the shard map and the value of the sharding key that is the target of the applications request. After a geo-failover, this information must be updated with the accurate server name, database name and shard mapping of the recovered database. - -## Best practices - -Geo-failover and recovery are operations typically managed by a cloud administrator of the application intentionally utilizing Azure SQL Database business continuity features. Business continuity planning requires processes, procedures, and measures to ensure that business operations can continue without interruption. The methods available as part of the RecoveryManager class should be used within this work flow to ensure the GSM and LSM are kept up-to-date based on the recovery action taken. There are five basic steps to properly ensuring the GSM and LSM reflect the accurate information after a failover event. The application code to execute these steps can be integrated into existing tools and workflow. - -1. Retrieve the RecoveryManager from the ShardMapManager. -2. Detach the old shard from the shard map. -3. Attach the new shard to the shard map, including the new shard location. -4. Detect inconsistencies in the mapping between the GSM and LSM. -5. Resolve differences between the GSM and the LSM, trusting the LSM. - -This example performs the following steps: - -1. Removes shards from the Shard Map that reflect shard locations before the failover event. -2. Attaches shards to the Shard Map reflecting the new shard locations (the parameter "Configuration.SecondaryServer" is the new server name but the same database name). -3. 
Retrieves the recovery tokens by detecting mapping differences between the GSM and the LSM for each shard. -4. Resolves the inconsistencies by trusting the mapping from the LSM of each shard. - - ```java - var shards = smm.GetShards(); - foreach (shard s in shards) - { - if (s.Location.Server == Configuration.PrimaryServer) - { - ShardLocation slNew = new ShardLocation(Configuration.SecondaryServer, s.Location.Database); - rm.DetachShard(s.Location); - rm.AttachShard(slNew); - var gs = rm.DetectMappingDifferences(slNew); - foreach (RecoveryToken g in gs) - { - rm.ResolveMappingDifferences(g, MappingDifferenceResolution.KeepShardMapping); - } - } - } - ``` - -[!INCLUDE [elastic-scale-include](../../../includes/elastic-scale-include.md)] - - -[1]: ./media/elastic-database-recovery-manager/recovery-manager.png \ No newline at end of file diff --git a/articles/azure-sql/database/elastic-jobs-migrate.md b/articles/azure-sql/database/elastic-jobs-migrate.md deleted file mode 100644 index e648e7547f68b..0000000000000 --- a/articles/azure-sql/database/elastic-jobs-migrate.md +++ /dev/null @@ -1,627 +0,0 @@ ---- -title: Migrate to new Elastic Database Jobs (preview) -description: Migrate to the new Elastic Database Jobs. -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: seo-lt-2019, sqldbrb=1, devx-track-azurepowershell -ms.devlang: -ms.topic: how-to -author: LitKnd -ms.author: kendralittle -ms.date: 03/13/2019 ---- -# Migrate to the new Elastic Database jobs (preview) -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -An upgraded version of [Elastic Database Jobs](elastic-jobs-overview.md) is available. - -If you have an existing customer hosted version of Elastic Database Jobs, migration cmdlets and scripts are provided for easily migrating to the latest version. - - -## Prerequisites - -The upgraded version of Elastic Database jobs has a new set of PowerShell cmdlets for use during migration. 
These new cmdlets transfer all of your existing job credentials, targets (including databases, servers, custom collections), job triggers, job schedules, job contents, and jobs over to a new Elastic Job agent. - -### Install the latest Elastic Jobs cmdlets - -If you don't already have an Azure subscription, [create a free account](https://azure.microsoft.com/free/) before you begin. - -Install the **Az.Sql** 1.1.1-preview module to get the latest Elastic Job cmdlets. Run the following commands in PowerShell with administrative access. - -```powershell -# Installs the latest PackageManagement powershell package which PowerShellGet v1.6.5 is dependent on -Find-Package PackageManagement -RequiredVersion 1.1.7.2 | Install-Package -Force - -# Installs the latest PowerShellGet module which adds the -AllowPrerelease flag to Install-Module -Find-Package PowerShellGet -RequiredVersion 1.6.5 | Install-Package -Force - -# Restart your powershell session with administrative access - -# Places Az.Sql preview cmdlets side by side with existing Az.Sql version -Install-Module -Name Az.Sql -RequiredVersion 1.1.1-preview -AllowPrerelease - -# Import the Az.Sql module -Import-Module Az.Sql -RequiredVersion 1.1.1 - -# Confirm if module successfully imported - if the imported version is 1.1.1, then continue -Get-Module Az.Sql -``` - -### Create a new Elastic Job agent - -After installing the new cmdlets, create a new Elastic Job agent. 
- -```powershell -# Register your subscription for the for the Elastic Jobs public preview feature -Register-AzProviderFeature -FeatureName sqldb-JobAccounts -ProviderNamespace Microsoft.Sql - -# Get an existing database to use as the job database - or create a new one if necessary -$db = Get-AzSqlDatabase -ResourceGroupName -ServerName -DatabaseName -# Create a new elastic job agent -$agent = $db | New-AzSqlElasticJobAgent -Name -``` - -### Install the old Elastic Database Jobs cmdlets - -Migration needs to use some of the *old* elastic job cmdlets, so run the following commands if you don't already have them installed. - -```powershell -# Install the old elastic job cmdlets if necessary and initialize the old jobs cmdlets -.\nuget install Microsoft.Azure.SqlDatabase.Jobs -prerelease - -# Install the old jobs cmdlets -cd Microsoft.Azure.SqlDatabase.Jobs.x.x.xxxx.x*\tools -Unblock-File .\InstallElasticDatabaseJobsCmdlets.ps1 -.\InstallElasticDatabaseJobsCmdlets.ps1 - -# Choose the subscription where your existing jobs are -Select-AzSubscription -SubscriptionId -Use-AzureSqlJobConnection -CurrentAzureSubscription -Credential (Get-Credential) -``` - - - -## Migration - -Now that both the old and new Elastic Jobs cmdlets are initialized, migrate your job credentials, targets, and jobs to the new *job database*. 
- -### Setup - -```powershell -$ErrorActionPreference = "Stop"; - -# Helper function to show starting write output -function Log-StartOutput ($output) { - Write-Output ("`r--------------------- " + $output + " ---------------------") -} - -# Helper function to show starting write output -function Log-ChildOutput ($output) { - Write-Output (" - " + $output) -} -``` - - - -### Migrate credentials - -```powershell -function Migrate-Credentials ($agent) { - Log-StartOutput "Migrating credentials" - - $oldCreds = Get-AzureSqlJobCredential - $oldCreds | % { - $oldCredName = $_.CredentialName - $oldUserName = $_.UserName - Write-Output ("Credential " + $oldCredName) - $oldCredential = Get-Credential -UserName $oldUserName ` - -Message ("Please enter in the password that was used for your credential " + $oldCredName) - try - { - $cred = New-AzSqlElasticJobCredential -ParentObject $agent -Name $oldCredName -Credential $oldCredential - } - catch [System.Management.Automation.PSArgumentException] - { - $cred = Get-AzSqlElasticJobCredential -ParentObject $agent -Name $oldCredName - $cred = Set-AzSqlElasticJobCredential -InputObject $cred -Credential $oldCredential - } - - Log-ChildOutput ("Added user " + $oldUserName) - } -} -``` - -To migrate your credentials, execute the following command by passing in the `$agent` PowerShell object from earlier. 
- -```powershell -Migrate-Credentials $agent -``` - -Sample output - -```powershell -# You should see similar output after executing the above -# --------------------- Migrating credentials --------------------- -# Credential cred1 -# - Added user user1 -# Credential cred2 -# - Added user user2 -# Credential cred3 -# - Added user user3 -``` - -### Migrate targets - -```powershell -function Migrate-TargetGroups ($agent) { - Log-StartOutput "Migrating target groups" - - # Setup hash of target groups - $targetGroups = [ordered]@{} - - # Fetch root job targets from old service - $rootTargets = Get-AzureSqlJobTarget - - # Return if no root targets are found - if ($rootTargets.Count -eq 0) - { - Write-Output "No targets found - no need for migration" - return - } - - # Create list of target groups to create - # We format the target group name as such: - # - If root target is server type, then target group name is "(serverName)" - # - If root target is database type, then target group name is "(serverName,databaseName)" - # - If root target is shard map type, then target group name is "(serverName,databaseName,shardMapName)" - # - If root target is custom collection, then target group name is "customCollectionName" - $rootTargets | % { - $tgName = Format-OldTargetName -target $_ - $childTargets = Get-ChildTargets -target $_ - $targetGroups.Add($tgName, $childTargets) - } - - # Flatten list - for ($i=$targetGroups.Count - 1; $i -ge 0; $i--) - { - # Fetch target group's initial list of targets unexpanded - $targets = $targetGroups[$i] - - # Expand custom collection targets - $j = 0; - while ($j -lt $targets.Count) - { - $target = $targets[$j] - if ($target.TargetType -eq "CustomCollection") - { - $targets = [System.Collections.ArrayList] $targets - $targets.Remove($target) # Remove this target from the list - - $expandedTargets = $targetGroups[$target.TargetDescription.CustomCollectionName] - - foreach ($expandedTarget in $expandedTargets) - { - 
$targets.Add($expandedTarget) | Out-Null - } - - # Set updated list of targets for tg - $targetGroups[$i] = $targets - # Note we don't increment here in case we need to expand further - } - else - { - # Skip if no custom collection target needs to be expanded - $j++ - } - } - } - - # Add targets to target group - foreach ($targetGroup in $targetGroups.Keys) - { - $tg = Setup-TargetGroup -tgName $targetGroup -agent $agent - $targets = $targetGroups[$targetGroup] - Migrate-Targets -targets $targets -tg $tg - $targetsAdded = (Get-AzSqlElasticJobTargetGroup -ParentObject $agent -Name $tg.TargetGroupName).Targets - foreach ($targetAdded in $targetsAdded) - { - Log-ChildOutput ("Added target " + (Format-NewTargetName $targetAdded)) - } - } -} - -## Target group helpers -# Migrate shard map target from old jobs to new job's target group -function Migrate-Targets ($targets, $tg) { - Write-Output ("Target group " + $tg.TargetGroupName) - foreach ($target in $targets) { - if ($target.TargetType -eq "Server") { - Add-ServerTarget -target $target -tg $tg - } - elseif ($target.TargetType -eq "Database") { - Add-DatabaseTarget -target $target -tg $tg - } - elseif ($target.TargetType -eq "ShardMap") { - Add-ShardMapTarget -target $target -tg $tg - } - } -} - -# Migrate server target from old jobs to new job's target group -function Add-ServerTarget ($target, $tg) { - $jobTarget = Get-AzureSqlJobTarget -TargetId $target.TargetId - $serverName = $jobTarget.ServerName - $credName = $jobTarget.MasterDatabaseCredentialName - $t = Add-AzSqlElasticJobTarget -ParentObject $tg -ServerName $serverName -RefreshCredentialName $credName -} - -# Migrate database target from old jobs to new job's target group -function Add-DatabaseTarget ($target, $tg) { - $jobTarget = Get-AzureSqlJobTarget -TargetId $target.TargetId - $serverName = $jobTarget.ServerName - $databaseName = $jobTarget.DatabaseName - $exclude = $target.Membership - - if ($exclude -eq "Exclude") { - $t = Add-AzSqlElasticJobTarget 
-ParentObject $tg -ServerName $serverName -DatabaseName $databaseName -Exclude - } - else { - $t = Add-AzSqlElasticJobTarget -ParentObject $tg -ServerName $serverName -DatabaseName $databaseName - } -} - -# Migrate shard map target from old jobs to new job's target group -function Add-ShardMapTarget ($target, $tg) { - $jobTarget = Get-AzureSqlJobTarget -TargetId $target.TargetId - $smName = $jobTarget.ShardMapName - $serverName = $jobTarget.ShardMapManagerServerName - $databaseName = $jobTarget.ShardMapManagerDatabaseName - $credName = $jobTarget.ShardMapManagerCredentialName - $exclude = $target.Membership - - if ($exclude -eq "Exclude") { - $t = Add-AzSqlElasticJobTarget -ParentObject $tg -ServerName $serverName -ShardMapName $smName -DatabaseName $databasename -RefreshCredentialName $credName -Exclude - } - else { - $t = Add-AzSqlElasticJobTarget -ParentObject $tg -ServerName $serverName -ShardMapName $smName -DatabaseName $databasename -RefreshCredentialName $credName - } -} - -# Helper to format target old target names -function Format-OldTargetName ($target) { - if ($target.TargetType -eq "Server") { - $tgName = "(" + $target.ServerName + ")" - } - elseif ($target.TargetType -eq "Database") { - $tgName = "(" + $target.ServerName + "," + $target.DatabaseName + ")" - } - elseif ($target.TargetType -eq "ShardMap") { - $tgName = "(" + $target.ShardMapManagerServerName + "," + - $target.ShardMapManagerDatabaseName + "," + ` - $target.ShardMapName + ")" - } - elseif ($target.TargetType -eq "CustomCollection") { - $tgName = $target.CustomCollectionName - } - - return $tgName -} - -# Helper to format new target names -function Format-NewTargetName ($target) { - if ($target.TargetType -eq "SqlServer") { - $tgName = "(" + $target.TargetServerName + ")" - } - elseif ($target.TargetType -eq "SqlDatabase") { - $tgName = "(" + $target.TargetServerName + "," + $target.TargetDatabaseName + ")" - } - elseif ($target.TargetType -eq "SqlShardMap") { - $tgName = "(" + 
$target.TargetServerName + "," + - $target.TargetDatabaseName + "," + ` - $target.TargetShardMapName + ")" - } - elseif ($target.TargetType -eq "SqlElasticPool") { - $tgName = "(" + $target.TargetServerName + "," + - $target.TargetDatabaseName + "," + ` - $target.TargetElasticPoolName + ")" - } - - return $tgName -} - -# Get child targets -function Get-ChildTargets($target) { - if ($target.TargetType -eq "CustomCollection") { - $children = Get-AzureSqlJobChildTarget -TargetId $target.TargetId - if ($children.Count -eq 1) - { - $arr = New-Object System.Collections.ArrayList($null) - $arr.Add($children) - $children = $arr - } - return $children - } - else { - return $target - } -} - -# Migrates target groups -function Setup-TargetGroup ($tgName, $agent) { - try { - $tg = New-AzSqlElasticJobTargetGroup -ParentObject $agent -Name $tgName - return $tg - } - catch [System.Management.Automation.PSArgumentException] { - $tg = Get-AzSqlElasticJobTargetGroup -ParentObject $agent -Name $tgName - return $tg - } -} -``` - -To migrate your targets (servers, databases, and custom collections) to your new job database, execute the **Migrate-TargetGroups** cmdlet to perform the following: - -- Root level targets that are servers and databases will be migrated to a new target group named "(\, \)" containing only the root level target. -- A custom collection will migrate to a new target group containing all child targets. 
- -```powershell -Migrate-TargetGroups $agent -``` - -Sample output: - -```powershell -# --------------------- Migrating target groups --------------------- -# Target group cc1 -# - Added target (s1) -# - Added target (s1,db1) -# Target group cc2 -# - Added target (s1,db1) -# Target group cc3 -# - Added target (s1) -# - Added target (s1,db1) -# Target group (s1,db1) -# - Added target (s1,db1) -# Target group (s1,db2) -# - Added target (s1,db2) -# Target group (s1) -# - Added target (s1) -# Target group (s1,db1,sm1) -# - Added target (s1,db1,sm1) -``` - - - -### Migrate jobs - -```powershell -function Migrate-Jobs ($agent) -{ - Log-StartOutput "Migrating jobs and job steps" - - $oldJobs = Get-AzureSqlJob - $newJobs = [System.Collections.ArrayList] @() - - foreach ($oldJob in $oldJobs) - { - # Ignore system jobs - if ($oldJob.ContentName -eq $null) - { - continue - } - - # Schedule - $oldJobTriggers = Get-AzureSqlJobTrigger -JobName $oldJob.JobName - - if ($oldJobTriggers.Count -ge 1) - { - foreach ($trigger in $oldJobTriggers) - { - - $schedule = Get-AzureSqlJobSchedule -ScheduleName $trigger.ScheduleName - $newJob = [PSCustomObject] @{ - JobName = ($trigger.JobName + " (" + $trigger.ScheduleName + ")"); - Description = $oldJob.ContentName - Schedule = $schedule - TargetGroupName = (Format-OldTargetName(Get-AzureSqlJobTarget -TargetId $oldJob.TargetId)) - CredentialName = $oldJob.CredentialName - Output = $oldJob.ResultSetDestination - } - $newJobs.Add($newJob) | Out-Null - } - } - else - { - $newJob = [PSCustomObject] @{ - JobName = $oldJob.JobName - Description = $oldJob.ContentName - Schedule = $null - TargetGroupName = (Format-OldTargetName(Get-AzureSqlJobTarget -TargetId $oldJob.TargetId)) - CredentialName = $oldJob.CredentialName - Output = $oldJob.ResultSetDestination - } - $newJobs.Add($newJob) | Out-Null - } - } - - # At this point, we should have an organized list of jobs to create - foreach ($newJob in $newJobs) - { - Write-Output ("Job " + 
$newJob.JobName) - $job = Setup-Job $newJob $agent - If ($job.Interval -ne $null) - { - Log-ChildOutput ("Schedule with start time " + $job.StartTime + " and end time at " + $job.EndTime) - Log-ChildOutput ("Repeats every " + $job.Interval) - } - else { - Log-ChildOutput ("Repeats once") - } - - Setup-JobStep $newJob $job - } -} - -# Migrates jobs -function Setup-Job ($job, $agent) { - $jobName = $newJob.JobName - $jobDescription = $newJob.Description - - # Create or update a job has a recurring schedule - if ($newJob.Schedule -ne $null) { - $schedule = $newJob.Schedule - $startTime = $schedule.StartTime.UtcTime - $endTime = $schedule.EndTime.UtcTime - $intervalType = $schedule.Interval.IntervalType.ToString() - $intervalType = $intervalType.Substring(0, $intervalType.Length - 1) # Remove the last letter (s) - $intervalCount = $schedule.Interval.Count - - try { - $job = New-AzSqlElasticJob -ParentObject $agent -Name $jobName ` - -Description $jobDescription -IntervalType $intervalType -IntervalCount $intervalCount ` - -StartTime $startTime -EndTime $endTime - return $job - } - catch [System.Management.Automation.PSArgumentException] { - $job = Get-AzSqlElasticJob -ParentObject $agent -Name $jobName - $job = $job | Set-AzSqlElasticJob -Description $jobDescription -IntervalType $intervalType -IntervalCount $intervalCount ` - -StartTime $startTime -EndTime $endTime - return $job - } - } - # Create or update a job that runs once - else { - try { - $job = New-AzSqlElasticJob -ParentObject $agent -Name $jobName ` - -Description $jobDescription -RunOnce - return $job - } - catch [System.Management.Automation.PSArgumentException] { - $job = Get-AzSqlElasticJob -ParentObject $agent -Name $jobName - $job = $job | Set-AzSqlElasticJob -Description $jobDescription -RunOnce - return $job - } - } -} -# Migrates job steps -function Setup-JobStep ($newJob, $job) { - $defaultJobStepName = 'JobStep' - $contentName = $newJob.Description - $commandText = 
(Get-AzureSqlJobContentDefinition -ContentName $contentName).CommandText - $targetGroupName = $newJob.TargetGroupName - $credentialName = $newJob.CredentialName - - $output = $newJob.Output - - if ($output -ne $null) { - $outputServerName = $output.TargetDescription.ServerName - $outputDatabaseName = $output.TargetDescription.DatabaseName - $outputCredentialName = $output.CredentialName - $outputSchemaName = $output.SchemaName - $outputTableName = $output.TableName - $outputDatabase = Get-AzSqlDatabase -ResourceGroupName $job.ResourceGroupName -ServerName $outputServerName -Databasename $outputDatabaseName - - try { - $jobStep = $job | Add-AzSqlElasticJobStep -Name $defaultJobStepName ` - -TargetGroupName $targetGroupName -CredentialName $credentialName -CommandText $commandText ` - -OutputDatabaseObject $outputDatabase ` - -OutputSchemaName $outputSchemaName -OutputTableName $outputTableName ` - -OutputCredentialName $outputCredentialName - } - catch [System.Management.Automation.PSArgumentException] { - $jobStep = $job | Get-AzSqlElasticJobStep -Name $defaultJobStepName - $jobStep = $jobStep | Set-AzSqlElasticJobStep -TargetGroupName $targetGroupName ` - -CredentialName $credentialName -CommandText $commandText ` - -OutputDatabaseObject $outputDatabase ` - -OutputSchemaName $outputSchemaName -OutputTableName $outputTableName ` - -OutputCredentialName $outputCredentialName - } - } - else { - try { - $jobStep = $job | Add-AzSqlElasticJobStep -Name $defaultJobStepName -TargetGroupName $targetGroupName -CredentialName $credentialName -CommandText $commandText - } - catch [System.Management.Automation.PSArgumentException] { - $jobStep = $job | Get-AzSqlElasticJobStep -Name $defaultJobStepName - $jobStep = $jobStep | Set-AzSqlElasticJobStep -TargetGroupName $targetGroupName -CredentialName $credentialName -CommandText $commandText - } - } - Log-ChildOutput ("Added step " + $jobStep.StepName + " using target group " + $jobStep.TargetGroupName + " using credential " + 
$jobStep.CredentialName) - Log-ChildOutput("Command text script taken from content name " + $contentName) - - if ($jobStep.Output -ne $null) { - Log-ChildOutput ("With output target as (" + $jobStep.Output.ServerName + "," + $jobStep.Output.DatabaseName + "," + $jobStep.Output.SchemaName + "," + $jobStep.Output.TableName + ")") - } -} -``` - -To migrate your jobs, job content, job triggers, and job schedules over to your new Elastic Job agent's database, execute the **Migrate-Jobs** cmdlet passing in your agent. - -- Jobs with multiple triggers with different schedules are separated into multiple jobs with naming scheme: "\ (\)". -- Job contents are migrated to a job by adding a default job step named JobStep with associated command text. -- Jobs are disabled by default so that you can validate them before enabling them. - -```powershell -Migrate-Jobs $agent -``` - -Sample output: -```powershell ---------------------- Migrating jobs and job steps --------------------- -Job job1 - - Repeats once - - Added step JobStep using target group cc2 using credential cred1 - - Command text script taken from content name SampleContext -Job job2 - - Repeats once - - Added step JobStep using target group (s1,db1) using credential cred1 - - Command text script taken from content name SampleContent - - With output target as (s1,db1,dbo,sampleTable) -Job job3 (repeat every 10 min) - - Schedule with start time 05/16/2018 22:05:28 and end time at 12/31/9999 11:59:59 - - Repeats every PT10M - - Added step JobStep using target group cc1 using credential cred1 - - Command text script taken from content name SampleContent -Job job3 (repeat every 5 min) - - Schedule with start time 05/16/2018 22:05:31 and end time at 12/31/9999 11:59:59 - - Repeats every PT5M - - Added step JobStep using target group cc1 using credential cred1 - - Command text script taken from content name SampleContent -Job job4 - - Repeats once - - Added step JobStep using target group (s1,db1) using credential cred1 - 
- Command text script taken from content name SampleContent -``` - - - -## Migration Complete - -The *job database* should now have all of the job credentials, targets, job triggers, job schedules, job contents, and jobs migrated over. - -To confirm that everything migrated correctly, use the following scripts: - -```powershell -$creds = $agent | Get-AzSqlElasticJobCredential -$targetGroups = $agent | Get-AzSqlElasticJobTargetGroup -$jobs = $agent | Get-AzSqlElasticJob -$steps = $jobs | Get-AzSqlElasticJobStep -``` - -To test that jobs are executing correctly, start them: - -```powershell -$jobs | Start-AzSqlElasticJob -``` - -For any jobs that were running on a schedule, remember to enable them so that they can run in the background: - -```powershell -$jobs | Set-AzSqlElasticJob -Enable -``` - -## Next steps - -- [Create and manage Elastic Jobs using PowerShell](elastic-jobs-powershell-create.md) -- [Create and manage Elastic Jobs using Transact-SQL (T-SQL)](elastic-jobs-tsql-create-manage.md) diff --git a/articles/azure-sql/database/elastic-jobs-overview.md b/articles/azure-sql/database/elastic-jobs-overview.md deleted file mode 100644 index a8d49dd54fcf9..0000000000000 --- a/articles/azure-sql/database/elastic-jobs-overview.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: Elastic Database Jobs (preview) -description: 'Configure Elastic Database Jobs (preview) to run Transact-SQL (T-SQL) scripts across a set of one or more databases in Azure SQL Database' -services: sql-database -ms.service: sql-database -ms.subservice: elastic-jobs -ms.custom: seo-lt-2019, sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: srinia -ms.author: srinia -ms.reviewer: kendralittle, mathoma -ms.date: 12/18/2018 ---- -# Create, configure, and manage elastic jobs (preview) -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this article, you will learn how to create, configure, and manage elastic jobs. 
- -If you have not used Elastic jobs, [learn more about the job automation concepts in Azure SQL Database](job-automation-overview.md). - -## Create and configure the agent - -1. Create or identify an empty S0 or higher database. This database will be used as the *Job database* during Elastic Job agent creation. -2. Create an Elastic Job agent in the [portal](https://portal.azure.com/#create/Microsoft.SQLElasticJobAgent) or with [PowerShell](elastic-jobs-powershell-create.md#create-the-elastic-job-agent). - - ![Creating Elastic Job agent](./media/elastic-jobs-overview/create-elastic-job-agent.png) - -## Create, run, and manage jobs - -1. Create a credential for job execution in the *Job database* using [PowerShell](elastic-jobs-powershell-create.md) or [T-SQL](elastic-jobs-tsql-create-manage.md#create-a-credential-for-job-execution). -2. Define the target group (the databases you want to run the job against) using [PowerShell](elastic-jobs-powershell-create.md) or [T-SQL](elastic-jobs-tsql-create-manage.md#create-a-target-group-servers). -3. Create a job agent credential in each database the job will run [(add the user (or role) to each database in the group)](logins-create-manage.md). For an example, see the [PowerShell tutorial](elastic-jobs-powershell-create.md). -4. Create a job using [PowerShell](elastic-jobs-powershell-create.md) or [T-SQL](elastic-jobs-tsql-create-manage.md#deploy-new-schema-to-many-databases). -5. Add job steps using [PowerShell](elastic-jobs-powershell-create.md) or [T-SQL](elastic-jobs-tsql-create-manage.md#deploy-new-schema-to-many-databases). -6. Run a job using [PowerShell](elastic-jobs-powershell-create.md#run-the-job) or [T-SQL](elastic-jobs-tsql-create-manage.md#begin-unplanned-execution-of-a-job). -7. Monitor job execution status using the portal, [PowerShell](elastic-jobs-powershell-create.md#monitor-status-of-job-executions) or [T-SQL](elastic-jobs-tsql-create-manage.md#monitor-job-execution-status). 
- - ![Portal](./media/elastic-jobs-overview/elastic-job-executions-overview.png) - -## Credentials for running jobs - -Jobs use [database scoped credentials](/sql/t-sql/statements/create-database-scoped-credential-transact-sql) to connect to the databases specified by the target group upon execution. If a target group contains servers or pools, these database scoped credentials are used to connect to the master database to enumerate the available databases. - -Setting up the proper credentials to run a job can be a little confusing, so keep the following points in mind: - -- The database scoped credentials must be created in the *Job database*. -- **All target databases must have a login with [sufficient permissions](/sql/relational-databases/security/permissions-database-engine) for the job to complete successfully** (`jobuser` in the diagram below). -- Credentials can be reused across jobs, and the credential passwords are encrypted and secured from users who have read-only access to job objects. - -The following image is designed to assist in understanding and setting up the proper job credentials. **Remember to create the user in every database (all *target user dbs*) the job needs to run**. - -![Elastic Jobs credentials](./media/elastic-jobs-overview/job-credentials.png) - -## Security best practices - -A few best practice considerations for working with Elastic Jobs: - -- Limit usage of the APIs to trusted individuals. -- Credentials should have the least privileges necessary to perform the job step. For more information, see [Authorization and Permissions](/dotnet/framework/data/adonet/sql/authorization-and-permissions-in-sql-server). -- When using a server and/or pool target group member, it is highly suggested to create a separate credential with rights on the master database to view/list databases that is used to expand the database lists of the server(s) and/or pool(s) prior to the job execution. 
- -## Agent performance, capacity, and limitations - -Elastic Jobs use minimal compute resources while waiting for long-running jobs to complete. - -Depending on the size of the target group of databases and the desired execution time for a job (number of concurrent workers), the agent requires different amounts of compute and performance of the *Job database* (the more targets and the higher number of jobs, the higher the amount of compute required). - -### Prevent jobs from reducing target database performance - -To ensure resources aren't overburdened when running jobs against databases in a SQL elastic pool, jobs can be configured to limit the number of databases a job can run against at the same time. - -Set the number of concurrent databases a job runs on by setting the `sp_add_jobstep` stored procedure's `@max_parallelism` parameter in T-SQL. - - -### Known limitations - -These are the current limitations to the Elastic Jobs service. We're actively working to remove as many of these limitations as possible. - -| Issue | Description | -| :---- | :--------- | -| The Elastic Job agent needs to be recreated and started in the new region after a failover/move to a new Azure region. | The Elastic Jobs service stores all its job agent and job metadata in the jobs database. Any failover or move of Azure resources to a new Azure region will also move the jobs database, job agent and jobs metadata to the new Azure region. However, the Elastic Job agent is a compute only resource and needs to be explicitly re-created and started in the new region before jobs will start executing again in the new region. Once started, the Elastic Job agent will resume executing jobs in the new region as per the previously defined job schedule. | -| Concurrent jobs limit. | Currently, the preview is limited to 100 concurrent jobs. 
| - -## Best practices for creating jobs - -Consider the following best practices when working with Elastic Database jobs: - -### Idempotent scripts -A job's T-SQL scripts must be [idempotent](https://en.wikipedia.org/wiki/Idempotence). **Idempotent** means that if the script succeeds, and it is run again, the same result occurs. A script may fail due to transient network issues. In that case, the job will automatically retry running the script a preset number of times before desisting. An idempotent script has the same result even if its been successfully run twice (or more). - -A simple tactic is to test for the existence of an object before creating it. A hypothetical example is shown below: - -```sql -IF NOT EXISTS (SELECT * FROM sys.objects WHERE [name] = N'some_object') - print 'Object does not exist' - -- Create the object -ELSE - print 'Object exists' - -- If it exists, drop the object before recreating it. -``` - -Similarly, a script must be able to execute successfully by logically testing for and countering any conditions it finds. - -## Next steps - -- [Create and manage Elastic Jobs using PowerShell](elastic-jobs-powershell-create.md) -- [Create and manage Elastic Jobs using Transact-SQL (T-SQL)](elastic-jobs-tsql-create-manage.md) diff --git a/articles/azure-sql/database/elastic-jobs-powershell-create.md b/articles/azure-sql/database/elastic-jobs-powershell-create.md deleted file mode 100644 index c42816c40d7ac..0000000000000 --- a/articles/azure-sql/database/elastic-jobs-powershell-create.md +++ /dev/null @@ -1,303 +0,0 @@ ---- -title: Create an Elastic Job agent using PowerShell (preview) -description: Learn how to create an Elastic Job agent using PowerShell. 
-services: sql-database -ms.service: sql-database -ms.subservice: elastic-jobs -ms.custom: seo-lt-2019, devx-track-azurepowershell -ms.devlang: -ms.topic: tutorial -author: srinia -ms.author: srinia -ms.reviewer: kendralittle, mathoma -ms.date: 10/21/2020 ---- -# Create an Elastic Job agent using PowerShell (preview) -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -[Elastic jobs (preview)](job-automation-overview.md) enable the running of one or more Transact-SQL (T-SQL) scripts in parallel across many databases. - -In this tutorial, you learn the steps required to run a query across multiple databases: - -> [!div class="checklist"] -> * Create an Elastic Job agent -> * Create job credentials so that jobs can execute scripts on its targets -> * Define the targets (servers, elastic pools, databases, shard maps) you want to run the job against -> * Create database scoped credentials in the target databases so the agent connect and execute jobs -> * Create a job -> * Add job steps to a job -> * Start execution of a job -> * Monitor a job - -## Prerequisites - -The upgraded version of Elastic Database jobs has a new set of PowerShell cmdlets for use during migration. These new cmdlets transfer all of your existing job credentials, targets (including databases, servers, custom collections), job triggers, job schedules, job contents, and jobs over to a new Elastic Job agent. - -### Install the latest Elastic Jobs cmdlets - -If you don't have already have an Azure subscription, [create a free account](https://azure.microsoft.com/free/) before you begin. - -Install the **Az.Sql** module to get the latest Elastic Job cmdlets. Run the following commands in PowerShell with administrative access. 
- -```powershell -# installs the latest PackageManagement and PowerShellGet packages -Find-Package PackageManagement | Install-Package -Force -Find-Package PowerShellGet | Install-Package -Force - -# Restart your powershell session with administrative access - -# Install and import the Az.Sql module, then confirm -Install-Module -Name Az.Sql -Import-Module Az.Sql - -Get-Module Az.Sql -``` - -In addition to the **Az.Sql** module, this tutorial also requires the *SqlServer* PowerShell module. For details, see [Install SQL Server PowerShell module](/sql/powershell/download-sql-server-ps-module). - -## Create required resources - -Creating an Elastic Job agent requires a database (S0 or higher) for use as the [Job database](job-automation-overview.md#elastic-job-database). - -The script below creates a new resource group, server, and database for use as the Job database. The second script creates a second server with two blank databases to execute jobs against. - -Elastic Jobs has no specific naming requirements so you can use whatever naming conventions you want, as long as they conform to any [Azure requirements](/azure/architecture/best-practices/resource-naming). - -```powershell -# sign in to Azure account -Connect-AzAccount - -# create a resource group -Write-Output "Creating a resource group..." -$resourceGroupName = Read-Host "Please enter a resource group name" -$location = Read-Host "Please enter an Azure Region" -$rg = New-AzResourceGroup -Name $resourceGroupName -Location $location -$rg - -# create a server -Write-Output "Creating a server..." 
-$agentServerName = Read-Host "Please enter an agent server name" -$agentServerName = $agentServerName + "-" + [guid]::NewGuid() -$adminLogin = Read-Host "Please enter the server admin name" -$adminPassword = Read-Host "Please enter the server admin password" -$adminPasswordSecure = ConvertTo-SecureString -String $AdminPassword -AsPlainText -Force -$adminCred = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $adminLogin, $adminPasswordSecure -$agentServer = New-AzSqlServer -ResourceGroupName $resourceGroupName -Location $location ` - -ServerName $agentServerName -ServerVersion "12.0" -SqlAdministratorCredentials ($adminCred) - -# set server firewall rules to allow all Azure IPs -Write-Output "Creating a server firewall rule..." -$agentServer | New-AzSqlServerFirewallRule -AllowAllAzureIPs -$agentServer - -# create the job database -Write-Output "Creating a blank database to be used as the Job Database..." -$jobDatabaseName = "JobDatabase" -$jobDatabase = New-AzSqlDatabase -ResourceGroupName $resourceGroupName -ServerName $agentServerName -DatabaseName $jobDatabaseName -RequestedServiceObjectiveName "S0" -$jobDatabase -``` - -```powershell -# create a target server and sample databases - uses the same credentials -Write-Output "Creating target server..." 
-$targetServerName = Read-Host "Please enter a target server name" -$targetServerName = $targetServerName + "-" + [guid]::NewGuid() -$targetServer = New-AzSqlServer -ResourceGroupName $resourceGroupName -Location $location ` - -ServerName $targetServerName -ServerVersion "12.0" -SqlAdministratorCredentials ($adminCred) - -# set target server firewall rules to allow all Azure IPs -$targetServer | New-AzSqlServerFirewallRule -AllowAllAzureIPs -$targetServer | New-AzSqlServerFirewallRule -StartIpAddress 0.0.0.0 -EndIpAddress 255.255.255.255 -FirewallRuleName AllowAll -$targetServer - -# create sample databases to execute jobs against -$db1 = New-AzSqlDatabase -ResourceGroupName $resourceGroupName -ServerName $targetServerName -DatabaseName "database1" -$db1 -$db2 = New-AzSqlDatabase -ResourceGroupName $resourceGroupName -ServerName $targetServerName -DatabaseName "database2" -$db2 -``` - -### Create the Elastic Job agent - -An Elastic Job agent is an Azure resource for creating, running, and managing jobs. The agent executes jobs based on a schedule or as a one-time job. - -The **New-AzSqlElasticJobAgent** cmdlet requires a database in Azure SQL Database to already exist, so the *resourceGroupName*, *serverName*, and *databaseName* parameters must all point to existing resources. - -```powershell -Write-Output "Creating job agent..." -$agentName = Read-Host "Please enter a name for your new Elastic Job agent" -$jobAgent = $jobDatabase | New-AzSqlElasticJobAgent -Name $agentName -$jobAgent -``` - -### Create the job credentials - -Jobs use database scoped credentials to connect to the target databases specified by the target group upon execution and execute scripts. These database scoped credentials are also used to connect to the master database to enumerate all the databases in a server or an elastic pool, when either of these are used as the target group member type. - -The database scoped credentials must be created in the job database. 
All target databases must have a login with sufficient permissions for the job to complete successfully. - -![Elastic Jobs credentials](./media/elastic-jobs-powershell-create/job-credentials.png) - -In addition to the credentials in the image, note the addition of the **GRANT** commands in the following script. These permissions are required for the script we chose for this example job. Because the example creates a new table in the targeted databases, each target db needs the proper permissions to successfully run. - -To create the required job credentials (in the job database), run the following script: - -```powershell -# in the master database (target server) -# create the master user login, master user, and job user login -$params = @{ - 'database' = 'master' - 'serverInstance' = $targetServer.ServerName + '.database.windows.net' - 'username' = $adminLogin - 'password' = $adminPassword - 'outputSqlErrors' = $true - 'query' = 'CREATE LOGIN masteruser WITH PASSWORD=''password!123''' -} -Invoke-SqlCmd @params -$params.query = "CREATE USER masteruser FROM LOGIN masteruser" -Invoke-SqlCmd @params -$params.query = 'CREATE LOGIN jobuser WITH PASSWORD=''password!123''' -Invoke-SqlCmd @params - -# for each target database -# create the jobuser from jobuser login and check permission for script execution -$targetDatabases = @( $db1.DatabaseName, $Db2.DatabaseName ) -$createJobUserScript = "CREATE USER jobuser FROM LOGIN jobuser" -$grantAlterSchemaScript = "GRANT ALTER ON SCHEMA::dbo TO jobuser" -$grantCreateScript = "GRANT CREATE TABLE TO jobuser" - -$targetDatabases | % { - $params.database = $_ - $params.query = $createJobUserScript - Invoke-SqlCmd @params - $params.query = $grantAlterSchemaScript - Invoke-SqlCmd @params - $params.query = $grantCreateScript - Invoke-SqlCmd @params -} - -# create job credential in Job database for master user -Write-Output "Creating job credentials..." 
-$loginPasswordSecure = (ConvertTo-SecureString -String 'password!123' -AsPlainText -Force) - -$masterCred = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList "masteruser", $loginPasswordSecure -$masterCred = $jobAgent | New-AzSqlElasticJobCredential -Name "masteruser" -Credential $masterCred - -$jobCred = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList "jobuser", $loginPasswordSecure -$jobCred = $jobAgent | New-AzSqlElasticJobCredential -Name "jobuser" -Credential $jobCred -``` - -### Define the target databases to run the job against - -A [target group](job-automation-overview.md#target-group) defines the set of one or more databases a job step will execute on. - -The following snippet creates two target groups: *serverGroup*, and *serverGroupExcludingDb2*. *serverGroup* targets all databases that exist on the server at the time of execution, and *serverGroupExcludingDb2* targets all databases on the server, except *targetDb2*: - -```powershell -Write-Output "Creating test target groups..." -# create ServerGroup target group -$serverGroup = $jobAgent | New-AzSqlElasticJobTargetGroup -Name 'ServerGroup' -$serverGroup | Add-AzSqlElasticJobTarget -ServerName $targetServerName -RefreshCredentialName $masterCred.CredentialName - -# create ServerGroup with an exclusion of db2 -$serverGroupExcludingDb2 = $jobAgent | New-AzSqlElasticJobTargetGroup -Name 'ServerGroupExcludingDb2' -$serverGroupExcludingDb2 | Add-AzSqlElasticJobTarget -ServerName $targetServerName -RefreshCredentialName $masterCred.CredentialName -$serverGroupExcludingDb2 | Add-AzSqlElasticJobTarget -ServerName $targetServerName -Database $db2.DatabaseName -Exclude -``` - -### Create a job and steps - -This example defines a job and two job steps for the job to run. The first job step (*step1*) creates a new table (*Step1Table*) in every database in target group *ServerGroup*. 
The second job step (*step2*) creates a new table (*Step2Table*) in every database except for *TargetDb2*, because the target group defined previously specified to exclude it. - -```powershell -Write-Output "Creating a new job..." -$jobName = "Job1" -$job = $jobAgent | New-AzSqlElasticJob -Name $jobName -RunOnce -$job - -Write-Output "Creating job steps..." -$sqlText1 = "IF NOT EXISTS (SELECT * FROM sys.tables WHERE object_id = object_id('Step1Table')) CREATE TABLE [dbo].[Step1Table]([TestId] [int] NOT NULL);" -$sqlText2 = "IF NOT EXISTS (SELECT * FROM sys.tables WHERE object_id = object_id('Step2Table')) CREATE TABLE [dbo].[Step2Table]([TestId] [int] NOT NULL);" - -$job | Add-AzSqlElasticJobStep -Name "step1" -TargetGroupName $serverGroup.TargetGroupName -CredentialName $jobCred.CredentialName -CommandText $sqlText1 -$job | Add-AzSqlElasticJobStep -Name "step2" -TargetGroupName $serverGroupExcludingDb2.TargetGroupName -CredentialName $jobCred.CredentialName -CommandText $sqlText2 -``` - -### Run the job - -To start the job immediately, run the following command: - -```powershell -Write-Output "Start a new execution of the job..." -$jobExecution = $job | Start-AzSqlElasticJob -$jobExecution -``` - -After successful completion you should see two new tables in TargetDb1, and only one new table in TargetDb2: - - ![new tables verification in SSMS](./media/elastic-jobs-powershell-create/job-execution-verification.png) - -You can also schedule the job to run later. 
To schedule a job to run at a specific time, run the following command: - -```powershell -# run every hour starting from now -$job | Set-AzSqlElasticJob -IntervalType Hour -IntervalCount 1 -StartTime (Get-Date) -Enable -``` - -### Monitor status of job executions - -The following snippets get job execution details: - -```powershell -# get the latest 10 executions run -$jobAgent | Get-AzSqlElasticJobExecution -Count 10 - -# get the job step execution details -$jobExecution | Get-AzSqlElasticJobStepExecution - -# get the job target execution details -$jobExecution | Get-AzSqlElasticJobTargetExecution -Count 2 -``` - -The following table lists the possible job execution states: - -|State|Description| -|:---|:---| -|**Created** | The job execution was just created and is not yet in progress.| -|**InProgress** | The job execution is currently in progress.| -|**WaitingForRetry** | The job execution wasn't able to complete its action and is waiting to retry.| -|**Succeeded** | The job execution has completed successfully.| -|**SucceededWithSkipped** | The job execution has completed successfully, but some of its children were skipped.| -|**Failed** | The job execution has failed and exhausted its retries.| -|**TimedOut** | The job execution has timed out.| -|**Canceled** | The job execution was canceled.| -|**Skipped** | The job execution was skipped because another execution of the same job step was already running on the same target.| -|**WaitingForChildJobExecutions** | The job execution is waiting for its child executions to complete.| - -## Clean up resources - -Delete the Azure resources created in this tutorial by deleting the resource group. - -> [!TIP] -> If you plan to continue to work with these jobs, do not clean up the resources created in this article. - -```powershell -Remove-AzResourceGroup -ResourceGroupName $resourceGroupName -``` - -## Next steps - -In this tutorial, you ran a Transact-SQL script against a set of databases. 
You learned how to do the following tasks: - -> [!div class="checklist"] -> * Create an Elastic Job agent -> * Create job credentials so that jobs can execute scripts on its targets -> * Define the targets (servers, elastic pools, databases, shard maps) you want to run the job against -> * Create database scoped credentials in the target databases so the agent can connect and execute jobs -> * Create a job -> * Add a job step to the job -> * Start an execution of the job -> * Monitor the job - -> [!div class="nextstepaction"] -> [Manage Elastic Jobs using Transact-SQL](elastic-jobs-tsql-create-manage.md) diff --git a/articles/azure-sql/database/elastic-jobs-tsql-create-manage.md b/articles/azure-sql/database/elastic-jobs-tsql-create-manage.md deleted file mode 100644 index 24696c1cc3880..0000000000000 --- a/articles/azure-sql/database/elastic-jobs-tsql-create-manage.md +++ /dev/null @@ -1,1352 +0,0 @@ ---- -title: Create and manage Elastic Database Jobs (preview) with Transact-SQL (T-SQL) -description: Run scripts across many databases with Elastic Database Job agent using Transact-SQL (T-SQL). -services: sql-database -ms.service: sql-database -ms.subservice: elastic-jobs -ms.custom: seo-lt-2019, sqldbrb=1 -ms.devlang: -dev_langs: - - "TSQL" -ms.topic: how-to -author: srinia -ms.author: srinia -ms.reviewer: kendralittle, mathoma -ms.date: 02/01/2021 ---- -# Use Transact-SQL (T-SQL) to create and manage Elastic Database Jobs (preview) -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article provides many example scenarios to get started working with Elastic Jobs using T-SQL. - -The examples use the [stored procedures](#job-stored-procedures) and [views](#job-views) available in the [*job database*](job-automation-overview.md#elastic-job-database). - -Transact-SQL (T-SQL) is used to create, configure, execute, and manage jobs. 
Creating the Elastic Job agent is not supported in T-SQL, so you must first create an *Elastic Job agent* using the portal, or [PowerShell](elastic-jobs-powershell-create.md#create-the-elastic-job-agent). - -## Create a credential for job execution - -The credential is used to connect to your target databases for script execution. The credential needs appropriate permissions, on the databases specified by the target group, to successfully execute the script. When using a [logical SQL server](logical-servers.md) and/or pool target group member, it is highly suggested to create a credential for use to refresh the credential prior to expansion of the server and/or pool at time of job execution. The database scoped credential is created on the job agent database. The same credential must be used to *Create a Login* and *Create a User from Login to grant the Login Database Permissions* on the target databases. - -```sql ---Connect to the new job database specified when creating the Elastic Job agent - --- Create a database master key if one does not already exist, using your own password. -CREATE MASTER KEY ENCRYPTION BY PASSWORD=''; - --- Create two database scoped credentials. --- The credential to connect to the Azure SQL logical server, to execute jobs -CREATE DATABASE SCOPED CREDENTIAL job_credential WITH IDENTITY = 'job_credential', - SECRET = ''; -GO --- The credential to connect to the Azure SQL logical server, to refresh the database metadata in server -CREATE DATABASE SCOPED CREDENTIAL refresh_credential WITH IDENTITY = 'refresh_credential', - SECRET = ''; -GO -``` - -## Create a target group (servers) - -The following example shows how to execute a job against all databases in a server. 
-Connect to the [*job database*](job-automation-overview.md#elastic-job-database) and run the following command: - -```sql --- Connect to the job database specified when creating the job agent - --- Add a target group containing server(s) -EXEC jobs.sp_add_target_group 'ServerGroup1'; - --- Add a server target member -EXEC jobs.sp_add_target_group_member -@target_group_name = 'ServerGroup1', -@target_type = 'SqlServer', -@refresh_credential_name = 'refresh_credential', --credential required to refresh the databases in a server -@server_name = 'server1.database.windows.net'; - ---View the recently created target group and target group members -SELECT * FROM jobs.target_groups WHERE target_group_name='ServerGroup1'; -SELECT * FROM jobs.target_group_members WHERE target_group_name='ServerGroup1'; -``` - -## Exclude an individual database - -The following example shows how to execute a job against all databases in a server, except for the database named *MappingDB*. -Connect to the [*job database*](job-automation-overview.md#elastic-job-database) and run the following command: - -```sql ---Connect to the job database specified when creating the job agent - --- Add a target group containing server(s) -EXEC [jobs].sp_add_target_group N'ServerGroup'; -GO - --- Add a server target member -EXEC [jobs].sp_add_target_group_member -@target_group_name = N'ServerGroup', -@target_type = N'SqlServer', -@refresh_credential_name = N'refresh_credential', --credential required to refresh the databases in a server -@server_name = N'London.database.windows.net'; -GO - --- Add a server target member -EXEC [jobs].sp_add_target_group_member -@target_group_name = N'ServerGroup', -@target_type = N'SqlServer', -@refresh_credential_name = N'refresh_credential', --credential required to refresh the databases in a server -@server_name = 'server2.database.windows.net'; -GO - ---Exclude a database target member from the server target group -EXEC [jobs].sp_add_target_group_member 
-@target_group_name = N'ServerGroup', -@membership_type = N'Exclude', -@target_type = N'SqlDatabase', -@server_name = N'server1.database.windows.net', -@database_name = N'MappingDB'; -GO - ---View the recently created target group and target group members -SELECT * FROM [jobs].target_groups WHERE target_group_name = N'ServerGroup'; -SELECT * FROM [jobs].target_group_members WHERE target_group_name = N'ServerGroup'; -``` - -## Create a target group (pools) - -The following example shows how to target all the databases in one or more elastic pools. -Connect to the [*job database*](job-automation-overview.md#elastic-job-database) and run the following command: - -```sql ---Connect to the job database specified when creating the job agent - --- Add a target group containing pool(s) -EXEC jobs.sp_add_target_group 'PoolGroup'; - --- Add an elastic pool(s) target member -EXEC jobs.sp_add_target_group_member -@target_group_name = 'PoolGroup', -@target_type = 'SqlElasticPool', -@refresh_credential_name = 'refresh_credential', --credential required to refresh the databases in a server -@server_name = 'server1.database.windows.net', -@elastic_pool_name = 'ElasticPool-1'; - --- View the recently created target group and target group members -SELECT * FROM jobs.target_groups WHERE target_group_name = N'PoolGroup'; -SELECT * FROM jobs.target_group_members WHERE target_group_name = N'PoolGroup'; -``` - -## Deploy new schema to many databases - -The following example shows how to deploy new schema to all databases. 
-Connect to the [*job database*](job-automation-overview.md#elastic-job-database) and run the following command: - -```sql ---Connect to the job database specified when creating the job agent - ---Add job for create table -EXEC jobs.sp_add_job @job_name = 'CreateTableTest', @description = 'Create Table Test'; - --- Add job step for create table -EXEC jobs.sp_add_jobstep @job_name = 'CreateTableTest', -@command = N'IF NOT EXISTS (SELECT * FROM sys.tables WHERE object_id = object_id(''Test'')) -CREATE TABLE [dbo].[Test]([TestId] [int] NOT NULL);', -@credential_name = 'job_credential', -@target_group_name = 'PoolGroup'; -``` - -## Data collection using built-in parameters - -In many data collection scenarios, it can be useful to include some of these scripting variables to help post-process the results of the job. - -- $(job_name) -- $(job_id) -- $(job_version) -- $(step_id) -- $(step_name) -- $(job_execution_id) -- $(job_execution_create_time) -- $(target_group_name) - -For example, to group all results from the same job execution together, use the *$(job_execution_id)* as shown in the following command: - -```sql -@command= N' SELECT DB_NAME() DatabaseName, $(job_execution_id) AS job_execution_id, * FROM sys.dm_db_resource_stats WHERE end_time > DATEADD(mi, -20, GETDATE());' -``` - -## Monitor database performance - -The following example creates a new job to collect performance data from multiple databases. - -By default, the job agent will create the output table to store returned results. Therefore, the database principal associated with the output credential must at a minimum have the following permissions: `CREATE TABLE` on the database, `ALTER`, `SELECT`, `INSERT`, `DELETE` on the output table or its schema, and `SELECT` on the [sys.indexes](/sql/relational-databases/system-catalog-views/sys-indexes-transact-sql) catalog view. - -If you want to manually create the table ahead of time, then it needs to have the following properties: - -1. 
Columns with the correct name and data types for the result set. -2. Additional column for internal_execution_id with the data type of uniqueidentifier. -3. A nonclustered index named `IX__Internal_Execution_ID` on the internal_execution_id column. -4. All permissions listed above except for `CREATE TABLE` permission on the database. - -Connect to the [*job database*](job-automation-overview.md#elastic-job-database) and run the following commands: - -```sql ---Connect to the job database specified when creating the job agent - --- Add a job to collect perf results -EXEC jobs.sp_add_job @job_name ='ResultsJob', @description='Collection Performance data from all customers' - --- Add a job step w/ schedule to collect results -EXEC jobs.sp_add_jobstep -@job_name = 'ResultsJob', -@command = N' SELECT DB_NAME() DatabaseName, $(job_execution_id) AS job_execution_id, * FROM sys.dm_db_resource_stats WHERE end_time > DATEADD(mi, -20, GETDATE());', -@credential_name = 'job_credential', -@target_group_name = 'PoolGroup', -@output_type = 'SqlDatabase', -@output_credential_name = 'job_credential', -@output_server_name = 'server1.database.windows.net', -@output_database_name = '', -@output_table_name = ''; - ---Create a job to monitor pool performance - ---Connect to the job database specified when creating the job agent - --- Add a target group containing Elastic Job database -EXEC jobs.sp_add_target_group 'ElasticJobGroup'; - --- Add a server target member -EXEC jobs.sp_add_target_group_member -@target_group_name = 'ElasticJobGroup', -@target_type = 'SqlDatabase', -@server_name = 'server1.database.windows.net', -@database_name = 'master'; - --- Add a job to collect perf results -EXEC jobs.sp_add_job -@job_name = 'ResultsPoolsJob', -@description = 'Demo: Collection Performance data from all pools', -@schedule_interval_type = 'Minutes', -@schedule_interval_count = 15; - --- Add a job step w/ schedule to collect results -EXEC jobs.sp_add_jobstep -@job_name='ResultsPoolsJob', 
-@command=N'declare @now datetime -DECLARE @startTime datetime -DECLARE @endTime datetime -DECLARE @poolLagMinutes datetime -DECLARE @poolStartTime datetime -DECLARE @poolEndTime datetime -SELECT @now = getutcdate () -SELECT @startTime = dateadd(minute, -15, @now) -SELECT @endTime = @now -SELECT @poolStartTime = dateadd(minute, -30, @startTime) -SELECT @poolEndTime = dateadd(minute, -30, @endTime) - -SELECT elastic_pool_name , end_time, elastic_pool_dtu_limit, avg_cpu_percent, avg_data_io_percent, avg_log_write_percent, max_worker_percent, max_session_percent, - avg_storage_percent, elastic_pool_storage_limit_mb FROM sys.elastic_pool_resource_stats - WHERE end_time > @poolStartTime and end_time <= @poolEndTime; -', -@credential_name = 'job_credential', -@target_group_name = 'ElasticJobGroup', -@output_type = 'SqlDatabase', -@output_credential_name = 'job_credential', -@output_server_name = 'server1.database.windows.net', -@output_database_name = 'resultsdb', -@output_table_name = 'resultstable'; -``` - -## View job definitions - -The following example shows how to view current job definitions. -Connect to the [*job database*](job-automation-overview.md#elastic-job-database) and run the following command: - -```sql ---Connect to the job database specified when creating the job agent - --- View all jobs -SELECT * FROM jobs.jobs; - --- View the steps of the current version of all jobs -SELECT js.* FROM jobs.jobsteps js -JOIN jobs.jobs j - ON j.job_id = js.job_id AND j.job_version = js.job_version; - --- View the steps of all versions of all jobs -SELECT * FROM jobs.jobsteps; -``` - -## Begin unplanned execution of a job - -The following example shows how to start a job immediately. 
-Connect to the [*job database*](job-automation-overview.md#elastic-job-database) and run the following command: - -```sql ---Connect to the job database specified when creating the job agent - --- Execute the latest version of a job -EXEC jobs.sp_start_job 'CreateTableTest'; - --- Execute the latest version of a job and receive the execution id -declare @je uniqueidentifier; -exec jobs.sp_start_job 'CreateTableTest', @job_execution_id = @je output; -select @je; - -select * from jobs.job_executions where job_execution_id = @je; - --- Execute a specific version of a job (e.g. version 1) -exec jobs.sp_start_job 'CreateTableTest', 1; -``` - -## Schedule execution of a job - -The following example shows how to schedule a job for future execution. -Connect to the [*job database*](job-automation-overview.md#elastic-job-database) and run the following command: - -```sql ---Connect to the job database specified when creating the job agent - -EXEC jobs.sp_update_job -@job_name = 'ResultsJob', -@enabled=1, -@schedule_interval_type = 'Minutes', -@schedule_interval_count = 15; -``` - -## Monitor job execution status - -The following example shows how to view execution status details for all jobs. 
-Connect to the [*job database*](job-automation-overview.md#elastic-job-database) and run the following command: - -```sql ---Connect to the job database specified when creating the job agent - ---View top-level execution status for the job named 'ResultsPoolJob' -SELECT * FROM jobs.job_executions -WHERE job_name = 'ResultsPoolsJob' and step_id IS NULL -ORDER BY start_time DESC; - ---View all top-level execution status for all jobs -SELECT * FROM jobs.job_executions WHERE step_id IS NULL -ORDER BY start_time DESC; - ---View all execution statuses for job named 'ResultsPoolsJob' -SELECT * FROM jobs.job_executions -WHERE job_name = 'ResultsPoolsJob' -ORDER BY start_time DESC; - --- View all active executions -SELECT * FROM jobs.job_executions -WHERE is_active = 1 -ORDER BY start_time DESC; -``` - -## Cancel a job - -The following example shows how to cancel a job. -Connect to the [*job database*](job-automation-overview.md#elastic-job-database) and run the following command: - -```sql ---Connect to the job database specified when creating the job agent - --- View all active executions to determine job execution id -SELECT * FROM jobs.job_executions -WHERE is_active = 1 AND job_name = 'ResultPoolsJob' -ORDER BY start_time DESC; -GO - --- Cancel job execution with the specified job execution id -EXEC jobs.sp_stop_job '01234567-89ab-cdef-0123-456789abcdef'; -``` - -## Delete old job history - -The following example shows how to delete job history prior to a specific date. 
-Connect to the [*job database*](job-automation-overview.md#elastic-job-database) and run the following command: - -```sql ---Connect to the job database specified when creating the job agent - --- Delete history of a specific job's executions older than the specified date -EXEC jobs.sp_purge_jobhistory @job_name='ResultPoolsJob', @oldest_date='2016-07-01 00:00:00'; - ---Note: job history is automatically deleted if it is >45 days old -``` - -## Delete a job and all its job history - -The following example shows how to delete a job and all related job history. -Connect to the [*job database*](job-automation-overview.md#elastic-job-database) and run the following command: - -```sql ---Connect to the job database specified when creating the job agent - -EXEC jobs.sp_delete_job @job_name='ResultsPoolsJob'; - ---Note: job history is automatically deleted if it is >45 days old -``` - -## Job stored procedures - -The following stored procedures are in the [jobs database](job-automation-overview.md#elastic-job-database). - -|Stored procedure |Description | -|---------|---------| -|[sp_add_job](#sp_add_job) | Adds a new job. | -|[sp_update_job](#sp_update_job) | Updates an existing job. | -|[sp_delete_job](#sp_delete_job) | Deletes an existing job. | -|[sp_add_jobstep](#sp_add_jobstep) | Adds a step to a job. | -|[sp_update_jobstep](#sp_update_jobstep) | Updates a job step. | -|[sp_delete_jobstep](#sp_delete_jobstep) | Deletes a job step. | -|[sp_start_job](#sp_start_job) | Starts executing a job. | -|[sp_stop_job](#sp_stop_job) | Stops a job execution. | -|[sp_add_target_group](#sp_add_target_group) | Adds a target group. | -|[sp_delete_target_group](#sp_delete_target_group) | Deletes a target group. | -|[sp_add_target_group_member](#sp_add_target_group_member) | Adds a database or group of databases to a target group. | -|[sp_delete_target_group_member](#sp_delete_target_group_member) | Removes a target group member from a target group. 
| -|[sp_purge_jobhistory](#sp_purge_jobhistory) | Removes the history records for a job. | - -### sp_add_job - -Adds a new job. - -#### Syntax - -```syntaxsql -[jobs].sp_add_job [ @job_name = ] 'job_name' - [ , [ @description = ] 'description' ] - [ , [ @enabled = ] enabled ] - [ , [ @schedule_interval_type = ] schedule_interval_type ] - [ , [ @schedule_interval_count = ] schedule_interval_count ] - [ , [ @schedule_start_time = ] schedule_start_time ] - [ , [ @schedule_end_time = ] schedule_end_time ] - [ , [ @job_id = ] job_id OUTPUT ] -``` - -#### Arguments - -[ **\@job_name =** ] 'job_name' -The name of the job. The name must be unique and cannot contain the percent (%) character. job_name is nvarchar(128), with no default. - -[ **\@description =** ] 'description' -The description of the job. description is nvarchar(512), with a default of NULL. If description is omitted, an empty string is used. - -[ **\@enabled =** ] enabled -Whether the job's schedule is enabled. Enabled is bit, with a default of 0 (disabled). If 0, the job is not enabled and does not run according to its schedule; however, it can be run manually. If 1, the job will run according to its schedule, and can also be run manually. - -[ **\@schedule_interval_type =**] schedule_interval_type -Value indicates when the job is to be executed. schedule_interval_type is nvarchar(50), with a default of Once, and can be one of the following values: - -- 'Once', -- 'Minutes', -- 'Hours', -- 'Days', -- 'Weeks', -- 'Months' - -[ **\@schedule_interval_count =** ] schedule_interval_count -Number of schedule_interval_count periods to occur between each execution of the job. schedule_interval_count is int, with a default of 1. The value must be greater than or equal to 1. - -[ **\@schedule_start_time =** ] schedule_start_time -Date on which job execution can begin. schedule_start_time is DATETIME2, with the default of 0001-01-01 00:00:00.0000000. 
- -[ **\@schedule_end_time =** ] schedule_end_time -Date on which job execution can stop. schedule_end_time is DATETIME2, with the default of 9999-12-31 11:59:59.0000000. - -[ **\@job_id =** ] job_id OUTPUT -The job identification number assigned to the job if created successfully. job_id is an output variable of type uniqueidentifier. - -#### Return Code Values - -0 (success) or 1 (failure) - -#### Remarks - -sp_add_job must be run from the job agent database specified when creating the job agent. -After sp_add_job has been executed to add a job, sp_add_jobstep can be used to add steps that perform the activities for the job. The job's initial version number is 0, which will be incremented to 1 when the first step is added. - -#### Permissions - -By default, members of the sysadmin fixed server role can execute this stored procedure. To restrict a user to just be able to monitor jobs, you can grant the user to be part of the following database role in the job agent database specified when creating the job agent: - -- jobs_reader - -For details about the permissions of these roles, see the Permission section in this document. Only members of sysadmin can use this stored procedure to edit the attributes of jobs that are owned by other users. - -### sp_update_job - -Updates an existing job. - -#### Syntax - -```syntaxsql -[jobs].sp_update_job [ @job_name = ] 'job_name' - [ , [ @new_name = ] 'new_name' ] - [ , [ @description = ] 'description' ] - [ , [ @enabled = ] enabled ] - [ , [ @schedule_interval_type = ] schedule_interval_type ] - [ , [ @schedule_interval_count = ] schedule_interval_count ] - [ , [ @schedule_start_time = ] schedule_start_time ] - [ , [ @schedule_end_time = ] schedule_end_time ] -``` - -#### Arguments - -[ **\@job_name =** ] 'job_name' -The name of the job to be updated. job_name is nvarchar(128). - -[ **\@new_name =** ] 'new_name' -The new name of the job. new_name is nvarchar(128). 
- -[ **\@description =** ] 'description' -The description of the job. description is nvarchar(512). - -[ **\@enabled =** ] enabled -Specifies whether the job's schedule is enabled (1) or not enabled (0). Enabled is bit. - -[ **\@schedule_interval_type=** ] schedule_interval_type -Value indicates when the job is to be executed. schedule_interval_type is nvarchar(50) and can be one of the following values: - -- 'Once', -- 'Minutes', -- 'Hours', -- 'Days', -- 'Weeks', -- 'Months' - -[ **\@schedule_interval_count=** ] schedule_interval_count -Number of schedule_interval_count periods to occur between each execution of the job. schedule_interval_count is int, with a default of 1. The value must be greater than or equal to 1. - -[ **\@schedule_start_time=** ] schedule_start_time -Date on which job execution can begin. schedule_start_time is DATETIME2, with the default of 0001-01-01 00:00:00.0000000. - -[ **\@schedule_end_time=** ] schedule_end_time -Date on which job execution can stop. schedule_end_time is DATETIME2, with the default of 9999-12-31 11:59:59.0000000. - -#### Return Code Values - -0 (success) or 1 (failure) - -#### Remarks - -After sp_add_job has been executed to add a job, sp_add_jobstep can be used to add steps that perform the activities for the job. The job's initial version number is 0, which will be incremented to 1 when the first step is added. - -#### Permissions - -By default, members of the sysadmin fixed server role can execute this stored procedure. They restrict a user to just be able to monitor jobs, you can grant the user to be part of the following database role in the job agent database specified when creating the job agent: - -- jobs_reader - -For details about the permissions of these roles, see the Permission section in this document. Only members of sysadmin can use this stored procedure to edit the attributes of jobs that are owned by other users. - -### sp_delete_job - -Deletes an existing job. 
- -#### Syntax - -```syntaxsql -[jobs].sp_delete_job [ @job_name = ] 'job_name' - [ , [ @force = ] force ] -``` - -#### Arguments - -[ **\@job_name =** ] 'job_name' -The name of the job to be deleted. job_name is nvarchar(128). - -[ **\@force =** ] force -Specifies whether to delete if the job has any executions in progress and cancel all in-progress executions (1) or fail if any job executions are in progress (0). force is bit. - -#### Return Code Values - -0 (success) or 1 (failure) - -#### Remarks - -Job history is automatically deleted when a job is deleted. - -#### Permissions - -By default, members of the sysadmin fixed server role can execute this stored procedure. They restrict a user to just be able to monitor jobs, you can grant the user to be part of the following database role in the job agent database specified when creating the job agent: - -- jobs_reader - -For details about the permissions of these roles, see the Permission section in this document. Only members of sysadmin can use this stored procedure to edit the attributes of jobs that are owned by other users. - -### sp_add_jobstep - -Adds a step to a job. 
- -#### Syntax - -```syntaxsql -[jobs].sp_add_jobstep [ @job_name = ] 'job_name' - [ , [ @step_id = ] step_id ] - [ , [ @step_name = ] step_name ] - [ , [ @command_type = ] 'command_type' ] - [ , [ @command_source = ] 'command_source' ] - , [ @command = ] 'command' - , [ @credential_name = ] 'credential_name' - , [ @target_group_name = ] 'target_group_name' - [ , [ @initial_retry_interval_seconds = ] initial_retry_interval_seconds ] - [ , [ @maximum_retry_interval_seconds = ] maximum_retry_interval_seconds ] - [ , [ @retry_interval_backoff_multiplier = ] retry_interval_backoff_multiplier ] - [ , [ @retry_attempts = ] retry_attempts ] - [ , [ @step_timeout_seconds = ] step_timeout_seconds ] - [ , [ @output_type = ] 'output_type' ] - [ , [ @output_credential_name = ] 'output_credential_name' ] - [ , [ @output_subscription_id = ] 'output_subscription_id' ] - [ , [ @output_resource_group_name = ] 'output_resource_group_name' ] - [ , [ @output_server_name = ] 'output_server_name' ] - [ , [ @output_database_name = ] 'output_database_name' ] - [ , [ @output_schema_name = ] 'output_schema_name' ] - [ , [ @output_table_name = ] 'output_table_name' ] - [ , [ @job_version = ] job_version OUTPUT ] - [ , [ @max_parallelism = ] max_parallelism ] -``` - -#### Arguments - -[ **\@job_name =** ] 'job_name' -The name of the job to which to add the step. job_name is nvarchar(128). - -[ **\@step_id =** ] step_id -The sequence identification number for the job step. Step identification numbers start at 1 and increment without gaps. If an existing step already has this ID, then that step and all following steps will have their ID's incremented so that this new step can be inserted into the sequence. If not specified, the step_id will be automatically assigned to the last in the sequence of steps. step_id is an int. - -[ **\@step_name =** ] step_name -The name of the step. Must be specified, except for the first step of a job that (for convenience) has a default name of 'JobStep'. 
step_name is nvarchar(128). - -[ **\@command_type =** ] 'command_type' -The type of command that is executed by this jobstep. command_type is nvarchar(50), with a default value of TSql, meaning that the value of the @command_type parameter is a T-SQL script. - -If specified, the value must be TSql. - -[ **\@command_source =** ] 'command_source' -The type of location where the command is stored. command_source is nvarchar(50), with a default value of Inline, meaning that the value of the @command_source parameter is the literal text of the command. - -If specified, the value must be Inline. - -[ **\@command =** ] 'command' -The command must be valid T-SQL script and is then executed by this job step. command is nvarchar(max), with a default of NULL. - -[ **\@credential_name =** ] 'credential_name' -The name of the database scoped credential stored in this job control database that is used to connect to each of the target databases within the target group when this step is executed. credential_name is nvarchar(128). - -[ **\@target_group_name =** ] 'target-group_name' -The name of the target group that contains the target databases that the job step will be executed on. target_group_name is nvarchar(128). - -[ **\@initial_retry_interval_seconds =** ] initial_retry_interval_seconds -The delay before the first retry attempt, if the job step fails on the initial execution attempt. initial_retry_interval_seconds is int, with default value of 1. - -[ **\@maximum_retry_interval_seconds =** ] maximum_retry_interval_seconds -The maximum delay between retry attempts. If the delay between retries would grow larger than this value, it is capped to this value instead. maximum_retry_interval_seconds is int, with default value of 120. - -[ **\@retry_interval_backoff_multiplier =** ] retry_interval_backoff_multiplier -The multiplier to apply to the retry delay if multiple job step execution attempts fail. 
For example, if the first retry had a delay of 5 second and the backoff multiplier is 2.0, then the second retry will have a delay of 10 seconds and the third retry will have a delay of 20 seconds. retry_interval_backoff_multiplier is real, with default value of 2.0. - -[ **\@retry_attempts =** ] retry_attempts -The number of times to retry execution if the initial attempt fails. For example, if the retry_attempts value is 10, then there will be 1 initial attempt and 10 retry attempts, giving a total of 11 attempts. If the final retry attempt fails, then the job execution will terminate with a lifecycle of Failed. retry_attempts is int, with default value of 10. - -[ **\@step_timeout_seconds =** ] step_timeout_seconds -The maximum amount of time allowed for the step to execute. If this time is exceeded, then the job execution will terminate with a lifecycle of TimedOut. step_timeout_seconds is int, with default value of 43,200 seconds (12 hours). - -[ **\@output_type =** ] 'output_type' -If not null, the type of destination that the command's first result set is written to. output_type is nvarchar(50), with a default of NULL. - -If specified, the value must be SqlDatabase. - -[ **\@output_credential_name =** ] 'output_credential_name' -If not null, the name of the database scoped credential that is used to connect to the output destination database. Must be specified if output_type equals SqlDatabase. output_credential_name is nvarchar(128), with a default value of NULL. - -[ **\@output_subscription_id =** ] 'output_subscription_id' -Needs description. - -[ **\@output_resource_group_name =** ] 'output_resource_group_name' -Needs description. - -[ **\@output_server_name =** ] 'output_server_name' -If not null, the fully qualified DNS name of the server that contains the output destination database. Must be specified if output_type equals SqlDatabase. output_server_name is nvarchar(256), with a default of NULL. 
- -[ **\@output_database_name =** ] 'output_database_name' -If not null, the name of the database that contains the output destination table. Must be specified if output_type equals SqlDatabase. output_database_name is nvarchar(128), with a default of NULL. - -[ **\@output_schema_name =** ] 'output_schema_name' -If not null, the name of the SQL schema that contains the output destination table. If output_type equals SqlDatabase, the default value is dbo. output_schema_name is nvarchar(128). - -[ **\@output_table_name =** ] 'output_table_name' -If not null, the name of the table that the command's first result set will be written to. If the table doesn't already exist, it will be created based on the schema of the returning result-set. Must be specified if output_type equals SqlDatabase. output_table_name is nvarchar(128), with a default value of NULL. - -[ **\@job_version =** ] job_version OUTPUT -Output parameter that will be assigned the new job version number. job_version is int. - -[ **\@max_parallelism =** ] max_parallelism OUTPUT -The maximum level of parallelism per elastic pool. If set, then the job step will be restricted to only run on a maximum of that many databases per elastic pool. This applies to each elastic pool that is either directly included in the target group or is inside a server that is included in the target group. max_parallelism is int. - -#### Return Code Values - -0 (success) or 1 (failure) - -#### Remarks - -When sp_add_jobstep succeeds, the job's current version number is incremented. The next time the job is executed, the new version will be used. If the job is currently executing, that execution will not contain the new step. - -#### Permissions - -By default, members of the sysadmin fixed server role can execute this stored procedure. 
They restrict a user to just be able to monitor jobs, you can grant the user to be part of the following database role in the job agent database specified when creating the job agent: - -- jobs_reader - -For details about the permissions of these roles, see the Permission section in this document. Only members of sysadmin can use this stored procedure to edit the attributes of jobs that are owned by other users. - -### sp_update_jobstep - -Updates a job step. - -#### Syntax - -```syntaxsql -[jobs].sp_update_jobstep [ @job_name = ] 'job_name' - [ , [ @step_id = ] step_id ] - [ , [ @step_name = ] 'step_name' ] - [ , [ @new_id = ] new_id ] - [ , [ @new_name = ] 'new_name' ] - [ , [ @command_type = ] 'command_type' ] - [ , [ @command_source = ] 'command_source' ] - , [ @command = ] 'command' - , [ @credential_name = ] 'credential_name' - , [ @target_group_name = ] 'target_group_name' - [ , [ @initial_retry_interval_seconds = ] initial_retry_interval_seconds ] - [ , [ @maximum_retry_interval_seconds = ] maximum_retry_interval_seconds ] - [ , [ @retry_interval_backoff_multiplier = ] retry_interval_backoff_multiplier ] - [ , [ @retry_attempts = ] retry_attempts ] - [ , [ @step_timeout_seconds = ] step_timeout_seconds ] - [ , [ @output_type = ] 'output_type' ] - [ , [ @output_credential_name = ] 'output_credential_name' ] - [ , [ @output_server_name = ] 'output_server_name' ] - [ , [ @output_database_name = ] 'output_database_name' ] - [ , [ @output_schema_name = ] 'output_schema_name' ] - [ , [ @output_table_name = ] 'output_table_name' ] - [ , [ @job_version = ] job_version OUTPUT ] - [ , [ @max_parallelism = ] max_parallelism ] -``` - -#### Arguments - -[ **\@job_name =** ] 'job_name' -The name of the job to which the step belongs. job_name is nvarchar(128). - -[ **\@step_id =** ] step_id -The identification number for the job step to be modified. Either step_id or step_name must be specified. step_id is an int. 
- -[ **\@step_name =** ] 'step_name' -The name of the step to be modified. Either step_id or step_name must be specified. step_name is nvarchar(128). - -[ **\@new_id =** ] new_id -The new sequence identification number for the job step. Step identification numbers start at 1 and increment without gaps. If a step is reordered, then other steps will be automatically renumbered. - -[ **\@new_name =** ] 'new_name' -The new name of the step. new_name is nvarchar(128). - -[ **\@command_type =** ] 'command_type' -The type of command that is executed by this jobstep. command_type is nvarchar(50), with a default value of TSql, meaning that the value of the @command_type parameter is a T-SQL script. - -If specified, the value must be TSql. - -[ **\@command_source =** ] 'command_source' -The type of location where the command is stored. command_source is nvarchar(50), with a default value of Inline, meaning that the value of the @command_source parameter is the literal text of the command. - -If specified, the value must be Inline. - -[ **\@command =** ] 'command' -The command(s) must be valid T-SQL script and is then executed by this job step. command is nvarchar(max), with a default of NULL. - -[ **\@credential_name =** ] 'credential_name' -The name of the database scoped credential stored in this job control database that is used to connect to each of the target databases within the target group when this step is executed. credential_name is nvarchar(128). - -[ **\@target_group_name =** ] 'target-group_name' -The name of the target group that contains the target databases that the job step will be executed on. target_group_name is nvarchar(128). - -[ **\@initial_retry_interval_seconds =** ] initial_retry_interval_seconds -The delay before the first retry attempt, if the job step fails on the initial execution attempt. initial_retry_interval_seconds is int, with default value of 1. 
- -[ **\@maximum_retry_interval_seconds =** ] maximum_retry_interval_seconds -The maximum delay between retry attempts. If the delay between retries would grow larger than this value, it is capped to this value instead. maximum_retry_interval_seconds is int, with default value of 120. - -[ **\@retry_interval_backoff_multiplier =** ] retry_interval_backoff_multiplier -The multiplier to apply to the retry delay if multiple job step execution attempts fail. For example, if the first retry had a delay of 5 second and the backoff multiplier is 2.0, then the second retry will have a delay of 10 seconds and the third retry will have a delay of 20 seconds. retry_interval_backoff_multiplier is real, with default value of 2.0. - -[ **\@retry_attempts =** ] retry_attempts -The number of times to retry execution if the initial attempt fails. For example, if the retry_attempts value is 10, then there will be 1 initial attempt and 10 retry attempts, giving a total of 11 attempts. If the final retry attempt fails, then the job execution will terminate with a lifecycle of Failed. retry_attempts is int, with default value of 10. - -[ **\@step_timeout_seconds =** ] step_timeout_seconds -The maximum amount of time allowed for the step to execute. If this time is exceeded, then the job execution will terminate with a lifecycle of TimedOut. step_timeout_seconds is int, with default value of 43,200 seconds (12 hours). - -[ **\@output_type =** ] 'output_type' -If not null, the type of destination that the command's first result set is written to. To reset the value of output_type back to NULL, set this parameter's value to '' (empty string). output_type is nvarchar(50), with a default of NULL. - -If specified, the value must be SqlDatabase. - -[ **\@output_credential_name =** ] 'output_credential_name' -If not null, the name of the database scoped credential that is used to connect to the output destination database. Must be specified if output_type equals SqlDatabase. 
To reset the value of output_credential_name back to NULL, set this parameter's value to '' (empty string). output_credential_name is nvarchar(128), with a default value of NULL. - -[ **\@output_server_name =** ] 'output_server_name' -If not null, the fully qualified DNS name of the server that contains the output destination database. Must be specified if output_type equals SqlDatabase. To reset the value of output_server_name back to NULL, set this parameter's value to '' (empty string). output_server_name is nvarchar(256), with a default of NULL. - -[ **\@output_database_name =** ] 'output_database_name' -If not null, the name of the database that contains the output destination table. Must be specified if output_type equals SqlDatabase. To reset the value of output_database_name back to NULL, set this parameter's value to '' (empty string). output_database_name is nvarchar(128), with a default of NULL. - -[ **\@output_schema_name =** ] 'output_schema_name' -If not null, the name of the SQL schema that contains the output destination table. If output_type equals SqlDatabase, the default value is dbo. To reset the value of output_schema_name back to NULL, set this parameter's value to '' (empty string). output_schema_name is nvarchar(128). - -[ **\@output_table_name =** ] 'output_table_name' -If not null, the name of the table that the command's first result set will be written to. If the table doesn't already exist, it will be created based on the schema of the returning result-set. Must be specified if output_type equals SqlDatabase. To reset the value of output_server_name back to NULL, set this parameter's value to '' (empty string). output_table_name is nvarchar(128), with a default value of NULL. - -[ **\@job_version =** ] job_version OUTPUT -Output parameter that will be assigned the new job version number. job_version is int. - -[ **\@max_parallelism =** ] max_parallelism OUTPUT -The maximum level of parallelism per elastic pool. 
If set, then the job step will be restricted to only run on a maximum of that many databases per elastic pool. This applies to each elastic pool that is either directly included in the target group or is inside a server that is included in the target group. To reset the value of max_parallelism back to null, set this parameter's value to -1. max_parallelism is int. - -#### Return Code Values - -0 (success) or 1 (failure) - -#### Remarks - -Any in-progress executions of the job will not be affected. When sp_update_jobstep succeeds, the job's version number is incremented. The next time the job is executed, the new version will be used. - -#### Permissions - -By default, members of the sysadmin fixed server role can execute this stored procedure. They restrict a user to just be able to monitor jobs, you can grant the user to be part of the following database role in the job agent database specified when creating the job agent: - -- jobs_reader - -For details about the permissions of these roles, see the Permission section in this document. Only members of sysadmin can use this stored procedure to edit the attributes of jobs that are owned by other users - -### sp_delete_jobstep - -Removes a job step from a job. - -#### Syntax - -```syntaxsql -[jobs].sp_delete_jobstep [ @job_name = ] 'job_name' - [ , [ @step_id = ] step_id ] - [ , [ @step_name = ] 'step_name' ] - [ , [ @job_version = ] job_version OUTPUT ] -``` - -#### Arguments - -[ **\@job_name =** ] 'job_name' -The name of the job from which the step will be removed. job_name is nvarchar(128), with no default. - -[ **\@step_id =** ] step_id -The identification number for the job step to be deleted. Either step_id or step_name must be specified. step_id is an int. - -[ **\@step_name =** ] 'step_name' -The name of the step to be deleted. Either step_id or step_name must be specified. step_name is nvarchar(128). 
- -[ **\@job_version =** ] job_version OUTPUT -Output parameter that will be assigned the new job version number. job_version is int. - -#### Return Code Values - -0 (success) or 1 (failure) - -#### Remarks - -Any in-progress executions of the job will not be affected. When sp_delete_jobstep succeeds, the job's version number is incremented. The next time the job is executed, the new version will be used. - -The other job steps will be automatically renumbered to fill the gap left by the deleted job step. - -#### Permissions - -By default, members of the sysadmin fixed server role can execute this stored procedure. To restrict a user to just be able to monitor jobs, you can grant the user membership in the following database role in the job agent database specified when creating the job agent: - -- jobs_reader - -For details about the permissions of these roles, see the Permission section in this document. Only members of sysadmin can use this stored procedure to edit the attributes of jobs that are owned by other users. - -### sp_start_job - -Starts executing a job. - -#### Syntax - -```syntaxsql -[jobs].sp_start_job [ @job_name = ] 'job_name' - [ , [ @job_execution_id = ] job_execution_id OUTPUT ] -``` - -#### Arguments - -[ **\@job_name =** ] 'job_name' -The name of the job to start. job_name is nvarchar(128), with no default. - -[ **\@job_execution_id =** ] job_execution_id OUTPUT -Output parameter that will be assigned the job execution's ID. job_execution_id is uniqueidentifier. - -#### Return Code Values - -0 (success) or 1 (failure) - -#### Remarks - -None. - -#### Permissions - -By default, members of the sysadmin fixed server role can execute this stored procedure. 
They restrict a user to just be able to monitor jobs, you can grant the user to be part of the following database role in the job agent database specified when creating the job agent: - -- jobs_reader - -For details about the permissions of these roles, see the Permission section in this document. Only members of sysadmin can use this stored procedure to edit the attributes of jobs that are owned by other users. - -### sp_stop_job - -Stops a job execution. - -#### Syntax - -```syntaxsql -[jobs].sp_stop_job [ @job_execution_id = ] ' job_execution_id ' -``` - -#### Arguments - -[ **\@job_execution_id =** ] job_execution_id -The identification number of the job execution to stop. job_execution_id is uniqueidentifier, with default of NULL. - -#### Return Code Values - -0 (success) or 1 (failure) - -#### Remarks - -None. - -#### Permissions - -By default, members of the sysadmin fixed server role can execute this stored procedure. They restrict a user to just be able to monitor jobs, you can grant the user to be part of the following database role in the job agent database specified when creating the job agent: - -- jobs_reader - -For details about the permissions of these roles, see the Permission section in this document. Only members of sysadmin can use this stored procedure to edit the attributes of jobs that are owned by other users. - -### sp_add_target_group - -Adds a target group. - -#### Syntax - -```syntaxsql -[jobs].sp_add_target_group [ @target_group_name = ] 'target_group_name' - [ , [ @target_group_id = ] target_group_id OUTPUT ] -``` - -#### Arguments - -[ **\@target_group_name =** ] 'target_group_name' -The name of the target group to create. target_group_name is nvarchar(128), with no default. - -[ **\@target_group_id =** ] target_group_id OUTPUT - The target group identification number assigned to the job if created successfully. target_group_id is an output variable of type uniqueidentifier, with a default of NULL. 
- -#### Return Code Values - -0 (success) or 1 (failure) - -#### Remarks - -Target groups provide an easy way to target a job at a collection of databases. - -#### Permissions - -By default, members of the sysadmin fixed server role can execute this stored procedure. They restrict a user to just be able to monitor jobs, you can grant the user to be part of the following database role in the job agent database specified when creating the job agent: - -- jobs_reader - -For details about the permissions of these roles, see the Permission section in this document. Only members of sysadmin can use this stored procedure to edit the attributes of jobs that are owned by other users. - -### sp_delete_target_group - -Deletes a target group. - -#### Syntax - -```syntaxsql -[jobs].sp_delete_target_group [ @target_group_name = ] 'target_group_name' -``` - -#### Arguments - -[ **\@target_group_name =** ] 'target_group_name' -The name of the target group to delete. target_group_name is nvarchar(128), with no default. - -#### Return Code Values - -0 (success) or 1 (failure) - -#### Remarks - -None. - -#### Permissions - -By default, members of the sysadmin fixed server role can execute this stored procedure. They restrict a user to just be able to monitor jobs, you can grant the user to be part of the following database role in the job agent database specified when creating the job agent: - -- jobs_reader - -For details about the permissions of these roles, see the Permission section in this document. Only members of sysadmin can use this stored procedure to edit the attributes of jobs that are owned by other users. - -### sp_add_target_group_member - -Adds a database or group of databases to a target group. 
- -#### Syntax - -```syntaxsql -[jobs].sp_add_target_group_member [ @target_group_name = ] 'target_group_name' - [ , [ @membership_type = ] 'membership_type' ] - [ , [ @target_type = ] 'target_type' ] - [ , [ @refresh_credential_name = ] 'refresh_credential_name' ] - [ , [ @server_name = ] 'server_name' ] - [ , [ @database_name = ] 'database_name' ] - [ , [ @elastic_pool_name = ] 'elastic_pool_name' ] - [ , [ @shard_map_name = ] 'shard_map_name' ] - [ , [ @target_id = ] target_id OUTPUT ] -``` - -#### Arguments - -[ **\@target_group_name =** ] 'target_group_name' -The name of the target group to which the member will be added. target_group_name is nvarchar(128), with no default. - -[ **\@membership_type =** ] 'membership_type' -Specifies if the target group member will be included or excluded. membership_type is nvarchar(128), with a default of 'Include'. Valid values for membership_type are 'Include' or 'Exclude'. - -[ **\@target_type =** ] 'target_type' -The type of target database or collection of databases including all databases in a server, all databases in an Elastic pool, all databases in a shard map, or an individual database. target_type is nvarchar(128), with no default. Valid values for target_type are 'SqlServer', 'SqlElasticPool', 'SqlDatabase', or 'SqlShardMap'. - -[ **\@refresh_credential_name =** ] 'refresh_credential_name' -The name of the database scoped credential. refresh_credential_name is nvarchar(128), with no default. - -[ **\@server_name =** ] 'server_name' -The name of the server that should be added to the specified target group. server_name should be specified when target_type is 'SqlServer'. server_name is nvarchar(128), with no default. - -[ **\@database_name =** ] 'database_name' -The name of the database that should be added to the specified target group. database_name should be specified when target_type is 'SqlDatabase'. database_name is nvarchar(128), with no default. 
- -[ **\@elastic_pool_name =** ] 'elastic_pool_name' -The name of the Elastic pool that should be added to the specified target group. elastic_pool_name should be specified when target_type is 'SqlElasticPool'. elastic_pool_name is nvarchar(128), with no default. - -[ **\@shard_map_name =** ] 'shard_map_name' -The name of the shard map that should be added to the specified target group. shard_map_name should be specified when target_type is 'SqlShardMap'. shard_map_name is nvarchar(128), with no default. - -[ **\@target_id =** ] target_id OUTPUT -The target identification number assigned to the target group member when it is added to the target group. target_id is an output variable of type uniqueidentifier, with a default of NULL. - -#### Return Code Values - -0 (success) or 1 (failure) - -#### Remarks - -A job executes on all single databases within a server or in an elastic pool at time of execution, when a server or elastic pool is included in the target group. - -#### Permissions - -By default, members of the sysadmin fixed server role can execute this stored procedure. To restrict a user to just be able to monitor jobs, you can grant the user membership in the following database role in the job agent database specified when creating the job agent: - -- jobs_reader - -For details about the permissions of these roles, see the Permission section in this document. Only members of sysadmin can use this stored procedure to edit the attributes of jobs that are owned by other users. - -#### Examples - -The following example adds all the databases in the London and NewYork servers to the group Servers Maintaining Customer Information. You must connect to the jobs database specified when creating the job agent, in this case ElasticJobs. 
- -```sql ---Connect to the jobs database specified when creating the job agent -USE ElasticJobs; -GO - --- Add a target group containing server(s) -EXEC jobs.sp_add_target_group @target_group_name = N'Servers Maintaining Customer Information'; -GO - --- Add a server target member -EXEC jobs.sp_add_target_group_member -@target_group_name = N'Servers Maintaining Customer Information', -@target_type = N'SqlServer', -@refresh_credential_name=N'refresh_credential', --credential required to refresh the databases in server -@server_name=N'London.database.windows.net'; -GO - --- Add a server target member -EXEC jobs.sp_add_target_group_member -@target_group_name = N'Servers Maintaining Customer Information', -@target_type = N'SqlServer', -@refresh_credential_name=N'refresh_credential', --credential required to refresh the databases in server -@server_name=N'NewYork.database.windows.net'; -GO - ---View the recently added members to the target group -SELECT * FROM [jobs].target_group_members WHERE target_group_name= N'Servers Maintaining Customer Information'; -GO -``` - -### sp_delete_target_group_member - -Removes a target group member from a target group. - -#### Syntax - -```syntaxsql -[jobs].sp_delete_target_group_member [ @target_group_name = ] 'target_group_name' - [ , [ @target_id = ] 'target_id'] -``` - -#### Arguments - -[ @target_group_name = ] 'target_group_name' -The name of the target group from which to remove the target group member. target_group_name is nvarchar(128), with no default. - -[ @target_id = ] target_id - The target identification number assigned to the target group member to be removed. target_id is a uniqueidentifier, with a default of NULL. - -#### Return Code Values - -0 (success) or 1 (failure) - -#### Remarks - -Target groups provide an easy way to target a job at a collection of databases. - -#### Permissions - -By default, members of the sysadmin fixed server role can execute this stored procedure. 
They restrict a user to just be able to monitor jobs, you can grant the user to be part of the following database role in the job agent database specified when creating the job agent: - -- jobs_reader - -For details about the permissions of these roles, see the Permission section in this document. Only members of sysadmin can use this stored procedure to edit the attributes of jobs that are owned by other users. - -#### Examples - -The following example removes the London server from the group Servers Maintaining Customer Information. You must connect to the jobs database specified when creating the job agent, in this case ElasticJobs. - -```sql ---Connect to the jobs database specified when creating the job agent -USE ElasticJobs ; -GO - --- Retrieve the target_id for a target_group_members -declare @tid uniqueidentifier -SELECT @tid = target_id FROM [jobs].target_group_members WHERE target_group_name = 'Servers Maintaining Customer Information' and server_name = 'London.database.windows.net'; - --- Remove a target group member of type server -EXEC jobs.sp_delete_target_group_member -@target_group_name = N'Servers Maintaining Customer Information', -@target_id = @tid; -GO -``` - -### sp_purge_jobhistory - -Removes the history records for a job. - -#### Syntax - -```syntaxsql -[jobs].sp_purge_jobhistory [ @job_name = ] 'job_name' - [ , [ @job_id = ] job_id ] - [ , [ @oldest_date = ] oldest_date [] -``` - -#### Arguments - -[ **\@job_name =** ] 'job_name' -The name of the job for which to delete the history records. job_name is nvarchar(128), with a default of NULL. Either job_id or job_name must be specified, but both cannot be specified. - -[ **\@job_id =** ] job_id - The job identification number of the job for the records to be deleted. job_id is uniqueidentifier, with a default of NULL. Either job_id or job_name must be specified, but both cannot be specified. - -[ **\@oldest_date =** ] oldest_date - The oldest record to retain in the history. 
oldest_date is DATETIME2, with a default of NULL. When oldest_date is specified, sp_purge_jobhistory only removes records that are older than the value specified. - -#### Return Code Values - -0 (success) or 1 (failure) - -#### Remarks - -Target groups provide an easy way to target a job at a collection of databases. - -#### Permissions - -By default, members of the sysadmin fixed server role can execute this stored procedure. To restrict a user to just being able to monitor jobs, you can grant the user membership in the following database role in the job agent database specified when creating the job agent: - -- jobs_reader - -For details about the permissions of these roles, see the Permission section in this document. Only members of sysadmin can use this stored procedure to edit the attributes of jobs that are owned by other users. - -#### Examples - -The following example purges all history records for the job 'ResultsPoolsJob'. You must connect to the jobs database specified when creating the job agent, in this case ElasticJobs. - -```sql ---Connect to the jobs database specified when creating the job agent - -EXEC [jobs].sp_purge_jobhistory - @job_name = N'ResultsPoolsJob'; -GO -``` - -## Job views - -The following views are available in the [jobs database](job-automation-overview.md#elastic-job-database). - -|View |Description | -|---------|---------| -|[job_executions](#job_executions-view) | Shows job execution history. | -|[jobs](#jobs-view) | Shows all jobs. | -|[job_versions](#job_versions-view) | Shows all job versions. | -|[jobsteps](#jobsteps-view) | Shows all steps in the current version of each job. | -|[jobstep_versions](#jobstep_versions-view) | Shows all steps in all versions of each job. | -|[target_groups](#target_groups-view) | Shows all target groups. 
| -|[target_group_members](#target_group_members-view) | Shows all members of all target groups. | - -### job_executions view - -[jobs].[job_executions] - -Shows job execution history. - -|Column name | Data type | Description | -|---------|---------|---------| -|**job_execution_id** | uniqueidentifier | Unique ID of an instance of a job execution. -|**job_name** | nvarchar(128) | Name of the job. -|**job_id** | uniqueidentifier | Unique ID of the job. -|**job_version** | int | Version of the job (automatically updated each time the job is modified). -|**step_id** |int | Unique (for this job) identifier for the step. NULL indicates this is the parent job execution. -|**is_active** | bit | Indicates whether information is active or inactive. 1 indicates active jobs, and 0 indicates inactive. -|**lifecycle** | nvarchar(50) | Value indicating the status of the job:'Created', 'In Progress', 'Failed', 'Succeeded', 'Skipped', 'SucceededWithSkipped'| -|**create_time**| datetime2(7) | Date and time the job was created. -|**start_time** | datetime2(7) | Date and time the job started execution. NULL if the job has not yet been executed. -|**end_time** | datetime2(7) | Date and time the job finished execution. NULL if the job has not yet been executed or has not yet completed execution. -|**current_attempts** | int | Number of times the step was retried. Parent job will be 0, child job executions will be 1 or greater based on the execution policy. -|**current_attempt_start_time** | datetime2(7) | Date and time the job started execution. NULL indicates this is the parent job execution. -|**last_message** | nvarchar(max) | Job or step history message. -|**target_type** | nvarchar(128) | Type of target database or collection of databases including all databases in a server, all databases in an Elastic pool or a database. Valid values for target_type are 'SqlServer', 'SqlElasticPool' or 'SqlDatabase'. NULL indicates this is the parent job execution. 
-|**target_id** | uniqueidentifier | Unique ID of the target group member. NULL indicates this is the parent job execution. -|**target_group_name** | nvarchar(128) | Name of the target group. NULL indicates this is the parent job execution. -|**target_server_name** | nvarchar(256) | Name of the server contained in the target group. Specified only if target_type is 'SqlServer'. NULL indicates this is the parent job execution. -|**target_database_name** | nvarchar(128) | Name of the database contained in the target group. Specified only when target_type is 'SqlDatabase'. NULL indicates this is the parent job execution. - -### jobs view - -[jobs].[jobs] - -Shows all jobs. - -|Column name | Data type |Description| -|------|------|-------| -|**job_name** | nvarchar(128) | Name of the job.| -|**job_id**| uniqueidentifier |Unique ID of the job.| -|**job_version** |int |Version of the job (automatically updated each time the job is modified).| -|**description** |nvarchar(512)| Description for the job. Enabled bit: Indicates whether the job is enabled or disabled. 1 indicates enabled jobs, and 0 indicates disabled jobs.| -|**schedule_interval_type**|nvarchar(50) |Value indicating when the job is to be executed:'Once', 'Minutes', 'Hours', 'Days', 'Weeks', 'Months' -|**schedule_interval_count**|int|Number of schedule_interval_type periods to occur between each execution of the job.| -|**schedule_start_time**|datetime2(7)|Date and time the job was last started execution.| -|**schedule_end_time**|datetime2(7)|Date and time the job was last completed execution.| - -### job_versions view - -[jobs].[job_versions] - -Shows all job versions. 
- -|Column name|Data type|Description| -|------|------|-------| -|**job_name**|nvarchar(128)|Name of the job.| -|**job_id**|uniqueidentifier|Unique ID of the job.| -|**job_version**|int|Version of the job (automatically updated each time the job is modified).| - -### jobsteps view - -[jobs].[jobsteps] - -Shows all steps in the current version of each job. - -|Column name|Data type|Description| -|------|------|-------| -|**job_name**|nvarchar(128)|Name of the job.| -|**job_id**|uniqueidentifier|Unique ID of the job.| -|**job_version**|int|Version of the job (automatically updated each time the job is modified).| -|**step_id**|int|Unique (for this job) identifier for the step.| -|**step_name**|nvarchar(128)|Unique (for this job) name for the step.| -|**command_type**|nvarchar(50)|Type of command to execute in the job step. For v1, the value must be 'TSql', which is also the default.| -|**command_source**|nvarchar(50)|Location of the command. For v1, 'Inline' is the default and only accepted value.| -|**command**|nvarchar(max)|The commands to be executed by Elastic jobs through command_type.| -|**credential_name**|nvarchar(128)|Name of the database scoped credential used to execute the job.| -|**target_group_name**|nvarchar(128)|Name of the target group.| -|**target_group_id**|uniqueidentifier|Unique ID of the target group.| -|**initial_retry_interval_seconds**|int|The delay before the first retry attempt. Default value is 1.| -|**maximum_retry_interval_seconds**|int|The maximum delay between retry attempts. If the delay between retries would grow larger than this value, it is capped to this value instead. Default value is 120.| -|**retry_interval_backoff_multiplier**|real|The multiplier to apply to the retry delay if multiple job step execution attempts fail. Default value is 2.0.| -|**retry_attempts**|int|The number of retry attempts to use if this step fails. 
Default of 10. A value of 0 indicates no retry attempts.| -|**step_timeout_seconds**|int|The maximum amount of time, in seconds, that the step is allowed to run before it times out. The default is 43,200 seconds (12 hours).| -|**output_type**|nvarchar(11)|Location of the command. In the current preview, 'Inline' is the default and only accepted value.| -|**output_credential_name**|nvarchar(128)|Name of the credentials to be used to connect to the destination server to store the results set.| -|**output_subscription_id**|uniqueidentifier|Unique ID of the subscription of the destination server\database for the results set from the query execution.| -|**output_resource_group_name**|nvarchar(128)|Resource group name where the destination server resides.| -|**output_server_name**|nvarchar(256)|Name of the destination server for the results set.| -|**output_database_name**|nvarchar(128)|Name of the destination database for the results set.| -|**output_schema_name**|nvarchar(max)|Name of the destination schema. Defaults to dbo, if not specified.| -|**output_table_name**|nvarchar(max)|Name of the table to store the results set from the query results. Table will be created automatically based on the schema of the results set if it doesn't already exist. Schema must match the schema of the results set.| -|**max_parallelism**|int|The maximum number of databases per elastic pool that the job step will be run on at a time. The default is NULL, meaning no limit. | - -### jobstep_versions view - -[jobs].[jobstep_versions] - -Shows all steps in all versions of each job. The schema is identical to [jobsteps](#jobsteps-view). - -### target_groups view - -[jobs].[target_groups] - -Lists all target groups. - -|Column name|Data type|Description| -|-----|-----|-----| -|**target_group_name**|nvarchar(128)|The name of the target group, a collection of databases. -|**target_group_id**|uniqueidentifier|Unique ID of the target group. 
- -### target_group_members view - -[jobs].[target_group_members] - -Shows all members of all target groups. - -|Column name|Data type|Description| -|-----|-----|-----| -|**target_group_name**|nvarchar(128)|The name of the target group, a collection of databases. | -|**target_group_id**|uniqueidentifier|Unique ID of the target group.| -|**membership_type**|int|Specifies if the target group member is included or excluded in the target group. Valid values for membership_type are 'Include' or 'Exclude'.| -|**target_type**|nvarchar(128)|Type of target database or collection of databases including all databases in a server, all databases in an Elastic pool or a database. Valid values for target_type are 'SqlServer', 'SqlElasticPool', 'SqlDatabase', or 'SqlShardMap'.| -|**target_id**|uniqueidentifier|Unique ID of the target group member.| -|**refresh_credential_name**|nvarchar(128)|Name of the database scoped credential used to connect to the target group member.| -|**subscription_id**|uniqueidentifier|Unique ID of the subscription.| -|**resource_group_name**|nvarchar(128)|Name of the resource group in which the target group member resides.| -|**server_name**|nvarchar(128)|Name of the server contained in the target group. Specified only if target_type is 'SqlServer'. | -|**database_name**|nvarchar(128)|Name of the database contained in the target group. Specified only when target_type is 'SqlDatabase'.| -|**elastic_pool_name**|nvarchar(128)|Name of the Elastic pool contained in the target group. Specified only when target_type is 'SqlElasticPool'.| -|**shard_map_name**|nvarchar(128)|Name of the shard maps contained in the target group. 
Specified only when target_type is 'SqlShardMap'.| - -## Resources - -- ![Topic link icon](/sql/database-engine/configure-windows/media/topic-link.gif "Topic link icon") [Transact-SQL Syntax Conventions](/sql/t-sql/language-elements/transact-sql-syntax-conventions-transact-sql) - -## Next steps - -- [Create and manage Elastic Jobs using PowerShell](elastic-jobs-powershell-create.md) -- [Authorization and Permissions](/dotnet/framework/data/adonet/sql/authorization-and-permissions-in-sql-server) diff --git a/articles/azure-sql/database/elastic-pool-manage.md b/articles/azure-sql/database/elastic-pool-manage.md deleted file mode 100644 index 983697b395ab3..0000000000000 --- a/articles/azure-sql/database/elastic-pool-manage.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Manage elastic pools -description: Create and manage Azure SQL Database elastic pools using the Azure portal, PowerShell, the Azure CLI, Transact-SQL (T-SQL), and REST API. -services: sql-database -ms.service: sql-database -ms.subservice: elastic-pools -ms.topic: conceptual -author: arvindshmicrosoft -ms.author: arvindsh -ms.reviewer: kendralittle, mathoma -ms.date: 03/12/2019 -ms.custom: seoapril2019 sqldbrb=1, devx-track-azurecli ---- - -# Manage elastic pools in Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -With an elastic pool, you determine the amount of resources that the elastic pool requires to handle the workload of its databases, and the amount of resources for each pooled database. - -## Azure portal - -All pool settings can be found in one place: the **Configure pool** blade. To get here, find an elastic pool in the Azure portal and click **Configure pool** either from the top of the blade or from the resource menu on the left. - -From here you can make any combination of the following changes and save them all in one batch: - -1. Change the service tier of the pool -2. Scale the performance (DTU or vCores) and storage up or down -3. 
Add or remove databases to/from the pool -4. Set a min (guaranteed) and max performance limit for the databases in the pools -5. Review the cost summary to view any changes to your bill as a result of your new selections - -![Elastic pool configuration blade](./media/elastic-pool-manage/configure-pool.png) - -## PowerShell - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported by Azure SQL Database, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. - -To create and manage SQL Database elastic pools and pooled databases with Azure PowerShell, use the following PowerShell cmdlets. If you need to install or upgrade PowerShell, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). To create and manage the servers for an elastic pool, see [Create and manage servers](logical-servers.md). To create and manage firewall rules, see [Create and manage firewall rules using PowerShell](firewall-configure.md#use-powershell-to-manage-server-level-ip-firewall-rules). - -> [!TIP] -> For PowerShell example scripts, see [Create elastic pools and move databases between pools and out of a pool using PowerShell](scripts/move-database-between-elastic-pools-powershell.md) and [Use PowerShell to monitor and scale a SQL elastic pool in Azure SQL Database](scripts/monitor-and-scale-pool-powershell.md). 
-> - -| Cmdlet | Description | -| --- | --- | -|[New-AzSqlElasticPool](/powershell/module/az.sql/new-azsqlelasticpool)|Creates an elastic pool.| -|[Get-AzSqlElasticPool](/powershell/module/az.sql/get-azsqlelasticpool)|Gets elastic pools and their property values.| -|[Set-AzSqlElasticPool](/powershell/module/az.sql/set-azsqlelasticpool)|Modifies properties of an elastic pool For example, use the **StorageMB** property to modify the max storage of an elastic pool.| -|[Remove-AzSqlElasticPool](/powershell/module/az.sql/remove-azsqlelasticpool)|Deletes an elastic pool.| -|[Get-AzSqlElasticPoolActivity](/powershell/module/az.sql/get-azsqlelasticpoolactivity)|Gets the status of operations on an elastic pool| -|[New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase)|Creates a new database in an existing pool or as a single database. | -|[Get-AzSqlDatabase](/powershell/module/az.sql/get-azsqldatabase)|Gets one or more databases.| -|[Set-AzSqlDatabase](/powershell/module/az.sql/set-azsqldatabase)|Sets properties for a database, or moves an existing database into, out of, or between elastic pools.| -|[Remove-AzSqlDatabase](/powershell/module/az.sql/remove-azsqldatabase)|Removes a database.| - -> [!TIP] -> Creation of many databases in an elastic pool can take time when done using the portal or PowerShell cmdlets that create only a single database at a time. To automate creation into an elastic pool, see [CreateOrUpdateElasticPoolAndPopulate](https://gist.github.com/billgib/d80c7687b17355d3c2ec8042323819ae). - -## Azure CLI - -To create and manage SQL Database elastic pools with [Azure CLI](/cli/azure), use the following [Azure CLI SQL Database](/cli/azure/sql/db) commands. Use the [Cloud Shell](../../cloud-shell/overview.md) to run Azure CLI in your browser, or [install](/cli/azure/install-azure-cli) it on macOS, Linux, or Windows. 
- -> [!TIP] -> For Azure CLI example scripts, see [Use CLI to move a database in SQL Database in a SQL elastic pool](scripts/move-database-between-elastic-pools-cli.md) and [Use Azure CLI to scale a SQL elastic pool in Azure SQL Database](scripts/scale-pool-cli.md). -> - -| Cmdlet | Description | -| --- | --- | -|[az sql elastic-pool create](/cli/azure/sql/elastic-pool#az-sql-elastic-pool-create)|Creates an elastic pool.| -|[az sql elastic-pool list](/cli/azure/sql/elastic-pool#az-sql-elastic-pool-list)|Returns a list of elastic pools in a server.| -|[az sql elastic-pool list-dbs](/cli/azure/sql/elastic-pool#az-sql-elastic-pool-list-dbs)|Returns a list of databases in an elastic pool.| -|[az sql elastic-pool list-editions](/cli/azure/sql/elastic-pool#az-sql-elastic-pool-list-editions)|Also includes available pool DTU settings, storage limits, and per database settings. In order to reduce verbosity, additional storage limits and per database settings are hidden by default.| -|[az sql elastic-pool update](/cli/azure/sql/elastic-pool#az-sql-elastic-pool-update)|Updates an elastic pool.| -|[az sql elastic-pool delete](/cli/azure/sql/elastic-pool#az-sql-elastic-pool-delete)|Deletes the elastic pool.| - -## Transact-SQL (T-SQL) - -To create and move databases within existing elastic pools or to return information about an SQL Database elastic pool with Transact-SQL, use the following T-SQL commands. You can issue these commands using the Azure portal, [SQL Server Management Studio](/sql/ssms/use-sql-server-management-studio), [Visual Studio Code](https://code.visualstudio.com/docs), or any other program that can connect to a server and pass Transact-SQL commands. To create and manage firewall rules using T-SQL, see [Manage firewall rules using Transact-SQL](firewall-configure.md#use-transact-sql-to-manage-ip-firewall-rules). - -> [!IMPORTANT] -> You cannot create, update, or delete an Azure SQL Database elastic pool using Transact-SQL. 
You can add or remove databases from an elastic pool, and you can use DMVs to return information about existing elastic pools. -> - -| Command | Description | -| --- | --- | -|[CREATE DATABASE (Azure SQL Database)](/sql/t-sql/statements/create-database-azure-sql-database)|Creates a new database in an existing pool or as a single database. You must be connected to the master database to create a new database.| -| [ALTER DATABASE (Azure SQL Database)](/sql/t-sql/statements/alter-database-azure-sql-database) |Move a database into, out of, or between elastic pools.| -|[DROP DATABASE (Transact-SQL)](/sql/t-sql/statements/drop-database-transact-sql)|Deletes a database.| -|[sys.elastic_pool_resource_stats (Azure SQL Database)](/sql/relational-databases/system-catalog-views/sys-elastic-pool-resource-stats-azure-sql-database)|Returns resource usage statistics for all the elastic pools on a server. For each elastic pool, there is one row for each 15 second reporting window (four rows per minute). This includes CPU, IO, Log, storage consumption and concurrent request/session utilization by all databases in the pool.| -|[sys.database_service_objectives (Azure SQL Database)](/sql/relational-databases/system-catalog-views/sys-database-service-objectives-azure-sql-database)|Returns the edition (service tier), service objective (pricing tier), and elastic pool name, if any, for a database in SQL Database or Azure Synapse Analytics. If logged on to the master database in a server, returns information on all databases. For Azure Synapse Analytics, you must be connected to the master database.| - -## REST API - -To create and manage SQL Database elastic pools and pooled databases, use these REST API requests. 
- -| Command | Description | -| --- | --- | -|[Elastic pools - Create or update](/rest/api/sql/elasticpools/createorupdate)|Creates a new elastic pool or updates an existing elastic pool.| -|[Elastic pools - Delete](/rest/api/sql/elasticpools/delete)|Deletes the elastic pool.| -|[Elastic pools - Get](/rest/api/sql/elasticpools/get)|Gets an elastic pool.| -|[Elastic pools - List by server](/rest/api/sql/elasticpools/listbyserver)|Returns a list of elastic pools in a server.| -|[Elastic pools - Update](/rest/api/sql/2020-11-01-preview/elasticpools/update)|Updates an existing elastic pool.| -|[Elastic pool activities](/rest/api/sql/elasticpoolactivities)|Returns elastic pool activities.| -|[Elastic pool database activities](/rest/api/sql/elasticpooldatabaseactivities)|Returns activity on databases inside of an elastic pool.| -|[Databases - Create or update](/rest/api/sql/databases/createorupdate)|Creates a new database or updates an existing database.| -|[Databases - Get](/rest/api/sql/databases/get)|Gets a database.| -|[Databases - List by elastic pool](/rest/api/sql/databases/listbyelasticpool)|Returns a list of databases in an elastic pool.| -|[Databases - List by server](/rest/api/sql/databases/listbyserver)|Returns a list of databases in a server.| -|[Databases - Update](/rest/api/sql/databases/update)|Updates an existing database.| - -## Next steps - -* To learn more about design patterns for SaaS applications using elastic pools, see [Design Patterns for Multi-tenant SaaS Applications with Azure SQL Database](saas-tenancy-app-design-patterns.md). -* For a SaaS tutorial using elastic pools, see [Introduction to the Wingtip SaaS application](saas-dbpertenant-wingtip-app-overview.md). 
diff --git a/articles/azure-sql/database/elastic-pool-overview.md b/articles/azure-sql/database/elastic-pool-overview.md deleted file mode 100644 index 2d40cedd168e7..0000000000000 --- a/articles/azure-sql/database/elastic-pool-overview.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: Manage multiple databases with elastic pools -description: Manage and scale multiple databases in Azure SQL Database, as many as hundreds or thousands, by using elastic pools. For one price, you can distribute resources where they're needed. -services: sql-database -ms.service: sql-database -ms.subservice: elastic-pools -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: arvindshmicrosoft -ms.author: arvindsh -ms.reviewer: kendralittle, mathoma -ms.date: 06/23/2021 ---- -# Elastic pools help you manage and scale multiple databases in Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Azure SQL Database elastic pools are a simple, cost-effective solution for managing and scaling multiple databases that have varying and unpredictable usage demands. The databases in an elastic pool are on a single server and share a set number of resources at a set price. Elastic pools in SQL Database enable software as a service (SaaS) developers to optimize the price performance for a group of databases within a prescribed budget while delivering performance elasticity for each database. - -## What are SQL elastic pools? - -SaaS developers build applications on top of large-scale data tiers that consist of multiple databases. A common application pattern is to provision a single database for each customer. But different customers often have varying and unpredictable usage patterns, and it's difficult to predict the resource requirements of each individual database user. Traditionally, you had two options: - -- Overprovision resources based on peak usage and overpay. 
-- Underprovision to save cost, at the expense of performance and customer satisfaction during peaks. - -Elastic pools solve this problem by ensuring that databases get the performance resources they need when they need it. They provide a simple resource allocation mechanism within a predictable budget. To learn more about design patterns for SaaS applications by using elastic pools, see [Design patterns for multitenant SaaS applications with SQL Database](saas-tenancy-app-design-patterns.md). -> -> [!IMPORTANT] -> There's no per-database charge for elastic pools. You're billed for each hour a pool exists at the highest eDTU or vCores, regardless of usage or whether the pool was active for less than an hour. - -Elastic pools enable you to purchase resources for a pool shared by multiple databases to accommodate unpredictable periods of usage by individual databases. You can configure resources for the pool based either on the [DTU-based purchasing model](service-tiers-dtu.md) or the [vCore-based purchasing model](service-tiers-vcore.md). The resource requirement for a pool is determined by the aggregate utilization of its databases. - -The amount of resources available to the pool is controlled by your budget. All you have to do is: - -- Add databases to the pool. -- Optionally set the minimum and maximum resources for the databases. These resources are either minimum and maximum DTUs or minimum or maximum vCores depending on your choice of resourcing model. -- Set the resources of the pool based on your budget. - -You can use pools to seamlessly grow your service from a lean startup to a mature business at ever-increasing scale. - -Within the pool, individual databases are given the flexibility to use resources within set parameters. Under heavy load, a database can consume more resources to meet demand. Databases under light loads consume less, and databases under no load consume no resources. 
Provisioning resources for the entire pool rather than for single databases simplifies your management tasks. Plus, you have a predictable budget for the pool. - - More resources can be added to an existing pool with minimum downtime. If extra resources are no longer needed, they can be removed from an existing pool at any time. You can also add or remove databases from the pool. If a database is predictably underutilizing resources, you can move it out. - -> [!NOTE] -> When you move databases into or out of an elastic pool, there's no downtime except for a brief period (on the order of seconds) at the end of the operation when database connections are dropped. - -## When should you consider a SQL Database elastic pool? - -Pools are well suited for a large number of databases with specific utilization patterns. For a given database, this pattern is characterized by low average utilization with infrequent utilization spikes. Conversely, multiple databases with persistent medium-high utilization shouldn't be placed in the same elastic pool. - -The more databases you can add to a pool, the greater your savings become. Depending on your application utilization pattern, it's possible to see savings with as few as two S3 databases. - -The following sections help you understand how to assess if your specific collection of databases can benefit from being in a pool. The examples use Standard pools, but the same principles also apply to Basic and Premium pools. - -### Assess database utilization patterns - -The following figure shows an example of a database that spends much of its time idle but also periodically spikes with activity. This utilization pattern is suited for a pool. - - ![Chart that shows a single database suitable for a pool.](./media/elastic-pool-overview/one-database.png) - -The chart illustrates DTU usage over one hour from 12:00 to 1:00 where each data point has one-minute granularity. 
At 12:10, DB1 peaks up to 90 DTUs, but its overall average usage is less than five DTUs. An S3 compute size is required to run this workload in a single database, but this size leaves most of the resources unused during periods of low activity. - -A pool allows these unused DTUs to be shared across multiple databases. A pool reduces the DTUs needed and the overall cost. - -Building on the previous example, suppose there are other databases with similar utilization patterns as DB1. In the next two figures, the utilization of four databases and 20 databases are layered onto the same graph to illustrate the nonoverlapping nature of their utilization over time by using the DTU-based purchasing model: - - ![Chart that shows four databases with a utilization pattern suitable for a pool.](./media/elastic-pool-overview/four-databases.png) - - ![Chart that shows 20 databases with a utilization pattern suitable for a pool.](./media/elastic-pool-overview/twenty-databases.png) - -The aggregate DTU utilization across all 20 databases is illustrated by the black line in the preceding chart. This line shows that the aggregate DTU utilization never exceeds 100 DTUs and indicates that the 20 databases can share 100 eDTUs over this time period. The result is a 20-time reduction in DTUs and a 13-time price reduction compared to placing each of the databases in S3 compute sizes for single databases. - -This example is ideal because: - -- There are large differences between peak utilization and average utilization per database. -- The peak utilization for each database occurs at different points in time. -- eDTUs are shared between many databases. - -In the DTU purchasing model, the price of a pool is a function of the pool eDTUs. While the eDTU unit price for a pool is 1.5 times greater than the DTU unit price for a single database, *pool eDTUs can be shared by many databases and fewer total eDTUs are needed*. 
These distinctions in pricing and eDTU sharing are the basis of the price savings potential that pools can provide. - -In the vCore purchasing model, the vCore unit price for elastic pools is the same as the vCore unit price for single databases. - -## How do I choose the correct pool size? - -The best size for a pool depends on the aggregate resources needed for all databases in the pool. You need to determine: - -- Maximum compute resources utilized by all databases in the pool. Compute resources are indexed by either eDTUs or vCores depending on your choice of purchasing model. -- Maximum storage bytes utilized by all databases in the pool. - -For service tiers and resource limits in each purchasing model, see the [DTU-based purchasing model](service-tiers-dtu.md) or the [vCore-based purchasing model](service-tiers-vcore.md). - -The following steps can help you estimate whether a pool is more cost-effective than single databases: - -1. Estimate the eDTUs or vCores needed for the pool: - - For the DTU-based purchasing model: - - MAX(<*Total number of DBs* × *Average DTU utilization per DB*>, <*Number of concurrently peaking DBs* × *Peak DTU utilization per DB*>) - - For the vCore-based purchasing model: - - MAX(<*Total number of DBs* × *Average vCore utilization per DB*>, <*Number of concurrently peaking DBs* × *Peak vCore utilization per DB*>) -1. Estimate the total storage space needed for the pool by adding the data size needed for all the databases in the pool. For the DTU purchasing model, determine the eDTU pool size that provides this amount of storage. -1. For the DTU-based purchasing model, take the larger of the eDTU estimates from step 1 and step 2. For the vCore-based purchasing model, take the vCore estimate from step 1. -1. See the [SQL Database pricing page](https://azure.microsoft.com/pricing/details/sql-database/) and find the smallest pool size that's greater than the estimate from step 3. -1. 
Compare the pool price from step 4 to the price of using the appropriate compute sizes for single databases. - -> [!IMPORTANT] -> If the number of databases in a pool approaches the maximum supported, make sure to consider [resource management in dense elastic pools](elastic-pool-resource-management.md). - -### Per-database properties - -You can optionally set per-database properties to modify resource consumption patterns in elastic pools. For more information, see resource limits documentation for [DTU](resource-limits-dtu-elastic-pools.md#database-properties-for-pooled-databases) and [vCore](resource-limits-vcore-elastic-pools.md#database-properties-for-pooled-databases) elastic pools. - -## Use other SQL Database features with elastic pools - -You can use other SQL Database features with elastic pools. - -### Elastic jobs and elastic pools - -With a pool, management tasks are simplified by running scripts in [elastic jobs](elastic-jobs-overview.md). An elastic job eliminates most of the tedium associated with large numbers of databases. - -For more information about other database tools for working with multiple databases, see [Scaling out with SQL Database](elastic-scale-introduction.md). - -### Business continuity options for databases in an elastic pool - -Pooled databases generally support the same [business-continuity features](business-continuity-high-availability-disaster-recover-hadr-overview.md) that are available to single databases: - -- **Point-in-time restore**: Point-in-time restore uses automatic database backups to recover a database in a pool to a specific point in time. See [Point-in-time restore](recovery-using-backups.md#point-in-time-restore). -- **Geo-restore**: Geo-restore provides the default recovery option when a database is unavailable because of an incident in the region where the database is hosted. See [Restore a SQL database or fail over to a secondary](disaster-recovery-guidance.md). 
-- **Active geo-replication**: For applications that have more aggressive recovery requirements than geo-restore can offer, configure [active geo-replication](active-geo-replication-overview.md) or an [auto-failover group](auto-failover-group-overview.md). - -## Create a new SQL Database elastic pool by using the Azure portal - -You can create an elastic pool in the Azure portal in two ways: - -- Create an elastic pool and select an existing or new server. -- Create an elastic pool from an existing server. - -To create an elastic pool and select an existing or new server: - -1. Go to the [Azure portal](https://portal.azure.com) to create an elastic pool. Search for and select **Azure SQL**. -1. Select **Create** to open the **Select SQL deployment option** pane. To view more information about elastic pools, on the **Databases** tile, select **Show details**. -1. On the **Databases** tile, in the **Resource type** dropdown, select **Elastic pool**. Then select **Create**. - - ![Screenshot that shows creating an elastic pool.](./media/elastic-pool-overview/create-elastic-pool.png) - -To create an elastic pool from an existing server: - -- Go to an existing server and select **New pool** to create a pool directly in that server. - -> [!NOTE] -> You can create multiple pools on a server, but you can't add databases from different servers into the same pool. - -The pool's service tier determines the features available to the databases in the pool, and the maximum amount of resources available to each database. For more information, see resource limits for elastic pools in the [DTU model](resource-limits-dtu-elastic-pools.md#elastic-pool-storage-sizes-and-compute-sizes). For vCore-based resource limits for elastic pools, see [vCore-based resource limits - elastic pools](resource-limits-vcore-elastic-pools.md). - -To configure the resources and pricing of the pool, select **Configure pool**. 
Then select a service tier, add databases to the pool, and configure the resource limits for the pool and its databases. - -After you've configured the pool, select **Apply**, name the pool, and select **OK** to create the pool. - -## Monitor an elastic pool and its databases - -In the Azure portal, you can monitor the utilization of an elastic pool and the databases within that pool. You can also make a set of changes to your elastic pool and submit all changes at the same time. These changes include adding or removing databases, changing your elastic pool settings, or changing your database settings. - -You can use the built-in [performance monitoring](./performance-guidance.md) and [alerting tools](./alerts-insights-configure-portal.md) combined with performance ratings. SQL Database can also [emit metrics and resource logs](./metrics-diagnostic-telemetry-logging-streaming-export-configure.md?tabs=azure-portal) for easier monitoring. - -## Customer case studies - -- [SnelStart](https://azure.microsoft.com/resources/videos/azure-sql-database-case-study-snelstart/): SnelStart used elastic pools with SQL Database to rapidly expand its business services at a rate of 1,000 new SQL databases per month. -- [Umbraco](https://azure.microsoft.com/resources/videos/azure-sql-database-case-study-umbraco/): Umbraco uses elastic pools with SQL Database to quickly provision and scale services for thousands of tenants in the cloud. -- [Daxko/CSI](https://customers.microsoft.com/story/726277-csi-daxko-partner-professional-service-azure): Daxko/CSI uses elastic pools with SQL Database to accelerate its development cycle and to enhance its customer services and performance. - -## Next steps - -- For pricing information, see [Elastic pool pricing](https://azure.microsoft.com/pricing/details/sql-database/elastic). -- To scale elastic pools, see [Scale elastic pools](elastic-pool-scale.md) and [Scale an elastic pool - sample code](scripts/monitor-and-scale-pool-powershell.md). 
-- To learn more about design patterns for SaaS applications by using elastic pools, see [Design patterns for multitenant SaaS applications with SQL Database](saas-tenancy-app-design-patterns.md). -- For a SaaS tutorial by using elastic pools, see [Introduction to the Wingtip SaaS application](saas-dbpertenant-wingtip-app-overview.md). -- To learn about resource management in elastic pools with many databases, see [Resource management in dense elastic pools](elastic-pool-resource-management.md). diff --git a/articles/azure-sql/database/elastic-pool-resource-management.md b/articles/azure-sql/database/elastic-pool-resource-management.md deleted file mode 100644 index ff5c6110c2950..0000000000000 --- a/articles/azure-sql/database/elastic-pool-resource-management.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Resource management in dense elastic pools -description: Manage compute resources in Azure SQL Database elastic pools with many databases. -services: sql-database -ms.service: sql-database -ms.subservice: elastic-pools -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: dimitri-furman -ms.author: dfurman -ms.reviewer: kendralittle, mathoma, jackli, wiassaf -ms.date: 3/30/2022 ---- - -# Resource management in dense elastic pools -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Azure SQL Database [elastic pools](./elastic-pool-overview.md) is a cost-effective solution for managing many databases with varying resource usage. All databases in an elastic pool share the same allocation of resources, such as CPU, memory, worker threads, storage space, `tempdb`, on the assumption that **only a subset of databases in the pool will use compute resources at any given time**. This assumption allows elastic pools to be cost-effective. Instead of paying for all resources each individual database could potentially need, customers pay for a much smaller set of resources, shared among all databases in the pool. 
- -## Resource governance - -Resource sharing requires the system to carefully control resource usage to minimize the "noisy neighbor" effect, where a database with high resource consumption affects other databases in the same elastic pool. Azure SQL Database achieves these goals by implementing [resource governance](resource-limits-logical-server.md#resource-governance). At the same time, the system must provide sufficient resources for features such as high availability and disaster recovery (HADR), backup and restore, monitoring, Query Store, Automatic tuning, etc. to function reliably. - -The primary design goal of elastic pools is to be cost-effective. For this reason, the system intentionally allows customers to create _dense_ pools, that is pools with the number of databases approaching or at the maximum allowed, but with a moderate allocation of compute resources. For the same reason, the system doesn't reserve all potentially needed resources for its internal processes, but allows resource sharing between internal processes and user workloads. - -This approach allows customers to use dense elastic pools to achieve adequate performance and major cost savings. However, if the workload against many databases in a dense pool is sufficiently intense, resource contention becomes significant. Resource contention reduces user workload performance, and can negatively impact internal processes. - -> [!IMPORTANT] -> In dense pools with many active databases, it may not be feasible to increase the number of databases in the pool up to the maximums documented for [DTU](resource-limits-dtu-elastic-pools.md) and [vCore](resource-limits-vcore-elastic-pools.md) elastic pools. -> -> The number of databases that can be placed in dense pools without causing resource contention and performance problems depends on the number of concurrently active databases, and on resource consumption by user workloads in each database. 
This number can change over time as user workloads change. -> -> Additionally, if the min vCores per database, or min DTUs per database setting is set to a value greater than 0, the maximum number of databases in the pool will be implicitly limited. For more information, see [Database properties for pooled vCore databases](resource-limits-vcore-elastic-pools.md#database-properties-for-pooled-databases) and [Database properties for pooled DTU databases](resource-limits-dtu-elastic-pools.md#database-properties-for-pooled-databases). - -When resource contention occurs in a densely packed pool, customers can choose one or more of the following actions to mitigate it: - -- Tune query workload to reduce resource consumption, or spread resource consumption across multiple databases over time. -- Reduce pool density by moving some databases to another pool, or by making them standalone databases. -- Scale up the pool to get more resources. - -For suggestions on how to implement the last two actions, see [Operational recommendations](#operational-recommendations) later in this article. Reducing resource contention benefits both user workloads and internal processes, and lets the system reliably maintain expected level of service. - -## Monitoring resource consumption - -To avoid performance degradation due to resource contention, customers using dense elastic pools should proactively monitor resource consumption, and take timely action if increasing resource contention starts affecting workloads. Continuous monitoring is important because resource usage in a pool changes over time, due to changes in user workload, changes in data volumes and distribution, changes in pool density, and changes in the Azure SQL Database service. - -Azure SQL Database provides several metrics that are relevant for this type of monitoring. 
Exceeding the recommended average value for each metric indicates resource contention in the pool, and should be addressed using one of the actions mentioned earlier. - -To send an alert when pool resource utilization (CPU, data IO, log IO, workers, etc.) exceeds a threshold, consider creating alerts via the [Azure portal](alerts-insights-configure-portal.md) or the [Add-AzMetricAlertRulev2](/powershell/module/az.monitor/add-azmetricalertrulev2) PowerShell cmdlet. When monitoring elastic pools, consider also creating alerts for individual databases in the pool if needed in your scenario. For a sample scenario of monitoring elastic pools, see [Monitor and manage performance of Azure SQL Database in a multi-tenant SaaS app](saas-dbpertenant-performance-monitoring.md). - -|Metric name|Description|Recommended average value| -|----------|--------------------------------|------------| -|`avg_instance_cpu_percent`|CPU utilization of the SQL process associated with an elastic pool, as measured by the underlying operating system. Available in the [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database) view in every database, and in the [sys.elastic_pool_resource_stats](/sql/relational-databases/system-catalog-views/sys-elastic-pool-resource-stats-azure-sql-database) view in the `master` database. This metric is also emitted to Azure Monitor, where it is [named](../../azure-monitor/essentials/metrics-supported.md#microsoftsqlserverselasticpools) `sqlserver_process_core_percent`, and can be viewed in Azure portal. This value is the same for every database in the same elastic pool.|Below 70%. Occasional short spikes up to 90% may be acceptable.| -|`max_worker_percent`|[Worker thread](/sql/relational-databases/thread-and-task-architecture-guide) utilization. Provided for each database in the pool, as well as for the pool itself. 
There are different limits on the number of worker threads at the database level, and at the pool level, therefore monitoring this metric at both levels is recommended. Available in the [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database) view in every database, and in the [sys.elastic_pool_resource_stats](/sql/relational-databases/system-catalog-views/sys-elastic-pool-resource-stats-azure-sql-database) view in the `master` database. This metric is also emitted to Azure Monitor, where it is [named](../../azure-monitor/essentials/metrics-supported.md#microsoftsqlserverselasticpools) `workers_percent`, and can be viewed in Azure portal.|Below 80%. Spikes up to 100% will cause connection attempts and queries to fail.| -|`avg_data_io_percent`|IOPS utilization for read and write physical IO. Provided for each database in the pool, as well as for the pool itself. There are different limits on the number of IOPS at the database level, and at the pool level, therefore monitoring this metric at both levels is recommended. Available in the [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database) view in every database, and in the [sys.elastic_pool_resource_stats](/sql/relational-databases/system-catalog-views/sys-elastic-pool-resource-stats-azure-sql-database) view in the `master` database. This metric is also emitted to Azure Monitor, where it is [named](../../azure-monitor/essentials/metrics-supported.md#microsoftsqlserverselasticpools) `physical_data_read_percent`, and can be viewed in Azure portal.|Below 80%. Occasional short spikes up to 100% may be acceptable.| -|`avg_log_write_percent`|Throughput utilizations for transaction log write IO. Provided for each database in the pool, as well as for the pool itself. 
There are different limits on the log throughput at the database level, and at the pool level, therefore monitoring this metric at both levels is recommended. Available in the [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database) view in every database, and in the [sys.elastic_pool_resource_stats](/sql/relational-databases/system-catalog-views/sys-elastic-pool-resource-stats-azure-sql-database) view in the `master` database. This metric is also emitted to Azure Monitor, where it is [named](../../azure-monitor/essentials/metrics-supported.md#microsoftsqlserverselasticpools) `log_write_percent`, and can be viewed in Azure portal. When this metric is close to 100%, all database modifications (INSERT, UPDATE, DELETE, MERGE statements, SELECT … INTO, BULK INSERT, etc.) will be slower.|Below 90%. Occasional short spikes up to 100% may be acceptable.| -|`oom_per_second`|The rate of out-of-memory (OOM) errors in an elastic pool, which is an indicator of memory pressure. Available in the [sys.dm_resource_governor_resource_pools_history_ex](/sql/relational-databases/system-dynamic-management-views/sys-dm-resource-governor-resource-pools-history-ex-azure-sql-database) view. See [Examples](#examples) for a sample query to calculate this metric. For more information, see resource limits for [elastic pools using DTUs](resource-limits-dtu-elastic-pools.md) or [elastic pools using vCores](resource-limits-vcore-elastic-pools.md), and [Troubleshoot out of memory errors with Azure SQL Database](troubleshoot-memory-errors-issues.md). If you encounter out of memory errors, review [sys.dm_os_out_of_memory_events](/sql/relational-databases/system-dynamic-management-views/sys-dm-os-out-of-memory-events).|0| -|`avg_storage_percent`|Total storage space used by data in all databases within an elastic pool. Does not include empty space in database files. 
Available in the [sys.elastic_pool_resource_stats](/sql/relational-databases/system-catalog-views/sys-elastic-pool-resource-stats-azure-sql-database) view in the `master` database. This metric is also emitted to Azure Monitor, where it is [named](../../azure-monitor/essentials/metrics-supported.md#microsoftsqlserverselasticpools) `storage_percent`, and can be viewed in Azure portal.|Below 80%. Can approach 100% for pools with no data growth.| -|`avg_allocated_storage_percent`|Total storage space used by database files in storage in all databases within an elastic pool. Includes empty space in database files. Available in the [sys.elastic_pool_resource_stats](/sql/relational-databases/system-catalog-views/sys-elastic-pool-resource-stats-azure-sql-database) view in the `master` database. This metric is also emitted to Azure Monitor, where it is [named](../../azure-monitor/essentials/metrics-supported.md#microsoftsqlserverselasticpools) `allocated_data_storage_percent`, and can be viewed in Azure portal.|Below 90%. Can approach 100% for pools with no data growth.| -|`tempdb_log_used_percent`|Transaction log space utilization in the `tempdb` database. Even though temporary objects created in one database are not visible in other databases in the same elastic pool, `tempdb` is a shared resource for all databases in the same pool. A long running or orphaned transaction in `tempdb` started from one database in the pool can consume a large portion of transaction log, and cause failures for queries in other databases in the same pool. Derived from [sys.dm_db_log_space_usage](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-log-space-usage-transact-sql) and [sys.database_files](/sql/relational-databases/system-catalog-views/sys-database-files-transact-sql) views. This metric is also emitted to Azure Monitor, and can be viewed in Azure portal. See [Examples](#examples) for a sample query to return the current value of this metric.|Below 50%. 
Occasional spikes up to 80% are acceptable.| - -In addition to these metrics, Azure SQL Database provides a view that returns actual resource governance limits, as well as additional views that return resource utilization statistics at the resource pool level, and at the workload group level. - -|View name|Description| -|-----------------|--------------------------------| -|[sys.dm_user_db_resource_governance](/sql/relational-databases/system-dynamic-management-views/sys-dm-user-db-resource-governor-azure-sql-database)|Returns actual configuration and capacity settings used by resource governance mechanisms in the current database or elastic pool.| -|[sys.dm_resource_governor_resource_pools](/sql/relational-databases/system-dynamic-management-views/sys-dm-resource-governor-resource-pools-transact-sql)|Returns information about the current resource pool state, the current configuration of resource pools, and cumulative resource pool statistics.| -|[sys.dm_resource_governor_workload_groups](/sql/relational-databases/system-dynamic-management-views/sys-dm-resource-governor-workload-groups-transact-sql)|Returns cumulative workload group statistics and the current configuration of the workload group. This view can be joined with sys.dm_resource_governor_resource_pools on the `pool_id` column to get resource pool information.| -|[sys.dm_resource_governor_resource_pools_history_ex](/sql/relational-databases/system-dynamic-management-views/sys-dm-resource-governor-resource-pools-history-ex-azure-sql-database)|Returns resource pool utilization statistics for recent history, based on the number of snapshots available. Each row represents a time interval. The duration of the interval is provided in the `duration_ms` column. 
The `delta_` columns return the change in each statistic during the interval.| -|[sys.dm_resource_governor_workload_groups_history_ex](/sql/relational-databases/system-dynamic-management-views/sys-dm-resource-governor-workload-groups-history-ex-azure-sql-database)|Returns workload group utilization statistics for recent history, based on the number of snapshots available. Each row represents a time interval. The duration of the interval is provided in the `duration_ms` column. The `delta_` columns return the change in each statistic during the interval.| - -> [!TIP] -> To query these and other dynamic management views using a principal other than server administrator, add this principal to the `##MS_ServerStateReader##` [server role](security-server-roles.md). - -These views can be used to monitor resource utilization and troubleshoot resource contention in near real-time. User workload on the primary and readable secondary replicas, including geo-replicas, is classified into the `SloSharedPool1` resource pool and `UserPrimaryGroup.DBId[N]` workload group, where `N` stands for the database ID value. - -In addition to monitoring current resource utilization, customers using dense pools can maintain historical resource utilization data in a separate data store. This data can be used in predictive analysis to proactively manage resource utilization based on historical and seasonal trends. - -## Operational recommendations - -**Leave sufficient resource headroom**. If resource contention and performance degradation occur, mitigation may involve moving some databases out of the affected elastic pool, or scaling up the pool, as noted earlier. However, these actions require additional compute resources to complete. In particular, for Premium and Business Critical pools, these actions require transferring all data for the databases being moved, or for all databases in the elastic pool if the pool is scaled up. 
Data transfer is a long running and resource-intensive operation. If the pool is already under high resource pressure, the mitigating operation itself will degrade performance even further. In extreme cases, it may not be possible to solve resource contention via database move or pool scale-up because the required resources are not available. In this case, temporarily reducing query workload on the affected elastic pool may be the only solution. - -Customers using dense pools should closely monitor resource utilization trends as described earlier, and take mitigating action while metrics remain within the recommended ranges and there are still sufficient resources in the elastic pool. - -Resource utilization depends on multiple factors that change over time for each database and each elastic pool. Achieving optimal price/performance ratio in dense pools requires continuous monitoring and rebalancing, that is moving databases from more utilized pools to less utilized pools, and creating new pools as necessary to accommodate increased workload. - -> [!NOTE] -> For DTU elastic pools, the eDTU metric at the pool level is not a MAX or a SUM of individual database utilization. It is derived from the utilization of various pool level metrics. Pool level resource limits may be higher than individual database level limits, so it is possible that an individual database can reach a specific resource limit (CPU, data IO, log IO, etc.), even when the eDTU reporting for the pool indicates no limit has been reached. - -**Do not move "hot" databases**. If resource contention at the pool level is primarily caused by a small number of highly utilized databases, it may be tempting to move these databases to a less utilized pool, or make them standalone databases. However, doing this while a database remains highly utilized is not recommended, because the move operation will further degrade performance, both for the database being moved, and for the entire pool. 
Instead, either wait until high utilization subsides, or move less utilized databases instead to relieve resource pressure at the pool level. But moving databases with very low utilization does not provide any benefit in this case, because it does not materially reduce resource utilization at the pool level. - -**Create new databases in a "quarantine" pool**. In scenarios where new databases are created frequently, such as applications using the tenant-per-database model, there is risk that a new database placed into an existing elastic pool will unexpectedly consume significant resources and affect other databases and internal processes in the pool. To mitigate this risk, create a separate "quarantine" pool with ample allocation of resources. Use this pool for new databases with yet unknown resource consumption patterns. Once a database has stayed in this pool for a business cycle, such as a week or a month, and its resource consumption is known, it can be moved to a pool with sufficient capacity to accommodate this additional resource usage. - -**Monitor both used and allocated space**. When allocated pool space (total size of all database files in storage for all databases in a pool) reaches maximum pool size, out-of-space errors may occur. 
If allocated space trends high and is on track to reach maximum pool size, mitigation options include: -- Move some databases out of the pool to reduce total allocated space -- [Shrink](file-space-manage.md) database files to reduce empty allocated space in files -- Scale up the pool to a service objective with a larger maximum pool size - -If used pool space (total size of data in all databases in a pool, not including empty space in files) trends high and is on track to reach maximum pool size, mitigation options include: -- Move some databases out of the pool to reduce total used space -- Move (archive) data outside of the database, or delete no longer needed data -- Implement [data compression](/sql/relational-databases/data-compression/data-compression) -- Scale up the pool to a service objective with a larger maximum pool size - -**Avoid overly dense servers**. Azure SQL Database [supports](./resource-limits-logical-server.md) up to 5000 databases per server. Customers using elastic pools with thousands of databases may consider placing multiple elastic pools on a single server, with the total number of databases up to the supported limit. However, servers with many thousands of databases create operational challenges. Operations that require enumerating all databases on a server, for example viewing databases in the portal, will be slower. Operational errors, such as incorrect modification of server level logins or firewall rules, will affect a larger number of databases. Accidental deletion of the server will require assistance from Microsoft Support to recover databases on the deleted server, and will cause a prolonged outage for all affected databases. - -Limit the number of databases per server to a lower number than the maximum supported. In many scenarios, using up to 1000-2000 databases per server is optimal. 
To reduce the likelihood of accidental server deletion, place a [delete lock](../../azure-resource-manager/management/lock-resources.md) on the server or its resource group. - -## Examples - -### View individual database capacity settings - -Use the `sys.dm_user_db_resource_governance` dynamic management view to view the actual configuration and capacity settings used by resource governance in the current database or elastic pool. For more information, see [sys.dm_user_db_resource_governance](/sql/relational-databases/system-dynamic-management-views/sys-dm-user-db-resource-governor-azure-sql-database). - -Run this query in any database in an elastic pool. All databases in the pool have the same resource governance settings. - -```sql -SELECT * FROM sys.dm_user_db_resource_governance AS rg -WHERE database_id = DB_ID(); -``` - -### Monitoring overall elastic pool resource consumption - -Use the `sys.elastic_pool_resource_stats` system catalog view to monitor the resource consumption of the entire pool. For more information, see [sys.elastic_pool_resource_stats](/sql/relational-databases/system-catalog-views/sys-elastic-pool-resource-stats-azure-sql-database). - -This sample query to view the last 10 minutes should be run in the `master` database of the logical Azure SQL server containing the desired elastic pool. - -```sql -SELECT * FROM sys.elastic_pool_resource_stats AS rs -WHERE rs.start_time > DATEADD(mi, -10, SYSUTCDATETIME()) -AND rs.elastic_pool_name = ''; -``` - -### Monitoring individual database resource consumption - -Use the `sys.dm_db_resource_stats` dynamic management view to monitor the resource consumption of individual databases. For more information, see [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database). One row exists for every 15 seconds, even if there is no activity. Historical data is maintained for approximately one hour. 
- -This sample query to view the last 10 minutes of data should be run in the desired database. - -```sql -SELECT * FROM sys.dm_db_resource_stats AS rs -WHERE rs.end_time > DATEADD(mi, -10, SYSUTCDATETIME()); -``` - -For longer retention time with less frequency, consider the following query on `sys.resource_stats`, run in the `master` database of the Azure SQL logical server. For more information, see [sys.resource_stats (Azure SQL Database)](/sql/relational-databases/system-catalog-views/sys-resource-stats-azure-sql-database). One row exists every five minutes, and historical data is maintained for two weeks. - -```sql -SELECT * FROM sys.resource_stats -WHERE [database_name] = 'sample' -ORDER BY [start_time] desc; -``` - -### Monitoring memory utilization - -This query calculates the `oom_per_second` metric for each resource pool for recent history, based on the number of snapshots available. This sample query helps identify the recent average number of failed memory allocations in the pool. This query can be run in any database in an elastic pool. - -```sql -SELECT pool_id, - name AS resource_pool_name, - IIF(name LIKE 'SloSharedPool%' OR name LIKE 'UserPool%', 'user', 'system') AS resource_pool_type, - SUM(CAST(delta_out_of_memory_count AS decimal))/(SUM(duration_ms)/1000.) AS oom_per_second -FROM sys.dm_resource_governor_resource_pools_history_ex -GROUP BY pool_id, name -ORDER BY pool_id; -``` - -### Monitoring `tempdb` log space utilization - -This query returns the current value of the `tempdb_log_used_percent` metric, showing the relative utilization of the `tempdb` transaction log relative to its maximum allowed size. This query can be run in any database in an elastic pool. - -```sql -SELECT (lsu.used_log_space_in_bytes / df.log_max_size_bytes) * 100 AS tempdb_log_space_used_percent -FROM tempdb.sys.dm_db_log_space_usage AS lsu -CROSS JOIN ( - SELECT SUM(CAST(max_size AS bigint)) * 8 * 1024. 
AS log_max_size_bytes - FROM tempdb.sys.database_files - WHERE type_desc = N'LOG' - ) AS df -; -``` - -## Next steps - -- For an introduction to elastic pools, see [Elastic pools help you manage and scale multiple databases in Azure SQL Database](./elastic-pool-overview.md). -- For information on tuning query workloads to reduce resource utilization, see [Monitoring and tuning](monitoring-tuning-index.yml), and [Monitoring and performance tuning](./monitor-tune-overview.md). diff --git a/articles/azure-sql/database/elastic-pool-scale.md b/articles/azure-sql/database/elastic-pool-scale.md deleted file mode 100644 index 497aabdeed972..0000000000000 --- a/articles/azure-sql/database/elastic-pool-scale.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Scale elastic pool resources -description: This page describes scaling resources for elastic pools in Azure SQL Database. -services: sql-database -ms.service: sql-database -ms.subservice: elastic-pools -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: arvindshmicrosoft -ms.author: arvindsh -ms.reviewer: kendralittle, mathoma -ms.date: 04/09/2021 ---- -# Scale elastic pool resources in Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article describes how to scale the compute and storage resources available for elastic pools and pooled databases in Azure SQL Database. 
- -## Change compute resources (vCores or DTUs) - -After initially picking the number of vCores or eDTUs, you can scale an elastic pool up or down dynamically based on actual experience using the following: - -* [Transact-SQL](/sql/t-sql/statements/alter-database-transact-sql#overview-sql-database) -* [Azure portal](elastic-pool-manage.md#azure-portal) -* [PowerShell](/powershell/module/az.sql/Get-AzSqlElasticPool) -* [Azure CLI](/cli/azure/sql/elastic-pool#az-sql-elastic-pool-update) -* [REST API](/rest/api/sql/elasticpools/update) - - -### Impact of changing service tier or rescaling compute size - -Changing the service tier or compute size of an elastic pool follows a similar pattern as for single databases and mainly involves the service performing the following steps: - -1. Create new compute instance for the elastic pool - - A new compute instance for the elastic pool is created with the requested service tier and compute size. For some combinations of service tier and compute size changes, a replica of each database must be created in the new compute instance which involves copying data and can strongly influence the overall latency. Regardless, the databases remain online during this step, and connections continue to be directed to the databases in the original compute instance. - -2. Switch routing of connections to new compute instance - - Existing connections to the databases in the original compute instance are dropped. Any new connections are established to the databases in the new compute instance. For some combinations of service tier and compute size changes, database files are detached and reattached during the switch. Regardless, the switch can result in a brief service interruption when databases are unavailable generally for less than 30 seconds and often for only a few seconds. If there are long running transactions running when connections are dropped, the duration of this step may take longer in order to recover aborted transactions. 
[Accelerated Database Recovery](../accelerated-database-recovery.md) can reduce the impact from aborting long running transactions. - -> [!IMPORTANT] -> No data is lost during any step in the workflow. - -### Latency of changing service tier or rescaling compute size - -The estimated latency to change the service tier, scale the compute size of a single database or elastic pool, move a database in/out of an elastic pool, or move a database between elastic pools is parameterized as follows: - -|Service tier|Basic single database,
    Standard (S0-S1)|Basic elastic pool,
    Standard (S2-S12),
    General Purpose single database or elastic pool|Premium or Business Critical single database or elastic pool|Hyperscale -|:---|:---|:---|:---|:---| -|**Basic single database,
    Standard (S0-S1)**|•  Constant time latency independent of space used
    •  Typically, less than 5 minutes|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used| -|**Basic elastic pool,
    Standard (S2-S12),
    General Purpose single database or elastic pool**|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used|•  For single databases, constant time latency independent of space used
    •  Typically, less than 5 minutes for single databases
    •  For elastic pools, proportional to the number of databases|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used| -|**Premium or Business Critical single database or elastic pool**|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used| -|**Hyperscale**|N/A|N/A|N/A|•  Constant time latency independent of space used
    •  Typically, less than 2 minutes| - -> [!NOTE] -> -> - In the case of changing the service tier or rescaling compute for an elastic pool, the summation of space used across all databases in the pool should be used to calculate the estimate. -> - In the case of moving a database to/from an elastic pool, only the space used by the database impacts the latency, not the space used by the elastic pool. -> - For Standard and General Purpose elastic pools, latency of moving a database in/out of an elastic pool or between elastic pools will be proportional to database size if the elastic pool is using Premium File Share ([PFS](../../storage/files/storage-files-introduction.md)) storage. To determine if a pool is using PFS storage, execute the following query in the context of any database in the pool. If the value in the AccountType column is `PremiumFileStorage` or `PremiumFileStorage-ZRS`, the pool is using PFS storage. - -```sql -SELECT s.file_id, - s.type_desc, - s.name, - FILEPROPERTYEX(s.name, 'AccountType') AS AccountType -FROM sys.database_files AS s -WHERE s.type_desc IN ('ROWS', 'LOG'); -``` - -> [!TIP] -> To monitor in-progress operations, see: [Manage operations using the SQL REST API](/rest/api/sql/operations/list), [Manage operations using CLI](/cli/azure/sql/db/op), [Monitor operations using T-SQL](/sql/relational-databases/system-dynamic-management-views/sys-dm-operation-status-azure-sql-database) and these two PowerShell commands: [Get-AzSqlDatabaseActivity](/powershell/module/az.sql/get-azsqldatabaseactivity) and [Stop-AzSqlDatabaseActivity](/powershell/module/az.sql/stop-azsqldatabaseactivity). - -### Additional considerations when changing service tier or rescaling compute size - -- When downsizing vCores or eDTUs for an elastic pool, the pool used space must be smaller than the maximum allowed size of the target service tier and pool eDTUs. 
-- When rescaling eDTUs for an elastic pool, an extra storage cost applies if (1) the storage max size of the pool is supported by the target pool, and (2) the storage max size exceeds the included storage amount of the target pool. For example, if a 100 eDTU Standard pool with a max size of 100 GB is downsized to a 50 eDTU Standard pool, then an extra storage cost applies since target pool supports a max size of 100 GB and its included storage amount is only 50 GB. So, the extra storage amount is 100 GB – 50 GB = 50 GB. For pricing of extra storage, see [SQL Database pricing](https://azure.microsoft.com/pricing/details/sql-database/). If the actual amount of space used is less than the included storage amount, then this extra cost can be avoided by reducing the database max size to the included amount. - -### Billing during rescaling - -You are billed for each hour a database exists using the highest service tier + compute size that applied during that hour, regardless of usage or whether the database was active for less than an hour. For example, if you create a single database and delete it five minutes later your bill reflects a charge for one database hour. - -## Change elastic pool storage size - -> [!IMPORTANT] -> Under some circumstances, you may need to shrink a database to reclaim unused space. For more information, see [Manage file space in Azure SQL Database](file-space-manage.md). - -### vCore-based purchasing model - -- Storage can be provisioned up to the max size limit: - - - For storage in the standard or general purpose service tiers, increase or decrease size in 10-GB increments - - For storage in the premium or business critical service tiers, increase or decrease size in 250-GB increments -- Storage for an elastic pool can be provisioned by increasing or decreasing its max size. -- The price of storage for an elastic pool is the storage amount multiplied by the storage unit price of the service tier. 
For details on the price of extra storage, see [SQL Database pricing](https://azure.microsoft.com/pricing/details/sql-database/). - -> [!IMPORTANT] -> Under some circumstances, you may need to shrink a database to reclaim unused space. For more information, see [Manage file space in Azure SQL Database](file-space-manage.md). - -### DTU-based purchasing model - -- The eDTU price for an elastic pool includes a certain amount of storage at no additional cost. Extra storage beyond the included amount can be provisioned for an additional cost up to the max size limit in increments of 250 GB up to 1 TB, and then in increments of 256 GB beyond 1 TB. For included storage amounts and max size limits, see [Resources limits for elastic pools using the DTU purchasing model](resource-limits-dtu-elastic-pools.md#elastic-pool-storage-sizes-and-compute-sizes) or [Resource limits for elastic pools using the vCore purchasing model](resource-limits-vcore-elastic-pools.md). -- Extra storage for an elastic pool can be provisioned by increasing its max size using the [Azure portal](elastic-pool-manage.md#azure-portal), [PowerShell](/powershell/module/az.sql/Get-AzSqlElasticPool), the [Azure CLI](/cli/azure/sql/elastic-pool#az-sql-elastic-pool-update), or the [REST API](/rest/api/sql/elasticpools/update). -- The price of extra storage for an elastic pool is the extra storage amount multiplied by the extra storage unit price of the service tier. For details on the price of extra storage, see [SQL Database pricing](https://azure.microsoft.com/pricing/details/sql-database/). - -> [!IMPORTANT] -> Under some circumstances, you may need to shrink a database to reclaim unused space. For more information, see [Manage file space in Azure SQL Database](file-space-manage.md). 
- -## Next steps - -For overall resource limits, see [SQL Database vCore-based resource limits - elastic pools](resource-limits-vcore-elastic-pools.md) and [SQL Database DTU-based resource limits - elastic pools](resource-limits-dtu-elastic-pools.md). \ No newline at end of file diff --git a/articles/azure-sql/database/elastic-query-getting-started-vertical.md b/articles/azure-sql/database/elastic-query-getting-started-vertical.md deleted file mode 100644 index 80be0931c5c1b..0000000000000 --- a/articles/azure-sql/database/elastic-query-getting-started-vertical.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: Get started with cross-database queries -description: how to use elastic database query with vertically partitioned databases -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 01/25/2019 ---- -# Get started with cross-database queries (vertical partitioning) (preview) -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Elastic database query (preview) for Azure SQL Database allows you to run T-SQL queries that span multiple databases using a single connection point. This article applies to [vertically partitioned databases](elastic-query-vertical-partitioning.md). - -When completed, you will: learn how to configure and use an Azure SQL Database to perform queries that span multiple related databases. - -For more information about the elastic database query feature, see [Azure SQL Database elastic database query overview](elastic-query-overview.md). - -## Prerequisites - -ALTER ANY EXTERNAL DATA SOURCE permission is required. This permission is included with the ALTER DATABASE permission. ALTER ANY EXTERNAL DATA SOURCE permissions are needed to refer to the underlying data source. 
- -## Create the sample databases - -To start with, create two databases, **Customers** and **Orders**, either in the same or different servers. - -Execute the following queries on the **Orders** database to create the **OrderInformation** table and input the sample data. - -```tsql -CREATE TABLE [dbo].[OrderInformation]( - [OrderID] [int] NOT NULL, - [CustomerID] [int] NOT NULL - ) -INSERT INTO [dbo].[OrderInformation] ([OrderID], [CustomerID]) VALUES (123, 1) -INSERT INTO [dbo].[OrderInformation] ([OrderID], [CustomerID]) VALUES (149, 2) -INSERT INTO [dbo].[OrderInformation] ([OrderID], [CustomerID]) VALUES (857, 2) -INSERT INTO [dbo].[OrderInformation] ([OrderID], [CustomerID]) VALUES (321, 1) -INSERT INTO [dbo].[OrderInformation] ([OrderID], [CustomerID]) VALUES (564, 8) -``` - -Now, execute following query on the **Customers** database to create the **CustomerInformation** table and input the sample data. - -```tsql -CREATE TABLE [dbo].[CustomerInformation]( - [CustomerID] [int] NOT NULL, - [CustomerName] [varchar](50) NULL, - [Company] [varchar](50) NULL - CONSTRAINT [CustID] PRIMARY KEY CLUSTERED ([CustomerID] ASC) -) -INSERT INTO [dbo].[CustomerInformation] ([CustomerID], [CustomerName], [Company]) VALUES (1, 'Jack', 'ABC') -INSERT INTO [dbo].[CustomerInformation] ([CustomerID], [CustomerName], [Company]) VALUES (2, 'Steve', 'XYZ') -INSERT INTO [dbo].[CustomerInformation] ([CustomerID], [CustomerName], [Company]) VALUES (3, 'Lylla', 'MNO') -``` - -## Create database objects - -### Database scoped master key and credentials - -1. Open SQL Server Management Studio or SQL Server Data Tools in Visual Studio. -2. Connect to the Orders database and execute the following T-SQL commands: - - ```tsql - CREATE MASTER KEY ENCRYPTION BY PASSWORD = ''; - CREATE DATABASE SCOPED CREDENTIAL ElasticDBQueryCred - WITH IDENTITY = '', - SECRET = ''; - ``` - - The "master_key_password" is a strong password of your choosing used to encrypt the connection credentials. 
- The "username" and "password" should be the username and password used to log in to the Customers database (create a new user in Customers database if one does not already exist). - Authentication using Azure Active Directory with elastic queries is not currently supported. - -### External data sources - -To create an external data source, execute the following command on the Orders database: - -```tsql -CREATE EXTERNAL DATA SOURCE MyElasticDBQueryDataSrc WITH - (TYPE = RDBMS, - LOCATION = '.database.windows.net', - DATABASE_NAME = 'Customers', - CREDENTIAL = ElasticDBQueryCred, -) ; -``` - -### External tables - -Create an external table on the Orders database, which matches the definition of the CustomerInformation table: - -```tsql -CREATE EXTERNAL TABLE [dbo].[CustomerInformation] -( [CustomerID] [int] NOT NULL, - [CustomerName] [varchar](50) NOT NULL, - [Company] [varchar](50) NOT NULL) -WITH -( DATA_SOURCE = MyElasticDBQueryDataSrc) -``` - -## Execute a sample elastic database T-SQL query - -Once you have defined your external data source and your external tables, you can now use T-SQL to query your external tables. Execute this query on the Orders database: - -```tsql -SELECT OrderInformation.CustomerID, OrderInformation.OrderId, CustomerInformation.CustomerName, CustomerInformation.Company -FROM OrderInformation -INNER JOIN CustomerInformation -ON CustomerInformation.CustomerID = OrderInformation.CustomerID -``` - -## Cost - -Currently, the elastic database query feature is included in the cost of your Azure SQL Database. - -For pricing information, see [SQL Database Pricing](https://azure.microsoft.com/pricing/details/sql-database). - -## Next steps - 
-* For syntax and sample queries for vertically partitioned data, see [Querying vertically partitioned data)](elastic-query-vertical-partitioning.md) -* For a horizontal partitioning (sharding) tutorial, see [Getting started with elastic query for horizontal partitioning (sharding)](elastic-query-getting-started.md). -* For syntax and sample queries for horizontally partitioned data, see [Querying horizontally partitioned data)](elastic-query-horizontal-partitioning.md) -* See [sp\_execute \_remote](/sql/relational-databases/system-stored-procedures/sp-execute-remote-azure-sql-database) for a stored procedure that executes a Transact-SQL statement on a single remote Azure SQL Database or set of databases serving as shards in a horizontal partitioning scheme. diff --git a/articles/azure-sql/database/elastic-query-getting-started.md b/articles/azure-sql/database/elastic-query-getting-started.md deleted file mode 100644 index 0915ac91d47d7..0000000000000 --- a/articles/azure-sql/database/elastic-query-getting-started.md +++ /dev/null @@ -1,151 +0,0 @@ ---- -title: Report across scaled-out cloud databases -description: Use cross database database queries to report across multiple databases. -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 12/15/2021 - ---- -# Report across scaled-out cloud databases (preview) -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -You can create reports from multiple databases from a single connection point using an [elastic query](elastic-query-overview.md). The databases must be horizontally partitioned (also known as "sharded"). - -If you have an existing database, see [Migrating existing databases to scaled-out databases](elastic-convert-to-use-elastic-tools.md). 
- -To understand the SQL objects needed to query, see [Query across horizontally partitioned databases](elastic-query-horizontal-partitioning.md). - -## Prerequisites - -Download and run the [Getting started with Elastic Database tools sample](elastic-scale-get-started.md). - -## Create a shard map manager using the sample app -Here you will create a shard map manager along with several shards, followed by insertion of data into the shards. If you happen to already have shards setup with sharded data in them, you can skip the following steps and move to the next section. - -1. Build and run the **Getting started with Elastic Database tools** sample application by following the steps in the article section [Download and run the sample app](elastic-scale-get-started.md#download-and-run-the-sample-app-1). Once you finish all the steps, you will see the following command prompt: - - ![command prompt][1] -2. In the command window, type "1" and press **Enter**. This creates the shard map manager, and adds two shards to the server. Then type "3" and press **Enter**; repeat the action four times. This inserts sample data rows in your shards. -3. The [Azure portal](https://portal.azure.com) should show three new databases in your server: - - ![Visual Studio confirmation][2] - - At this point, cross-database queries are supported through the Elastic Database client libraries. For example, use option 4 in the command window. The results from a multi-shard query are always a **UNION ALL** of the results from all shards. - - In the next section, we create a sample database endpoint that supports richer querying of the data across shards. - -## Create an elastic query database - -1. Open the [Azure portal](https://portal.azure.com) and log in. -2. Create a new database in Azure SQL Database in the same server as your shard setup. Name the database "ElasticDBQuery." - - ![Azure portal and pricing tier][3] - - > [!NOTE] - > you can use an existing database. 
If you do so, it must not be one of the shards that you would like to execute your queries on. This database will be used for creating the metadata objects for an elastic database query. - > - -## Create database objects -### Database-scoped master key and credentials -These are used to connect to the shard map manager and the shards: - -1. Open SQL Server Management Studio or SQL Server Data Tools in Visual Studio. -2. Connect to ElasticDBQuery database and execute the following T-SQL commands: - - ```tsql - CREATE MASTER KEY ENCRYPTION BY PASSWORD = ''; - - CREATE DATABASE SCOPED CREDENTIAL ElasticDBQueryCred - WITH IDENTITY = '', - SECRET = ''; - ``` - - "username" and "password" should be the same as login information used in step 3 of section [Download and run the sample app](elastic-scale-get-started.md#download-and-run-the-sample-app) in the **Getting started with Elastic Database tools** article. - -### External data sources -To create an external data source, execute the following command on the ElasticDBQuery database: - -```tsql -CREATE EXTERNAL DATA SOURCE MyElasticDBQueryDataSrc WITH - (TYPE = SHARD_MAP_MANAGER, - LOCATION = '.database.windows.net', - DATABASE_NAME = 'ElasticScaleStarterKit_ShardMapManagerDb', - CREDENTIAL = ElasticDBQueryCred, - SHARD_MAP_NAME = 'CustomerIDShardMap' -) ; -``` - - 
- -### External tables -Create an external table that matches the Customers table on the shards by executing the following command on ElasticDBQuery database: - -```tsql -CREATE EXTERNAL TABLE [dbo].[Customers] -( [CustomerId] [int] NOT NULL, - [Name] [nvarchar](256) NOT NULL, - [RegionId] [int] NOT NULL) -WITH -( DATA_SOURCE = MyElasticDBQueryDataSrc, - DISTRIBUTION = SHARDED([CustomerId]) -) ; -``` - -## Execute a sample elastic database T-SQL query -Once you have defined your external data source and your external tables you can now use full T-SQL over your external tables. - -Execute this query on the ElasticDBQuery database: - -```tsql -select count(CustomerId) from [dbo].[Customers] -``` - -You will notice that the query aggregates results from all the shards and gives the following output: - -![Output details][4] - -## Import elastic database query results to Excel - You can import the results from of a query to an Excel file. - -1. Launch Excel 2013. -2. Navigate to the **Data** ribbon. -3. Click **From Other Sources** and click **From SQL Server**. - - ![Excel import from other sources][5] -4. In the **Data Connection Wizard** type the server name and login credentials. Then click **Next**. -5. In the dialog box **Select the database that contains the data you want**, select the **ElasticDBQuery** database. -6. Select the **Customers** table in the list view and click **Next**. Then click **Finish**. -7. In the **Import Data** form, under **Select how you want to view this data in your workbook**, select **Table** and click **OK**. - -All the rows from **Customers** table, stored in different shards populate the Excel sheet. - -You can now use Excel’s powerful data visualization functions. You can use the connection string with your server name, database name and credentials to connect your BI and data integration tools to the elastic query database. Make sure that SQL Server is supported as a data source for your tool. 
You can refer to the elastic query database and external tables just like any other SQL Server database and SQL Server tables that you would connect to with your tool. - -### Cost -There is no additional charge for using the Elastic Database Query feature. - -For pricing information, see [SQL Database Pricing Details](https://azure.microsoft.com/pricing/details/sql-database/). - -## Next steps - -* For an overview of elastic query, see [Elastic query overview](elastic-query-overview.md). -* For a vertical partitioning tutorial, see [Getting started with cross-database query (vertical partitioning)](elastic-query-getting-started-vertical.md). -* For syntax and sample queries for vertically partitioned data, see [Querying vertically partitioned data](elastic-query-vertical-partitioning.md) -* For syntax and sample queries for horizontally partitioned data, see [Querying horizontally partitioned data](elastic-query-horizontal-partitioning.md) -* See [sp\_execute \_remote](/sql/relational-databases/system-stored-procedures/sp-execute-remote-azure-sql-database) for a stored procedure that executes a Transact-SQL statement on a single remote Azure SQL Database or set of databases serving as shards in a horizontal partitioning scheme. 
- - - -[1]: ./media/elastic-query-getting-started/cmd-prompt.png -[2]: ./media/elastic-query-getting-started/portal.png -[3]: ./media/elastic-query-getting-started/tiers.png -[4]: ./media/elastic-query-getting-started/details.png -[5]: ./media/elastic-query-getting-started/exel-sources.png - diff --git a/articles/azure-sql/database/elastic-query-horizontal-partitioning.md b/articles/azure-sql/database/elastic-query-horizontal-partitioning.md deleted file mode 100644 index 4beb249b0b830..0000000000000 --- a/articles/azure-sql/database/elastic-query-horizontal-partitioning.md +++ /dev/null @@ -1,228 +0,0 @@ ---- -title: Reporting across scaled-out cloud databases -description: how to set up elastic queries over horizontal partitions -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 12/15/2021 ---- -# Reporting across scaled-out cloud databases (preview) -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -![Query across shards][1] - -Sharded databases distribute rows across a scaled out data tier. The schema is identical on all participating databases, also known as horizontal partitioning. Using an elastic query, you can create reports that span all databases in a sharded database. - -For a quickstart, see [Reporting across scaled-out cloud databases](elastic-query-getting-started.md). - -For non-sharded databases, see [Query across cloud databases with different schemas](elastic-query-vertical-partitioning.md). - -## Prerequisites - -* Create a shard map using the elastic database client library. see [Shard map management](elastic-scale-shard-map-management.md). Or use the sample app in [Get started with elastic database tools](elastic-scale-get-started.md). -* Alternatively, see [Migrate existing databases to scaled-out databases](elastic-convert-to-use-elastic-tools.md). 
-* The user must possess ALTER ANY EXTERNAL DATA SOURCE permission. This permission is included with the ALTER DATABASE permission. -* ALTER ANY EXTERNAL DATA SOURCE permissions are needed to refer to the underlying data source. - -## Overview - -These statements create the metadata representation of your sharded data tier in the elastic query database. - -1. [CREATE MASTER KEY](/sql/t-sql/statements/create-master-key-transact-sql) -2. [CREATE DATABASE SCOPED CREDENTIAL](/sql/t-sql/statements/create-database-scoped-credential-transact-sql) -3. [CREATE EXTERNAL DATA SOURCE](/sql/t-sql/statements/create-external-data-source-transact-sql) -4. [CREATE EXTERNAL TABLE](/sql/t-sql/statements/create-external-table-transact-sql) - -## 1.1 Create database scoped master key and credentials - -The credential is used by the elastic query to connect to your remote databases. - -```sql -CREATE MASTER KEY ENCRYPTION BY PASSWORD = 'password'; -CREATE DATABASE SCOPED CREDENTIAL [] WITH IDENTITY = '', -SECRET = ''; -``` - -> [!NOTE] -> Make sure that the *"\"* does not include any *"\@servername"* suffix. - -## 1.2 Create external data sources - -Syntax: - -```syntaxsql - ::= - CREATE EXTERNAL DATA SOURCE WITH - (TYPE = SHARD_MAP_MANAGER, - LOCATION = '', - DATABASE_NAME = ‘', - CREDENTIAL = , - SHARD_MAP_NAME = ‘’ - ) [;] -``` - -### Example - -```sql -CREATE EXTERNAL DATA SOURCE MyExtSrc -WITH -( - TYPE=SHARD_MAP_MANAGER, - LOCATION='myserver.database.windows.net', - DATABASE_NAME='ShardMapDatabase', - CREDENTIAL= SMMUser, - SHARD_MAP_NAME='ShardMap' -); -``` - -Retrieve the list of current external data sources: - -```sql -select * from sys.external_data_sources; -``` - -The external data source references your shard map. An elastic query then uses the external data source and the underlying shard map to enumerate the databases that participate in the data tier. 
-The same credentials are used to read the shard map and to access the data on the shards during the processing of an elastic query. - -## 1.3 Create external tables - -Syntax: - -```syntaxsql -CREATE EXTERNAL TABLE [ database_name . [ schema_name ] . | schema_name. ] table_name - ( { } [ ,...n ]) - { WITH ( ) } -) [;] - - ::= - DATA_SOURCE = , - [ SCHEMA_NAME = N'nonescaped_schema_name',] - [ OBJECT_NAME = N'nonescaped_object_name',] - DISTRIBUTION = SHARDED() | REPLICATED |ROUND_ROBIN -``` - -**Example** - -```sql -CREATE EXTERNAL TABLE [dbo].[order_line]( - [ol_o_id] int NOT NULL, - [ol_d_id] tinyint NOT NULL, - [ol_w_id] int NOT NULL, - [ol_number] tinyint NOT NULL, - [ol_i_id] int NOT NULL, - [ol_delivery_d] datetime NOT NULL, - [ol_amount] smallmoney NOT NULL, - [ol_supply_w_id] int NOT NULL, - [ol_quantity] smallint NOT NULL, - [ol_dist_info] char(24) NOT NULL -) - -WITH -( - DATA_SOURCE = MyExtSrc, - SCHEMA_NAME = 'orders', - OBJECT_NAME = 'order_details', - DISTRIBUTION=SHARDED(ol_w_id) -); -``` - -Retrieve the list of external tables from the current database: - -```sql -SELECT * from sys.external_tables; -``` - -To drop external tables: - -```syntaxsql -DROP EXTERNAL TABLE [ database_name . [ schema_name ] . | schema_name. ] table_name[;] -``` - -### Remarks - -The DATA\_SOURCE clause defines the external data source (a shard map) that is used for the external table. - -The SCHEMA\_NAME and OBJECT\_NAME clauses map the external table definition to a table in a different schema. If omitted, the schema of the remote object is assumed to be “dbo” and its name is assumed to be identical to the external table name being defined. This is useful if the name of your remote table is already taken in the database where you want to create the external table. For example, you want to define an external table to get an aggregate view of catalog views or DMVs on your scaled out data tier. 
Since catalog views and DMVs already exist locally, you cannot use their names for the external table definition. Instead, use a different name and use the catalog view’s or the DMV’s name in the SCHEMA\_NAME and/or OBJECT\_NAME clauses. (See the example below.) - -The DISTRIBUTION clause specifies the data distribution used for this table. The query processor utilizes the information provided in the DISTRIBUTION clause to build the most efficient query plans. - -1. **SHARDED** means data is horizontally partitioned across the databases. The partitioning key for the data distribution is the **** parameter. -2. **REPLICATED** means that identical copies of the table are present on each database. It is your responsibility to ensure that the replicas are identical across the databases. -3. **ROUND\_ROBIN** means that the table is horizontally partitioned using an application-dependent distribution method. - -**Data tier reference**: The external table DDL refers to an external data source. The external data source specifies a shard map that provides the external table with the information necessary to locate all the databases in your data tier. - -### Security considerations - -Users with access to the external table automatically gain access to the underlying remote tables under the credential given in the external data source definition. Avoid undesired elevation of privileges through the credential of the external data source. Use GRANT or REVOKE for an external table as though it were a regular table. - -Once you have defined your external data source and your external tables, you can now use full T-SQL over your external tables. - -## Example: querying horizontal partitioned databases - -The following query performs a three-way join between warehouses, orders, and order lines and uses several aggregates and a selective filter. 
It assumes (1) horizontal partitioning (sharding) and (2) that warehouses, orders, and order lines are sharded by the warehouse ID column, and that the elastic query can co-locate the joins on the shards and process the expensive part of the query on the shards in parallel. - -```sql - select - w_id as warehouse, - o_c_id as customer, - count(*) as cnt_orderline, - max(ol_quantity) as max_quantity, - avg(ol_amount) as avg_amount, - min(ol_delivery_d) as min_deliv_date - from warehouse - join orders - on w_id = o_w_id - join order_line - on o_id = ol_o_id and o_w_id = ol_w_id - where w_id > 100 and w_id < 200 - group by w_id, o_c_id -``` - -## Stored procedure for remote T-SQL execution: sp\_execute_remote - -Elastic query also introduces a stored procedure that provides direct access to the shards. The stored procedure is called [sp\_execute \_remote](/sql/relational-databases/system-stored-procedures/sp-execute-remote-azure-sql-database) and can be used to execute remote stored procedures or T-SQL code on the remote databases. It takes the following parameters: - -* Data source name (nvarchar): The name of the external data source of type RDBMS. -* Query (nvarchar): The T-SQL query to be executed on each shard. -* Parameter declaration (nvarchar) - optional: String with data type definitions for the parameters used in the Query parameter (like sp_executesql). -* Parameter value list - optional: Comma-separated list of parameter values (like sp_executesql). - -The sp\_execute\_remote uses the external data source provided in the invocation parameters to execute the given T-SQL statement on the remote databases. It uses the credential of the external data source to connect to the shardmap manager database and the remote databases. 
- -Example: - -```sql - EXEC sp_execute_remote - N'MyExtSrc', - N'select count(w_id) as foo from warehouse' -``` - -## Connectivity for tools - -Use regular SQL Server connection strings to connect your application, your BI, and data integration tools to the database with your external table definitions. Make sure that SQL Server is supported as a data source for your tool. Then reference the elastic query database like any other SQL Server database connected to the tool, and use external tables from your tool or application as if they were local tables. - -## Best practices - -* Ensure that the elastic query endpoint database has been given access to the shardmap database and all shards through the SQL Database firewalls. -* Validate or enforce the data distribution defined by the external table. If your actual data distribution is different from the distribution specified in your table definition, your queries may yield unexpected results. -* Elastic query currently does not perform shard elimination when predicates over the sharding key would allow it to safely exclude certain shards from processing. -* Elastic query works best for queries where most of the computation can be done on the shards. You typically get the best query performance with selective filter predicates that can be evaluated on the shards or joins over the partitioning keys that can be performed in a partition-aligned way on all shards. Other query patterns may need to load large amounts of data from the shards to the head node and may perform poorly - -## Next steps - -* For an overview of elastic query, see [Elastic query overview](elastic-query-overview.md). -* For a vertical partitioning tutorial, see [Getting started with cross-database query (vertical partitioning)](elastic-query-getting-started-vertical.md). 
-* For syntax and sample queries for vertically partitioned data, see [Querying vertically partitioned data)](elastic-query-vertical-partitioning.md) -* For a horizontal partitioning (sharding) tutorial, see [Getting started with elastic query for horizontal partitioning (sharding)](elastic-query-getting-started.md). -* See [sp\_execute \_remote](/sql/relational-databases/system-stored-procedures/sp-execute-remote-azure-sql-database) for a stored procedure that executes a Transact-SQL statement on a single remote Azure SQL Database or set of databases serving as shards in a horizontal partitioning scheme. - - -[1]: ./media/elastic-query-horizontal-partitioning/horizontalpartitioning.png - diff --git a/articles/azure-sql/database/elastic-query-overview.md b/articles/azure-sql/database/elastic-query-overview.md deleted file mode 100644 index 0af8b8261c10b..0000000000000 --- a/articles/azure-sql/database/elastic-query-overview.md +++ /dev/null @@ -1,166 +0,0 @@ ---- -title: Elastic query overview -description: Elastic query enables you to run a Transact-SQL query that spans multiple databases. -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: overview -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 12/15/2021 ---- - -# Azure SQL Database elastic query overview (preview) -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -The elastic query feature (in preview) enables you to run a Transact-SQL query that spans multiple databases in Azure SQL Database. It allows you to perform cross-database queries to access remote tables, and to connect Microsoft and third-party tools (Excel, Power BI, Tableau, etc.) to query across data tiers with multiple databases. Using this feature, you can scale out queries to large data tiers and visualize the results in business intelligence (BI) reports. 
- -## Why use elastic queries - -### Azure SQL Database - -Query across databases in Azure SQL Database completely in T-SQL. This allows for read-only querying of remote databases and provides an option for current SQL Server customers to migrate applications using three- and four-part names or linked server to SQL Database. - -### Available on all service tiers - -Elastic query is supported in all service tiers of Azure SQL Database. See the section on Preview Limitations below on performance limitations for lower service tiers. - -### Push parameters to remote databases - -Elastic queries can now push SQL parameters to the remote databases for execution. - -### Stored procedure execution - -Execute remote stored procedure calls or remote functions using [sp\_execute \_remote](/sql/relational-databases/system-stored-procedures/sp-execute-remote-azure-sql-database). - -### Flexibility - -External tables with elastic query can refer to remote tables with a different schema or table name. - -## Elastic query scenarios - -The goal is to facilitate querying scenarios where multiple databases contribute rows into a single overall result. The query can either be composed by the user or application directly, or indirectly through tools that are connected to the database. This is especially useful when creating reports, using commercial BI or data integration tools, or any application that cannot be changed. With an elastic query, you can query across several databases using the familiar SQL Server connectivity experience in tools such as Excel, Power BI, Tableau, or Cognos. -An elastic query allows easy access to an entire collection of databases through queries issued by SQL Server Management Studio or Visual Studio, and facilitates cross-database querying from Entity Framework or other ORM environments. 
Figure 1 shows a scenario where an existing cloud application (which uses the [elastic database client library](elastic-database-client-library.md)) builds on a scaled-out data tier, and an elastic query is used for cross-database reporting. - -**Figure 1** Elastic query used on scaled-out data tier - -![Elastic query used on scaled-out data tier][1] - -Customer scenarios for elastic query are characterized by the following topologies: - -* **Vertical partitioning - Cross-database queries** (Topology 1): The data is partitioned vertically between a number of databases in a data tier. Typically, different sets of tables reside on different databases. That means that the schema is different on different databases. For instance, all tables for inventory are on one database while all accounting-related tables are on a second database. Common use cases with this topology require one to query across or to compile reports across tables in several databases. -* **Horizontal Partitioning - Sharding** (Topology 2): Data is partitioned horizontally to distribute rows across a scaled out data tier. With this approach, the schema is identical on all participating databases. This approach is also called "sharding". Sharding can be performed and managed using (1) the elastic database tools libraries or (2) self-sharding. An elastic query is used to query or compile reports across many shards. Shards are typically databases within an elastic pool. You can think of elastic query as an efficient way for querying all databases of elastic pool at once, as long as databases share the common schema. - -> [!NOTE] -> Elastic query works best for reporting scenarios where most of the processing (filtering, aggregation) can be performed on the external source side. It is not suitable for ETL operations where large amount of data is being transferred from remote database(s). 
For heavy reporting workloads or data warehousing scenarios with more complex queries, also consider using [Azure Synapse Analytics](https://azure.microsoft.com/services/synapse-analytics). -> - -## Vertical partitioning - cross-database queries - -To begin coding, see [Getting started with cross-database query (vertical partitioning)](elastic-query-getting-started-vertical.md). - -An elastic query can be used to make data located in a database in SQL Database available to other databases in SQL Database. This allows queries from one database to refer to tables in any other remote database in SQL Database. The first step is to define an external data source for each remote database. The external data source is defined in the local database from which you want to gain access to tables located on the remote database. No changes are necessary on the remote database. For typical vertical partitioning scenarios where different databases have different schemas, elastic queries can be used to implement common use cases such as access to reference data and cross-database querying. - -> [!IMPORTANT] -> You must possess ALTER ANY EXTERNAL DATA SOURCE permission. This permission is included with the ALTER DATABASE permission. ALTER ANY EXTERNAL DATA SOURCE permissions are needed to refer to the underlying data source. -> - -**Reference data**: The topology is used for reference data management. In the figure below, two tables (T1 and T2) with reference data are kept on a dedicated database. Using an elastic query, you can now access tables T1 and T2 remotely from other databases, as shown in the figure. Use topology 1 if reference tables are small or remote queries into reference table have selective predicates. 
- -**Figure 2** Vertical partitioning - Using elastic query to query reference data - -![Vertical partitioning - Using elastic query to query reference data][3] - -**Cross-database querying**: Elastic queries enable use cases that require querying across several databases in SQL Database. Figure 3 shows four different databases: CRM, Inventory, HR, and Products. Queries performed in one of the databases also need access to one or all the other databases. Using an elastic query, you can configure your database for this case by running a few simple DDL statements on each of the four databases. After this one-time configuration, access to a remote table is as simple as referring to a local table from your T-SQL queries or from your BI tools. This approach is recommended if the remote queries do not return large results. - -**Figure 3** Vertical partitioning - Using elastic query to query across various databases - -![Vertical partitioning - Using elastic query to query across various databases][4] - -The following steps configure elastic database queries for vertical partitioning scenarios that require access to a table located on remote databases in SQL Database with the same schema: - -* [CREATE MASTER KEY](/sql/t-sql/statements/create-master-key-transact-sql) mymasterkey -* [CREATE DATABASE SCOPED CREDENTIAL](/sql/t-sql/statements/create-database-scoped-credential-transact-sql) mycredential -* [CREATE/DROP EXTERNAL DATA SOURCE](/sql/t-sql/statements/create-external-data-source-transact-sql) mydatasource of type **RDBMS** -* [CREATE/DROP EXTERNAL TABLE](/sql/t-sql/statements/create-external-table-transact-sql) mytable - -After running the DDL statements, you can access the remote table "mytable" as though it were a local table. Azure SQL Database automatically opens a connection to the remote database, processes your request on the remote database, and returns the results. 
- -## Horizontal partitioning - sharding - -Using elastic query to perform reporting tasks over a sharded, that is, horizontally partitioned, data tier requires an [elastic database shard map](elastic-scale-shard-map-management.md) to represent the databases of the data tier. Typically, only a single shard map is used in this scenario and a dedicated database with elastic query capabilities (head node) serves as the entry point for reporting queries. Only this dedicated database needs access to the shard map. Figure 4 illustrates this topology and its configuration with the elastic query database and shard map. For more information about the elastic database client library and creating shard maps, see [Shard map management](elastic-scale-shard-map-management.md). - -**Figure 4** Horizontal partitioning - Using elastic query for reporting over sharded data tiers - -![Horizontal partitioning - Using elastic query for reporting over sharded data tiers][5] - -> [!NOTE] -> Elastic Query Database (head node) can be separate database, or it can be the same database that hosts the shard map. -> Whatever configuration you choose, make sure that service tier and compute size of that database is high enough to handle the expected amount of login/query requests. - -The following steps configure elastic database queries for horizontal partitioning scenarios that require access to a set of tables located on (typically) several remote databases in SQL Database: - -* [CREATE MASTER KEY](/sql/t-sql/statements/create-master-key-transact-sql) mymasterkey -* [CREATE DATABASE SCOPED CREDENTIAL](/sql/t-sql/statements/create-database-scoped-credential-transact-sql) mycredential -* Create a [shard map](elastic-scale-shard-map-management.md) representing your data tier using the elastic database client library. 
-* [CREATE/DROP EXTERNAL DATA SOURCE](/sql/t-sql/statements/create-external-data-source-transact-sql) mydatasource of type **SHARD_MAP_MANAGER** -* [CREATE/DROP EXTERNAL TABLE](/sql/t-sql/statements/create-external-table-transact-sql) mytable - -Once you have performed these steps, you can access the horizontally partitioned table "mytable" as though it were a local table. Azure SQL Database automatically opens multiple parallel connections to the remote databases where the tables are physically stored, processes the requests on the remote databases, and returns the results. -More information on the steps required for the horizontal partitioning scenario can be found in [elastic query for horizontal partitioning](elastic-query-horizontal-partitioning.md). - -To begin coding, see [Getting started with elastic query for horizontal partitioning (sharding)](elastic-query-getting-started.md). - -> [!IMPORTANT] -> Successful execution of elastic query over a large set of databases relies heavily on the availability of each of databases during the query execution. If one of databases is not available, entire query will fail. If you plan to query hundreds or thousands of databases at once, make sure your client application has retry logic embedded, or consider leveraging [Elastic Database Jobs](./job-automation-overview.md) (preview) and querying smaller subsets of databases, consolidating results of each query into a single destination. - -## T-SQL querying - -Once you have defined your external data sources and your external tables, you can use regular SQL Server connection strings to connect to the databases where you defined your external tables. You can then run T-SQL statements over your external tables on that connection with the limitations outlined below. 
You can find more information and examples of T-SQL queries in the documentation topics for [horizontal partitioning](elastic-query-horizontal-partitioning.md) and [vertical partitioning](elastic-query-vertical-partitioning.md). - -## Connectivity for tools - -You can use regular SQL Server connection strings to connect your applications and BI or data integration tools to databases that have external tables. Make sure that SQL Server is supported as a data source for your tool. Once connected, refer to the elastic query database and the external tables in that database just like you would do with any other SQL Server database that you connect to with your tool. - -> [!IMPORTANT] -> Elastic queries are only supported when connecting with SQL Server Authentication. - -## Cost - -Elastic query is included in the cost of Azure SQL Database. Note that topologies where your remote databases are in a different data center than the elastic query endpoint are supported, but data egress from remote databases is charged at regular [Azure rates](https://azure.microsoft.com/pricing/details/data-transfers/). - -## Preview limitations - -* Running your first elastic query can take up to a few minutes on smaller resources and Standard and General Purpose service tier. This time is necessary to load the elastic query functionality; loading performance improves with higher service tiers and compute sizes. -* Scripting of external data sources or external tables from SSMS or SSDT is not yet supported. -* Import/Export for SQL Database does not yet support external data sources and external tables. If you need to use Import/Export, drop these objects before exporting and then re-create them after importing. -* Elastic query currently only supports read-only access to external tables. You can, however, use full Transact-SQL functionality on the database where the external table is defined.
This can be useful to, e.g., persist temporary results using, for example, SELECT INTO, or to define stored procedures on the elastic query database that refer to external tables. -* Except for nvarchar(max), LOB types (including spatial types) are not supported in external table definitions. As a workaround, you can create a view on the remote database that casts the LOB type into nvarchar(max), define your external table over the view instead of the base table and then cast it back into the original LOB type in your queries. -* Columns of nvarchar(max) data type in result set disable advanced batching techniques used in Elastic Query implementation and may affect performance of the query by an order of magnitude, or even two orders of magnitude in non-canonical use cases where large amounts of non-aggregated data are being transferred as a result of the query. -* Column statistics over external tables are currently not supported. Table statistics are supported, but need to be created manually. -* Cursors are not supported for external tables in Azure SQL Database. -* Elastic query works with Azure SQL Database only. You cannot use it for querying a SQL Server instance. - -## Share your Feedback - -Share feedback on your experience with elastic queries with us below, on the MSDN forums, or on Stack Overflow. We are interested in all kinds of feedback about the service (defects, rough edges, feature gaps). - -## Next steps - -* For a vertical partitioning tutorial, see [Getting started with cross-database query (vertical partitioning)](elastic-query-getting-started-vertical.md). -* For syntax and sample queries for vertically partitioned data, see [Querying vertically partitioned data](elastic-query-vertical-partitioning.md). -* For a horizontal partitioning (sharding) tutorial, see [Getting started with elastic query for horizontal partitioning (sharding)](elastic-query-getting-started.md).
-* For syntax and sample queries for horizontally partitioned data, see [Querying horizontally partitioned data)](elastic-query-horizontal-partitioning.md) -* See [sp\_execute \_remote](/sql/relational-databases/system-stored-procedures/sp-execute-remote-azure-sql-database) for a stored procedure that executes a Transact-SQL statement on a single remote Azure SQL Database or set of databases serving as shards in a horizontal partitioning scheme. - - -[1]: ./media/elastic-query-overview/overview.png -[2]: ./media/elastic-query-overview/topology1.png -[3]: ./media/elastic-query-overview/vertpartrrefdata.png -[4]: ./media/elastic-query-overview/verticalpartitioning.png -[5]: ./media/elastic-query-overview/horizontalpartitioning.png - - diff --git a/articles/azure-sql/database/elastic-query-vertical-partitioning.md b/articles/azure-sql/database/elastic-query-vertical-partitioning.md deleted file mode 100644 index 10843bca7b0da..0000000000000 --- a/articles/azure-sql/database/elastic-query-vertical-partitioning.md +++ /dev/null @@ -1,210 +0,0 @@ ---- -title: Query across cloud databases with different schema -description: how to set up cross-database queries over vertical partitions -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 12/15/2021 ---- -# Query across cloud databases with different schemas (preview) -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -![Query across tables in different databases][1] - -Vertically partitioned databases use different sets of tables on different databases. That means that the schema is different on different databases. For instance, all tables for inventory are on one database while all accounting-related tables are on a second database. - -## Prerequisites - -* The user must possess ALTER ANY EXTERNAL DATA SOURCE permission. 
This permission is included with the ALTER DATABASE permission. -* ALTER ANY EXTERNAL DATA SOURCE permissions are needed to refer to the underlying data source. - -## Overview - -> [!NOTE] -> Unlike with horizontal partitioning, these DDL statements do not depend on defining a data tier with a shard map through the elastic database client library. -> - -1. [CREATE MASTER KEY](/sql/t-sql/statements/create-master-key-transact-sql) -2. [CREATE DATABASE SCOPED CREDENTIAL](/sql/t-sql/statements/create-database-scoped-credential-transact-sql) -3. [CREATE EXTERNAL DATA SOURCE](/sql/t-sql/statements/create-external-data-source-transact-sql) -4. [CREATE EXTERNAL TABLE](/sql/t-sql/statements/create-external-table-transact-sql) - -## Create database scoped master key and credentials - -The credential is used by the elastic query to connect to your remote databases. - -```sql -CREATE MASTER KEY ENCRYPTION BY PASSWORD = 'master_key_password'; -CREATE DATABASE SCOPED CREDENTIAL [] WITH IDENTITY = '', -SECRET = ''; -``` - -> [!NOTE] -> Ensure that the `` does not include any **"\@servername"** suffix. - -## Create external data sources - -Syntax: - -```syntaxsql - ::= -CREATE EXTERNAL DATA SOURCE WITH - (TYPE = RDBMS, - LOCATION = ’’, - DATABASE_NAME = ‘’, - CREDENTIAL = - ) [;] -``` -> [!IMPORTANT] -> The TYPE parameter must be set to **RDBMS**. - -### Example - -The following example illustrates the use of the CREATE statement for external data sources. - -```sql -CREATE EXTERNAL DATA SOURCE RemoteReferenceData - WITH - ( - TYPE=RDBMS, - LOCATION='myserver.database.windows.net', - DATABASE_NAME='ReferenceData', - CREDENTIAL= SqlUser - ); -``` - -To retrieve the list of current external data sources: - -```sql -select * from sys.external_data_sources; -``` - -### External Tables - -Syntax: - -```syntaxsql -CREATE EXTERNAL TABLE [ database_name . [ schema_name ] . | schema_name . 
] table_name - ( { } [ ,...n ]) - { WITH ( ) } - )[;] - - ::= - DATA_SOURCE = , - [ SCHEMA_NAME = N'nonescaped_schema_name',] - [ OBJECT_NAME = N'nonescaped_object_name',] -``` - -### Example - -```sql -CREATE EXTERNAL TABLE [dbo].[customer] - ( - [c_id] int NOT NULL, - [c_firstname] nvarchar(256) NULL, - [c_lastname] nvarchar(256) NOT NULL, - [street] nvarchar(256) NOT NULL, - [city] nvarchar(256) NOT NULL, - [state] nvarchar(20) NULL, - DATA_SOURCE = RemoteReferenceData - ); -``` - -The following example shows how to retrieve the list of external tables from the current database: - -```sql -select * from sys.external_tables; -``` - -### Remarks - -Elastic query extends the existing external table syntax to define external tables that use external data sources of type RDBMS. An external table definition for vertical partitioning covers the following aspects: - -* **Schema**: The external table DDL defines a schema that your queries can use. The schema provided in your external table definition needs to match the schema of the tables in the remote database where the actual data is stored. -* **Remote database reference**: The external table DDL refers to an external data source. The external data source specifies the server name and database name of the remote database where the actual table data is stored. - -Using an external data source as outlined in the previous section, the syntax to create external tables is as follows: - -The DATA_SOURCE clause defines the external data source (i.e. the remote database in vertical partitioning) that is used for the external table. - -The SCHEMA_NAME and OBJECT_NAME clauses allow mapping the external table definition to a table in a different schema on the remote database, or to a table with a different name, respectively. This mapping is useful if you want to define an external table to a catalog view or DMV on your remote database - or any other situation where the remote table name is already taken locally. 
- -The following DDL statement drops an existing external table definition from the local catalog. It does not impact the remote database. - -```sql -DROP EXTERNAL TABLE [ [ schema_name ] . | schema_name. ] table_name[;] -``` - -**Permissions for CREATE/DROP EXTERNAL TABLE**: ALTER ANY EXTERNAL DATA SOURCE permissions are needed for external table DDL, which is also needed to refer to the underlying data source. - -## Security considerations - -Users with access to the external table automatically gain access to the underlying remote tables under the credential given in the external data source definition. Carefully manage access to the external table, in order to avoid undesired elevation of privileges through the credential of the external data source. Regular SQL permissions can be used to GRANT or REVOKE access to an external table just as though it were a regular table. - -## Example: querying vertically partitioned databases - -The following query performs a three-way join between the two local tables for orders and order lines and the remote table for customers. This is an example of the reference data use case for elastic query: - -```sql - SELECT - c_id as customer, - c_lastname as customer_name, - count(*) as cnt_orderline, - max(ol_quantity) as max_quantity, - avg(ol_amount) as avg_amount, - min(ol_delivery_d) as min_deliv_date - FROM customer - JOIN orders - ON c_id = o_c_id - JOIN order_line - ON o_id = ol_o_id and o_c_id = ol_c_id - WHERE c_id = 100 -``` - -## Stored procedure for remote T-SQL execution: sp\_execute_remote - -Elastic query also introduces a stored procedure that provides direct access to the remote database. The stored procedure is called [sp\_execute \_remote](/sql/relational-databases/system-stored-procedures/sp-execute-remote-azure-sql-database) and can be used to execute remote stored procedures or T-SQL code on the remote database. 
It takes the following parameters: - -* Data source name (nvarchar): The name of the external data source of type RDBMS. -* Query (nvarchar): The T-SQL query to be executed on the remote database. -* Parameter declaration (nvarchar) - optional: String with data type definitions for the parameters used in the Query parameter (like sp_executesql). -* Parameter value list - optional: Comma-separated list of parameter values (like sp_executesql). - -The sp\_execute\_remote uses the external data source provided in the invocation parameters to execute the given T-SQL statement on the remote database. It uses the credential of the external data source to connect to the remote database. - -Example: - -```sql - EXEC sp_execute_remote - N'MyExtSrc', - N'select count(w_id) as foo from warehouse' -``` - -## Connectivity for tools - -You can use regular SQL Server connection strings to connect your BI and data integration tools to databases on the server that has elastic query enabled and external tables defined. Make sure that SQL Server is supported as a data source for your tool. Then refer to the elastic query database and its external tables just like any other SQL Server database that you would connect to with your tool. - -## Best practices - -* Ensure that the elastic query endpoint database has been given access to the remote database by enabling access for Azure Services in its Azure SQL Database firewall configuration. Also ensure that the credential provided in the external data source definition can successfully log into the remote database and has the permissions to access the remote table. -* Elastic query works best for queries where most of the computation can be done on the remote databases. You typically get the best query performance with selective filter predicates that can be evaluated on the remote databases or joins that can be performed completely on the remote database. 
Other query patterns may need to load large amounts of data from the remote database and may perform poorly. - -## Next steps - -* For an overview of elastic query, see [Elastic query overview](elastic-query-overview.md). -* For limitations of elastic query, see [Preview limitations](elastic-query-overview.md#preview-limitations) -* For a vertical partitioning tutorial, see [Getting started with cross-database query (vertical partitioning)](elastic-query-getting-started-vertical.md). -* For a horizontal partitioning (sharding) tutorial, see [Getting started with elastic query for horizontal partitioning (sharding)](elastic-query-getting-started.md). -* For syntax and sample queries for horizontally partitioned data, see [Querying horizontally partitioned data)](elastic-query-horizontal-partitioning.md) -* See [sp\_execute \_remote](/sql/relational-databases/system-stored-procedures/sp-execute-remote-azure-sql-database) for a stored procedure that executes a Transact-SQL statement on a single remote Azure SQL Database or set of databases serving as shards in a horizontal partitioning scheme. - - -[1]: ./media/elastic-query-vertical-partitioning/verticalpartitioning.png - - diff --git a/articles/azure-sql/database/elastic-scale-add-a-shard.md b/articles/azure-sql/database/elastic-scale-add-a-shard.md deleted file mode 100644 index 6697510bebdd7..0000000000000 --- a/articles/azure-sql/database/elastic-scale-add-a-shard.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: Adding a shard using elastic database tools -description: How to use Elastic Scale APIs to add new shards to a shard set. 
-services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 01/03/2019 ---- -# Adding a shard using Elastic Database tools -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -## To add a shard for a new range or key - -Applications often need to add new shards to handle data that is expected from new keys or key ranges, for a shard map that already exists. For example, an application sharded by Tenant ID may need to provision a new shard for a new tenant, or data sharded monthly may need a new shard provisioned before the start of each new month. - -If the new range of key values is not already part of an existing mapping, it is simple to add the new shard and associate the new key or range to that shard. - -### Example: adding a shard and its range to an existing shard map - -This sample uses the TryGetShard ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.shardmap.trygetshard), [.NET](/previous-versions/azure/dn823929(v=azure.100))) the CreateShard ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.shardmap.createshard), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmap.createshard)), CreateRangeMapping ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.rangeshardmap.createrangemapping), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.rangeshardmap-1) methods, and creates an instance of the ShardLocation ([Java](/java/api/com.microsoft.azure.elasticdb.shard.base.shardlocation), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardlocation)) class. In the sample below, a database named **sample_shard_2** and all necessary schema objects inside of it have been created to hold range [300, 400). - -```csharp -// sm is a RangeShardMap object. -// Add a new shard to hold the range being added. 
-Shard shard2 = null; - -if (!sm.TryGetShard(new ShardLocation(shardServer, "sample_shard_2"),out shard2)) -{ - shard2 = sm.CreateShard(new ShardLocation(shardServer, "sample_shard_2")); -} - -// Create the mapping and associate it with the new shard -sm.CreateRangeMapping(new RangeMappingCreationInfo - (new Range(300, 400), shard2, MappingStatus.Online)); -``` - -For the .NET version, you can also use PowerShell as an alternative to create a new Shard Map Manager. An example is available [here](https://gallery.technet.microsoft.com/scriptcenter/Azure-SQL-DB-Elastic-731883db). - -## To add a shard for an empty part of an existing range - -In some circumstances, you may have already mapped a range to a shard and partially filled it with data, but you now want upcoming data to be directed to a different shard. For example, you shard by day range and have already allocated 50 days to a shard, but on day 24, you want future data to land in a different shard. The elastic database [split-merge tool](elastic-scale-overview-split-and-merge.md) can perform this operation, but if data movement is not necessary (for example, data for the range of days [25, 50), that is, day 25 inclusive to 50 exclusive, does not yet exist) you can perform this entirely using the Shard Map Management APIs directly. - -### Example: splitting a range and assigning the empty portion to a newly added shard - -A database named “sample_shard_2” and all necessary schema objects inside of it have been created. - -```csharp -// sm is a RangeShardMap object. 
-// Add a new shard to hold the range we will move -Shard shard2 = null; - -if (!sm.TryGetShard(new ShardLocation(shardServer, "sample_shard_2"),out shard2)) -{ - shard2 = sm.CreateShard(new ShardLocation(shardServer,"sample_shard_2")); -} - -// Split the Range holding Key 25 -sm.SplitMapping(sm.GetMappingForKey(25), 25); - -// Map new range holding [25-50) to different shard: -// first take existing mapping offline -sm.MarkMappingOffline(sm.GetMappingForKey(25)); - -// now map while offline to a different shard and take online -RangeMappingUpdate upd = new RangeMappingUpdate(); -upd.Shard = shard2; -sm.MarkMappingOnline(sm.UpdateMapping(sm.GetMappingForKey(25), upd)); -``` - -**Important**: Use this technique only if you are certain that the range for the updated mapping is empty. The preceding methods do not check data for the range being moved, so it is best to include checks in your code. If rows exist in the range being moved, the actual data distribution will not match the updated shard map. Use the [split-merge tool](elastic-scale-overview-split-and-merge.md) to perform the operation instead in these cases. - -[!INCLUDE [elastic-scale-include](../../../includes/elastic-scale-include.md)] \ No newline at end of file diff --git a/articles/azure-sql/database/elastic-scale-configure-deploy-split-and-merge.md b/articles/azure-sql/database/elastic-scale-configure-deploy-split-and-merge.md deleted file mode 100644 index 06d53310ef131..0000000000000 --- a/articles/azure-sql/database/elastic-scale-configure-deploy-split-and-merge.md +++ /dev/null @@ -1,349 +0,0 @@ ---- -title: Deploy a split-merge service -description: Use the split-merge too to move data between sharded databases. 
-services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 12/04/2018 ---- -# Deploy a split-merge service to move data between sharded databases -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -The split-merge tool lets you move data between sharded databases. See [Moving data between scaled-out cloud databases](elastic-scale-overview-split-and-merge.md) - -## Download the Split-Merge packages - -1. Download the latest NuGet version from [NuGet](https://docs.nuget.org/docs/start-here/installing-nuget). - -1. Open a command prompt and navigate to the directory where you downloaded nuget.exe. The download includes PowerShell commands. - -1. Download the latest Split-Merge package into the current directory with the below command: - - ```cmd - nuget install Microsoft.Azure.SqlDatabase.ElasticScale.Service.SplitMerge - ``` - -The files are placed in a directory named **Microsoft.Azure.SqlDatabase.ElasticScale.Service.SplitMerge.x.x.xxx.x** where *x.x.xxx.x* reflects the version number. Find the split-merge Service files in the **content\splitmerge\service** sub-directory, and the Split-Merge PowerShell scripts (and required client dlls) in the **content\splitmerge\powershell** sub-directory. - -## Prerequisites - -1. Create an Azure SQL Database database that will be used as the split-merge status database. Go to the [Azure portal](https://portal.azure.com). Create a new **SQL Database**. Give the database a name and create a new administrator and password. Be sure to record the name and password for later use. - -1. Ensure that your server allows Azure Services to connect to it. In the portal, in the **Firewall Settings**, ensure the **Allow access to Azure Services** setting is set to **On**. Click the "save" icon. - -1. Create an Azure Storage account for diagnostics output. - -1. 
Create an Azure Cloud Service for your Split-Merge service. - -## Configure your Split-Merge service - -### Split-Merge service configuration - -1. In the folder into which you downloaded the Split-Merge assemblies, create a copy of the *ServiceConfiguration.Template.cscfg* file that shipped alongside *SplitMergeService.cspkg* and rename it *ServiceConfiguration.cscfg*. - -1. Open *ServiceConfiguration.cscfg* in a text editor such as Visual Studio that validates inputs such as the format of certificate thumbprints. - -1. Create a new database or choose an existing database to serve as the status database for Split-Merge operations and retrieve the connection string of that database. - - > [!IMPORTANT] - > At this time, the status database must use the Latin collation (SQL\_Latin1\_General\_CP1\_CI\_AS). For more information, see [Windows Collation Name (Transact-SQL)](/sql/t-sql/statements/windows-collation-name-transact-sql). - - With Azure SQL Database, the connection string typically is of the form: - - `Server=.database.windows.net; Database=;User ID=; Password=; Encrypt=True; Connection Timeout=30` - -1. Enter this connection string in the *.cscfg* file in both the **SplitMergeWeb** and **SplitMergeWorker** role sections in the ElasticScaleMetadata setting. - -1. For the **SplitMergeWorker** role, enter a valid connection string to Azure storage for the **WorkerRoleSynchronizationStorageAccountConnectionString** setting. - -### Configure security - -For detailed instructions to configure the security of the service, refer to the [Split-Merge security configuration](elastic-scale-split-merge-security-configuration.md). - -For the purposes of a simple test deployment for this tutorial, a minimal set of configuration steps will be performed to get the service up and running. These steps enable only the one machine/account executing them to communicate with the service. 
- -### Create a self-signed certificate - -Create a new directory and from this directory execute the following command using a [Developer Command Prompt for Visual Studio](/dotnet/framework/tools/developer-command-prompt-for-vs) window: - - ```cmd - makecert ^ - -n "CN=*.cloudapp.net" ^ - -r -cy end -sky exchange -eku "1.3.6.1.5.5.7.3.1,1.3.6.1.5.5.7.3.2" ^ - -a sha256 -len 2048 ^ - -sr currentuser -ss root ^ - -sv MyCert.pvk MyCert.cer - ``` - -You are asked for a password to protect the private key. Enter a strong password and confirm it. You are then prompted for the password to be used once more after that. Click **Yes** at the end to import it to the Trusted Certification Authorities Root store. - -### Create a PFX file - -Execute the following command from the same window where makecert was executed; use the same password that you used to create the certificate: - - ```cmd - pvk2pfx -pvk MyCert.pvk -spc MyCert.cer -pfx MyCert.pfx -pi - ``` - -### Import the client certificate into the personal store - -1. In Windows Explorer, double-click *MyCert.pfx*. -2. In the **Certificate Import Wizard** select **Current User** and click **Next**. -3. Confirm the file path and click **Next**. -4. Type the password, leave **Include all extended properties** checked and click **Next**. -5. Leave **Automatically select the certificate store[…]** checked and click **Next**. -6. Click **Finish** and **OK**. - -### Upload the PFX file to the cloud service - -1. Go to the [Azure portal](https://portal.azure.com). -2. Select **Cloud Services**. -3. Select the cloud service you created above for the Split/Merge service. -4. Click **Certificates** on the top menu. -5. Click **Upload** in the bottom bar. -6. Select the PFX file and enter the same password as above. -7. Once completed, copy the certificate thumbprint from the new entry in the list. 
- -### Update the service configuration file - -Paste the certificate thumbprint copied above into the thumbprint/value attribute of these settings. -For the worker role: - - ```xml - - - ``` - -For the web role: - - ```xml - - - - - - - ``` - -Please note that for production deployments separate certificates should be used for the CA, for encryption, the Server certificate and client certificates. For detailed instructions on this, see [Security Configuration](elastic-scale-split-merge-security-configuration.md). - -## Deploy your service - -1. Go to the [Azure portal](https://portal.azure.com) -2. Select the cloud service that you created earlier. -3. Click **Overview**. -4. Choose the staging environment, then click **Upload**. -5. In the dialog box, enter a deployment label. For both 'Package' and 'Configuration', click 'From Local' and choose the *SplitMergeService.cspkg* file and your cscfg file that you configured earlier. -6. Ensure that the checkbox labeled **Deploy even if one or more roles contain a single instance** is checked. -7. Hit the tick button in the bottom right to begin the deployment. Expect it to take a few minutes to complete. - -## Troubleshoot the deployment - -If your web role fails to come online, it is likely a problem with the security configuration. Check that the TLS/SSL is configured as described above. - -If your worker role fails to come online, but your web role succeeds, it is most likely a problem connecting to the status database that you created earlier. - -- Make sure that the connection string in your cscfg is accurate. -- Check that the server and database exist, and that the user id and password are correct. -- For Azure SQL Database, the connection string should be of the form: - - `Server=.database.windows.net; Database=;User ID=; Password=; Encrypt=True; Connection Timeout=30` - -- Ensure that the server name does not begin with **https://**. -- Ensure that your server allows Azure Services to connect to it. 
To do this, open your database in the portal and ensure that the **Allow access to Azure Services** setting is set to **On****. - -## Test the service deployment - -### Connect with a web browser - -Determine the web endpoint of your Split-Merge service. You can find this in the portal by going to the **Overview** of your cloud service and looking under **Site URL** on the right side. Replace **http://** with **https://** since the default security settings disable the HTTP endpoint. Load the page for this URL into your browser. - -### Test with PowerShell scripts - -The deployment and your environment can be tested by running the included sample PowerShell scripts. - -> [!IMPORTANT] -> The sample scripts run on PowerShell 5.1. They do not currently run on PowerShell 6 or later. - -The script files included are: - -1. *SetupSampleSplitMergeEnvironment.ps1* - sets up a test data tier for Split/Merge (see table below for detailed description) -2. *ExecuteSampleSplitMerge.ps1* - executes test operations on the test data tier (see table below for detailed description) -3. *GetMappings.ps1* - top-level sample script that prints out the current state of the shard mappings. -4. *ShardManagement.psm1* - helper script that wraps the ShardManagement API -5. *SqlDatabaseHelpers.psm1* - helper script for creating and managing databases in SQL Database - - - - - - - - - - - - - - - - - - - - - -
    PowerShell fileSteps
    SetupSampleSplitMergeEnvironment.ps11. Creates a shard map manager database
    2. Creates 2 shard databases. -
    3. Creates a shard map for those databases (deletes any existing shard maps on those databases).
    4. Creates a small sample table in both the shards, and populates the table in one of the shards.
    5. Declares the SchemaInfo for the sharded table.
    - - - - - - - - - - - - - - - - - - -
    PowerShell fileSteps
    ExecuteSampleSplitMerge.ps1 1. Sends a split request to the Split-Merge Service web frontend, which splits half the data from the first shard to the second shard.
    2. Polls the web frontend for the split request status and waits until the request completes.
    3. Sends a merge request to the Split-Merge Service web frontend, which moves the data from the second shard back to the first shard.
    4. Polls the web frontend for the merge request status and waits until the request completes.
    - -## Use PowerShell to verify your deployment - -1. Open a new PowerShell window and navigate to the directory where you downloaded the Split-Merge package, and then navigate into the "powershell" directory. - -2. Create a server (or choose an existing server) where the shard map manager and shards will be created. - - > [!NOTE] - > The *SetupSampleSplitMergeEnvironment.ps1* script creates all these databases on the same server by default to keep the script simple. This is not a restriction of the Split-Merge Service itself. - - A SQL authentication login with read/write access to the DBs will be needed for the Split-Merge service to move data and update the shard map. Since the Split-Merge Service runs in the cloud, it does not currently support Integrated Authentication. - - Make sure the server is configured to allow access from the IP address of the machine running these scripts. You can find this setting under SQL server / Firewalls and virtual networks / Client IP addresses. - -3. Execute the *SetupSampleSplitMergeEnvironment.ps1* script to create the sample environment. - - Running this script will wipe out any existing shard map management data structures on the shard map manager database and the shards. It may be useful to rerun the script if you wish to re-initialize the shard map or shards. - - Sample command line: - - ```cmd - .\SetupSampleSplitMergeEnvironment.ps1 - -UserName 'mysqluser' -Password 'MySqlPassw0rd' -ShardMapManagerServerName 'abcdefghij.database.windows.net' - ``` - -4. Execute the Getmappings.ps1 script to view the mappings that currently exist in the sample environment. - - ```cmd - .\GetMappings.ps1 - -UserName 'mysqluser' -Password 'MySqlPassw0rd' -ShardMapManagerServerName 'abcdefghij.database.windows.net' - ``` - -5. Execute the *ExecuteSampleSplitMerge.ps1* script to execute a split operation (moving half the data on the first shard to the second shard) and then a merge operation (moving the data back onto the first shard). 
If you configured TLS and left the http endpoint disabled, ensure that you use the https:// endpoint instead. - - Sample command line: - - ```cmd - .\ExecuteSampleSplitMerge.ps1 - -UserName 'mysqluser' -Password 'MySqlPassw0rd' - -ShardMapManagerServerName 'abcdefghij.database.windows.net' - -SplitMergeServiceEndpoint 'https://mysplitmergeservice.cloudapp.net' - -CertificateThumbprint '0123456789abcdef0123456789abcdef01234567' - ``` - - If you receive the below error, it is most likely a problem with your Web endpoint's certificate. Try connecting to the Web endpoint with your favorite Web browser and check if there is a certificate error. - - `Invoke-WebRequest : The underlying connection was closed: Could not establish trust relationship for the SSL/TLSsecure channel.` - - If it succeeded, the output should look like the below: - - ```output - > .\ExecuteSampleSplitMerge.ps1 -UserName 'mysqluser' -Password 'MySqlPassw0rd' -ShardMapManagerServerName 'abcdefghij.database.windows.net' -SplitMergeServiceEndpoint 'http://mysplitmergeservice.cloudapp.net' -CertificateThumbprint 0123456789abcdef0123456789abcdef01234567 - > Sending split request - > Began split operation with id dc68dfa0-e22b-4823-886a-9bdc903c80f3 - > Polling split-merge request status. Press Ctrl-C to end - > Progress: 0% | Status: Queued | Details: [Informational] Queued request - > Progress: 5% | Status: Starting | Details: [Informational] Starting split-merge state machine for request. - > Progress: 5% | Status: Starting | Details: [Informational] Performing data consistency checks on target shards. - > Progress: 20% | Status: CopyingReferenceTables | Details: [Informational] Moving reference tables from source to target shard. - > Progress: 20% | Status: CopyingReferenceTables | Details: [Informational] Waiting for reference tables copy completion. - > Progress: 20% | Status: CopyingReferenceTables | Details: [Informational] Moving reference tables from source to target shard. 
- > Progress: 44% | Status: CopyingShardedTables | Details: [Informational] Moving key range [100:110) of Sharded tables - > Progress: 44% | Status: CopyingShardedTables | Details: [Informational] Successfully copied key range [100:110) for table [dbo].[MyShardedTable] - > ... - > ... - > Progress: 90% | Status: Completing | Details: [Informational] Successfully deleted shardlets in table [dbo].[MyShardedTable]. - > Progress: 90% | Status: Completing | Details: [Informational] Deleting any temp tables that were created while processing the request. - > Progress: 100% | Status: Succeeded | Details: [Informational] Successfully processed request. - > Sending merge request - > Began merge operation with id 6ffc308f-d006-466b-b24e-857242ec5f66 - > Polling request status. Press Ctrl-C to end - > Progress: 0% | Status: Queued | Details: [Informational] Queued request - > Progress: 5% | Status: Starting | Details: [Informational] Starting split-merge state machine for request. - > Progress: 5% | Status: Starting | Details: [Informational] Performing data consistency checks on target shards. - > Progress: 20% | Status: CopyingReferenceTables | Details: [Informational] Moving reference tables from source to target shard. - > Progress: 44% | Status: CopyingShardedTables | Details: [Informational] Moving key range [100:110) of Sharded tables - > Progress: 44% | Status: CopyingShardedTables | Details: [Informational] Successfully copied key range [100:110) for table [dbo].[MyShardedTable] - > ... - > ... - > Progress: 90% | Status: Completing | Details: [Informational] Successfully deleted shardlets in table [dbo].[MyShardedTable]. - > Progress: 90% | Status: Completing | Details: [Informational] Deleting any temp tables that were created while processing the request. - > Progress: 100% | Status: Succeeded | Details: [Informational] Successfully processed request. - > - ``` - -6. Experiment with other data types! 
All of these scripts take an optional -ShardKeyType parameter that allows you to specify the key type. The default is Int32, but you can also specify Int64, Guid, or Binary. - -## Create requests - -The service can be used either by using the web UI or by importing and using the SplitMerge.psm1 PowerShell module which will submit your requests through the web role. - -The service can move data in both sharded tables and reference tables. A sharded table has a sharding key column and has different row data on each shard. A reference table is not sharded so it contains the same row data on every shard. Reference tables are useful for data that does not change often and is used to JOIN with sharded tables in queries. - -In order to perform a split-merge operation, you must declare the sharded tables and reference tables that you want to have moved. This is accomplished with the **SchemaInfo** API. This API is in the **Microsoft.Azure.SqlDatabase.ElasticScale.ShardManagement.Schema** namespace. - -1. For each sharded table, create a **ShardedTableInfo** object describing the table's parent schema name (optional, defaults to "dbo"), the table name, and the column name in that table that contains the sharding key. -2. For each reference table, create a **ReferenceTableInfo** object describing the table's parent schema name (optional, defaults to "dbo") and the table name. -3. Add the above TableInfo objects to a new **SchemaInfo** object. -4. Get a reference to a **ShardMapManager** object, and call **GetSchemaInfoCollection**. -5. Add the **SchemaInfo** to the **SchemaInfoCollection**, providing the shard map name. - -An example of this can be seen in the SetupSampleSplitMergeEnvironment.ps1 script. - -The Split-Merge service does not create the target database (or schema for any tables in the database) for you. They must be pre-created before sending a request to the service. 
- -## Troubleshooting - -You may see the below message when running the sample PowerShell scripts: - - `Invoke-WebRequest : The underlying connection was closed: Could not establish trust relationship for the SSL/TLS secure channel.` - -This error means that your TLS/SSL certificate is not configured correctly. Please follow the instructions in section 'Connecting with a web browser'. - -If you cannot submit requests you may see this: - - `[Exception] System.Data.SqlClient.SqlException (0x80131904): Could not find stored procedure 'dbo.InsertRequest'.` - -In this case, check your configuration file, in particular the setting for **WorkerRoleSynchronizationStorageAccountConnectionString**. This error typically indicates that the worker role could not successfully initialize the metadata database on first use. - -[!INCLUDE [elastic-scale-include](../../../includes/elastic-scale-include.md)] - - -[1]: ./media/sql-database-elastic-scale-configure-deploy-split-and-merge/allowed-services.png -[2]: ./media/sql-database-elastic-scale-configure-deploy-split-and-merge/manage.png -[3]: ./media/sql-database-elastic-scale-configure-deploy-split-and-merge/staging.png -[4]: ./media/sql-database-elastic-scale-configure-deploy-split-and-merge/upload.png -[5]: ./media/sql-database-elastic-scale-configure-deploy-split-and-merge/storage.png \ No newline at end of file diff --git a/articles/azure-sql/database/elastic-scale-data-dependent-routing.md b/articles/azure-sql/database/elastic-scale-data-dependent-routing.md deleted file mode 100644 index b6e1945385f31..0000000000000 --- a/articles/azure-sql/database/elastic-scale-data-dependent-routing.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -title: Data-dependent routing -description: How to use the ShardMapManager class in .NET apps for data-dependent routing, a feature of sharded databases in Azure SQL Database -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to 
-author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 01/25/2019 ---- -# Use data-dependent routing to route a query to an appropriate database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -**Data-dependent routing** is the ability to use the data in a query to route the request to an appropriate database. Data-dependent routing is a fundamental pattern when working with sharded databases. The request context may also be used to route the request, especially if the sharding key is not part of the query. Each specific query or transaction in an application using data-dependent routing is restricted to accessing one database per request. For the Azure SQL Database elastic tools, this routing is accomplished with the **ShardMapManager** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapmanager.shardmapmanager), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanager)) class. - -The application does not need to track various connection strings or DB locations associated with different slices of data in the sharded environment. Instead, the [Shard Map Manager](elastic-scale-shard-map-management.md) opens connections to the correct databases when needed, based on the data in the shard map and the value of the sharding key that is the target of the application’s request. The key is typically the *customer_id*, *tenant_id*, *date_key*, or some other specific identifier that is a fundamental parameter of the database request. - -For more information, see [Scaling Out SQL Server with Data-Dependent Routing](/previous-versions/sql/sql-server-2005/administrator/cc966448(v=technet.10)). - -## Download the client library - -To download: - -* The Java version of the library, see [Maven Central Repository](https://search.maven.org/#search%7Cga%7C1%7Celastic-db-tools). 
-* The .NET version of the library, see [NuGet](https://www.nuget.org/packages/Microsoft.Azure.SqlDatabase.ElasticScale.Client/). - -## Using a ShardMapManager in a data-dependent routing application - -Applications should instantiate the **ShardMapManager** during initialization, using the factory call **GetSQLShardMapManager** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapmanager.shardmapmanagerfactory.getsqlshardmapmanager), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanagerfactory.getsqlshardmapmanager)). In this example, both a **ShardMapManager** and a specific **ShardMap** that it contains are initialized. This example shows the GetSqlShardMapManager and GetRangeShardMap ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapmanager.shardmapmanager.getrangeshardmap), [.NET](/previous-versions/azure/dn824173(v=azure.100))) methods. - -```Java -ShardMapManager smm = ShardMapManagerFactory.getSqlShardMapManager(connectionString, ShardMapManagerLoadPolicy.Lazy); -RangeShardMap rangeShardMap = smm.getRangeShardMap(Configuration.getRangeShardMapName(), ShardKeyType.Int32); -``` - -```csharp -ShardMapManager smm = ShardMapManagerFactory.GetSqlShardMapManager(smmConnectionString, ShardMapManagerLoadPolicy.Lazy); -RangeShardMap customerShardMap = smm.GetRangeShardMap("customerMap"); -``` - -### Use lowest privilege credentials possible for getting the shard map - -If an application is not manipulating the shard map itself, the credentials used in the factory method should have read-only permissions on the **Global Shard Map** database. These credentials are typically different from credentials used to open connections to the shard map manager. See also [Credentials used to access the Elastic Database client library](elastic-scale-manage-credentials.md). 
- -## Call the OpenConnectionForKey method - -The **ShardMap.OpenConnectionForKey method** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapper.listshardmapper.openconnectionforkey), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmap.openconnectionforkey)) returns a connection ready for issuing commands to the appropriate database based on the value of the **key** parameter. Shard information is cached in the application by the **ShardMapManager**, so these requests do not typically involve a database lookup against the **Global Shard Map** database. - -```Java -// Syntax: -public Connection openConnectionForKey(Object key, String connectionString, ConnectionOptions options) -``` - -```csharp -// Syntax: -public SqlConnection OpenConnectionForKey(TKey key, string connectionString, ConnectionOptions options) -``` - -* The **key** parameter is used as a lookup key into the shard map to determine the appropriate database for the request. -* The **connectionString** is used to pass only the user credentials for the desired connection. No database name or server name is included in this *connectionString* since the method determines the database and server using the **ShardMap**. -* The **connectionOptions** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapper.connectionoptions), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.connectionoptions)) should be set to **ConnectionOptions.Validate** if in an environment where shard maps may change and rows may move to other databases as a result of split or merge operations. This validation involves a brief query to the local shard map on the target database (not to the global shard map) before the connection is delivered to the application. 
- -If the validation against the local shard map fails (indicating that the cache is incorrect), the Shard Map Manager queries the global shard map to obtain the new correct value for the lookup, update the cache, and obtain and return the appropriate database connection. - -Use **ConnectionOptions.None** only when shard mapping changes are not expected while an application is online. In that case, the cached values can be assumed to always be correct, and the extra round-trip validation call to the target database can be safely skipped. That reduces database traffic. The **connectionOptions** may also be set via a value in a configuration file to indicate whether sharding changes are expected or not during a period of time. - -This example uses the value of an integer key **CustomerID**, using a **ShardMap** object named **customerShardMap**. - -```Java -int customerId = 12345; -int productId = 4321; -// Looks up the key in the shard map and opens a connection to the shard -try (Connection conn = shardMap.openConnectionForKey(customerId, Configuration.getCredentialsConnectionString())) { - // Create a simple command that will insert or update the customer information - PreparedStatement ps = conn.prepareStatement("UPDATE Sales.Customer SET PersonID = ? WHERE CustomerID = ?"); - - ps.setInt(1, productId); - ps.setInt(2, customerId); - ps.executeUpdate(); -} catch (SQLException e) { - e.printStackTrace(); -} -``` - -```csharp -int customerId = 12345; -int newPersonId = 4321; - -// Connect to the shard for that customer ID. No need to call a SqlConnection -// constructor followed by the Open method. -using (SqlConnection conn = customerShardMap.OpenConnectionForKey(customerId, Configuration.GetCredentialsConnectionString(), ConnectionOptions.Validate)) -{ - // Execute a simple command. 
- SqlCommand cmd = conn.CreateCommand(); - cmd.CommandText = @"UPDATE Sales.Customer - SET PersonID = @newPersonID WHERE CustomerID = @customerID"; - - cmd.Parameters.AddWithValue("@customerID", customerId);cmd.Parameters.AddWithValue("@newPersonID", newPersonId); - cmd.ExecuteNonQuery(); -} -``` - -The **OpenConnectionForKey** method returns a new already-open connection to the correct database. Connections utilized in this way still take full advantage of connection pooling. - -The **OpenConnectionForKeyAsync method** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapper.listshardmapper.openconnectionforkeyasync), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmap.openconnectionforkeyasync)) is also available if your application makes use of asynchronous programming. - -## Integrating with transient fault handling - -A best practice in developing data access applications in the cloud is to ensure that transient faults are caught by the app, and that the operations are retried several times before throwing an error. Transient fault handling for cloud applications is discussed at Transient Fault Handling ([Java](/java/api/com.microsoft.azure.elasticdb.core.commons.transientfaulthandling), [.NET](/previous-versions/msp-n-p/dn440719(v=pandp.60))). - -Transient fault handling can coexist naturally with the Data-Dependent Routing pattern. The key requirement is to retry the entire data access request including the **using** block that obtained the data-dependent routing connection. The preceding example could be rewritten as follows. 
- -### Example - data-dependent routing with transient fault handling - -```Java -int customerId = 12345; -int productId = 4321; -try { - SqlDatabaseUtils.getSqlRetryPolicy().executeAction(() -> { - // Looks up the key in the shard map and opens a connection to the shard - try (Connection conn = shardMap.openConnectionForKey(customerId, Configuration.getCredentialsConnectionString())) { - // Create a simple command that will insert or update the customer information - PreparedStatement ps = conn.prepareStatement("UPDATE Sales.Customer SET PersonID = ? WHERE CustomerID = ?"); - - ps.setInt(1, productId); - ps.setInt(2, customerId); - ps.executeUpdate(); - } catch (SQLException e) { - e.printStackTrace(); - } - }); -} catch (Exception e) { - throw new StoreException(e.getMessage(), e); -} -``` - -```csharp -int customerId = 12345; -int newPersonId = 4321; - -Configuration.SqlRetryPolicy.ExecuteAction(() -> { - - // Connect to the shard for a customer ID. - using (SqlConnection conn = customerShardMap.OpenConnectionForKey(customerId, Configuration.GetCredentialsConnectionString(), ConnectionOptions.Validate)) - { - // Execute a simple command - SqlCommand cmd = conn.CreateCommand(); - - cmd.CommandText = @"UPDATE Sales.Customer - SET PersonID = @newPersonID - WHERE CustomerID = @customerID"; - - cmd.Parameters.AddWithValue("@customerID", customerId); - cmd.Parameters.AddWithValue("@newPersonID", newPersonId); - cmd.ExecuteNonQuery(); - - Console.WriteLine("Update completed"); - } -}); -``` - -Packages necessary to implement transient fault handling are downloaded automatically when you build the elastic database sample application. - -## Transactional consistency - -Transactional properties are guaranteed for all operations local to a shard. For example, transactions submitted through data-dependent routing execute within the scope of the target shard for the connection. 
At this time, there are no capabilities provided for enlisting multiple connections into a transaction, and therefore there are no transactional guarantees for operations performed across shards. - -## Next steps - -To detach a shard, or to reattach a shard, see [Using the RecoveryManager class to fix shard map problems](elastic-database-recovery-manager.md). - -[!INCLUDE [elastic-scale-include](../../../includes/elastic-scale-include.md)] \ No newline at end of file diff --git a/articles/azure-sql/database/elastic-scale-faq.yml b/articles/azure-sql/database/elastic-scale-faq.yml deleted file mode 100644 index a83eaace0b3c1..0000000000000 --- a/articles/azure-sql/database/elastic-scale-faq.yml +++ /dev/null @@ -1,69 +0,0 @@ -### YamlMime:FAQ -metadata: - title: Elastic Scale FAQ - description: Frequently Asked Questions about Azure SQL Database Elastic Scale. - services: sql-database - ms.service: sql-database - ms.subservice: scale-out - ms.custom: sqldbrb=1 - ms.devlang: - ms.topic: faq - author: scoriani - ms.author: scoriani - ms.reviewer: kendralittle, mathoma - ms.date: 01/25/2019 -title: Elastic database tools frequently asked questions (FAQ) -summary: | - [!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - - -sections: - - name: Ignored - questions: - - question: | - If I have a single-tenant per shard and no sharding key, how do I populate the sharding key for the schema info - answer: | - The schema info object is only used to split merge scenarios. If an application is inherently single-tenant, then it does not require the Split Merge tool and thus there is no need to populate the schema info object. - - - question: | - I’ve provisioned a database and I already have a Shard Map Manager, how do I register this new database as a shard - answer: | - Please see [Adding a shard to an application using the elastic database client library](elastic-scale-add-a-shard.md). 
- - - question: | - How much do elastic database tools cost - answer: | - Using the elastic database client library does not incur any costs. Costs accrue only for the databases in Azure SQL Database that you use for shards and the Shard Map Manager, as well as the web/worker roles you provision for the Split Merge tool. - - - question: | - Why are my credentials not working when I add a shard from a different server - answer: | - Do not use credentials in the form of “User ID=username@servername”, instead simply use “User ID = username”. Also, be sure that the “username” login has permissions on the shard. - - - question: | - Do I need to create a Shard Map Manager and populate shards every time I start my applications - answer: | - No—the creation of the Shard Map Manager (for example, [ShardMapManagerFactory.CreateSqlShardMapManager](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanagerfactory.createsqlshardmapmanager)) is a one-time operation. Your application should use the call [ShardMapManagerFactory.TryGetSqlShardMapManager()](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanagerfactory.trygetsqlshardmapmanager) at application start-up time. There should be only one such call per application domain. - - - question: | - I have questions about using elastic database tools, how do I get them answered - answer: | - Please reach out to us on the [Microsoft Q&A question page for SQL Database](/answers/topics/azure-sql-database.html). - - - question: | - When I get a database connection using a sharding key, I can still query data for other sharding keys on the same shard. Is this by design - answer: | - The Elastic Scale APIs give you a connection to the correct database for your sharding key, but do not provide sharding key filtering. Add **WHERE** clauses to your query to restrict the scope to the provided sharding key, if necessary. 
- - - question: | - Can I use a different SQL Database edition for each shard in my shard set - answer: | - Yes, a shard is an individual database, and thus one shard could be a Premium edition while another be a Standard edition. Further, the edition of a shard can scale up or down multiple times during the lifetime of the shard. - - - question: | - Does the Split Merge tool provision (or delete) a database during a split or merge operation - answer: | - No. For **split** operations, the target database must exist with the appropriate schema and be registered with the Shard Map Manager. For **merge** - operations, you must delete the shard from the shard map manager and then delete the database. - - [!INCLUDE [elastic-scale-include](../../../includes/elastic-scale-include.md)] diff --git a/articles/azure-sql/database/elastic-scale-get-started.md b/articles/azure-sql/database/elastic-scale-get-started.md deleted file mode 100644 index 6ccf349a98791..0000000000000 --- a/articles/azure-sql/database/elastic-scale-get-started.md +++ /dev/null @@ -1,146 +0,0 @@ ---- -title: Get started with Elastic Database Tools -description: Basic explanation of the Elastic Database Tools feature of Azure SQL Database, including an easy-to-run sample app. -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 10/18/2021 ---- -# Get started with Elastic Database Tools -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This document introduces you to the developer experience for the [elastic database client library](elastic-database-client-library.md) by helping you run a sample app. The sample app creates a simple sharded application and explores key capabilities of the Elastic Database Tools feature of Azure SQL Database. 
It focuses on use cases for [shard map management](elastic-scale-shard-map-management.md), [data-dependent routing](elastic-scale-data-dependent-routing.md), and [multi-shard querying](elastic-scale-multishard-querying.md). The client library is available for .NET as well as Java. - -## Elastic Database Tools for Java - -### Prerequisites - -* A Java Developer Kit (JDK), version 1.8 or later -* [Maven](https://maven.apache.org/download.cgi) -* SQL Database or a local SQL Server instance - -### Download and run the sample app - -To build the JAR files and get started with the sample project, do the following: - -1. Clone the [GitHub repository](https://github.com/Microsoft/elastic-db-tools-for-java) containing the client library, along with the sample app. - -2. Edit the _./sample/src/main/resources/resource.properties_ file to set the following: - * TEST_CONN_USER - * TEST_CONN_PASSWORD - * TEST_CONN_SERVER_NAME - -3. To build the sample project, in the _./sample_ directory, run the following command: - - ``` - mvn install - ``` - -4. To start the sample project, in the _./sample_ directory, run the following command: - - ``` - mvn -q exec:java "-Dexec.mainClass=com.microsoft.azure.elasticdb.samples.elasticscalestarterkit.Program" - ``` - -5. To learn more about the client library capabilities, experiment with the various options. Feel free to explore the code to learn about the sample app implementation. - - ![Progress-java][5] - -Congratulations! You have successfully built and run your first sharded application by using Elastic Database Tools on Azure SQL Database. Use Visual Studio or SQL Server Management Studio to connect to your database and take a quick look at the shards that the sample created. You will notice new sample shard databases and a shard map manager database that the sample has created. 
- -To add the client library to your own Maven project, add the following dependency in your POM file: - -```xml - - com.microsoft.azure - elastic-db-tools - 1.0.0 - -``` - -## Elastic Database Tools for .NET - -### Prerequisites - -* Visual Studio 2012 or later with C#. Download a free version at [Visual Studio Downloads](https://www.visualstudio.com/downloads/download-visual-studio-vs.aspx). -* NuGet 2.7 or later. To get the latest version, see [Installing NuGet](https://docs.nuget.org/docs/start-here/installing-nuget). - -### Download and run the sample app - -To install the library, go to [Microsoft.Azure.SqlDatabase.ElasticScale.Client](https://www.nuget.org/packages/Microsoft.Azure.SqlDatabase.ElasticScale.Client/). The library is installed with the sample app that's described in the following section. - -To download and run the sample, follow these steps: - -1. Download the [Elastic DB Tools for Azure SQL - Getting Started sample](https://github.com/Azure/elastic-db-tools). Unzip the sample to a location that you choose. - -2. To create a project, open the *ElasticDatabaseTools.sln* solution from the *elastic-db-tools-master* directory. - -3. Set the *ElasticScaleStarterKit* project as the Startup Project. - -4. In the *ElasticScaleStarterKit* project, open the *App.config* file. Then follow the instructions in the file to add your server name and your sign in information (username and password). - -5. Build and run the application. When you are prompted, enable Visual Studio to restore the NuGet packages of the solution. This action downloads the latest version of the elastic database client library from NuGet. - -6. To learn more about the client library capabilities, experiment with the various options. Note the steps that the application takes in the console output, and feel free to explore the code behind the scenes. - - ![Progress][4] - -Congratulations! 
You have successfully built and run your first sharded application by using Elastic Database Tools on SQL Database. Use Visual Studio or SQL Server Management Studio to connect to your database and take a quick look at the shards that the sample created. You will notice new sample shard databases and a shard map manager database that the sample has created. - -> [!IMPORTANT] -> We recommend that you always use the latest version of Management Studio so that you stay synchronized with updates to Azure and SQL Database. [Update SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms). - -## Key pieces of the code sample - -* **Managing shards and shard maps**: The code illustrates how to work with shards, ranges, and mappings in the *ShardManagementUtils.cs* file. For more information, see [Scale out databases with the shard map manager](https://go.microsoft.com/?linkid=9862595). - -* **Data-dependent routing**: Routing of transactions to the right shard is shown in the *DataDependentRoutingSample.cs* file. For more information, see [Data-dependent routing](https://go.microsoft.com/?linkid=9862596). - -* **Querying over multiple shards**: Querying across shards is illustrated in the *MultiShardQuerySample.cs* file. For more information, see [Multi-shard querying](https://go.microsoft.com/?linkid=9862597). - -* **Adding empty shards**: The iterative adding of new empty shards is performed by the code in the *CreateShardSample.cs* file. For more information, see [Scale out databases with the shard map manager](https://go.microsoft.com/?linkid=9862595). - -## Other elastic scale operations - -* **Splitting an existing shard**: The capability to split shards is provided by the split-merge tool. For more information, see [Moving data between scaled-out cloud databases](elastic-scale-overview-split-and-merge.md). - -* **Merging existing shards**: Shard merges are also performed by using the split-merge tool. 
For more information, see [Moving data between scaled-out cloud databases](elastic-scale-overview-split-and-merge.md). - -## Cost - -The Elastic Database Tools library is free. When you use Elastic Database Tools, you incur no additional charges beyond the cost of your Azure usage. - -For example, the sample application creates new databases. The cost of this capability depends on the SQL Database edition you choose and the Azure usage of your application. - -For pricing information, see [SQL Database pricing details](https://azure.microsoft.com/pricing/details/sql-database/). - -## Next steps - -For more information about Elastic Database Tools, see the following articles: - -* Code samples: - * Elastic Database Tools ([.NET](https://github.com/Azure/elastic-db-tools), [Java](https://search.maven.org/#search%7Cga%7C1%7Ca%3A%22azure-elasticdb-tools%22)) - * [Elastic Database Tools for Azure SQL - Entity Framework Integration](https://code.msdn.microsoft.com/Elastic-Scale-with-Azure-bae904ba?SRC=VSIDE) - * [Shard Elasticity on Script Center](https://gallery.technet.microsoft.com/scriptcenter/Elastic-Scale-Shard-c9530cbe) -* Blog: [Elastic Scale announcement](https://azure.microsoft.com/blog/20../../introducing-elastic-scale-preview-for-azure-sql-database/) -* Discussion forum: [Microsoft Q&A question page for Azure SQL Database](/answers/topics/azure-sql-database.html) -* To measure performance: [Performance counters for shard map manager](elastic-database-client-library.md) - - -[The Elastic Scale Sample Application]: #The-Elastic-Scale-Sample-Application -[Download and Run the Sample App]: #Download-and-Run-the-Sample-App -[Cost]: #Cost -[Next steps]: #next-steps - - -[1]: ./media/elastic-scale-get-started/newProject.png -[2]: ./media/elastic-scale-get-started/click-online.png -[3]: ./media/elastic-scale-get-started/click-CSharp.png -[4]: ./media/elastic-scale-get-started/output2.png -[5]: ./media/elastic-scale-get-started/java-client-library.PNG \ No newline at 
end of file diff --git a/articles/azure-sql/database/elastic-scale-glossary.md b/articles/azure-sql/database/elastic-scale-glossary.md deleted file mode 100644 index bc1632d71444c..0000000000000 --- a/articles/azure-sql/database/elastic-scale-glossary.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: Elastic Database tools glossary -description: Explanation of terms used for elastic database tools -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 12/04/2018 ---- -# Elastic Database tools glossary -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -The following terms are defined for the [Elastic Database tools](elastic-scale-introduction.md). The tools are used to manage [shard maps](elastic-scale-shard-map-management.md), and include the [client library](elastic-database-client-library.md), the [split-merge tool](elastic-scale-overview-split-and-merge.md), [elastic pools](elastic-pool-overview.md), and [queries](elastic-query-overview.md). - -These terms are used in [Adding a shard using Elastic Database tools](elastic-scale-add-a-shard.md) and [Using the RecoveryManager class to fix shard map problems](elastic-database-recovery-manager.md). - -![Elastic Scale terms][1] - -**Database**: A database in Azure SQL Database. - -**Data dependent routing**: The functionality that enables an application to connect to a shard given a specific sharding key. See [Data dependent routing](elastic-scale-data-dependent-routing.md). Compare to **[Multi-Shard Query](elastic-scale-multishard-querying.md)**. - -**Global shard map**: The map between sharding keys and their respective shards within a **shard set**. The global shard map is stored in the **shard map manager**. Compare to **local shard map**. - -**List shard map**: A shard map in which sharding keys are mapped individually. 
Compare to **Range Shard Map**. - -**Local shard map**: Stored on a shard, the local shard map contains mappings for the shardlets that reside on the shard. - -**Multi-shard query**: The ability to issue a query against multiple shards; results sets are returned using UNION ALL semantics (also known as “fan-out query”). Compare to **data dependent routing**. - -**Multi-tenant** and **Single-tenant**: This shows a single-tenant database and a multi-tenant database: - -![Screenshot that shows a single-tenant database and a multi-tenant database.](./media/elastic-scale-glossary/multi-single-simple.png) - -Here is a representation of **sharded** single and multi-tenant databases. - -![Single and multi-tenant databases](./media/elastic-scale-glossary/shards-single-multi.png) - -**Range shard map**: A shard map in which the shard distribution strategy is based on multiple ranges of contiguous values. - -**Reference tables**: Tables that are not sharded but are replicated across shards. For example, zip codes can be stored in a reference table. - -**Shard**: A database in Azure SQL Database that stores data from a sharded data set. - -**Shard elasticity**: The ability to perform both **horizontal scaling** and **vertical scaling**. - -**Sharded tables**: Tables that are sharded, i.e., whose data is distributed across shards based on their sharding key values. - -**Sharding key**: A column value that determines how data is distributed across shards. The value type can be one of the following: **int**, **bigint**, **varbinary**, or **uniqueidentifier**. - -**Shard set**: The collection of shards that are attributed to the same shard map in the shard map manager. - -**Shardlet**: All of the data associated with a single value of a sharding key on a shard. A shardlet is the smallest unit of data movement possible when redistributing sharded tables. - -**Shard map**: The set of mappings between sharding keys and their respective shards. 
- -**Shard map manager**: A management object and data store that contains the shard map(s), shard locations, and mappings for one or more shard sets. - -![Diagram shows a shard map manager associated with shardmaps_global, shards_global, and shard_mappings_global.][2] - -## Verbs -**Horizontal scaling**: The act of scaling out (or in) a collection of shards by adding or removing shards to a shard map, as shown below. - -![Horizontal and vertical scaling][3] - -**Merge**: The act of moving shardlets from two shards to one shard and updating the shard map accordingly. - -**Shardlet move**: The act of moving a single shardlet to a different shard. - -**Shard**: The act of horizontally partitioning identically structured data across multiple databases based on a sharding key. - -**Split**: The act of moving several shardlets from one shard to another (typically new) shard. A sharding key is provided by the user as the split point. - -**Vertical Scaling**: The act of scaling up (or down) the compute size of an individual shard. For example, changing a shard from Standard to Premium (which results in more computing resources). 
- -[!INCLUDE [elastic-scale-include](../../../includes/elastic-scale-include.md)] - - -[1]: ./media/elastic-scale-glossary/glossary.png -[2]: ./media/elastic-scale-glossary/mappings.png -[3]: ./media/elastic-scale-glossary/h_versus_vert.png - diff --git a/articles/azure-sql/database/elastic-scale-introduction.md b/articles/azure-sql/database/elastic-scale-introduction.md deleted file mode 100644 index bcd337f22b119..0000000000000 --- a/articles/azure-sql/database/elastic-scale-introduction.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: Scaling out -description: Software as a Service (SaaS) developers can easily create elastic, scalable databases in the cloud using these tools -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.topic: conceptual -ms.custom: sqldbrb=1 -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 01/25/2019 ---- -# Scaling out with Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -You can easily scale out databases in Azure SQL Database using the **Elastic Database** tools. These tools and features let you use the database resources of **Azure SQL Database** to create solutions for transactional workloads, and especially Software as a Service (SaaS) applications. Elastic Database features are composed of the: - -* [Elastic Database client library](elastic-database-client-library.md): The client library is a feature that allows you to create and maintain sharded databases. See [Get started with Elastic Database tools](elastic-scale-get-started.md). -* [Elastic Database split-merge tool](elastic-scale-overview-split-and-merge.md): moves data between sharded databases. This tool is useful for moving data from a multi-tenant database to a single-tenant database (or vice-versa). See [Elastic database Split-Merge tool tutorial](elastic-scale-configure-deploy-split-and-merge.md). 
-* [Elastic Database jobs](elastic-jobs-overview.md) (preview): Use jobs to manage large numbers of databases in Azure SQL Database. Easily perform administrative operations such as schema changes, credentials management, reference data updates, performance data collection, or tenant (customer) telemetry collection using jobs. -* [Elastic Database query](elastic-query-overview.md) (preview): Enables you to run a Transact-SQL query that spans multiple databases. This enables connection to reporting tools such as Excel, Power BI, Tableau, etc. -* [Elastic transactions](elastic-transactions-overview.md): This feature allows you to run transactions that span several databases. Elastic database transactions are available for .NET applications using ADO .NET and integrate with the familiar programming experience using the [System.Transaction classes](/dotnet/api/system.transactions). - -The following graphic shows an architecture that includes the **Elastic Database features** in relation to a collection of databases. - -In this graphic, colors of the database represent schemas. Databases with the same color share the same schema. - -1. A set of **SQL databases** is hosted on Azure using sharding architecture. -2. The **Elastic Database client library** is used to manage a shard set. -3. A subset of the databases is put into an **elastic pool**. (See [What is a pool?](elastic-pool-overview.md)). -4. An **Elastic Database job** runs scheduled or ad hoc T-SQL scripts against all databases. -5. The **split-merge tool** is used to move data from one shard to another. -6. The **Elastic Database query** allows you to write a query that spans all databases in the shard set. -7. **Elastic transactions** allow you to run transactions that span several databases. - -![Elastic Database tools][1] - -## Why use the tools? - -Achieving elasticity and scale for cloud applications has been straightforward for VMs and blob storage - simply add or subtract units, or increase power. 
But it has remained a challenge for stateful data processing in relational databases. Challenges emerged in these scenarios: - -* Growing and shrinking capacity for the relational database part of your workload. -* Managing hotspots that may arise affecting a specific subset of data - such as a busy end-customer (tenant). - -Traditionally, scenarios like these have been addressed by investing in larger-scale servers to support the application. However, this option is limited in the cloud where all processing happens on predefined commodity hardware. Instead, distributing data and processing across many identically structured databases (a scale-out pattern known as "sharding") provides an alternative to traditional scale-up approaches both in terms of cost and elasticity. - -## Horizontal and vertical scaling - -The following figure shows the horizontal and vertical dimensions of scaling, which are the basic ways the elastic databases can be scaled. - -![Horizontal versus vertical scale-out][2] - -Horizontal scaling refers to adding or removing databases in order to adjust capacity or overall performance, also called "scaling out". Sharding, in which data is partitioned across a collection of identically structured databases, is a common way to implement horizontal scaling. - -Vertical scaling refers to increasing or decreasing the compute size of an individual database, also known as "scaling up." - -Most cloud-scale database applications use a combination of these two strategies. For example, a Software as a Service application may use horizontal scaling to provision new end-customers and vertical scaling to allow each end-customer's database to grow or shrink resources as needed by the workload. - -* Horizontal scaling is managed using the [Elastic Database client library](elastic-database-client-library.md). -* Vertical scaling is accomplished using Azure PowerShell cmdlets to change the service tier, or by placing databases in an elastic pool. 
- -## Sharding - -*Sharding* is a technique to distribute large amounts of identically structured data across a number of independent databases. It is especially popular with cloud developers creating Software as a Service (SAAS) offerings for end customers or businesses. These end customers are often referred to as "tenants". Sharding may be required for any number of reasons: - -* The total amount of data is too large to fit within the constraints of an individual database -* The transaction throughput of the overall workload exceeds the capabilities of an individual database -* Tenants may require physical isolation from each other, so separate databases are needed for each tenant -* Different sections of a database may need to reside in different geographies for compliance, performance, or geopolitical reasons. - -In other scenarios, such as ingestion of data from distributed devices, sharding can be used to fill a set of databases that are organized temporally. For example, a separate database can be dedicated to each day or week. In that case, the sharding key can be an integer representing the date (present in all rows of the sharded tables) and queries retrieving information for a date range must be routed by the application to the subset of databases covering the range in question. - -Sharding works best when every transaction in an application can be restricted to a single value of a sharding key. That ensures that all transactions are local to a specific database. - -## Multi-tenant and single-tenant - -Some applications use the simplest approach of creating a separate database for each tenant. This approach is the **single tenant sharding pattern** that provides isolation, backup/restore ability, and resource scaling at the granularity of the tenant. With single tenant sharding, each database is associated with a specific tenant ID value (or customer key value), but that key need not always be present in the data itself. 
It is the application's responsibility to route each request to the appropriate database - and the client library can simplify this. - -![Single tenant versus multi-tenant][4] - -Others scenarios pack multiple tenants together into databases, rather than isolating them into separate databases. This pattern is a typical **multi-tenant sharding pattern** - and it may be driven by the fact that an application manages large numbers of small tenants. In multi-tenant sharding, the rows in the database tables are all designed to carry a key identifying the tenant ID or sharding key. Again, the application tier is responsible for routing a tenant's request to the appropriate database, and this can be supported by the elastic database client library. In addition, row-level security can be used to filter which rows each tenant can access - for details, see [Multi-tenant applications with elastic database tools and row-level security](saas-tenancy-elastic-tools-multi-tenant-row-level-security.md). Redistributing data among databases may be needed with the multi-tenant sharding pattern, and is facilitated by the elastic database split-merge tool. To learn more about design patterns for SaaS applications using elastic pools, see [Design Patterns for Multi-tenant SaaS Applications with Azure SQL Database](saas-tenancy-app-design-patterns.md). - -### Move data from multiple to single-tenancy databases -When creating a SaaS application, it is typical to offer prospective customers a trial version of the software. In this case, it is cost-effective to use a multi-tenant database for the data. However, when a prospect becomes a customer, a single-tenant database is better since it provides better performance. If the customer had created data during the trial period, use the [split-merge tool](elastic-scale-overview-split-and-merge.md) to move the data from the multi-tenant to the new single-tenant database. 
- -## Next steps -For a sample app that demonstrates the client library, see [Get started with Elastic Database tools](elastic-scale-get-started.md). - -To convert existing databases to use the tools, see [Migrate existing databases to scale out](elastic-convert-to-use-elastic-tools.md). - -To see the specifics of the elastic pool, see [Price and performance considerations for an elastic pool](elastic-pool-overview.md), or create a new pool with [elastic pools](elastic-pool-manage.md). - -[!INCLUDE [elastic-scale-include](../../../includes/elastic-scale-include.md)] - - - -[1]:./media/elastic-scale-introduction/tools.png -[2]:./media/elastic-scale-introduction/h_versus_vert.png -[3]:./media/elastic-scale-introduction/overview.png -[4]:./media/elastic-scale-introduction/single_v_multi_tenant.png diff --git a/articles/azure-sql/database/elastic-scale-manage-credentials.md b/articles/azure-sql/database/elastic-scale-manage-credentials.md deleted file mode 100644 index 037024c54e15e..0000000000000 --- a/articles/azure-sql/database/elastic-scale-manage-credentials.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: Managing credentials in the elastic database client library -description: How to set the right level of credentials, admin to read-only, for elastic database apps -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 01/03/2019 ---- -# Credentials used to access the Elastic Database client library -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -The [Elastic Database client library](elastic-database-client-library.md) uses three different kinds of credentials to access the [shard map manager](elastic-scale-shard-map-management.md). Depending on the need, use the credential with the lowest level of access possible. 
- -* **Management credentials**: for creating or manipulating a shard map manager. (See the [glossary](elastic-scale-glossary.md).) -* **Access credentials**: to access an existing shard map manager to obtain information about shards. -* **Connection credentials**: to connect to shards. - -See also [Managing databases and logins in Azure SQL Database](logins-create-manage.md). - -## About management credentials - -Management credentials are used to create a **ShardMapManager** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapmanager.shardmapmanager), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanager)) object for applications that manipulate shard maps. (For example, see [Adding a shard using Elastic Database tools](elastic-scale-add-a-shard.md) and [data-dependent routing](elastic-scale-data-dependent-routing.md)). The user of the elastic scale client library creates the SQL users and SQL logins and makes sure each is granted the read/write permissions on the global shard map database and all shard databases as well. These credentials are used to maintain the global shard map and the local shard maps when changes to the shard map are performed. For instance, use the management credentials to create the shard map manager object (using **GetSqlShardMapManager** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapmanager.shardmapmanagerfactory.getsqlshardmapmanager), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanagerfactory.getsqlshardmapmanager)): - -```java -// Obtain a shard map manager. -ShardMapManager shardMapManager = ShardMapManagerFactory.GetSqlShardMapManager(smmAdminConnectionString,ShardMapManagerLoadPolicy.Lazy); -``` - -The variable **smmAdminConnectionString** is a connection string that contains the management credentials. The user ID and password provide read/write access to both shard map database and individual shards. 
The management connection string also includes the server name and database name to identify the global shard map database. Here is a typical connection string for that purpose: - -```java -"Server=.database.windows.net;Database=;User ID=;Password=;Trusted_Connection=False;Encrypt=True;Connection Timeout=30;” -``` - -Do not use values in the form of "username@server"—instead just use the "username" value. This is because credentials must work against both the shard map manager database and individual shards, which may be on different servers. - -## Access credentials - -When creating a shard map manager in an application that does not administer shard maps, use credentials that have read-only permissions on the global shard map. The information retrieved from the global shard map under these credentials is used for [data-dependent routing](elastic-scale-data-dependent-routing.md) and to populate the shard map cache on the client. The credentials are provided through the same call pattern to **GetSqlShardMapManager**: - -```java -// Obtain shard map manager. -ShardMapManager shardMapManager = ShardMapManagerFactory.GetSqlShardMapManager(smmReadOnlyConnectionString, ShardMapManagerLoadPolicy.Lazy); -``` - -Note the use of the **smmReadOnlyConnectionString** to reflect the use of different credentials for this access on behalf of **non-admin** users: these credentials should not provide write permissions on the global shard map. - -## Connection credentials - -Additional credentials are needed when using the **OpenConnectionForKey** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapper.listshardmapper.openconnectionforkey), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmap.openconnectionforkey)) method to access a shard associated with a sharding key. These credentials need to provide permissions for read-only access to the local shard map tables residing on the shard. 
This is needed to perform connection validation for data-dependent routing on the shard. This code snippet allows data access in the context of data-dependent routing: - -```csharp -using (SqlConnection conn = rangeMap.OpenConnectionForKey(targetWarehouse, smmUserConnectionString, ConnectionOptions.Validate)) -``` - -In this example, **smmUserConnectionString** holds the connection string for the user credentials. For Azure SQL Database, here is a typical connection string for user credentials: - -```java -"User ID=; Password=; Trusted_Connection=False; Encrypt=True; Connection Timeout=30;” -``` - -As with the admin credentials, do not use values in the form of "username@server". Instead, just use "username". Also note that the connection string does not contain a server name and database name. That is because the **OpenConnectionForKey** call automatically directs the connection to the correct shard based on the key. Hence, the database name and server name are not provided. - -## See also - -[Managing databases and logins in Azure SQL Database](logins-create-manage.md) - -[Securing your SQL Database](security-overview.md) - -[Elastic Database jobs](elastic-jobs-overview.md) - -[!INCLUDE [elastic-scale-include](../../../includes/elastic-scale-include.md)] \ No newline at end of file diff --git a/articles/azure-sql/database/elastic-scale-multishard-querying.md b/articles/azure-sql/database/elastic-scale-multishard-querying.md deleted file mode 100644 index 7fa710ace9393..0000000000000 --- a/articles/azure-sql/database/elastic-scale-multishard-querying.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: Query sharded databases -description: Run queries across shards using the elastic database client library. 
-services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.topic: how-to -ms.custom: sqldbrb=1 -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 01/25/2019 ---- -# Multi-shard querying using elastic database tools -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -## Overview - -With the [Elastic Database tools](elastic-scale-introduction.md), you can create sharded database solutions. **Multi-shard querying** is used for tasks such as data collection/reporting that require running a query that stretches across several shards. (Contrast this to [data-dependent routing](elastic-scale-data-dependent-routing.md), which performs all work on a single shard.) - -1. Get a **RangeShardMap** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.rangeshardmap), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.rangeshardmap-1)) or **ListShardMap** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.listshardmap), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.listshardmap-1)) using the **TryGetRangeShardMap** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapmanager.shardmapmanager.trygetrangeshardmap), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanager.trygetrangeshardmap)), the **TryGetListShardMap** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapmanager.shardmapmanager.trygetlistshardmap), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanager.trygetlistshardmap)), or the **GetShardMap** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapmanager.shardmapmanager.getshardmap), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanager.getshardmap)) method. 
See [Constructing a ShardMapManager](elastic-scale-shard-map-management.md#constructing-a-shardmapmanager) and [Get a RangeShardMap or ListShardMap](elastic-scale-shard-map-management.md#get-a-rangeshardmap-or-listshardmap). -2. Create a **MultiShardConnection** ([Java](/java/api/com.microsoft.azure.elasticdb.query.multishard.multishardconnection), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.query.multishardconnection)) object. -3. Create a **MultiShardStatement or MultiShardCommand** ([Java](/java/api/com.microsoft.azure.elasticdb.query.multishard.multishardstatement), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.query.multishardcommand)). -4. Set the **CommandText property** ([Java](/java/api/com.microsoft.azure.elasticdb.query.multishard.multishardstatement), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.query.multishardcommand)) to a T-SQL command. -5. Execute the command by calling the **ExecuteQueryAsync or ExecuteReader** ([Java](/java/api/com.microsoft.azure.elasticdb.query.multishard.multishardstatement.executeQueryAsync), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.query.multishardcommand)) method. -6. View the results using the **MultiShardResultSet or MultiShardDataReader** ([Java](/java/api/com.microsoft.azure.elasticdb.query.multishard.multishardresultset), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.query.multisharddatareader)) class. - -## Example - -The following code illustrates the usage of multi-shard querying using a given **ShardMap** named *myShardMap*. 
- -```csharp -using (MultiShardConnection conn = new MultiShardConnection(myShardMap.GetShards(), myShardConnectionString)) -{ - using (MultiShardCommand cmd = conn.CreateCommand()) - { - cmd.CommandText = "SELECT c1, c2, c3 FROM ShardedTable"; - cmd.CommandType = CommandType.Text; - cmd.ExecutionOptions = MultiShardExecutionOptions.IncludeShardNameColumn; - cmd.ExecutionPolicy = MultiShardExecutionPolicy.PartialResults; - - using (MultiShardDataReader sdr = cmd.ExecuteReader()) - { - while (sdr.Read()) - { - var c1Field = sdr.GetString(0); - var c2Field = sdr.GetFieldValue(1); - var c3Field = sdr.GetFieldValue(2); - } - } - } -} -``` - -A key difference is the construction of multi-shard connections. Where **SqlConnection** operates on an individual database, the **MultiShardConnection** takes a ***collection of shards*** as its input. Populate the collection of shards from a shard map. The query is then executed on the collection of shards using **UNION ALL** semantics to assemble a single overall result. Optionally, the name of the shard where the row originates from can be added to the output using the **ExecutionOptions** property on command. - -Note the call to **myShardMap.GetShards()**. This method retrieves all shards from the shard map and provides an easy way to run a query across all relevant databases. The collection of shards for a multi-shard query can be refined further by performing a LINQ query over the collection returned from the call to **myShardMap.GetShards()**. In combination with the partial results policy, the current capability in multi-shard querying has been designed to work well for tens up to hundreds of shards. - -A limitation with multi-shard querying is currently the lack of validation for shards and shardlets that are queried. While data-dependent routing verifies that a given shard is part of the shard map at the time of querying, multi-shard queries do not perform this check. 
This can lead to multi-shard queries running on databases that have been removed from the shard map. - -## Multi-shard queries and split-merge operations - -Multi-shard queries do not verify whether shardlets on the queried database are participating in ongoing split-merge operations. (See [Scaling using the Elastic Database split-merge tool](elastic-scale-overview-split-and-merge.md).) This can lead to inconsistencies where rows from the same shardlet show for multiple databases in the same multi-shard query. Be aware of these limitations and consider draining ongoing split-merge operations and changes to the shard map while performing multi-shard queries. - -[!INCLUDE [elastic-scale-include](../../../includes/elastic-scale-include.md)] \ No newline at end of file diff --git a/articles/azure-sql/database/elastic-scale-overview-split-and-merge.md b/articles/azure-sql/database/elastic-scale-overview-split-and-merge.md deleted file mode 100644 index e1e49747af902..0000000000000 --- a/articles/azure-sql/database/elastic-scale-overview-split-and-merge.md +++ /dev/null @@ -1,280 +0,0 @@ ---- -title: Moving data between scaled-out cloud databases -description: Explains how to manipulate shards and move data via a self-hosted service using elastic database APIs. -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: -ms.topic: conceptual -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 03/12/2019 ---- -# Moving data between scaled-out cloud databases -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -If you are a Software as a Service developer, and suddenly your app undergoes tremendous demand, you need to accommodate the growth. So you add more databases (shards). How do you redistribute the data to the new databases without disrupting the data integrity? 
Use the **split-merge tool** to move data from constrained databases to the new databases. - -The split-merge tool runs as an Azure web service. An administrator or developer uses the tool to move shardlets (data from a shard) between different databases (shards). The tool uses shard map management to maintain the service metadata database, and ensure consistent mappings. - -![Overview][1] - -## Download - -[Microsoft.Azure.SqlDatabase.ElasticScale.Service.SplitMerge](https://www.nuget.org/packages/Microsoft.Azure.SqlDatabase.ElasticScale.Service.SplitMerge/) - -## Documentation - -1. [Elastic database split-merge tool tutorial](elastic-scale-configure-deploy-split-and-merge.md) -2. [Split-merge security configuration](elastic-scale-split-merge-security-configuration.md) -3. [Split-merge security considerations](elastic-scale-split-merge-security-configuration.md) -4. [Shard map management](elastic-scale-shard-map-management.md) -5. [Migrate existing databases to scale-out](elastic-convert-to-use-elastic-tools.md) -6. [Elastic database tools](elastic-scale-introduction.md) -7. [Elastic database tools glossary](elastic-scale-glossary.md) - -## Why use the split-merge tool - -- **Flexibility** - - Applications need to stretch flexibly beyond the limits of a single database in Azure SQL Database. Use the tool to move data as needed to new databases while retaining integrity. - -- **Split to grow** - - To increase overall capacity to handle explosive growth, create additional capacity by sharding the data and by distributing it across incrementally more databases until capacity needs are fulfilled. This is a prime example of the **split** feature. - -- **Merge to shrink** - - Capacity needs shrink due to the seasonal nature of a business. The tool lets you scale down to fewer scale units when business slows. The ‘merge’ feature in the Elastic Scale Split-Merge Service covers this requirement. 
- -- **Manage hotspots by moving shardlets** - - With multiple tenants per database, the allocation of shardlets to shards can lead to capacity bottlenecks on some shards. This requires re-allocating shardlets or moving busy shardlets to new or less utilized shards. - -## Concepts & key features - -- **Customer-hosted services** - - The split-merge is delivered as a customer-hosted service. You must deploy and host the service in your Microsoft Azure subscription. The package you download from NuGet contains a configuration template to complete with the information for your specific deployment. See the [split-merge tutorial](elastic-scale-configure-deploy-split-and-merge.md) for details. Since the service runs in your Azure subscription, you can control and configure most security aspects of the service. The default template includes the options to configure TLS, certificate-based client authentication, encryption for stored credentials, DoS guarding and IP restrictions. You can find more information on the security aspects in the following document [split-merge security configuration](elastic-scale-split-merge-security-configuration.md). - - The default deployed service runs with one worker and one web role. Each uses the A1 VM size in Azure Cloud Services. While you cannot modify these settings when deploying the package, you could change them after a successful deployment in the running cloud service, (through the Azure portal). Note that the worker role must not be configured for more than a single instance for technical reasons. - -- **Shard map integration** - - The split-merge service interacts with the shard map of the application. When using the split-merge service to split or merge ranges or to move shardlets between shards, the service automatically keeps the shard map up-to-date. To do so, the service connects to the shard map manager database of the application and maintains ranges and mappings as split/merge/move requests progress. 
This ensures that the shard map always presents an up-to-date view when split-merge operations are going on. Split, merge and shardlet movement operations are implemented by moving a batch of shardlets from the source shard to the target shard. During the shardlet movement operation the shardlets subject to the current batch are marked as offline in the shard map and are unavailable for data-dependent routing connections using the **OpenConnectionForKey** API. - -- **Consistent shardlet connections** - - When data movement starts for a new batch of shardlets, any shard-map provided data-dependent routing connections to the shard storing the shardlet are killed and subsequent connections from the shard map APIs to the shardlets are blocked while the data movement is in progress in order to avoid inconsistencies. Connections to other shardlets on the same shard will also get killed, but will succeed again immediately on retry. Once the batch is moved, the shardlets are marked online again for the target shard and the source data is removed from the source shard. The service goes through these steps for every batch until all shardlets have been moved. This will lead to several connection kill operations during the course of the complete split/merge/move operation. - -- **Managing shardlet availability** - - Limiting the connection killing to the current batch of shardlets as discussed above restricts the scope of unavailability to one batch of shardlets at a time. This is preferred over an approach where the complete shard would remain offline for all its shardlets during the course of a split or merge operation. The size of a batch, defined as the number of distinct shardlets to move at a time, is a configuration parameter. It can be defined for each split and merge operation depending on the application’s availability and performance needs. Note that the range that is being locked in the shard map may be larger than the batch size specified. 
This is because the service picks the range size such that the actual number of sharding key values in the data approximately matches the batch size. This is important to remember in particular for sparsely populated sharding keys. - -- **Metadata storage** - - The split-merge service uses a database to maintain its status and to keep logs during request processing. The user creates this database in their subscription and provides the connection string for it in the configuration file for the service deployment. Administrators from the user’s organization can also connect to this database to review request progress and to investigate detailed information regarding potential failures. - -- **Sharding-awareness** - - The split-merge service differentiates between (1) sharded tables, (2) reference tables, and (3) normal tables. The semantics of a split/merge/move operation depend on the type of the table used and are defined as follows: - - - **Sharded tables** - - Split, merge, and move operations move shardlets from source to target shard. After successful completion of the overall request, those shardlets are no longer present on the source. Note that the target tables need to exist on the target shard and must not contain data in the target range prior to processing of the operation. - - - **Reference tables** - - For reference tables, the split, merge and move operations copy the data from the source to the target shard. Note, however, that no changes occur on the target shard for a given table if any row is already present in this table on the target. The table has to be empty for any reference table copy operation to get processed. - - - **Other tables** - - Other tables can be present on either the source or the target of a split and merge operation. The split-merge service disregards these tables for any data movement or copy operations. Note, however, that they can interfere with these operations in case of constraints. - - The information on reference vs. 
sharded tables is provided by the `SchemaInfo` APIs on the shard map. The following example illustrates the use of these APIs on a given shard map manager object: - - ```csharp - // Create the schema annotations - SchemaInfo schemaInfo = new SchemaInfo(); - - // reference tables - schemaInfo.Add(new ReferenceTableInfo("dbo", "region")); - schemaInfo.Add(new ReferenceTableInfo("dbo", "nation")); - - // sharded tables - schemaInfo.Add(new ShardedTableInfo("dbo", "customer", "C_CUSTKEY")); - schemaInfo.Add(new ShardedTableInfo("dbo", "orders", "O_CUSTKEY")); - - // publish - smm.GetSchemaInfoCollection().Add(Configuration.ShardMapName, schemaInfo); - ``` - - The tables ‘region’ and ‘nation’ are defined as reference tables and will be copied with split/merge/move operations. ‘customer’ and ‘orders’ in turn are defined as sharded tables. `C_CUSTKEY` and `O_CUSTKEY` serve as the sharding key. - -- **Referential integrity** - - The split-merge service analyzes dependencies between tables and uses foreign key-primary key relationships to stage the operations for moving reference tables and shardlets. In general, reference tables are copied first in dependency order, then shardlets are copied in order of their dependencies within each batch. This is necessary so that FK-PK constraints on the target shard are honored as the new data arrives. - -- **Shard map consistency and eventual completion** - - In the presence of failures, the split-merge service resumes operations after any outage and aims to complete any in progress requests. However, there may be unrecoverable situations, e.g., when the target shard is lost or compromised beyond repair. Under those circumstances, some shardlets that were supposed to be moved may continue to reside on the source shard. The service ensures that shardlet mappings are only updated after the necessary data has been successfully copied to the target. 
Shardlets are only deleted on the source once all their data has been copied to the target and the corresponding mappings have been updated successfully. The deletion operation happens in the background while the range is already online on the target shard. The split-merge service always ensures correctness of the mappings stored in the shard map. - -## The split-merge user interface - -The split-merge service package includes a worker role and a web role. The web role is used to submit split-merge requests in an interactive way. The main components of the user interface are as follows: - -- **Operation type** - - The operation type is a radio button that controls the kind of operation performed by the service for this request. You can choose between the split, merge and move scenarios. You can also cancel a previously submitted operation. You can use split, merge and move requests for range shard maps. List shard maps only support move operations. - -- **Shard map** - - The next section of request parameters covers information about the shard map and the database hosting your shard map. In particular, you need to provide the name of the server and database hosting the shardmap, credentials to connect to the shard map database, and finally the name of the shard map. Currently, the operation only accepts a single set of credentials. These credentials need to have sufficient permissions to perform changes to the shard map as well as to the user data on the shards. - -- **Source range (split and merge)** - - A split and merge operation processes a range using its low and high key. To specify an operation with an unbounded high key value, check the “High key is max” check box and leave the high key field empty. The range key values that you specify do not need to precisely match a mapping and its boundaries in your shard map. If you do not specify any range boundaries at all the service will infer the closest range for you automatically. 
You can use the GetMappings.ps1 PowerShell script to retrieve the current mappings in a given shard map. - -- **Split source behavior (split)** - - For split operations, define the point to split the source range. You do this by providing the sharding key where you want the split to occur. Use the radio button specify whether you want the lower part of the range (excluding the split key) to move, or whether you want the upper part to move (including the split key). - -- **Source shardlet (move)** - - Move operations are different from split or merge operations as they do not require a range to describe the source. A source for move is simply identified by the sharding key value that you plan to move. - -- **Target shard (split)** - - Once you have provided the information on the source of your split operation, you need to define where you want the data to be copied to by providing the server and database name for the target. - -- **Target range (merge)** - - Merge operations move shardlets to an existing shard. You identify the existing shard by providing the range boundaries of the existing range that you want to merge with. - -- **Batch size** - - The batch size controls the number of shardlets that will go offline at a time during the data movement. This is an integer value where you can use smaller values when you are sensitive to long periods of downtime for shardlets. Larger values will increase the time that a given shardlet is offline but may improve performance. - -- **Operation ID (cancel)** - - If you have an ongoing operation that is no longer needed, you can cancel the operation by providing its operation ID in this field. You can retrieve the operation ID from the request status table (see Section 8.1) or from the output in the web browser where you submitted the request. 
- -## Requirements and limitations - -The current implementation of the split-merge service is subject to the following requirements and limitations: - -- The shards need to exist and be registered in the shard map before a split-merge operation on these shards can be performed. -- The service does not create tables or any other database objects automatically as part of its operations. This means that the schema for all sharded tables and reference tables needs to exist on the target shard prior to any split/merge/move operation. Sharded tables in particular are required to be empty in the range where new shardlets are to be added by a split/merge/move operation. Otherwise, the operation will fail the initial consistency check on the target shard. Also note that reference data is only copied if the reference table is empty and that there are no consistency guarantees with regard to other concurrent write operations on the reference tables. We recommend this: when running split/merge operations, no other write operations make changes to the reference tables. -- The service relies on row identity established by a unique index or key that includes the sharding key to improve performance and reliability for large shardlets. This allows the service to move data at an even finer granularity than just the sharding key value. This helps to reduce the maximum amount of log space and locks that are required during the operation. Consider creating a unique index or a primary key including the sharding key on a given table if you want to use that table with split/merge/move requests. For performance reasons, the sharding key should be the leading column in the key or the index. -- During the course of request processing, some shardlet data may be present both on the source and the target shard. This is necessary to protect against failures during the shardlet movement. 
The integration of split-merge with the shard map ensures that connections through the data-dependent routing APIs using the **OpenConnectionForKey** method on the shard map do not see any inconsistent intermediate states. However, when connecting to the source or the target shards without using the **OpenConnectionForKey** method, inconsistent intermediate states might be visible when split/merge/move requests are going on. These connections may show partial or duplicate results depending on the timing or the shard underlying the connection. This limitation currently includes the connections made by Elastic Scale Multi-Shard-Queries. -- The metadata database for the split-merge service must not be shared between different roles. For example, a role of the split-merge service running in staging needs to point to a different metadata database than the production role. - -## Billing - -The split-merge service runs as a cloud service in your Microsoft Azure subscription. Therefore charges for cloud services apply to your instance of the service. Unless you frequently perform split/merge/move operations, we recommend you delete your split-merge cloud service. That saves costs for running or deployed cloud service instances. You can re-deploy and start your readily runnable configuration whenever you need to perform split or merge operations. - -## Monitoring - -### Status tables - -The split-merge Service provides the **RequestStatus** table in the metadata store database for monitoring of completed and ongoing requests. The table lists a row for each split-merge request that has been submitted to this instance of the split-merge service. It gives the following information for each request: - -- **Timestamp** - - The time and date when the request was started. - -- **OperationId** - - A GUID that uniquely identifies the request. This request can also be used to cancel the operation while it is still ongoing. - -- **Status** - - The current state of the request. 
For ongoing requests, it also lists the current phase in which the request is. - -- **CancelRequest** - - A flag that indicates whether the request has been canceled. - -- **Progress** - - A percentage estimate of completion for the operation. A value of 50 indicates that the operation is approximately 50% complete. - -- **Details** - - An XML value that provides a more detailed progress report. The progress report is periodically updated as sets of rows are copied from source to target. In case of failures or exceptions, this column also includes more detailed information about the failure. - -### Azure Diagnostics - -The split-merge service uses Azure Diagnostics based on Azure SDK 2.5 for monitoring and diagnostics. You control the diagnostics configuration as explained here: [Enabling Diagnostics in Azure Cloud Services and Virtual Machines](../../cloud-services/cloud-services-dotnet-diagnostics.md). The download package includes two diagnostics configurations - one for the web role and one for the worker role. It includes the definitions to log Performance Counters, IIS logs, Windows Event Logs, and split-merge application event logs. - -## Deploy Diagnostics - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] - -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. - -To enable monitoring and diagnostics using the diagnostic configuration for the web and worker roles provided by the NuGet package, run the following commands using Azure PowerShell: - -```powershell -$storageName = "" -$key = " Storage -> `` -> Tables -> WADLogsTable. For more information, see [Server Explorer](/previous-versions/x603htbk(v=vs.140)). 
- -![WADLogsTable][2] - -The WADLogsTable highlighted in the figure above contains the detailed events from the split-merge service’s application log. Note that the default configuration of the downloaded package is geared towards a production deployment. Therefore the interval at which logs and counters are pulled from the service instances is large (5 minutes). For test and development, lower the interval by adjusting the diagnostics settings of the web or the worker role to your needs. Right-click on the role in the Visual Studio Server Explorer (see above) and then adjust the Transfer Period in the dialog for the Diagnostics configuration settings: - -![Configuration][3] - -## Performance - -In general, better performance is to be expected from higher, more performant service tiers. Higher IO, CPU and memory allocations for the higher service tiers benefit the bulk copy and delete operations that the split-merge service uses. For that reason, increase the service tier just for those databases for a defined, limited period of time. - -The service also performs validation queries as part of its normal operations. These validation queries check for unexpected presence of data in the target range and ensure that any split/merge/move operation starts from a consistent state. These queries all work over sharding key ranges defined by the scope of the operation and the batch size provided as part of the request definition. These queries perform best when an index is present that has the sharding key as the leading column. - -In addition, a uniqueness property with the sharding key as the leading column will allow the service to use an optimized approach that limits resource consumption in terms of log space and memory. This uniqueness property is required to move large data sizes (typically above 1GB). - -## How to upgrade - -1. Follow the steps in [Deploy a split-merge service](elastic-scale-configure-deploy-split-and-merge.md). -2. 
Change your cloud service configuration file for your split-merge deployment to reflect the new configuration parameters. A new required parameter is the information about the certificate used for encryption. An easy way to do this is to compare the new configuration template file from the download against your existing configuration. Make sure you add the settings for “DataEncryptionPrimaryCertificateThumbprint” and “DataEncryptionPrimary” for both the web and the worker role. -3. Before deploying the update to Azure, ensure that all currently running split-merge operations have finished. You can easily do this by querying the RequestStatus and PendingWorkflows tables in the split-merge metadata database for ongoing requests. -4. Update your existing cloud service deployment for split-merge in your Azure subscription with the new package and your updated service configuration file. - -You do not need to provision a new metadata database for split-merge to upgrade. The new version will automatically upgrade your existing metadata database to the new version. - -## Best practices & troubleshooting - -- Define a test tenant and exercise your most important split/merge/move operations with the test tenant across several shards. Ensure that all metadata is defined correctly in your shard map and that the operations do not violate constraints or foreign keys. -- Keep the test tenant data size above the maximum data size of your largest tenant to ensure you are not encountering data size related issues. This helps you assess an upper bound on the time it takes to move a single tenant around. -- Make sure that your schema allows deletions. The split-merge service requires the ability to remove data from the source shard once the data has been successfully copied to the target. For example, **delete triggers** can prevent the service from deleting the data on the source and may cause operations to fail. 
-- The sharding key should be the leading column in your primary key or unique index definition. That ensures the best performance for the split or merge validation queries, and for the actual data movement and deletion operations which always operate on sharding key ranges. -- Collocate your split-merge service in the region and data center where your databases reside. - -[!INCLUDE [elastic-scale-include](../../../includes/elastic-scale-include.md)] - - - -[1]:./media/elastic-scale-overview-split-and-merge/split-merge-overview.png -[2]:./media/elastic-scale-overview-split-and-merge/diagnostics.png -[3]:./media/elastic-scale-overview-split-and-merge/diagnostics-config.png diff --git a/articles/azure-sql/database/elastic-scale-shard-map-management.md b/articles/azure-sql/database/elastic-scale-shard-map-management.md deleted file mode 100644 index a7a3e441fdd7d..0000000000000 --- a/articles/azure-sql/database/elastic-scale-shard-map-management.md +++ /dev/null @@ -1,270 +0,0 @@ ---- -title: Scale out a database -description: How to use the ShardMapManager, elastic database client library -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 01/25/2019 ---- -# Scale out databases with the shard map manager -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -To easily scale out databases on Azure SQL Database, use a shard map manager. The shard map manager is a special database that maintains global mapping information about all shards (databases) in a shard set. The metadata allows an application to connect to the correct database based upon the value of the **sharding key**. In addition, every shard in the set contains maps that track the local shard data (known as **shardlets**). 
- -![Shard map management](./media/elastic-scale-shard-map-management/glossary.png) - -Understanding how these maps are constructed is essential to shard map management. This is done using the ShardMapManager class ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapmanager.shardmapmanager), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanager)), found in the [Elastic Database client library](elastic-database-client-library.md) to manage shard maps. - -## Shard maps and shard mappings - -For each shard, you must select the type of shard map to create. The choice depends on the database architecture: - -1. Single tenant per database -2. Multiple tenants per database (two types): - 1. List mapping - 2. Range mapping - -For a single-tenant model, create a **list-mapping** shard map. The single-tenant model assigns one database per tenant. This is an effective model for SaaS developers as it simplifies shard map management. - -![List mapping][1] - -The multi-tenant model assigns several tenants to an individual database (and you can distribute groups of tenants across multiple databases). Use this model when you expect each tenant to have small data needs. In this model, assign a range of tenants to a database using **range mapping**. - -![Range mapping][2] - -Or you can implement a multi-tenant database model using a *list mapping* to assign multiple tenants to an individual database. For example, DB1 is used to store information about tenant ID 1 and 5, and DB2 stores data for tenant 7 and tenant 10. 
- -![Multiple tenants on single DB][3] - -### Supported types for sharding keys - -Elastic Scale supports the following types as sharding keys: - -| .NET | Java | -| --- | --- | -| integer |integer | -| long |long | -| guid |uuid | -| byte[] |byte[] | -| datetime | timestamp | -| timespan | duration| -| datetimeoffset |offsetdatetime | - -### List and range shard maps - -Shard maps can be constructed using **lists of individual sharding key values**, or they can be constructed using **ranges of sharding key values**. - -### List shard maps - -**Shards** contain **shardlets** and the mapping of shardlets to shards is maintained by a shard map. A **list shard map** is an association between the individual key values that identify the shardlets and the databases that serve as shards. **List mappings** are explicit and different key values can be mapped to the same database. For example, key value 1 maps to Database A, and key values 3 and 6 both map to Database B. - -| Key | Shard Location | -| --- | --- | -| 1 |Database_A | -| 3 |Database_B | -| 4 |Database_C | -| 6 |Database_B | -| ... |... | - -### Range shard maps - -In a **range shard map**, the key range is described by a pair **[Low Value, High Value)** where the *Low Value* is the minimum key in the range, and the *High Value* is the first value higher than the range. - -For example, **[0, 100)** includes all integers greater than or equal to 0 and less than 100. Note that multiple ranges can point to the same database, and disjoint ranges are supported (for example, [100,200) and [400,600) both point to Database C in the following example). - -| Key | Shard Location | -| --- | --- | -| [1,50) |Database_A | -| [50,100) |Database_B | -| [100,200) |Database_C | -| [400,600) |Database_C | -| ... |... | - -Each of the tables shown above is a conceptual example of a **ShardMap** object. 
Each row is a simplified example of an individual **PointMapping** (for the list shard map) or **RangeMapping** (for the range shard map) object. - -## Shard map manager - -In the client library, the shard map manager is a collection of shard maps. The data managed by a **ShardMapManager** instance is kept in three places: - -1. **Global Shard Map (GSM)**: You specify a database to serve as the repository for all of its shard maps and mappings. Special tables and stored procedures are automatically created to manage the information. This is typically a small database and lightly accessed, and it should not be used for other needs of the application. The tables are in a special schema named **__ShardManagement**. -2. **Local Shard Map (LSM)**: Every database that you specify to be a shard is modified to contain several small tables and special stored procedures that contain and manage shard map information specific to that shard. This information is redundant with the information in the GSM, and it allows the application to validate cached shard map information without placing any load on the GSM; the application uses the LSM to determine if a cached mapping is still valid. The tables corresponding to the LSM on each shard are also in the schema **__ShardManagement**. -3. **Application cache**: Each application instance accessing a **ShardMapManager** object maintains a local in-memory cache of its mappings. It stores routing information that has recently been retrieved. - -## Constructing a ShardMapManager - -A **ShardMapManager** object is constructed using a factory ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapmanager.shardmapmanagerfactory), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanagerfactory)) pattern. 
The **ShardMapManagerFactory.GetSqlShardMapManager** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapmanager.shardmapmanagerfactory.getsqlshardmapmanager), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanagerfactory.getsqlshardmapmanager)) method takes credentials (including the server name and database name holding the GSM) in the form of a **ConnectionString** and returns an instance of a **ShardMapManager**. - -**Please Note:** The **ShardMapManager** should be instantiated only once per app domain, within the initialization code for an application. Creation of additional instances of ShardMapManager in the same app domain results in increased memory and CPU utilization of the application. A **ShardMapManager** can contain any number of shard maps. While a single shard map may be sufficient for many applications, there are times when different sets of databases are used for different schema or for unique purposes; in those cases multiple shard maps may be preferable. - -In this code, an application tries to open an existing **ShardMapManager** with the TryGetSqlShardMapManager ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapmanager.shardmapmanagerfactory.trygetsqlshardmapmanager), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanager) method. If objects representing a Global **ShardMapManager** (GSM) do not yet exist inside the database, the client library creates them using the CreateSqlShardMapManager ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapmanager.shardmapmanagerfactory.createsqlshardmapmanager), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanagerfactory.createsqlshardmapmanager)) method. - -```Java -// Try to get a reference to the Shard Map Manager in the shardMapManager database. -// If it doesn't already exist, then create it. 
-ShardMapManager shardMapManager = null; -boolean shardMapManagerExists = ShardMapManagerFactory.tryGetSqlShardMapManager(shardMapManagerConnectionString,ShardMapManagerLoadPolicy.Lazy, refShardMapManager); -shardMapManager = refShardMapManager.argValue; - -if (shardMapManagerExists) { - ConsoleUtils.writeInfo("Shard Map %s already exists", shardMapManager); -} -else { - // The Shard Map Manager does not exist, so create it - shardMapManager = ShardMapManagerFactory.createSqlShardMapManager(shardMapManagerConnectionString); - ConsoleUtils.writeInfo("Created Shard Map %s", shardMapManager); -} -``` - -```csharp -// Try to get a reference to the Shard Map Manager via the Shard Map Manager database. -// If it doesn't already exist, then create it. -ShardMapManager shardMapManager; -bool shardMapManagerExists = ShardMapManagerFactory.TryGetSqlShardMapManager( - connectionString, - ShardMapManagerLoadPolicy.Lazy, - out shardMapManager); - -if (shardMapManagerExists) -{ - Console.WriteLine("Shard Map Manager already exists"); -} -else -{ - // Create the Shard Map Manager. - ShardMapManagerFactory.CreateSqlShardMapManager(connectionString); - Console.WriteLine("Created SqlShardMapManager"); - - shardMapManager = ShardMapManagerFactory.GetSqlShardMapManager( - connectionString, - ShardMapManagerLoadPolicy.Lazy); - -// The connectionString contains server name, database name, and admin credentials for privileges on both the GSM and the shards themselves. -} -``` - -For the .NET version, you can use PowerShell to create a new Shard Map Manager. An example is available [here](https://gallery.technet.microsoft.com/scriptcenter/Azure-SQL-DB-Elastic-731883db). 
- -## Get a RangeShardMap or ListShardMap - -After creating a shard map manager, you can get the RangeShardMap ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.rangeshardmap), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.rangeshardmap-1)) or ListShardMap ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.listshardmap), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.listshardmap-1)) using the TryGetRangeShardMap ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapmanager.shardmapmanager.trygetrangeshardmap), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanager.trygetrangeshardmap)), the TryGetListShardMap ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapmanager.shardmapmanager.trygetlistshardmap), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanager.trygetlistshardmap)), or the GetShardMap ([Java](/java/api/com.microsoft.azure.elasticdb.shard.mapmanager.shardmapmanager.getshardmap), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanager.getshardmap)) method. - -```Java -// Creates a new Range Shard Map with the specified name, or gets the Range Shard Map if it already exists. -static RangeShardMap createOrGetRangeShardMap(ShardMapManager shardMapManager, - String shardMapName, - ShardKeyType keyType) { - // Try to get a reference to the Shard Map. 
- ReferenceObjectHelper> refRangeShardMap = new ReferenceObjectHelper<>(null); - boolean isGetSuccess = shardMapManager.tryGetRangeShardMap(shardMapName, keyType, refRangeShardMap); - RangeShardMap shardMap = refRangeShardMap.argValue; - - if (isGetSuccess && shardMap != null) { - ConsoleUtils.writeInfo("Shard Map %1$s already exists", shardMap.getName()); - } - else { - // The Shard Map does not exist, so create it - try { - shardMap = shardMapManager.createRangeShardMap(shardMapName, keyType); - } - catch (Exception e) { - e.printStackTrace(); - } - ConsoleUtils.writeInfo("Created Shard Map %1$s", shardMap.getName()); - } - - return shardMap; -} -``` - -```csharp -// Creates a new Range Shard Map with the specified name, or gets the Range Shard Map if it already exists. -public static RangeShardMap CreateOrGetRangeShardMap(ShardMapManager shardMapManager, string shardMapName) -{ - // Try to get a reference to the Shard Map. - RangeShardMap shardMap; - bool shardMapExists = shardMapManager.TryGetRangeShardMap(shardMapName, out shardMap); - - if (shardMapExists) - { - ConsoleUtils.WriteInfo("Shard Map {0} already exists", shardMap.Name); - } - else - { - // The Shard Map does not exist, so create it - shardMap = shardMapManager.CreateRangeShardMap(shardMapName); - ConsoleUtils.WriteInfo("Created Shard Map {0}", shardMap.Name); - } - - return shardMap; -} -``` - -### Shard map administration credentials - -Applications that administer and manipulate shard maps are different from those that use the shard maps to route connections. - -To administer shard maps (add or change shards, shard maps, shard mappings, etc.) you must instantiate the **ShardMapManager** using **credentials that have read/write privileges on both the GSM database and on each database that serves as a shard**. The credentials must allow for writes against the tables in both the GSM and LSM as shard map information is entered or changed, as well as for creating LSM tables on new shards. 
- -See [Credentials used to access the Elastic Database client library](elastic-scale-manage-credentials.md). - -### Only metadata affected - -Methods used for populating or changing the **ShardMapManager** data do not alter the user data stored in the shards themselves. For example, methods such as **CreateShard**, **DeleteShard**, **UpdateMapping**, etc. affect the shard map metadata only. They do not remove, add, or alter user data contained in the shards. Instead, these methods are designed to be used in conjunction with separate operations you perform to create or remove actual databases, or that move rows from one shard to another to rebalance a sharded environment. (The **split-merge** tool included with elastic database tools makes use of these APIs along with orchestrating actual data movement between shards.) See [Scaling using the Elastic Database split-merge tool](elastic-scale-overview-split-and-merge.md). - -## Data dependent routing - -The shard map manager is used in applications that require database connections to perform the app-specific data operations. Those connections must be associated with the correct database. This is known as **Data Dependent Routing**. For these applications, instantiate a shard map manager object from the factory using credentials that have read-only access on the GSM database. Individual requests for later connections supply credentials necessary for connecting to the appropriate shard database. - -Note that these applications (using **ShardMapManager** opened with read-only credentials) cannot make changes to the maps or mappings. For those needs, create administrative-specific applications or PowerShell scripts that supply higher-privileged credentials as discussed earlier. See [Credentials used to access the Elastic Database client library](elastic-scale-manage-credentials.md). - -For more information, see [Data dependent routing](elastic-scale-data-dependent-routing.md). 
- -## Modifying a shard map - -A shard map can be changed in different ways. All of the following methods modify the metadata describing the shards and their mappings, but they do not physically modify data within the shards, nor do they create or delete the actual databases. Some of the operations on the shard map described below may need to be coordinated with administrative actions that physically move data or that add and remove databases serving as shards. - -These methods work together as the building blocks available for modifying the overall distribution of data in your sharded database environment. - -* To add or remove shards: use **CreateShard** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.shardmap.createshard), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmap.createshard)) and **DeleteShard** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.shardmap.deleteshard), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmap.deleteshard)) of the shardmap ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.shardmap), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmap)) class. - - The server and database representing the target shard must already exist for these operations to execute. These methods do not have any impact on the databases themselves, only on metadata in the shard map. 
-* To create or remove points or ranges that are mapped to the shards: use **CreateRangeMapping** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.rangeshardmap.createrangemapping), [.NET](/previous-versions/azure/dn841993(v=azure.100))), **DeleteMapping** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.rangeshardmap.deletemapping), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.rangeshardmap-1)) of the RangeShardMapping ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.rangeshardmap), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.rangeshardmap-1)) class, and **CreatePointMapping** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.listshardmap.createpointmapping), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.listshardmap-1)) of the ListShardMap ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.listshardmap), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.listshardmap-1)) class. - - Many different points or ranges can be mapped to the same shard. These methods only affect metadata - they do not affect any data that may already be present in shards. If data needs to be removed from the database in order to be consistent with **DeleteMapping** operations, you perform those operations separately but in conjunction with using these methods. -* To split existing ranges into two, or merge adjacent ranges into one: use **SplitMapping** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.rangeshardmap.splitmapping), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.rangeshardmap-1)) and **MergeMappings** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.rangeshardmap.mergemappings), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.rangeshardmap-1)). - - Note that split and merge operations **do not change the shard to which key values are mapped**. 
A split breaks an existing range into two parts, but leaves both as mapped to the same shard. A merge operates on two adjacent ranges that are already mapped to the same shard, coalescing them into a single range. The movement of points or ranges themselves between shards needs to be coordinated by using **UpdateMapping** in conjunction with actual data movement. You can use the **Split/Merge** service that is part of elastic database tools to coordinate shard map changes with data movement, when movement is needed. -* To re-map (or move) individual points or ranges to different shards: use **UpdateMapping** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.rangeshardmap.updatemapping), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.rangeshardmap-1)). - - Since data may need to be moved from one shard to another in order to be consistent with **UpdateMapping** operations, you need to perform that movement separately but in conjunction with using these methods. - -* To take mappings online and offline: use **MarkMappingOffline** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.rangeshardmap.markmappingoffline), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.rangeshardmap-1)) and **MarkMappingOnline** ([Java](/java/api/com.microsoft.azure.elasticdb.shard.map.rangeshardmap.markmappingonline), [.NET](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.rangeshardmap-1)) to control the online state of a mapping. - - Certain operations on shard mappings are only allowed when a mapping is in an “offline” state, including **UpdateMapping** and **DeleteMapping**. When a mapping is offline, a data-dependent request based on a key included in that mapping returns an error. 
In addition, when a range is first taken offline, all connections to the affected shard are automatically killed in order to prevent inconsistent or incomplete results for queries directed against ranges being changed. - -Mappings are immutable objects in .NET. All of the methods above that change mappings also invalidate any references to them in your code. To make it easier to perform sequences of operations that change a mapping’s state, all of the methods that change a mapping return a new mapping reference, so operations can be chained. For example, to delete an existing mapping in shardmap sm that contains the key 25, you can execute the following: - -``` - sm.DeleteMapping(sm.MarkMappingOffline(sm.GetMappingForKey(25))); -``` - -## Adding a shard - -Applications often need to add new shards to handle data that is expected from new keys or key ranges, for a shard map that already exists. For example, an application sharded by Tenant ID may need to provision a new shard for a new tenant, or data sharded monthly may need a new shard provisioned before the start of each new month. - -If the new range of key values is not already part of an existing mapping and no data movement is necessary, it is simple to add the new shard and associate the new key or range to that shard. For details on adding new shards, see [Adding a new shard](elastic-scale-add-a-shard.md). - -For scenarios that require data movement, however, the split-merge tool is needed to orchestrate the data movement between shards in combination with the necessary shard map updates. 
For details on using the split-merge tool, see [Overview of split-merge](elastic-scale-overview-split-and-merge.md). - -[!INCLUDE [elastic-scale-include](../../../includes/elastic-scale-include.md)] - - -[1]: ./media/elastic-scale-shard-map-management/listmapping.png -[2]: ./media/elastic-scale-shard-map-management/rangemapping.png -[3]: ./media/elastic-scale-shard-map-management/multipleonsingledb.png diff --git a/articles/azure-sql/database/elastic-scale-split-merge-security-configuration.md b/articles/azure-sql/database/elastic-scale-split-merge-security-configuration.md deleted file mode 100644 index 9e0df4479dd0b..0000000000000 --- a/articles/azure-sql/database/elastic-scale-split-merge-security-configuration.md +++ /dev/null @@ -1,506 +0,0 @@ ---- -title: Split-merge security configuration -description: Set up x509 certificates for encryption with the split/merge service for elastic scale. -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: VanMSFT -ms.author: vanto -ms.reviewer: kendralittle, mathoma -ms.date: 12/18/2018 ---- -# Split-merge security configuration -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -To use the Split/Merge service, you must correctly configure security. The service is part of the Elastic Scale feature of Azure SQL Database. For more information, see [Elastic Scale Split and Merge Service Tutorial](elastic-scale-configure-deploy-split-and-merge.md). - -## Configuring certificates - -Certificates are configured in two ways. - -1. [To Configure the TLS/SSL Certificate](#to-configure-the-tlsssl-certificate) -2. [To Configure Client Certificates](#to-configure-client-certificates) - -## To obtain certificates - -Certificates can be obtained from public Certificate Authorities (CAs) or from the [Windows Certificate Service](/windows/win32/seccrypto/certificate-services). These are the preferred methods to obtain certificates. 
- -If those options are not available, you can generate **self-signed certificates**. - -## Tools to generate certificates - -* [makecert.exe](/previous-versions/dotnet/netframework-4.0/bfsktky3(v=vs.100)) -* [pvk2pfx.exe](/windows-hardware/drivers/devtest/pvk2pfx) - -### To run the tools - -* From a Developer Command Prompt for Visual Studio, see [Visual Studio Command Prompt](/dotnet/framework/tools/developer-command-prompt-for-vs) - - If installed, go to: - - ```console - %ProgramFiles(x86)%\Windows Kits\x.y\bin\x86 - ``` - -* Get the WDK from [Windows 8.1: Download kits and tools](https://msdn.microsoft.com/windows/hardware/gg454513#drivers) - -## To configure the TLS/SSL certificate - -A TLS/SSL certificate is required to encrypt the communication and authenticate the server. Choose the most applicable of the three scenarios below, and execute all its steps: - -### Create a new self-signed certificate - -1. [Create a Self-Signed Certificate](#create-a-self-signed-certificate) -2. [Create PFX file for Self-Signed TLS/SSL Certificate](#create-pfx-file-for-self-signed-tlsssl-certificate) -3. [Upload TLS/SSL Certificate to Cloud Service](#upload-tlsssl-certificate-to-cloud-service) -4. [Update TLS/SSL Certificate in Service Configuration File](#update-tlsssl-certificate-in-service-configuration-file) -5. [Import TLS/SSL Certification Authority](#import-tlsssl-certification-authority) - -### To use an existing certificate from the certificate store -1. [Export TLS/SSL Certificate From Certificate Store](#export-tlsssl-certificate-from-certificate-store) -2. [Upload TLS/SSL Certificate to Cloud Service](#upload-tlsssl-certificate-to-cloud-service) -3. [Update TLS/SSL Certificate in Service Configuration File](#update-tlsssl-certificate-in-service-configuration-file) - -### To use an existing certificate in a PFX file -1. [Upload TLS/SSL Certificate to Cloud Service](#upload-tlsssl-certificate-to-cloud-service) -2. 
[Update TLS/SSL Certificate in Service Configuration File](#update-tlsssl-certificate-in-service-configuration-file) - -## To configure client certificates -Client certificates are required in order to authenticate requests to the service. Choose the most applicable of the three scenarios below, and execute all its steps: - -### Turn off client certificates -1. [Turn Off Client Certificate-Based Authentication](#turn-off-client-certificate-based-authentication) - -### Issue new self-signed client certificates -1. [Create a Self-Signed Certification Authority](#create-a-self-signed-certification-authority) -2. [Upload CA Certificate to Cloud Service](#upload-ca-certificate-to-cloud-service) -3. [Update CA Certificate in Service Configuration File](#update-ca-certificate-in-service-configuration-file) -4. [Issue Client Certificates](#issue-client-certificates) -5. [Create PFX files for Client Certificates](#create-pfx-files-for-client-certificates) -6. [Import Client Certificate](#import-client-certificate) -7. [Copy Client Certificate Thumbprints](#copy-client-certificate-thumbprints) -8. [Configure Allowed Clients in the Service Configuration File](#configure-allowed-clients-in-the-service-configuration-file) - -### Use existing client certificates -1. [Find CA Public Key](#find-ca-public-key) -2. [Upload CA Certificate to Cloud Service](#upload-ca-certificate-to-cloud-service) -3. [Update CA Certificate in Service Configuration File](#update-ca-certificate-in-service-configuration-file) -4. [Copy Client Certificate Thumbprints](#copy-client-certificate-thumbprints) -5. [Configure Allowed Clients in the Service Configuration File](#configure-allowed-clients-in-the-service-configuration-file) -6. [Configure Client Certificate Revocation Check](#configure-client-certificate-revocation-check) - -## Allowed IP addresses -Access to the service endpoints can be restricted to specific ranges of IP addresses. 
- -## To configure encryption for the store -A certificate is required to encrypt the credentials that are stored in the metadata store. Choose the most applicable of the three scenarios below, and execute all its steps: - -### Use a new self-signed certificate -1. [Create a Self-Signed Certificate](#create-a-self-signed-certificate) -2. [Create PFX file for Self-Signed Encryption Certificate](#create-pfx-file-for-self-signed-tlsssl-certificate) -3. [Upload Encryption Certificate to Cloud Service](#upload-encryption-certificate-to-cloud-service) -4. [Update Encryption Certificate in Service Configuration File](#update-encryption-certificate-in-service-configuration-file) - -### Use an existing certificate from the certificate store -1. [Export Encryption Certificate From Certificate Store](#export-encryption-certificate-from-certificate-store) -2. [Upload Encryption Certificate to Cloud Service](#upload-encryption-certificate-to-cloud-service) -3. [Update Encryption Certificate in Service Configuration File](#update-encryption-certificate-in-service-configuration-file) - -### Use an existing certificate in a PFX file -1. [Upload Encryption Certificate to Cloud Service](#upload-encryption-certificate-to-cloud-service) -2. [Update Encryption Certificate in Service Configuration File](#update-encryption-certificate-in-service-configuration-file) - -## The default configuration -The default configuration denies all access to the HTTP endpoint. This is the recommended setting, since the requests to these endpoints may carry sensitive information like database credentials. -The default configuration allows all access to the HTTPS endpoint. This setting may be restricted further. - -### Changing the Configuration -The group of access control rules that apply to an endpoint is configured in the **\** section in the **service configuration file**. 
- -```xml - - - - -``` - -The rules in an access control group are configured in a \ section of the service configuration file. - -The format is explained in Network Access Control Lists documentation. -For example, to allow only IPs in the range 100.100.0.0 to 100.100.255.255 to access the HTTPS endpoint, the rules would look like this: - -```xml - - - - - - - -``` - -## Denial of service prevention -There are two different mechanisms supported to detect and prevent Denial of Service attacks: - -* Restrict number of concurrent requests per remote host (off by default) -* Restrict rate of access per remote host (on by default) - -These are based on the features further documented in Dynamic IP Security in IIS. When changing this configuration beware of the following factors: - -* The behavior of proxies and Network Address Translation devices over the remote host information -* Each request to any resource in the web role is considered (for example, loading scripts, images, etc) - -## Restricting number of concurrent accesses -The settings that configure this behavior are: - -```xml - - -``` - -Change DynamicIpRestrictionDenyByConcurrentRequests to true to enable this protection. - -## Restricting rate of access -The settings that configure this behavior are: - -```xml - - - -``` - -## Configuring the response to a denied request -The following setting configures the response to a denied request: - -```xml - -``` - -Refer to the documentation for Dynamic IP Security in IIS for other supported values. - -## Operations for configuring service certificates -This topic is for reference only. 
Follow the configuration steps outlined in: - -* Configure the TLS/SSL certificate -* Configure client certificates - -## Create a self-signed certificate -Execute: - -```console -makecert ^ - -n "CN=myservice.cloudapp.net" ^ - -e MM/DD/YYYY ^ - -r -cy end -sky exchange -eku "1.3.6.1.5.5.7.3.1" ^ - -a sha256 -len 2048 ^ - -sv MySSL.pvk MySSL.cer -``` - -To customize: - -* -n with the service URL. Wildcards ("CN=*.cloudapp.net") and alternative names ("CN=myservice1.cloudapp.net, CN=myservice2.cloudapp.net") are supported. -* -e with the certificate expiration date - Create a strong password and specify it when prompted. - -## Create PFX file for self-signed TLS/SSL certificate -Execute: - -```console -pvk2pfx -pvk MySSL.pvk -spc MySSL.cer -``` - -Enter password and then export certificate with these options: - -* Yes, export the private key -* Export all extended properties - -## Export TLS/SSL certificate from certificate store -* Find certificate -* Click Actions -> All tasks -> Export… -* Export certificate into a .PFX file with these options: - * Yes, export the private key - * Include all certificates in the certification path if possible - * Export all extended properties - -## Upload TLS/SSL certificate to cloud service -Upload certificate with the existing or generated .PFX file with the TLS key pair: - -* Enter the password protecting the private key information - -## Update TLS/SSL certificate in service configuration file -Update the thumbprint value of the following setting in the service configuration file with the thumbprint of the certificate uploaded to the cloud service: - -```console - -``` - -## Import TLS/SSL certification authority -Follow these steps on every account/machine that will communicate with the service: - -* Double-click the .CER file in Windows Explorer -* In the Certificate dialog, click Install Certificate… -* Import certificate into the Trusted Root Certification Authorities store - -## Turn off client certificate-based 
authentication -Only client certificate-based authentication is supported and disabling it will allow for public access to the service endpoints, unless other mechanisms are in place (for example, Microsoft Azure Virtual Network). - -Change these settings to false in the service configuration file to turn off the feature: - -```xml - - -``` - -Then, copy the same thumbprint as the TLS/SSL certificate in the CA certificate setting: - -```xml - -``` - -## Create a self-signed certification authority -Execute the following steps to create a self-signed certificate to act as a Certification Authority: - -```console -makecert ^ - -n "CN=MyCA" ^ - -e MM/DD/YYYY ^ - -r -cy authority -h 1 ^ - -a sha256 -len 2048 ^ - -sr localmachine -ss my ^ - MyCA.cer -``` - -To customize it: - -* -e with the certification expiration date - -## Find CA public key -All client certificates must have been issued by a Certification Authority trusted by the service. Find the public key to the Certification Authority that issued the client certificates that are going to be used for authentication in order to upload it to the cloud service. - -If the file with the public key is not available, export it from the certificate store: - -* Find certificate - * Search for a client certificate issued by the same Certification Authority -* Double-click the certificate. -* Select the Certification Path tab in the Certificate dialog. -* Double-click the CA entry in the path. -* Take notes of the certificate properties. -* Close the **Certificate** dialog. -* Find certificate - * Search for the CA noted above. -* Click Actions -> All tasks -> Export… -* Export certificate into a .CER with these options: - * **No, do not export the private key** - * Include all certificates in the certification path if possible. - * Export all extended properties. - -## Upload CA certificate to cloud service -Upload certificate with the existing or generated .CER file with the CA public key. 
- -## Update CA certificate in service configuration file -Update the thumbprint value of the following setting in the service configuration file with the thumbprint of the certificate uploaded to the cloud service: - -```xml - -``` - -Update the value of the following setting with the same thumbprint: - -```xml - -``` - -## Issue client certificates -Each individual authorized to access the service should have a client certificate issued for their exclusive use and should choose their own strong password to protect its private key. - -The following steps must be executed on the same machine where the self-signed CA certificate was generated and stored: - -```console -makecert ^ - -n "CN=My ID" ^ - -e MM/DD/YYYY ^ - -cy end -sky exchange -eku "1.3.6.1.5.5.7.3.2" ^ - -a sha256 -len 2048 ^ - -in "MyCA" -ir localmachine -is my ^ - -sv MyID.pvk MyID.cer -``` - -Customizing: - -* -n with an ID for the client that will be authenticated with this certificate -* -e with the certificate expiration date -* MyID.pvk and MyID.cer with unique filenames for this client certificate - -This command will prompt for a password to be created and then used once. Use a strong password. 
- -## Create PFX files for client certificates -For each generated client certificate, execute: - -```console -pvk2pfx -pvk MyID.pvk -spc MyID.cer -``` - -Customizing: - -```console -MyID.pvk and MyID.cer with the filename for the client certificate -``` - -Enter password and then export certificate with these options: - -* Yes, export the private key -* Export all extended properties -* The individual to whom this certificate is being issued should choose the export password - -## Import client certificate -Each individual for whom a client certificate has been issued should import the key pair in the machines they will use to communicate with the service: - -* Double-click the .PFX file in Windows Explorer -* Import certificate into the Personal store with at least this option: - * Include all extended properties checked - -## Copy client certificate thumbprints -Each individual for whom a client certificate has been issued must follow these steps in order to obtain the thumbprint of their certificate, which will be added to the service configuration file: - -* Run certmgr.exe -* Select the Personal tab -* Double-click the client certificate to be used for authentication -* In the Certificate dialog that opens, select the Details tab -* Make sure Show is displaying All -* Select the field named Thumbprint in the list -* Copy the value of the thumbprint - * Delete non-visible Unicode characters in front of the first digit - * Delete all spaces - -## Configure Allowed clients in the service configuration file -Update the value of the following setting in the service configuration file with a comma-separated list of the thumbprints of the client certificates allowed access to the service: - -```xml - -``` - -## Configure client certificate revocation check -The default setting does not check with the Certification Authority for client certificate revocation status. 
To turn on the checks, if the Certification Authority that issued the client certificates supports such checks, change the following setting with one of the values defined in the X509RevocationMode Enumeration: - -```xml - -``` - -## Create PFX file for self-signed encryption certificates -For an encryption certificate, execute: - -```console -pvk2pfx -pvk MyID.pvk -spc MyID.cer -``` - -Customizing: - -```console -MyID.pvk and MyID.cer with the filename for the encryption certificate -``` - -Enter password and then export certificate with these options: - -* Yes, export the private key -* Export all extended properties -* You will need the password when uploading the certificate to the cloud service. - -## Export encryption certificate from certificate store -* Find certificate -* Click Actions -> All tasks -> Export… -* Export certificate into a .PFX file with these options: - * Yes, export the private key - * Include all certificates in the certification path if possible -* Export all extended properties - -## Upload encryption certificate to cloud service -Upload certificate with the existing or generated .PFX file with the encryption key pair: - -* Enter the password protecting the private key information - -## Update encryption certificate in service configuration file -Update the thumbprint value of the following settings in the service configuration file with the thumbprint of the certificate uploaded to the cloud service: - -```xml - -``` - -## Common certificate operations -* Configure the TLS/SSL certificate -* Configure client certificates - -## Find certificate -Follow these steps: - -1. Run mmc.exe. -2. File -> Add/Remove Snap-in… -3. Select **Certificates**. -4. Click **Add**. -5. Choose the certificate store location. -6. Click **Finish**. -7. Click **OK**. -8. Expand **Certificates**. -9. Expand the certificate store node. -10. Expand the Certificate child node. -11. Select a certificate in the list. 
- -## Export certificate -In the **Certificate Export Wizard**: - -1. Click **Next**. -2. Select **Yes**, then **Export the private key**. -3. Click **Next**. -4. Select the desired output file format. -5. Check the desired options. -6. Check **Password**. -7. Enter a strong password and confirm it. -8. Click **Next**. -9. Type or browse a filename where to store the certificate (use a .PFX extension). -10. Click **Next**. -11. Click **Finish**. -12. Click **OK**. - -## Import certificate -In the Certificate Import Wizard: - -1. Select the store location. - - * Select **Current User** if only processes running under current user will access the service - * Select **Local Machine** if other processes in this computer will access the service -2. Click **Next**. -3. If importing from a file, confirm the file path. -4. If importing a .PFX file: - 1. Enter the password protecting the private key - 2. Select import options -5. Select "Place" certificates in the following store -6. Click **Browse**. -7. Select the desired store. -8. Click **Finish**. - - * If the Trusted Root Certification Authority store was chosen, click **Yes**. -9. Click **OK** on all dialog windows. - -## Upload certificate -In the [Azure portal](https://portal.azure.com/) - -1. Select **Cloud Services**. -2. Select the cloud service. -3. On the top menu, click **Certificates**. -4. On the bottom bar, click **Upload**. -5. Select the certificate file. -6. If it is a .PFX file, enter the password for the private key. -7. Once completed, copy the certificate thumbprint from the new entry in the list. - -## Other security considerations -The TLS settings described in this document encrypt communication between the service and its clients when the HTTPS endpoint is used. This is important since credentials for database access and potentially other sensitive information are contained in the communication. 
Note, however, that the service persists internal status, including credentials, in its internal tables in the database in Azure SQL Database that you have provided for metadata storage in your Microsoft Azure subscription. That database was defined as part of the following setting in your service configuration file (.CSCFG file): - -```xml - -``` - -Credentials stored in this database are encrypted. However, as a best practice, ensure that both web and worker roles of your service deployments are kept up to date and secure as they both have access to the metadata database and the certificate used for encryption and decryption of stored credentials. - -[!INCLUDE [elastic-scale-include](../../../includes/elastic-scale-include.md)] \ No newline at end of file diff --git a/articles/azure-sql/database/elastic-scale-upgrade-client-library.md b/articles/azure-sql/database/elastic-scale-upgrade-client-library.md deleted file mode 100644 index 13a2d02f6313b..0000000000000 --- a/articles/azure-sql/database/elastic-scale-upgrade-client-library.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Upgrade to the latest elastic database client library -description: Use NuGet to upgrade elastic database client library. -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 01/03/2019 ---- -# Upgrade an app to use the latest elastic database client library -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -New versions of the [Elastic Database client library](elastic-database-client-library.md) are available through NuGet and the NuGet Package Manager interface in Visual Studio. Upgrades contain bug fixes and support for new capabilities of the client library. 
- -**For the latest version:** Go to [Microsoft.Azure.SqlDatabase.ElasticScale.Client](https://www.nuget.org/packages/Microsoft.Azure.SqlDatabase.ElasticScale.Client/). - -Rebuild your application with the new library, as well as change your existing Shard Map Manager metadata stored in your databases in Azure SQL Database to support new features. - -Performing these steps in order ensures that old versions of the client library are no longer present in your environment when metadata objects are updated, which means that old-version metadata objects won’t be created after upgrade. - -## Upgrade steps - -**1. Upgrade your applications.** In Visual Studio, download and reference the latest client library version into all of your development projects that use the library; then rebuild and deploy. - -* In your Visual Studio solution, select **Tools** --> **NuGet Package Manager** --> **Manage NuGet Packages for Solution**. -* (Visual Studio 2013) In the left panel, select **Updates**, and then select the **Update** button on the package **Azure SQL Database Elastic Scale Client Library** that appears in the window. -* (Visual Studio 2015) Set the Filter box to **Upgrade available**. Select the package to update, and click the **Update** button. -* (Visual Studio 2017) At the top of the dialog, select **Updates**. Select the package to update, and click the **Update** button. -* Build and Deploy. - -**2. Upgrade your scripts.** If you are using **PowerShell** scripts to manage shards, [download the new library version](https://www.nuget.org/packages/Microsoft.Azure.SqlDatabase.ElasticScale.Client/) and copy it into the directory from which you execute scripts. - -**3. Upgrade your split-merge service.** If you use the elastic database split-merge tool to reorganize sharded data, [download and deploy the latest version of the tool](https://www.nuget.org/packages/Microsoft.Azure.SqlDatabase.ElasticScale.Service.SplitMerge/). 
Detailed upgrade steps for the Service can be found [here](elastic-scale-overview-split-and-merge.md). - -**4. Upgrade your Shard Map Manager databases**. Upgrade the metadata supporting your Shard Maps in Azure SQL Database. There are two ways you can accomplish this, using PowerShell or C#. Both options are shown below. - -***Option 1: Upgrade metadata using PowerShell*** - -1. Download the latest command-line utility for NuGet from [here](https://nuget.org/nuget.exe) and save to a folder. -2. Open a Command Prompt, navigate to the same folder, and issue the command: - `nuget install Microsoft.Azure.SqlDatabase.ElasticScale.Client` -3. Navigate to the subfolder containing the new client DLL version you have just downloaded, for example: - `cd .\Microsoft.Azure.SqlDatabase.ElasticScale.Client.1.0.0\lib\net45` -4. Download the elastic database client upgrade script from the [Script Center](https://gallery.technet.microsoft.com/scriptcenter/Azure-SQL-Database-Elastic-6442e6a9), and save it into the same folder containing the DLL. -5. From that folder, run “PowerShell .\upgrade.ps1” from the command prompt and follow the prompts. - -***Option 2: Upgrade metadata using C#*** - -Alternatively, create a Visual Studio application that opens your ShardMapManager, iterates over all shards, and performs the metadata upgrade by calling the methods [UpgradeLocalStore](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanager.upgradelocalstore) and [UpgradeGlobalStore](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.shardmapmanager.upgradeglobalstore) as in this example: - -```csharp - ShardMapManager smm = - ShardMapManagerFactory.GetSqlShardMapManager - (connStr, ShardMapManagerLoadPolicy.Lazy); - smm.UpgradeGlobalStore(); - - foreach (ShardLocation loc in - smm.GetDistinctShardLocations()) - { - smm.UpgradeLocalStore(loc); - } -``` - -These techniques for metadata upgrades can be applied multiple times without harm. 
For example, if an older client version inadvertently creates a shard after you have already updated, you can run upgrade again across all shards to ensure that the latest metadata version is present throughout your infrastructure. - -**Note:** New versions of the client library published to-date continue to work with prior versions of the Shard Map Manager metadata on Azure SQL Database, and vice-versa. However to take advantage of some of the new features in the latest client, metadata needs to be upgraded. Note that metadata upgrades will not affect any user-data or application-specific data, only objects created and used by the Shard Map Manager. And applications continue to operate through the upgrade sequence described above. - -## Elastic database client version history - -For version history, go to [Microsoft.Azure.SqlDatabase.ElasticScale.Client](https://www.nuget.org/packages/Microsoft.Azure.SqlDatabase.ElasticScale.Client/) - -[!INCLUDE [elastic-scale-include](../../../includes/elastic-scale-include.md)] - - -[1]:./media/sql-database-elastic-scale-upgrade-client-library/nuget-upgrade.png \ No newline at end of file diff --git a/articles/azure-sql/database/elastic-scale-use-entity-framework-applications-visual-studio.md b/articles/azure-sql/database/elastic-scale-use-entity-framework-applications-visual-studio.md deleted file mode 100644 index 4f4b3f305d837..0000000000000 --- a/articles/azure-sql/database/elastic-scale-use-entity-framework-applications-visual-studio.md +++ /dev/null @@ -1,278 +0,0 @@ ---- -title: Using elastic database client library with Entity Framework -description: Use Elastic Database client library and Entity Framework for coding databases -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: sample -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 01/04/2019 ---- -# Elastic Database client library with Entity Framework 
-[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This document shows the changes in an Entity Framework application that are needed to integrate with the [Elastic Database tools](elastic-scale-introduction.md). The focus is on composing [shard map management](elastic-scale-shard-map-management.md) and [data-dependent routing](elastic-scale-data-dependent-routing.md) with the Entity Framework **Code First** approach. The [Code First - New Database](/ef/ef6/modeling/code-first/workflows/new-database) tutorial for EF serves as the running example throughout this document. The sample code accompanying this document is part of elastic database tools' set of samples in the Visual Studio Code Samples. - -## Downloading and Running the Sample Code - -To download the code for this article: - -* Visual Studio 2012 or later is required. -* Download the [Elastic DB Tools for Azure SQL - Entity Framework Integration sample](https://github.com/Azure/elastic-db-tools/). Unzip the sample to a location of your choosing. -* Start Visual Studio. -* In Visual Studio, select File -> Open Project/Solution. -* In the **Open Project** dialog, navigate to the sample you downloaded and select **EntityFrameworkCodeFirst.sln** to open the sample. - -To run the sample, you need to create three empty databases in Azure SQL Database: - -* Shard Map Manager database -* Shard 1 database -* Shard 2 database - -Once you have created these databases, fill in the place holders in **Program.cs** with your server name, the database names, and your credentials to connect to the databases. Build the solution in Visual Studio. Visual Studio downloads the required NuGet packages for the elastic database client library, Entity Framework, and Transient Fault handling as part of the build process. Make sure that restoring NuGet packages is enabled for your solution. You can enable this setting by right-clicking on the solution file in the Visual Studio Solution Explorer. 
- -## Entity Framework workflows - -Entity Framework developers rely on one of the following four workflows to build applications and to ensure persistence for application objects: - -* **Code First (New Database)**: The EF developer creates the model in the application code and then EF generates the database from it. -* **Code First (Existing Database)**: The developer lets EF generate the application code for the model from an existing database. -* **Model First**: The developer creates the model in the EF designer and then EF creates the database from the model. -* **Database First**: The developer uses EF tooling to infer the model from an existing database. - -All these approaches rely on the DbContext class to transparently manage database connections and database schema for an application. Different constructors on the DbContext base class allow for different levels of control over connection creation, database bootstrapping, and schema creation. Challenges arise primarily from the fact that the database connection management provided by EF intersects with the connection management capabilities of the data-dependent routing interfaces provided by the elastic database client library. - -## Elastic database tools assumptions - -For term definitions, see [Elastic Database tools glossary](elastic-scale-glossary.md). - -With elastic database client library, you define partitions of your application data called shardlets. Shardlets are identified by a sharding key and are mapped to specific databases. An application may have as many databases as needed and distribute the shardlets to provide enough capacity or performance given current business requirements. The mapping of sharding key values to the databases is stored by a shard map provided by the elastic database client APIs. This capability is called **Shard Map Management**, or SMM for short. The shard map also serves as the broker of database connections for requests that carry a sharding key. 
This capability is known as **data-dependent routing**. - -The shard map manager protects users from inconsistent views into shardlet data that can occur when concurrent shardlet management operations (such as relocating data from one shard to another) are happening. To do so, the shard maps managed by the client library broker the database connections for an application. This allows the shard map functionality to automatically kill a database connection when shard management operations could impact the shardlet that the connection has been created for. This approach needs to integrate with some of EF’s functionality, such as creating new connections from an existing one to check for database existence. In general, our observation has been that the standard DbContext constructors only work reliably for closed database connections that can safely be cloned for EF work. The design principle of elastic database instead is to only broker opened connections. One might think that closing a connection brokered by the client library before handing it over to the EF DbContext may solve this issue. However, by closing the connection and relying on EF to reopen it, one foregoes the validation and consistency checks performed by the library. The migrations functionality in EF, however, uses these connections to manage the underlying database schema in a way that is transparent to the application. Ideally, you will retain and combine all these capabilities from both the elastic database client library and EF in the same application. The following section discusses these properties and requirements in more detail. - -## Requirements - -When working with both the elastic database client library and Entity Framework APIs, you want to retain the following properties: - -* **Scale-out**: To add or remove databases from the data tier of the sharded application as necessary for the capacity demands of the application. 
This means control over the creation and deletion of databases and using the elastic database shard map manager APIs to manage databases, and mappings of shardlets. -* **Consistency**: The application employs sharding, and uses the data-dependent routing capabilities of the client library. To avoid corruption or wrong query results, connections are brokered through the shard map manager. This also retains validation and consistency. -* **Code First**: To retain the convenience of EF’s code first paradigm. In Code First, classes in the application are mapped transparently to the underlying database structures. The application code interacts with DbSets that mask most aspects involved in the underlying database processing. -* **Schema**: Entity Framework handles initial database schema creation and subsequent schema evolution through migrations. By retaining these capabilities, adapting your app is easy as the data evolves. - -The following guidance instructs how to satisfy these requirements for Code First applications using elastic database tools. - -## Data-dependent routing using EF DbContext - -Database connections with Entity Framework are typically managed through subclasses of **DbContext**. Create these subclasses by deriving from **DbContext**. This is where you define your **DbSets** that implement the database-backed collections of CLR objects for your application. In the context of data-dependent routing, you can identify several helpful properties that do not necessarily hold for other EF code first application scenarios: - -* The database already exists and has been registered in the elastic database shard map. -* The schema of the application has already been deployed to the database (explained below). -* Data-dependent routing connections to the database are brokered by the shard map. - -To integrate **DbContexts** with data-dependent routing for scale-out: - -1. 
Create physical database connections through the elastic database client interfaces of the shard map manager. -2. Wrap the connection with the **DbContext** subclass -3. Pass the connection down into the **DbContext** base classes to ensure all the processing on the EF side happens as well. - -The following code example illustrates this approach. (This code is also in the accompanying Visual Studio project) - -```csharp -public class ElasticScaleContext : DbContext -{ -public DbSet Blogs { get; set; } -... - - // C'tor for data-dependent routing. This call opens a validated connection - // routed to the proper shard by the shard map manager. - // Note that the base class c'tor call fails for an open connection - // if migrations need to be done and SQL credentials are used. This is the reason for the - // separation of c'tors into the data-dependent routing case (this c'tor) and the internal c'tor for new shards. - public ElasticScaleContext(ShardMap shardMap, T shardingKey, string connectionStr) - : base(CreateDDRConnection(shardMap, shardingKey, connectionStr), - true /* contextOwnsConnection */) - { - } - - // Only static methods are allowed in calls into base class c'tors. 
- private static DbConnection CreateDDRConnection( - ShardMap shardMap, - T shardingKey, - string connectionStr) - { - // No initialization - Database.SetInitializer>(null); - - // Ask shard map to broker a validated connection for the given key - SqlConnection conn = shardMap.OpenConnectionForKey - (shardingKey, connectionStr, ConnectionOptions.Validate); - return conn; - } -``` - -## Main points - -* A new constructor replaces the default constructor in the DbContext subclass -* The new constructor takes the arguments that are required for data-dependent routing through elastic database client library: - - * the shard map to access the data-dependent routing interfaces, - * the sharding key to identify the shardlet, - * a connection string with the credentials for the data-dependent routing connection to the shard. -* The call to the base class constructor takes a detour into a static method that performs all the steps necessary for data-dependent routing. - - * It uses the OpenConnectionForKey call of the elastic database client interfaces on the shard map to establish an open connection. - * The shard map creates the open connection to the shard that holds the shardlet for the given sharding key. - * This open connection is passed back to the base class constructor of DbContext to indicate that this connection is to be used by EF instead of letting EF create a new connection automatically. This way the connection has been tagged by the elastic database client API so that it can guarantee consistency under shard map management operations. - -Use the new constructor for your DbContext subclass instead of the default constructor in your code. Here is an example: - -```csharp -// Create and save a new blog. 
- -Console.Write("Enter a name for a new blog: "); -var name = Console.ReadLine(); - -using (var db = new ElasticScaleContext( - sharding.ShardMap, - tenantId1, - connStrBldr.ConnectionString)) -{ - var blog = new Blog { Name = name }; - db.Blogs.Add(blog); - db.SaveChanges(); - - // Display all Blogs for tenant 1 - var query = from b in db.Blogs - orderby b.Name - select b; - … -} -``` - -The new constructor opens the connection to the shard that holds the data for the shardlet identified by the value of **tenantid1**. The code in the **using** block stays unchanged to access the **DbSet** for blogs using EF on the shard for **tenantid1**. This changes semantics for the code in the using block such that all database operations are now scoped to the one shard where **tenantid1** is kept. For instance, a LINQ query over the blogs **DbSet** would only return blogs stored on the current shard, but not the ones stored on other shards. - -### Transient faults handling - -The Microsoft Patterns & Practices team published the [The Transient Fault Handling Application Block](/previous-versions/msp-n-p/dn440719(v=pandp.60)). The library is used with elastic scale client library in combination with EF. However, ensure that any transient exception returns to a place where you can ensure that the new constructor is being used after a transient fault so that any new connection attempt is made using the constructors you tweaked. Otherwise, a connection to the correct shard is not guaranteed, and there are no assurances the connection is maintained as changes to the shard map occur. 
- -The following code sample illustrates how a SQL retry policy can be used around the new **DbContext** subclass constructors: - -```csharp -SqlDatabaseUtils.SqlRetryPolicy.ExecuteAction(() => -{ - using (var db = new ElasticScaleContext( - sharding.ShardMap, - tenantId1, - connStrBldr.ConnectionString)) - { - var blog = new Blog { Name = name }; - db.Blogs.Add(blog); - db.SaveChanges(); - … - } - }); -``` - -**SqlDatabaseUtils.SqlRetryPolicy** in the code above is defined as a **SqlDatabaseTransientErrorDetectionStrategy** with a retry count of 10, and 5 seconds wait time between retries. This approach is similar to the guidance for EF and user-initiated transactions (see [Limitations with Retrying Execution Strategies (EF6 onwards)](/ef/ef6/fundamentals/connection-resiliency/retry-logic)). Both situations require that the application program controls the scope to which the transient exception returns: to either reopen the transaction, or (as shown) recreate the context from the proper constructor that uses the elastic database client library. - -The need to control where transient exceptions take us back in scope also precludes the use of the built-in **SqlAzureExecutionStrategy** that comes with EF. **SqlAzureExecutionStrategy** would reopen a connection but not use **OpenConnectionForKey** and therefore bypass all the validation that is performed as part of the **OpenConnectionForKey** call. Instead, the code sample uses the built-in **DefaultExecutionStrategy** that also comes with EF. As opposed to **SqlAzureExecutionStrategy**, it works correctly in combination with the retry policy from Transient Fault Handling. The execution policy is set in the **ElasticScaleDbConfiguration** class. Note that we decided not to use **DefaultSqlExecutionStrategy** since it suggests using **SqlAzureExecutionStrategy** if transient exceptions occur - which would lead to wrong behavior as discussed. 
For more information on the different retry policies and EF, see [Connection Resiliency in EF](/ef/ef6/fundamentals/connection-resiliency/retry-logic). - -#### Constructor rewrites - -The code examples above illustrate the default constructor re-writes required for your application in order to use data-dependent routing with the Entity Framework. The following table generalizes this approach to other constructors. - -| Current Constructor | Rewritten Constructor for data | Base Constructor | Notes | -| --- | --- | --- | --- | -| MyContext() |ElasticScaleContext(ShardMap, TKey) |DbContext(DbConnection, bool) |The connection needs to be a function of the shard map and the data-dependent routing key. You need to by-pass automatic connection creation by EF and instead use the shard map to broker the connection. | -| MyContext(string) |ElasticScaleContext(ShardMap, TKey) |DbContext(DbConnection, bool) |The connection is a function of the shard map and the data-dependent routing key. A fixed database name or connection string does not work as they by-pass validation by the shard map. | -| MyContext(DbCompiledModel) |ElasticScaleContext(ShardMap, TKey, DbCompiledModel) |DbContext(DbConnection, DbCompiledModel, bool) |The connection gets created for the given shard map and sharding key with the model provided. The compiled model is passed on to the base c’tor. | -| MyContext(DbConnection, bool) |ElasticScaleContext(ShardMap, TKey, bool) |DbContext(DbConnection, bool) |The connection needs to be inferred from the shard map and the key. It cannot be provided as an input (unless that input was already using the shard map and the key). The Boolean is passed on. | -| MyContext(string, DbCompiledModel) |ElasticScaleContext(ShardMap, TKey, DbCompiledModel) |DbContext(DbConnection, DbCompiledModel, bool) |The connection needs to be inferred from the shard map and the key. It cannot be provided as an input (unless that input was using the shard map and the key). 
The compiled model is passed on. | -| MyContext(ObjectContext, bool) |ElasticScaleContext(ShardMap, TKey, ObjectContext, bool) |DbContext(ObjectContext, bool) |The new constructor needs to ensure that any connection in the ObjectContext passed as an input is re-routed to a connection managed by Elastic Scale. A detailed discussion of ObjectContexts is beyond the scope of this document. | -| MyContext(DbConnection, DbCompiledModel, bool) |ElasticScaleContext(ShardMap, TKey, DbCompiledModel, bool) |DbContext(DbConnection, DbCompiledModel, bool); |The connection needs to be inferred from the shard map and the key. The connection cannot be provided as an input (unless that input was already using the shard map and the key). Model and Boolean are passed on to the base class constructor. | - -## Shard schema deployment through EF migrations - -Automatic schema management is a convenience provided by the Entity Framework. In the context of applications using elastic database tools, you want to retain this capability to automatically provision the schema to newly created shards when databases are added to the sharded application. The primary use case is to increase capacity at the data tier for sharded applications using EF. Relying on EF’s capabilities for schema management reduces the database administration effort with a sharded application built on EF. - -Schema deployment through EF migrations works best on **unopened connections**. This is in contrast to the scenario for data-dependent routing that relies on the opened connection provided by the elastic database client API. Another difference is the consistency requirement: While desirable to ensure consistency for all data-dependent routing connections to protect against concurrent shard map manipulation, it is not a concern with initial schema deployment to a new database that has not yet been registered in the shard map, and not yet been allocated to hold shardlets. 
You can therefore rely on regular database connections for this scenario, as opposed to data-dependent routing. - -This leads to an approach where schema deployment through EF migrations is tightly coupled with the registration of the new database as a shard in the application’s shard map. This relies on the following prerequisites: - -* The database has already been created. -* The database is empty - it holds no user schema and no user data. -* The database cannot yet be accessed through the elastic database client APIs for data-dependent routing. - -With these prerequisites in place, you can create a regular un-opened **SqlConnection** to kick off EF migrations for schema deployment. The following code sample illustrates this approach. - -```csharp -// Enter a new shard - i.e. an empty database - to the shard map, allocate a first tenant to it -// and kick off EF initialization of the database to deploy schema - -public void RegisterNewShard(string server, string database, string connStr, int key) -{ - - Shard shard = this.ShardMap.CreateShard(new ShardLocation(server, database)); - - SqlConnectionStringBuilder connStrBldr = new SqlConnectionStringBuilder(connStr); - connStrBldr.DataSource = server; - connStrBldr.InitialCatalog = database; - - // Go into a DbContext to trigger migrations and schema deployment for the new shard. - // This requires an un-opened connection. - using (var db = new ElasticScaleContext(connStrBldr.ConnectionString)) - { - // Run a query to engage EF migrations - (from b in db.Blogs - select b).Count(); - } - - // Register the mapping of the tenant to the shard in the shard map. - // After this step, data-dependent routing on the shard map can be used - - this.ShardMap.CreatePointMapping(key, shard); -} -``` - -This sample shows the method **RegisterNewShard** that registers the shard in the shard map, deploys the schema through EF migrations, and stores a mapping of a sharding key to the shard. 
It relies on a constructor of the **DbContext** subclass (**ElasticScaleContext** in the sample) that takes a SQL connection string as input. The code of this constructor is straight-forward, as the following example shows: - -```csharp -// C'tor to deploy schema and migrations to a new shard -protected internal ElasticScaleContext(string connectionString) - : base(SetInitializerForConnection(connectionString)) -{ -} - -// Only static methods are allowed in calls into base class c'tors -private static string SetInitializerForConnection(string connectionString) -{ - // You want existence checks so that the schema can get deployed - Database.SetInitializer<ElasticScaleContext<int>>( -new CreateDatabaseIfNotExists<ElasticScaleContext<int>>()); - - return connectionString; -} -``` - -One might have used the version of the constructor inherited from the base class. But the code needs to ensure that the default initializer for EF is used when connecting. Hence the short detour into the static method before calling into the base class constructor with the connection string. Note that the registration of shards should run in a different app domain or process to ensure that the initializer settings for EF do not conflict. - -## Limitations - -The approaches outlined in this document entail a couple of limitations: - -* EF applications that use **LocalDb** first need to migrate to a regular SQL Server database before using elastic database client library. Scaling out an application through sharding with Elastic Scale is not possible with **LocalDb**. Note that development can still use **LocalDb**. -* Any changes to the application that imply database schema changes need to go through EF migrations on all shards. The sample code for this document does not demonstrate how to do this. Consider using Update-Database with a ConnectionString parameter to iterate over all shards; or extract the T-SQL script for the pending migration using Update-Database with the -Script option and apply the T-SQL script to your shards.
-* Given a request, it is assumed that all of its database processing is contained within a single shard as identified by the sharding key provided by the request. However, this assumption does not always hold true. For example, when it is not possible to make a sharding key available. To address this, the client library provides the **MultiShardQuery** class that implements a connection abstraction for querying over several shards. Learning to use the **MultiShardQuery** in combination with EF is beyond the scope of this document - -## Conclusion - -Through the steps outlined in this document, EF applications can use the elastic database client library's capability for data-dependent routing by refactoring constructors of the **DbContext** subclasses used in the EF application. This limits the changes required to those places where **DbContext** classes already exist. In addition, EF applications can continue to benefit from automatic schema deployment by combining the steps that invoke the necessary EF migrations with the registration of new shards and mappings in the shard map. - -[!INCLUDE [elastic-scale-include](../../../includes/elastic-scale-include.md)] - - -[1]: ./media/sql-database-elastic-scale-use-entity-framework-applications-visual-studio/sample.png \ No newline at end of file diff --git a/articles/azure-sql/database/elastic-scale-working-with-dapper.md b/articles/azure-sql/database/elastic-scale-working-with-dapper.md deleted file mode 100644 index 8269cf37f54fe..0000000000000 --- a/articles/azure-sql/database/elastic-scale-working-with-dapper.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -title: Using the elastic database client library with Dapper -description: Using the elastic database client library with Dapper. 
-services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 12/04/2018 ---- -# Using the elastic database client library with Dapper -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This document is for developers that rely on Dapper to build applications, but also want to embrace [elastic database tooling](elastic-scale-introduction.md) to create applications that implement sharding to scale out their data tier. This document illustrates the changes in Dapper-based applications that are necessary to integrate with elastic database tools. Our focus is on composing the elastic database shard management and data-dependent routing with Dapper. - -**Sample Code**: [Elastic database tools for Azure SQL Database - Dapper integration](https://code.msdn.microsoft.com/Elastic-Scale-with-Azure-e19fc77f). - -Integrating **Dapper** and **DapperExtensions** with the elastic database client library for Azure SQL Database is easy. Your applications can use data-dependent routing by changing the creation and opening of new [SqlConnection](/dotnet/api/system.data.sqlclient.sqlconnection) objects to use the [OpenConnectionForKey](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.rangeshardmap-1) call from the [client library](/previous-versions/azure/dn765902(v=azure.100)). This limits changes in your application to only where new connections are created and opened. - -## Dapper overview -**Dapper** is an object-relational mapper. It maps .NET objects from your application to a relational database (and vice versa). The first part of the sample code illustrates how you can integrate the elastic database client library with Dapper-based applications. The second part of the sample code illustrates how to integrate when using both Dapper and DapperExtensions. 
- -The mapper functionality in Dapper provides extension methods on database connections that simplify submitting T-SQL statements for execution or querying the database. For instance, Dapper makes it easy to map between your .NET objects and the parameters of SQL statements for **Execute** calls, or to consume the results of your SQL queries into .NET objects using **Query** calls from Dapper. - -When using DapperExtensions, you no longer need to provide the SQL statements. Extensions methods such as **GetList** or **Insert** over the database connection create the SQL statements behind the scenes. - -Another benefit of Dapper and also DapperExtensions is that the application controls the creation of the database connection. This helps interact with the elastic database client library which brokers database connections based on the mapping of shardlets to databases. - -To get the Dapper assemblies, see [Dapper dot net](https://www.nuget.org/packages/Dapper/). For the Dapper extensions, see [DapperExtensions](https://www.nuget.org/packages/DapperExtensions). - -## A quick look at the elastic database client library -With the elastic database client library, you define partitions of your application data called *shardlets*, map them to databases, and identify them by *sharding keys*. You can have as many databases as you need and distribute your shardlets across these databases. The mapping of sharding key values to the databases is stored by a shard map provided by the library’s APIs. This capability is called **shard map management**. The shard map also serves as the broker of database connections for requests that carry a sharding key. This capability is referred to as **data-dependent routing**. - -![Shard maps and data-dependent routing][1] - -The shard map manager protects users from inconsistent views into shardlet data that can occur when concurrent shardlet management operations are happening on the databases. 
To do so, the shard maps broker the database connections for an application built with the library. When shard management operations could impact the shardlet, this allows the shard map functionality to automatically kill a database connection. - -Instead of using the traditional way to create connections for Dapper, you need to use the [OpenConnectionForKey method](/previous-versions/azure/dn824099(v=azure.100)). This ensures that all the validation takes place and connections are managed properly when any data moves between shards. - -### Requirements for Dapper integration -When working with both the elastic database client library and the Dapper APIs, you want to retain the following properties: - -* **Scale out**: We want to add or remove databases from the data tier of the sharded application as necessary for the capacity demands of the application. -* **Consistency**: Since the application is scaled out using sharding, you need to perform data-dependent routing. We want to use the data-dependent routing capabilities of the library to do so. In particular, you want to retain the validation and consistency guarantees provided by connections that are brokered through the shard map manager in order to avoid corruption or wrong query results. This ensures that connections to a given shardlet are rejected or stopped if (for instance) the shardlet is currently moved to a different shard using Split/Merge APIs. -* **Object Mapping**: We want to retain the convenience of the mappings provided by Dapper to translate between classes in the application and the underlying database structures. - -The following section provides guidance for these requirements for applications based on **Dapper** and **DapperExtensions**. - -## Technical guidance -### Data-dependent routing with Dapper -With Dapper, the application is typically responsible for creating and opening the connections to the underlying database. 
Given a type T by the application, Dapper returns query results as .NET collections of type T. Dapper performs the mapping from the T-SQL result rows to the objects of type T. Similarly, Dapper maps .NET objects into SQL values or parameters for data manipulation language (DML) statements. Dapper offers this functionality via extension methods on the regular [SqlConnection](/dotnet/api/system.data.sqlclient.sqlconnection) object from the ADO .NET SQL Client libraries. The SQL connection returned by the Elastic Scale APIs for DDR are also regular [SqlConnection](/dotnet/api/system.data.sqlclient.sqlconnection) objects. This allows us to directly use Dapper extensions over the type returned by the client library’s DDR API, as it is also a simple SQL Client connection. - -These observations make it straightforward to use connections brokered by the elastic database client library for Dapper. - -This code example (from the accompanying sample) illustrates the approach where the sharding key is provided by the application to the library to broker the connection to the right shard. - -```csharp - using (SqlConnection sqlconn = shardingLayer.ShardMap.OpenConnectionForKey( - key: tenantId1, - connectionString: connStrBldr.ConnectionString, - options: ConnectionOptions.Validate)) - { - var blog = new Blog { Name = name }; - sqlconn.Execute(@" - INSERT INTO - Blog (Name) - VALUES (@name)", new { name = blog.Name } - ); - } -``` - -The call to the [OpenConnectionForKey](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.rangeshardmap-1) API replaces the default creation and opening of a SQL Client connection. 
The [OpenConnectionForKey](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.rangeshardmap-1) call takes the arguments that are required for data-dependent routing: - -* The shard map to access the data-dependent routing interfaces -* The sharding key to identify the shardlet -* The credentials (user name and password) to connect to the shard - -The shard map object creates a connection to the shard that holds the shardlet for the given sharding key. The elastic database client APIs also tag the connection to implement its consistency guarantees. Since the call to [OpenConnectionForKey](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.rangeshardmap-1) returns a regular SQL Client connection object, the subsequent call to the **Execute** extension method from Dapper follows the standard Dapper practice. - -Queries work very much the same way - you first open the connection using [OpenConnectionForKey](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.rangeshardmap-1) from the client API. Then you use the regular Dapper extension methods to map the results of your SQL query into .NET objects: - -```csharp - using (SqlConnection sqlconn = shardingLayer.ShardMap.OpenConnectionForKey( - key: tenantId1, - connectionString: connStrBldr.ConnectionString, - options: ConnectionOptions.Validate )) - { - // Display all Blogs for tenant 1 - IEnumerable<Blog> result = sqlconn.Query<Blog>(@" - SELECT * - FROM Blog - ORDER BY Name"); - - Console.WriteLine("All blogs for tenant id {0}:", tenantId1); - foreach (var item in result) - { - Console.WriteLine(item.Name); - } - } -``` - -Note that the **using** block with the DDR connection scopes all database operations within the block to the one shard where tenantId1 is kept. The query only returns blogs stored on the current shard, but not the ones stored on any other shards.
- -## Data-dependent routing with Dapper and DapperExtensions -Dapper comes with an ecosystem of additional extensions that can provide further convenience and abstraction from the database when developing database applications. DapperExtensions is an example. - -Using DapperExtensions in your application does not change how database connections are created and managed. It is still the application’s responsibility to open connections, and regular SQL Client connection objects are expected by the extension methods. We can rely on the [OpenConnectionForKey](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.rangeshardmap-1) as outlined above. As the following code samples show, the only change is that you no longer have to write the T-SQL statements: - -```csharp - using (SqlConnection sqlconn = shardingLayer.ShardMap.OpenConnectionForKey( - key: tenantId2, - connectionString: connStrBldr.ConnectionString, - options: ConnectionOptions.Validate)) - { - var blog = new Blog { Name = name2 }; - sqlconn.Insert(blog); - } -``` - -And here is the code sample for the query: - -```csharp - using (SqlConnection sqlconn = shardingLayer.ShardMap.OpenConnectionForKey( - key: tenantId2, - connectionString: connStrBldr.ConnectionString, - options: ConnectionOptions.Validate)) - { - // Display all Blogs for tenant 2 - IEnumerable<Blog> result = sqlconn.GetList<Blog>(); - Console.WriteLine("All blogs for tenant id {0}:", tenantId2); - foreach (var item in result) - { - Console.WriteLine(item.Name); - } - } -``` - -### Handling transient faults -The Microsoft Patterns & Practices team published the [Transient Fault Handling Application Block](/previous-versions/msp-n-p/hh680934(v=pandp.50)) to help application developers mitigate common transient fault conditions encountered when running in the cloud. For more information, see [Perseverance, Secret of All Triumphs: Using the Transient Fault Handling Application Block](/previous-versions/msp-n-p/dn440719(v=pandp.60)).
- -The code sample relies on the transient fault library to protect against transient faults. - -```csharp - SqlDatabaseUtils.SqlRetryPolicy.ExecuteAction(() => - { - using (SqlConnection sqlconn = - shardingLayer.ShardMap.OpenConnectionForKey(tenantId2, connStrBldr.ConnectionString, ConnectionOptions.Validate)) - { - var blog = new Blog { Name = name2 }; - sqlconn.Insert(blog); - } - }); -``` - -**SqlDatabaseUtils.SqlRetryPolicy** in the code above is defined as a **SqlDatabaseTransientErrorDetectionStrategy** with a retry count of 10, and 5 seconds wait time between retries. If you are using transactions, make sure that your retry scope goes back to the beginning of the transaction in the case of a transient fault. - -## Limitations -The approaches outlined in this document entail a couple of limitations: - -* The sample code for this document does not demonstrate how to manage schema across shards. -* Given a request, we assume that all its database processing is contained within a single shard as identified by the sharding key provided by the request. However, this assumption does not always hold, for example, when it is not possible to make a sharding key available. To address this, the elastic database client library includes the [MultiShardQuery class](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.query.multishardexception). The class implements a connection abstraction for querying over several shards. Using MultiShardQuery in combination with Dapper is beyond the scope of this document. - -## Conclusion -Applications using Dapper and DapperExtensions can easily benefit from elastic database tools for Azure SQL Database. 
Through the steps outlined in this document, those applications can use the tool's capability for data-dependent routing by changing the creation and opening of new [SqlConnection](/dotnet/api/system.data.sqlclient.sqlconnection) objects to use the [OpenConnectionForKey](/dotnet/api/microsoft.azure.sqldatabase.elasticscale.shardmanagement.rangeshardmap-1) call of the elastic database client library. This limits the application changes required to those places where new connections are created and opened. - -[!INCLUDE [elastic-scale-include](../../../includes/elastic-scale-include.md)] - - -[1]: ./media/elastic-scale-working-with-dapper/dapperimage1.png \ No newline at end of file diff --git a/articles/azure-sql/database/elastic-transactions-overview.md b/articles/azure-sql/database/elastic-transactions-overview.md deleted file mode 100644 index cfc60cae96d36..0000000000000 --- a/articles/azure-sql/database/elastic-transactions-overview.md +++ /dev/null @@ -1,279 +0,0 @@ ---- -title: Distributed transactions across cloud databases -description: Overview of Elastic Database Transactions with Azure SQL Database and Azure SQL Managed Instance. -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1, ignite-fall-2021 -ms.topic: conceptual -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 11/02/2021 ---- -# Distributed transactions across cloud databases -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - - -Elastic database transactions for Azure SQL Database and Azure SQL Managed Instance allow you to run transactions that span several databases. Elastic database transactions are available for .NET applications using ADO.NET and integrate with the familiar programming experience using the [System.Transaction](/dotnet/api/system.transactions) classes. To get the library, see [.NET Framework 4.6.1 (Web Installer)](https://www.microsoft.com/download/details.aspx?id=49981). 
-Additionally, for managed instance, distributed transactions are available in [Transact-SQL](/sql/t-sql/language-elements/begin-distributed-transaction-transact-sql). - -On premises, such a scenario usually requires running Microsoft Distributed Transaction Coordinator (MSDTC). Since MSDTC isn't available for Platform-as-a-Service applications in Azure, the ability to coordinate distributed transactions has now been directly integrated into SQL Database or SQL Managed Instance. Applications can connect to any database to launch distributed transactions, and one of the databases or servers will transparently coordinate the distributed transaction, as shown in the following figure. - -In this document, the terms "distributed transactions" and "elastic database transactions" are considered synonyms and will be used interchangeably. - - ![Distributed transactions with Azure SQL Database using elastic database transactions ][1] - -## Common scenarios - -Elastic database transactions enable applications to make atomic changes to data stored in several different databases. Both SQL Database and SQL Managed Instance support client-side development experiences in C# and .NET. A server-side experience (code written in stored procedures or server-side scripts) using [Transact-SQL](/sql/t-sql/language-elements/begin-distributed-transaction-transact-sql) is available for SQL Managed Instance only. - -> [!IMPORTANT] -> Running elastic database transactions between Azure SQL Database and Azure SQL Managed Instance is not supported. Elastic database transactions can only span across a set of databases in SQL Database or a set of databases across managed instances. - -Elastic database transactions target the following scenarios: - -* Multi-database applications in Azure: With this scenario, data is vertically partitioned across several databases in SQL Database or SQL Managed Instance such that different kinds of data reside on different databases.
Some operations require changes to data, which is kept in two or more databases. The application uses elastic database transactions to coordinate the changes across databases and ensure atomicity. -* Sharded database applications in Azure: With this scenario, the data tier uses the [Elastic Database client library](elastic-database-client-library.md) or self-sharding to horizontally partition the data across many databases in SQL Database or SQL Managed Instance. One prominent use case is the need to perform atomic changes for a sharded multi-tenant application when changes span tenants. Think for instance of a transfer from one tenant to another, both residing on different databases. A second case is fine-grained sharding to accommodate capacity needs for a large tenant, which in turn typically implies that some atomic operations need to stretch across several databases used for the same tenant. A third case is atomic updates to reference data that are replicated across databases. Atomic, transacted, operations along these lines can now be coordinated across several databases. - Elastic database transactions use two phase commit to ensure transaction atomicity across databases. It's a good fit for transactions that involve fewer than 100 databases at a time within a single transaction. These limits aren't enforced, but one should expect performance and success rates for elastic database transactions to suffer when exceeding these limits. - -## Installation and migration - -The capabilities for elastic database transactions are provided through updates to the .NET libraries System.Data.dll and System.Transactions.dll. The DLLs ensure that two-phase commit is used where necessary to ensure atomicity. To start developing applications using elastic database transactions, install [.NET Framework 4.6.1](https://www.microsoft.com/download/details.aspx?id=49981) or a later version. 
When running on an earlier version of the .NET framework, transactions will fail to promote to a distributed transaction and an exception will be raised. - -After installation, you can use the distributed transaction APIs in System.Transactions with connections to SQL Database and SQL Managed Instance. If you have existing MSDTC applications using these APIs, rebuild your existing applications for .NET 4.6 after installing the 4.6.1 Framework. If your projects target .NET 4.6, they'll automatically use the updated DLLs from the new Framework version and distributed transaction API calls in combination with connections to SQL Database or SQL Managed Instance will now succeed. - -Remember that elastic database transactions don't require installing MSDTC. Instead, elastic database transactions are directly managed by and within the service. This significantly simplifies cloud scenarios since a deployment of MSDTC isn't necessary to use distributed transactions with SQL Database or SQL Managed Instance. Section 4 explains in more detail how to deploy elastic database transactions and the required .NET framework together with your cloud applications to Azure. - -## .NET installation for Azure Cloud Services - -Azure provides several offerings to host .NET applications. A comparison of the different offerings is available in [Azure App Service, Cloud Services, and Virtual Machines comparison](/azure/architecture/guide/technology-choices/compute-decision-tree). If the guest OS of the offering is smaller than .NET 4.6.1 required for elastic transactions, you need to upgrade the guest OS to 4.6.1. - -For Azure App Service, upgrades to the guest OS are currently not supported. For Azure Virtual Machines, simply log into the VM and run the installer for the latest .NET framework. For Azure Cloud Services, you need to include the installation of a newer .NET version into the startup tasks of your deployment. 
The concepts and steps are documented in [Install .NET on a Cloud Service Role](../../cloud-services/cloud-services-dotnet-install-dotnet.md). - -Note that the installer for .NET 4.6.1 may require more temporary storage during the bootstrapping process on Azure cloud services than the installer for .NET 4.6. To ensure a successful installation, you need to increase temporary storage for your Azure cloud service in your ServiceDefinition.csdef file in the LocalResources section and the environment settings of your startup task, as shown in the following sample: - -```xml - -... - - - - - - - ... - - - - - - - - - -``` - -## .NET development experience - -### Multi-database applications - -The following sample code uses the familiar programming experience with .NET System.Transactions. The TransactionScope class establishes an ambient transaction in .NET. (An "ambient transaction" is one that lives in the current thread.) All connections opened within the TransactionScope participate in the transaction. If different databases participate, the transaction is automatically elevated to a distributed transaction. The outcome of the transaction is controlled by setting the scope to complete to indicate a commit. 
- -```csharp -using (var scope = new TransactionScope()) -{ - using (var conn1 = new SqlConnection(connStrDb1)) - { - conn1.Open(); - SqlCommand cmd1 = conn1.CreateCommand(); - cmd1.CommandText = string.Format("insert into T1 values(1)"); - cmd1.ExecuteNonQuery(); - } - using (var conn2 = new SqlConnection(connStrDb2)) - { - conn2.Open(); - var cmd2 = conn2.CreateCommand(); - cmd2.CommandText = string.Format("insert into T2 values(2)"); - cmd2.ExecuteNonQuery(); - } - scope.Complete(); -} -``` - -### Sharded database applications - -Elastic database transactions for SQL Database and SQL Managed Instance also support coordinating distributed transactions where you use the OpenConnectionForKey method of the elastic database client library to open connections for a scaled out data tier. Consider cases where you need to guarantee transactional consistency for changes across several different sharding key values. Connections to the shards hosting the different sharding key values are brokered using OpenConnectionForKey. In the general case, the connections can be to different shards such that ensuring transactional guarantees requires a distributed transaction. -The following code sample illustrates this approach. 
It assumes that a variable called shardmap is used to represent a shard map from the elastic database client library: - -```csharp -using (var scope = new TransactionScope()) -{ - using (var conn1 = shardmap.OpenConnectionForKey(tenantId1, credentialsStr)) - { - SqlCommand cmd1 = conn1.CreateCommand(); - cmd1.CommandText = string.Format("insert into T1 values(1)"); - cmd1.ExecuteNonQuery(); - } - using (var conn2 = shardmap.OpenConnectionForKey(tenantId2, credentialsStr)) - { - var cmd2 = conn2.CreateCommand(); - cmd2.CommandText = string.Format("insert into T1 values(2)"); - cmd2.ExecuteNonQuery(); - } - scope.Complete(); -} -``` - -## Transact-SQL development experience - -Server-side distributed transactions using Transact-SQL are available only for Azure SQL Managed Instance. A distributed transaction can be executed only between Managed Instances that belong to the same [Server trust group](../managed-instance/server-trust-group-overview.md). In this scenario, Managed Instances need to use a [linked server](/sql/relational-databases/linked-servers/create-linked-servers-sql-server-database-engine#TsqlProcedure) to reference each other. - -The following sample Transact-SQL code uses [BEGIN DISTRIBUTED TRANSACTION](/sql/t-sql/language-elements/begin-distributed-transaction-transact-sql) to start a distributed transaction. 
- -```sql - -- Configure the Linked Server - -- Add one Azure SQL Managed Instance as Linked Server - EXEC sp_addlinkedserver - @server='RemoteServer', -- Linked server name - @srvproduct='', - @provider='sqlncli', -- SQL Server Native Client - @datasrc='managed-instance-server.46e7afd5bc81.database.windows.net' -- SQL Managed Instance endpoint - - -- Add credentials and options to this Linked Server - EXEC sp_addlinkedsrvlogin - @rmtsrvname = 'RemoteServer', -- Linked server name - @useself = 'false', - @rmtuser = '', -- login - @rmtpassword = '' -- password - - USE AdventureWorks2012; - GO - SET XACT_ABORT ON; - GO - BEGIN DISTRIBUTED TRANSACTION; - -- Delete candidate from local instance. - DELETE AdventureWorks2012.HumanResources.JobCandidate - WHERE JobCandidateID = 13; - -- Delete candidate from remote instance. - DELETE RemoteServer.AdventureWorks2012.HumanResources.JobCandidate - WHERE JobCandidateID = 13; - COMMIT TRANSACTION; - GO -``` - -## Combining .NET and Transact-SQL development experience - -.NET applications that use System.Transactions classes can combine the TransactionScope class with the Transact-SQL statement BEGIN DISTRIBUTED TRANSACTION. Within TransactionScope, an inner transaction that executes BEGIN DISTRIBUTED TRANSACTION will explicitly be promoted to a distributed transaction. Also, when a second SqlConnection is opened within the TransactionScope it will be implicitly promoted to a distributed transaction. Once a distributed transaction is started, all subsequent transaction requests, whether they are coming from .NET or Transact-SQL, will join the parent distributed transaction. 
As a consequence, all nested transaction scopes initiated by the BEGIN statement will end up in the same transaction, and COMMIT/ROLLBACK statements will have the following effect on the overall outcome: - * The COMMIT statement will not have any effect on the transaction scope initiated by the BEGIN statement, that is, no results will be committed before the Complete() method is invoked on the TransactionScope object. If the TransactionScope object is destroyed before being completed, all changes done within the scope are rolled back. - * The ROLLBACK statement will cause the entire TransactionScope to roll back. Any attempts to enlist new transactions within the TransactionScope will fail afterwards, as well as any attempt to invoke Complete() on the TransactionScope object. - -Here is an example where a transaction is explicitly promoted to a distributed transaction with Transact-SQL. - -```csharp -using (TransactionScope s = new TransactionScope()) -{ - using (SqlConnection conn = new SqlConnection(DB0_ConnectionString)) - { - conn.Open(); - - // Transaction is here promoted to distributed by BEGIN statement - // - Helper.ExecuteNonQueryOnOpenConnection(conn, "BEGIN DISTRIBUTED TRAN"); - // ... - } - - using (SqlConnection conn2 = new SqlConnection(DB1_ConnectionString)) - { - conn2.Open(); - // ... - } - - s.Complete(); -} -``` - -The following example shows a transaction that is implicitly promoted to a distributed transaction once the second SqlConnection was started within the TransactionScope. - -```csharp -using (TransactionScope s = new TransactionScope()) -{ - using (SqlConnection conn = new SqlConnection(DB0_ConnectionString)) - { - conn.Open(); - // ... - } - - using (SqlConnection conn = new SqlConnection(DB1_ConnectionString)) - { - // Because this is the second SqlConnection within the TransactionScope, the transaction is implicitly promoted to distributed here. - // - conn.Open(); - Helper.ExecuteNonQueryOnOpenConnection(conn, "BEGIN DISTRIBUTED TRAN"); - Helper.ExecuteNonQueryOnOpenConnection(conn, lsQuery); - // ... 
- } - - s.Complete(); -} -``` - -## Transactions for SQL Database - -Elastic database transactions are supported across different servers in Azure SQL Database. When transactions cross server boundaries, the participating servers first need to be entered into a mutual communication relationship. Once the communication relationship has been established, any database in any of the two servers can participate in elastic transactions with databases from the other server. With transactions spanning more than two servers, a communication relationship needs to be in place for any pair of servers. - -Use the following PowerShell cmdlets to manage cross-server communication relationships for elastic database transactions: - -* **New-AzSqlServerCommunicationLink**: Use this cmdlet to create a new communication relationship between two servers in Azure SQL Database. The relationship is symmetric, which means both servers can initiate transactions with the other server. -* **Get-AzSqlServerCommunicationLink**: Use this cmdlet to retrieve existing communication relationships and their properties. -* **Remove-AzSqlServerCommunicationLink**: Use this cmdlet to remove an existing communication relationship. - -## Transactions for SQL Managed Instance - -Distributed transactions are supported across databases within multiple instances. When transactions cross managed instance boundaries, the participating instances need to be in a mutual security and communication relationship. This is done by creating a [Server Trust Group](../managed-instance/server-trust-group-overview.md), which can be done by using the Azure portal or Azure PowerShell or the Azure CLI. If instances are not on the same Virtual network then you must configure [Virtual network peering](../../virtual-network/virtual-network-peering-overview.md) and Network security group inbound and outbound rules need to allow ports 5024 and 11000-12000 on all participating Virtual networks. 
- - ![Server Trust Groups on Azure Portal][3] - -The following diagram shows a Server Trust Group with managed instances that can execute distributed transactions with .NET or Transact-SQL: - - ![Distributed transactions with Azure SQL Managed Instance using elastic transactions][2] - -## Monitoring transaction status - -Use Dynamic Management Views (DMVs) to monitor status and progress of your ongoing elastic database transactions. All DMVs related to transactions are relevant for distributed transactions in SQL Database and SQL Managed Instance. You can find the corresponding list of DMVs here: [Transaction Related Dynamic Management Views and Functions (Transact-SQL)](/sql/relational-databases/system-dynamic-management-views/transaction-related-dynamic-management-views-and-functions-transact-sql). - -These DMVs are particularly useful: - -* **sys.dm\_tran\_active\_transactions**: Lists currently active transactions and their status. The UOW (Unit Of Work) column can identify the different child transactions that belong to the same distributed transaction. All transactions within the same distributed transaction carry the same UOW value. For more information, see the [DMV documentation](/sql/relational-databases/system-dynamic-management-views/sys-dm-tran-active-transactions-transact-sql). -* **sys.dm\_tran\_database\_transactions**: Provides additional information about transactions, such as placement of the transaction in the log. For more information, see the [DMV documentation](/sql/relational-databases/system-dynamic-management-views/sys-dm-tran-database-transactions-transact-sql). -* **sys.dm\_tran\_locks**: Provides information about the locks that are currently held by ongoing transactions. For more information, see the [DMV documentation](/sql/relational-databases/system-dynamic-management-views/sys-dm-tran-locks-transact-sql). 
- -## Limitations - -The following limitations currently apply to elastic database transactions in *SQL Database*: - -* Only transactions across databases in SQL Database are supported. Other [X/Open XA](https://en.wikipedia.org/wiki/X/Open_XA) resource providers and databases outside of SQL Database can't participate in elastic database transactions. That means that elastic database transactions can't stretch across on premises SQL Server and Azure SQL Database. For distributed transactions on premises, continue to use MSDTC. -* Only client-coordinated transactions from a .NET application are supported. Server-side support for T-SQL such as BEGIN DISTRIBUTED TRANSACTION is planned, but not yet available. -* Transactions across WCF services aren't supported. For example, you have a WCF service method that executes a transaction. Enclosing the call within a transaction scope will fail as a [System.ServiceModel.ProtocolException](/dotnet/api/system.servicemodel.protocolexception). - -The following limitations currently apply to distributed transactions in *SQL Managed Instance*: - -* Only transactions across databases in managed instances are supported. Other [X/Open XA](https://en.wikipedia.org/wiki/X/Open_XA) resource providers and databases outside of Azure SQL Managed Instance can't participate in distributed transactions. That means that distributed transactions can't stretch across on-premises SQL Server and Azure SQL Managed Instance. For distributed transactions on premises, continue to use MSDTC. -* Transactions across WCF services aren't supported. For example, you have a WCF service method that executes a transaction. Enclosing the call within a transaction scope will fail as a [System.ServiceModel.ProtocolException](/dotnet/api/system.servicemodel.protocolexception). -* Azure SQL Managed Instance must be part of a [Server trust group](../managed-instance/server-trust-group-overview.md) in order to participate in distributed transaction. 
-* Limitations of [Server trust groups](../managed-instance/server-trust-group-overview.md) affect distributed transactions. -* Managed Instances that participate in distributed transactions need to have connectivity over private endpoints (using private IP address from the virtual network where they are deployed) and need to be mutually referenced using private FQDNs. Client applications can use distributed transactions on private endpoints. Additionally, in cases when Transact-SQL leverages linked servers referencing private endpoints, client applications can use distributed transactions on public endpoints as well. This limitation is explained on the following diagram. - -![Private endpoint connectivity limitation][4] - -## Next steps - -* For questions, reach out to us on the [Microsoft Q&A question page for SQL Database](/answers/topics/azure-sql-database.html). -* For feature requests, add them to the [SQL Database feedback forum](https://feedback.azure.com/forums/217321-sql-database/) or [SQL Managed Instance forum](https://feedback.azure.com/forums/915676-sql-managed-instance). - - - - -[1]: ./media/elastic-transactions-overview/distributed-transactions.png -[2]: ./media/elastic-transactions-overview/sql-mi-distributed-transactions.png -[3]: ./media/elastic-transactions-overview/server-trust-groups-azure-portal.png -[4]: ./media/elastic-transactions-overview/managed-instance-distributed-transactions-private-endpoint-limitations.png diff --git a/articles/azure-sql/database/failover-group-add-elastic-pool-tutorial.md b/articles/azure-sql/database/failover-group-add-elastic-pool-tutorial.md deleted file mode 100644 index c4b6f6ca5e441..0000000000000 --- a/articles/azure-sql/database/failover-group-add-elastic-pool-tutorial.md +++ /dev/null @@ -1,576 +0,0 @@ ---- -title: "Tutorial: Add an elastic pool to a failover group" -description: Add an Azure SQL Database elastic pool to a failover group using the Azure portal, PowerShell, or Azure CLI. 
-services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: seo-lt-2019 sqldbrb=1, devx-track-azurecli -ms.topic: tutorial -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 01/26/2022 ---- -# Tutorial: Add an Azure SQL Database elastic pool to a failover group - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!div class="op_single_selector"] -> -> - [Azure SQL Database (single database)](failover-group-add-single-database-tutorial.md) -> - [Azure SQL Database (elastic pool)](failover-group-add-elastic-pool-tutorial.md) -> - [Azure SQL Managed Instance](../managed-instance/failover-group-add-instance-tutorial.md) - -Configure an [auto-failover group](auto-failover-group-sql-db.md) for an Azure SQL Database elastic pool and test failover using the Azure portal. - -In this tutorial, you'll learn how to: - -> [!div class="checklist"] -> -> - Create a single database. -> - Add the database to an elastic pool. -> - Create a failover group for two elastic pools between two servers. -> - Test failover. - -## Prerequisites - -# [Azure portal](#tab/azure-portal) - -To complete this tutorial, make sure you have: - -- An Azure subscription. [Create a free account](https://azure.microsoft.com/free/) if you don't already have one. - -# [PowerShell](#tab/azure-powershell) - -To complete the tutorial, make sure you have the following items: - -- An Azure subscription. [Create a free account](https://azure.microsoft.com/free/) if you don't already have one. -- [Azure PowerShell](/powershell/azure/) - -# [Azure CLI](#tab/azure-cli) - -[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] - ---- - -## 1 - Create a single database - -In this step, you create a resource group, server, single database, and server-level IP firewall rule for access to the server. 
- -[!INCLUDE [sql-database-create-single-database](../includes/sql-database-create-single-database.md)] - -## 2 - Add the database to an elastic pool - -In this step, you'll create an elastic pool and add your database to it. - -# [Azure portal](#tab/azure-portal) - -Create your elastic pool using the Azure portal. - -1. Select **Azure SQL** in the left-hand menu of the Azure portal. If **Azure SQL** isn't in the list, select **All services**, then type "Azure SQL" in the search box. (Optional) Select the star next to **Azure SQL** to favorite it and add it as an item in the left-hand navigation. -1. Select **+ Add** to open the **Select SQL deployment option** page. You can view additional information about the different databases by selecting Show details on the Databases tile. -1. Select **Elastic pool** from the **Resource type** drop-down in the **SQL Databases** tile. Select **Create** to create your elastic pool. - - ![Select elastic pool](./media/failover-group-add-elastic-pool-tutorial/select-azure-sql-elastic-pool.png) - -1. Configure your elastic pool with the following values: - - **Name**: Provide a unique name for your elastic pool, such as `myElasticPool`. - - **Subscription**: Select your subscription from the drop-down. - - **ResourceGroup**: Select `myResourceGroup` from the drop-down, the resource group you created in section 1. - - **Server**: Select the server you created in section 1 from the drop-down. - - ![Create new server for elastic pool](./media/failover-group-add-elastic-pool-tutorial/use-existing-server-for-elastic-pool.png) - - - **Compute + storage**: Select **Configure elastic pool** to configure your compute, storage, and add your single database to your elastic pool. On the **Pool Settings** tab, leave the default of Gen5, with 2 vCores and 32gb. - -1. On the **Configure** page, select the **Databases** tab, and then choose to **Add database**. 
Choose the database you created in section 1 and then select **Apply** to add it to your elastic pool. Select **Apply** again to apply your elastic pool settings and close the **Configure** page. - - ![Add database to elastic pool](./media/failover-group-add-elastic-pool-tutorial/add-database-to-elastic-pool.png) - -1. Select **Review + create** to review your elastic pool settings and then select **Create** to create your elastic pool. - -# [PowerShell](#tab/azure-powershell) - -Create your elastic pools and secondary server using PowerShell. - - ```powershell-interactive - # Set variables for your server and database - # $subscriptionId = '' - # $resourceGroupName = "myResourceGroup-$(Get-Random)" - # $location = "East US" - # $adminLogin = "azureuser" - # $password = "PWD27!"+(New-Guid).Guid - # $serverName = "mysqlserver-$(Get-Random)" - $poolName = "myElasticPool" - $databaseName = "mySampleDatabase" - $drLocation = "West US" - $drServerName = "mysqlsecondary-$(Get-Random)" - $failoverGroupName = "failovergrouptutorial-$(Get-Random)" - - # The ip address range that you want to allow to access your server - # Leaving at 0.0.0.0 will prevent outside-of-azure connections - # $startIp = "0.0.0.0" - # $endIp = "0.0.0.0" - - # Show randomized variables - Write-host "DR Server name is" $drServerName - Write-host "Failover group name is" $failoverGroupName - - # Create primary Gen5 elastic 2 vCore pool - Write-host "Creating elastic pool..." - $elasticPool = New-AzSqlElasticPool -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -ElasticPoolName $poolName ` - -Edition "GeneralPurpose" ` - -vCore 2 ` - -ComputeGeneration Gen5 - $elasticPool - - # Add single db into elastic pool - Write-host "Creating elastic pool..." 
- $addDatabase = Set-AzSqlDatabase -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -DatabaseName $databaseName ` - -ElasticPoolName $poolName - $addDatabase - ``` - -This portion of the tutorial uses the following PowerShell cmdlets: - -| Command | Notes | -|---|---| -| [New-AzSqlElasticPool](/powershell/module/az.sql/new-azsqlelasticpool) | Creates an elastic database pool for an Azure SQL Database.| -| [Set-AzSqlDatabase](/powershell/module/az.sql/set-azsqldatabase) | Sets properties for a database, or moves an existing database into an elastic pool. | - -# [Azure CLI](#tab/azure-cli) - -In this step, you create your elastic pool and add your database to the elastic pool using the Azure CLI. - -### Set additional parameter values to create elastic pool - -Set these additional parameter values for use in creating the elastic pool. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-elastic-pool-to-failover-group-az-cli.sh" id="SetPoolParameterValues"::: - -### Create elastic pool on primary server - -Use the [az sql elastic-pool create](/cli/azure/sql/elastic-pool#az-sql-elastic-pool-create) command to create an elastic pool. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-elastic-pool-to-failover-group-az-cli.sh" id="CreateElasticPool"::: - -### Add database to elastic pool - -Use the [az sql db update](/cli/azure/sql/db#az-sql-db-update) command to add a database to an elastic pool. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-elastic-pool-to-failover-group-az-cli.sh" id="AddDatabaseToPool"::: - -This portion of the tutorial uses the following Azure CLI cmdlets: - -| Command | Notes | -|---|---| -| [az sql elastic-pool create](/cli/azure/sql/elastic-pool#az-sql-elastic-pool-create) | Creates an elastic pool. 
| -| [az sql db update](/cli/azure/sql/db#az-sql-db-update) | Updates a database| - ---- - -## 3 - Create the failover group - -In this step, you'll create a [failover group](auto-failover-group-overview.md) between an existing server and a new server in another region. Then add the elastic pool to the failover group. - -# [Azure portal](#tab/azure-portal) - -Create your failover group using the Azure portal. - -1. Select **Azure SQL** in the left-hand menu of the [Azure portal](https://portal.azure.com). If **Azure SQL** isn't in the list, select **All services**, then type Azure SQL in the search box. (Optional) Select the star next to **Azure SQL** to favorite it and add it as an item in the left-hand navigation. -1. Select the elastic pool created in the previous section, such as `myElasticPool`. -1. On the **Overview** pane, select the name of the server under **Server name** to open the settings for the server. - - ![Open server for elastic pool](./media/failover-group-add-elastic-pool-tutorial/server-for-elastic-pool.png) - -1. Select **Failover groups** under the **Settings** pane, and then select **Add group** to create a new failover group. - - ![Add new failover group](./media/failover-group-add-elastic-pool-tutorial/elastic-pool-failover-group.png) - -1. On the **Failover Group** page, enter or select the following values, and then select **Create**: - - **Failover group name**: Type in a unique failover group name, such as `failovergrouptutorial`. - - **Secondary server**: Select the option to *configure required settings* and then choose to **Create a new server**. Alternatively, you can choose an already-existing server as the secondary server. After entering the following values for your new secondary server, select **Select**. - - **Server name**: Type in a unique name for the secondary server, such as `mysqlsecondary`. - - **Server admin login**: Type `azureuser` - - **Password**: Type a complex password that meets password requirements. 
- - **Location**: Choose a location from the drop-down, such as `East US`. This location can't be the same location as your primary server. - - > [!NOTE] - > The server login and firewall settings must match that of your primary server. - - ![Create a secondary server for the failover group](./media/failover-group-add-elastic-pool-tutorial/create-secondary-failover-server.png) - -1. Select **Databases within the group** then select the elastic pool you created in section 2. A warning should appear, prompting you to create an elastic pool on the secondary server. Select the warning, and then select **OK** to create the elastic pool on the secondary server. - - ![Add elastic pool to the failover group](./media/failover-group-add-elastic-pool-tutorial/add-elastic-pool-to-failover-group.png) - -1. Select **Select** to apply your elastic pool settings to the failover group, and then select **Create** to create your failover group. Adding the elastic pool to the failover group will automatically start the geo-replication process. - -# [PowerShell](#tab/azure-powershell) - -Create your failover group using PowerShell. - - ```powershell-interactive - # Set variables for your server and database - # $subscriptionId = '' - # $resourceGroupName = "myResourceGroup-$(Get-Random)" - # $location = "East US" - # $adminLogin = "azureuser" - # $password = "PWD27!"+(New-Guid).Guid - # $serverName = "mysqlserver-$(Get-Random)" - # $poolName = "myElasticPool" - # $databaseName = "mySampleDatabase" - # $drLocation = "West US" - # $drServerName = "mysqlsecondary-$(Get-Random)" - $failoverGroupName = "failovergrouptutorial-$(Get-Random)" - - # Create a secondary server in the failover region - Write-host "Creating a secondary server in the failover region..." 
- New-AzSqlServer -ResourceGroupName $resourceGroupName ` - -ServerName $drServerName ` - -Location $drLocation ` - -SqlAdministratorCredentials $(New-Object -TypeName System.Management.Automation.PSCredential ` - -ArgumentList $adminlogin, $(ConvertTo-SecureString -String $password -AsPlainText -Force)) - Write-host "Secondary server =" $drServerName - - # Create a server firewall rule that allows access from the specified IP range - Write-host "Configuring firewall for secondary server..." - New-AzSqlServerFirewallRule -ResourceGroupName $resourceGroupName ` - -ServerName $drServerName ` - -FirewallRuleName "AllowedIPs" -StartIpAddress $startIp -EndIpAddress $endIp - Write-host "Firewall configured" - - # Create secondary Gen5 elastic 2 vCore pool - Write-host "Creating secondary elastic pool..." - $elasticPool = New-AzSqlElasticPool -ResourceGroupName $resourceGroupName ` - -ServerName $drServerName ` - -ElasticPoolName $poolName ` - -Edition "GeneralPurpose" ` - -vCore 2 ` - -ComputeGeneration Gen5 - $elasticPool - - # Create a failover group between the servers - Write-host "Creating failover group..." - New-AzSqlDatabaseFailoverGroup ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -PartnerServerName $drServerName ` - -FailoverGroupName $failoverGroupName ` - -FailoverPolicy Automatic ` - -GracePeriodWithDataLossHours 2 - Write-host "Failover group created successfully." - - # Add elastic pool to the failover group - Write-host "Enumerating databases in elastic pool...." - $FailoverGroup = Get-AzSqlDatabaseFailoverGroup ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -FailoverGroupName $failoverGroupName - $databases = Get-AzSqlElasticPoolDatabase ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -ElasticPoolName $poolName - Write-host "Adding databases to failover group..." 
- $failoverGroup = $failoverGroup | Add-AzSqlDatabaseToFailoverGroup ` - -Database $databases - $failoverGroup - ``` - -This portion of the tutorial uses the following PowerShell cmdlets: - -| Command | Notes | -|---|---| -| [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) | Creates a server that hosts databases and elastic pools. | -| [New-AzSqlServerFirewallRule](/powershell/module/az.sql/new-azsqlserverfirewallrule) | Creates a firewall rule for a server. | -| [New-AzSqlElasticPool](/powershell/module/az.sql/new-azsqlelasticpool) | Creates an elastic pool for an Azure SQL Database.| -| [New-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/new-azsqldatabasefailovergroup) | Creates a new failover group. | -| [Add-AzSqlDatabaseToFailoverGroup](/powershell/module/az.sql/add-azsqldatabasetofailovergroup) | Adds one or more Azure SQL databases to a failover group. | -| [Get-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/get-azsqldatabasefailovergroup) | Gets or lists Azure SQL Database failover groups. | - -# [Azure CLI](#tab/azure-cli) - -In this step, you use the Azure CLI to create your secondary server, failover group, elastic pool, and add a database to the failover group. - -### Set additional parameter values to create failover group - -Set these additional parameter values for use in creating the failover group. - -Change the failover location as appropriate for your environment. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-elastic-pool-to-failover-group-az-cli.sh" id="SetFailoverParameterValues"::: - -### Create secondary server - -Use the [az sql server create](/cli/azure/sql/server#az-sql-server-create) command to create a secondary server. -> [!NOTE] -> The server login and firewall settings must match that of your primary server. 
- -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-elastic-pool-to-failover-group-az-cli.sh" id="CreateSecondaryServer"::: - -### Create elastic pool on secondary server - -Use the [az sql elastic-pool create](/cli/azure/sql/elastic-pool#az-sql-elastic-pool-create) command to create an elastic pool on the secondary server. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-elastic-pool-to-failover-group-az-cli.sh" id="CreateElasticPoolOnSecondary"::: - -### Create failover group - -Use the [az sql failover-group create](/cli/azure/sql/failover-group#az-sql-failover-group-create) command to create a failover group. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-elastic-pool-to-failover-group-az-cli.sh" id="CreateFailoverGroup"::: - -### Add database to the failover group - -Use the [az sql failover-group update](/cli/azure/sql/failover-group#az-sql-failover-group-update) command to add a database to the failover group. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-elastic-pool-to-failover-group-az-cli.sh" id="AddDatabaseToFailoverGroup"::: - -### Azure CLI failover group creation reference - -This portion of the tutorial uses the following Azure CLI cmdlets: - -| Command | Notes | -|---|---| -| [az sql server create](/cli/azure/sql/server#az-sql-server-create) | Creates a server that hosts databases and elastic pools. | -| [az sql elastic-pool create](/cli/azure/sql/elastic-pool#az-sql-elastic-pool-create) | Creates an elastic pool.| -| [az sql failover-group create](/cli/azure/sql/failover-group#az-sql-failover-group-create) | Creates a failover group. 
| -| [az sql failover-group update](/cli/azure/sql/failover-group#az-sql-failover-group-update) | Updates a failover group.| - ---- - -## 4 - Test failover - -In this step, you'll fail your failover group over to the secondary server, and then fail back using the Azure portal. - -# [Azure portal](#tab/azure-portal) - -Test failover of your failover group using the Azure portal. - -1. Select **Azure SQL** in the left-hand menu of the [Azure portal](https://portal.azure.com). If **Azure SQL** isn't in the list, select **All services**, then type Azure SQL in the search box. (Optional) Select the star next to **Azure SQL** to favorite it and add it as an item in the left-hand navigation. -1. Select the elastic pool created in the previous section, such as `myElasticPool`. -1. Select the name of the server under **Server name** to open the settings for the server. - - ![Open server for elastic pool](./media/failover-group-add-elastic-pool-tutorial/server-for-elastic-pool.png) - -1. Select **Failover groups** under the **Settings** pane and then choose the failover group you created in section 2. - - ![Select the failover group from the portal](./media/failover-group-add-elastic-pool-tutorial/select-failover-group.png) - -1. Review which server is primary, and which server is secondary. -1. Select **Failover** from the task pane to fail over your failover group containing your elastic pool. -1. Select **Yes** on the warning that notifies you that TDS sessions will be disconnected. - - ![Fail over your failover group containing your database](./media/failover-group-add-elastic-pool-tutorial/failover-sql-db.png) - -1. Review which server is primary, which server is secondary. If failover succeeded, the two servers should have swapped roles. -1. Select **Failover** again to fail the failover group back to the original settings. - -# [PowerShell](#tab/azure-powershell) - -Test failover of your failover group using PowerShell. 
- - ```powershell-interactive - # Set variables for your server and database - # $subscriptionId = '' - # $resourceGroupName = "myResourceGroup-$(Get-Random)" - # $location = "East US" - # $adminLogin = "azureuser" - # $password = "PWD27!"+(New-Guid).Guid - # $serverName = "mysqlserver-$(Get-Random)" - # $poolName = "myElasticPool" - # $databaseName = "mySampleDatabase" - # $drLocation = "West US" - # $drServerName = "mysqlsecondary-$(Get-Random)" - # $failoverGroupName = "failovergrouptutorial-$(Get-Random)" - - # Check role of secondary replica - Write-host "Confirming the secondary server is secondary...." - (Get-AzSqlDatabaseFailoverGroup ` - -FailoverGroupName $failoverGroupName ` - -ResourceGroupName $resourceGroupName ` - -ServerName $drServerName).ReplicationRole - - # Failover to secondary server - Write-host "Failing over failover group to the secondary..." - Switch-AzSqlDatabaseFailoverGroup ` - -ResourceGroupName $resourceGroupName ` - -ServerName $drServerName ` - -FailoverGroupName $failoverGroupName - Write-host "Failover group failed over to" $drServerName - ``` - -Fail your failover group over to the secondary server, and then fail back using the PowerShell. 
- - ```powershell-interactive - # Set variables for your server and database - # $subscriptionId = '' - # $resourceGroupName = "myResourceGroup-$(Get-Random)" - # $location = "East US" - # $adminLogin = "azureuser" - # $password = "PWD27!"+(New-Guid).Guid - # $serverName = "mysqlserver-$(Get-Random)" - # $poolName = "myElasticPool" - # $databaseName = "mySampleDatabase" - # $drLocation = "West US" - # $drServerName = "mysqlsecondary-$(Get-Random)" - # $failoverGroupName = "failovergrouptutorial-$(Get-Random)" - - # Check role of secondary replica - Write-host "Confirming the secondary server is now primary" - (Get-AzSqlDatabaseFailoverGroup ` - -FailoverGroupName $failoverGroupName ` - -ResourceGroupName $resourceGroupName ` - -ServerName $drServerName).ReplicationRole - - # Revert failover to primary server - Write-host "Failing over failover group to the primary...." - Switch-AzSqlDatabaseFailoverGroup ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -FailoverGroupName $failoverGroupName - Write-host "Failover group failed over to" $serverName - ``` - -This portion of the tutorial uses the following PowerShell cmdlets: - -| Command | Notes | -|---|---| -| [Get-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/get-azsqldatabasefailovergroup) | Gets or lists Azure SQL Database failover groups. | -| [Switch-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/switch-azsqldatabasefailovergroup)| Executes a failover of an Azure SQL Database failover group. | - -# [Azure CLI](#tab/azure-cli) - -Test failover using the Azure CLI. - -### Verify the roles of each server - -Use the [az sql failover-group show](/cli/azure/sql/failover-group#az-sql-failover-group-show) command to confirm the roles of each server in the failover group. 
- -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-elastic-pool-to-failover-group-az-cli.sh" id="VerifyRoles"::: - -### Fail over to the secondary server - -Use the [az sql failover-group set-primary](/cli/azure/sql/failover-group#az-sql-failover-group-set-primary) command to fail over to the secondary server. Use the [az sql failover-group show](/cli/azure/sql/failover-group#az-sql-failover-group-show) command to verify a successful failover. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-elastic-pool-to-failover-group-az-cli.sh" id="FailingOver"::: - -### Revert failover group back to the primary server - -Use the [az sql failover-group set-primary](/cli/azure/sql/failover-group#az-sql-failover-group-set-primary) command to fail back to the primary server. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-elastic-pool-to-failover-group-az-cli.sh" id="FailingBack"::: - -### Azure CLI failover group management reference - -This portion of the tutorial uses the following Azure CLI cmdlets: - -| Command | Notes | -|---|---| -| [az sql failover-group show](/cli/azure/sql/failover-group#az-sql-failover-group-show) | Gets the failover groups in a server. | -| [az sql failover-group set-primary](/cli/azure/sql/failover-group#az-sql-failover-group-set-primary) | Set the primary of the failover group by failing over all databases from the current primary server. | - ---- - -## Clean up resources - -Clean up resources by deleting the resource group. - -# [Azure portal](#tab/azure-portal) - -1. Navigate to your resource group in the [Azure portal](https://portal.azure.com). -1. Select **Delete resource group** to delete all the resources in the group, as well as the resource group itself. -1. Type the name of the resource group, `myResourceGroup`, in the textbox, and then select **Delete** to delete the resource group. 
- -# [PowerShell](#tab/azure-powershell) - -Clean up your resources using PowerShell. - - ```powershell-interactive - # Set variables for your server and database - # $resourceGroupName = "myResourceGroup-$(Get-Random)" - - # Clean up resources by removing the resource group - Write-host "Removing resource group..." - Remove-AzResourceGroup -ResourceGroupName $resourceGroupName - Write-host "Resource group removed =" $resourceGroupName - ``` - -This portion of the tutorial uses the following PowerShell cmdlet: - -| Command | Notes | -|---|---| -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Removes a resource group | - -# [Azure CLI](#tab/azure-cli) - -[!INCLUDE [cli-clean-up-resources.md](../../../includes/cli-clean-up-resources.md)] - - ```azurecli - echo "Cleaning up resources by removing the resource group..." - az group delete --name $resourceGroup -y - ``` - -This portion of the tutorial uses the following Azure CLI cmdlets: - -| Command | Notes | -|---|---| -| [az group delete](/cli/azure/vm/extension#az-vm-extension-set) | Deletes a resource group including all nested resources. | - ---- - -> [!IMPORTANT] -> If you want to keep the resource group but delete the secondary database, remove it from the failover group before deleting it. Deleting a secondary database before it is removed from the failover group can cause unpredictable behavior. - -## Full script - -# [PowerShell](#tab/azure-powershell) - -[!code-powershell-interactive[main](../../../powershell_scripts/sql-database/failover-groups/add-elastic-pool-to-failover-group-az-ps.ps1 "Add elastic pool to a failover group")] - -This script uses the following commands. Each command in the table links to command specific documentation. - -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates a resource group in which all resources are stored. 
| -| [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) | Creates a server that hosts databases and elastic pools. | -| [New-AzSqlServerFirewallRule](/powershell/module/az.sql/new-azsqlserverfirewallrule) | Creates a firewall rule for a server. | -| [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase) | Creates a database. | -| [New-AzSqlElasticPool](/powershell/module/az.sql/new-azsqlelasticpool) | Creates an elastic database pool for an Azure SQL Database.| -| [Set-AzSqlDatabase](/powershell/module/az.sql/set-azsqldatabase) | Sets properties for a database, or moves an existing database into an elastic pool. | -| [New-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/new-azsqldatabasefailovergroup) | Creates a new failover group. | -| [Get-AzSqlDatabase](/powershell/module/az.sql/get-azsqldatabase) | Gets one or more databases in SQL Database. | -| [Add-AzSqlDatabaseToFailoverGroup](/powershell/module/az.sql/add-azsqldatabasetofailovergroup) | Adds one or more Azure SQL databases to a failover group. | -| [Get-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/get-azsqldatabasefailovergroup) | Gets or lists Azure SQL Database failover groups. | -| [Switch-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/switch-azsqldatabasefailovergroup)| Executes a failover of an Azure SQL Database failover group. | -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Removes a resource group | - -# [Azure CLI](#tab/azure-cli) - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-elastic-pool-to-failover-group-az-cli.sh" id="FullScript"::: - -# [Azure portal](#tab/azure-portal) - -There are no scripts available for the Azure portal. - ---- - -## Next steps - -In this tutorial, you added an Azure SQL Database elastic pool to a failover group, and tested failover. You learned how to: - -> [!div class="checklist"] -> -> - Create a single database. 
-> - Add the database into an elastic pool. -> - Create a [failover group](auto-failover-group-overview.md) for two elastic pools between two servers. -> - Test failover. - -Advance to the next tutorial on how to migrate using DMS. - -> [!div class="nextstepaction"] -> [Tutorial: Migrate SQL Server to a pooled database using DMS](../../dms/tutorial-sql-server-to-azure-sql.md?toc=/azure/sql-database/toc.json) diff --git a/articles/azure-sql/database/failover-group-add-single-database-tutorial.md b/articles/azure-sql/database/failover-group-add-single-database-tutorial.md deleted file mode 100644 index d3c96f152e3e2..0000000000000 --- a/articles/azure-sql/database/failover-group-add-single-database-tutorial.md +++ /dev/null @@ -1,445 +0,0 @@ ---- -title: "Tutorial: Add a database to a failover group" -description: Add a database in Azure SQL Database to an auto-failover group using the Azure portal, PowerShell, or the Azure CLI. -services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: sqldbrb=1, devx-track-azurecli -ms.topic: tutorial -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 01/26/2022 ---- -# Tutorial: Add an Azure SQL Database to an auto-failover group -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!div class="op_single_selector"] -> * [Azure SQL Database (single database)](failover-group-add-single-database-tutorial.md) -> * [Azure SQL Database (elastic pool)](failover-group-add-elastic-pool-tutorial.md) -> * [Azure SQL Managed Instance](../managed-instance/failover-group-add-instance-tutorial.md) - - -A [failover group](auto-failover-group-sql-db.md) is a declarative abstraction layer that allows you to group multiple geo-replicated databases. Learn to configure a failover group for an Azure SQL Database and test failover using either the Azure portal, PowerShell, or the Azure CLI. 
In this tutorial, you'll learn how to: - -> [!div class="checklist"] -> -> - Create a database in Azure SQL Database -> - Create a failover group for the database between two servers. -> - Test failover. - -## Prerequisites - -# [Azure portal](#tab/azure-portal) - -To complete this tutorial, make sure you have: - -- An Azure subscription. [Create a free account](https://azure.microsoft.com/free/) if you don't already have one. - -# [PowerShell](#tab/azure-powershell) - -To complete the tutorial, make sure you have the following items: - -- An Azure subscription. [Create a free account](https://azure.microsoft.com/free/) if you don't already have one. -- [Azure PowerShell](/powershell/azure/) - -# [Azure CLI](#tab/azure-cli) - -[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] - ---- - -## 1 - Create a database - -In this step, you create a resource group, server, single database, and server-level IP firewall rule for access to the server. - -[!INCLUDE [sql-database-create-single-database](../includes/sql-database-create-single-database.md)] - -## 2 - Create the failover group - -In this step, you'll create a [failover group](auto-failover-group-overview.md) between an existing server and a new server in another region. Then add the sample database to the failover group. - -# [Azure portal](#tab/azure-portal) - -Create your failover group and add your database to it using the Azure portal. - -1. Select **Azure SQL** in the left-hand menu of the [Azure portal](https://portal.azure.com). If **Azure SQL** isn't in the list, select **All services**, then type Azure SQL in the search box. (Optional) Select the star next to **Azure SQL** to favorite it and add it as an item in the left-hand navigation. -1. Select the database created in section 1, such as `mySampleDatabase`. -1. Failover groups can be configured at the server level. Select the name of the server under **Server name** to open the settings for the server.
- - ![Open server for database](./media/failover-group-add-single-database-tutorial/open-sql-db-server.png) - -1. Select **Failover groups** under the **Settings** pane, and then select **Add group** to create a new failover group. - - ![Add new failover group](./media/failover-group-add-single-database-tutorial/sqldb-add-new-failover-group.png) - -1. On the **Failover Group** page, enter or select the following values, and then select **Create**: - - - **Failover group name**: Type in a unique failover group name, such as `failovergrouptutorial`. - - **Secondary server**: Select the option to *configure required settings* and then choose to **Create a new server**. Alternatively, you can choose an already-existing server as the secondary server. After entering the following values, select **Select**. - - **Server name**: Type in a unique name for the secondary server, such as `mysqlsecondary`. - - **Server admin login**: Type `azureuser` - - **Password**: Type a complex password that meets password requirements. - - **Location**: Choose a location from the drop-down, such as `East US`. This location can't be the same location as your primary server. - - > [!NOTE] - > The server login and firewall settings must match that of your primary server. - - ![Create a secondary server for the failover group](./media/failover-group-add-single-database-tutorial/create-secondary-failover-server.png) - - - **Databases within the group**: Once a secondary server is selected, this option becomes unlocked. Select it to **Select databases to add** and then choose the database you created in section 1. Adding the database to the failover group will automatically start the geo-replication process. - - ![Add SQL Database to failover group](./media/failover-group-add-single-database-tutorial/add-sqldb-to-failover-group.png) - -# [PowerShell](#tab/azure-powershell) - -Create your failover group and add your database to it using PowerShell. 
- - > [!NOTE] - > The server login and firewall settings must match that of your primary server. - - ```powershell-interactive - # $subscriptionId = '' - # $resourceGroupName = "myResourceGroup-$(Get-Random)" - # $location = "West US" - # $adminLogin = "azureuser" - # $password = "PWD27!"+(New-Guid).Guid - # $serverName = "mysqlserver-$(Get-Random)" - # $databaseName = "mySampleDatabase" - $drLocation = "East US" - $drServerName = "mysqlsecondary-$(Get-Random)" - $failoverGroupName = "failovergrouptutorial-$(Get-Random)" - - # The ip address range that you want to allow to access your server - # (leaving at 0.0.0.0 will prevent outside-of-azure connections to your DB) - $startIp = "0.0.0.0" - $endIp = "0.0.0.0" - - # Show randomized variables - Write-host "DR Server name is" $drServerName - Write-host "Failover group name is" $failoverGroupName - - # Create a secondary server in the failover region - Write-host "Creating a secondary server in the failover region..." - $drServer = New-AzSqlServer -ResourceGroupName $resourceGroupName ` - -ServerName $drServerName ` - -Location $drLocation ` - -SqlAdministratorCredentials $(New-Object -TypeName System.Management.Automation.PSCredential ` - -ArgumentList $adminlogin, $(ConvertTo-SecureString -String $password -AsPlainText -Force)) - $drServer - - # Create a server firewall rule that allows access from the specified IP range - Write-host "Configuring firewall for secondary server..." - $serverFirewallRule = New-AzSqlServerFirewallRule -ResourceGroupName $resourceGroupName ` - -ServerName $drServerName ` - -FirewallRuleName "AllowedIPs" -StartIpAddress $startIp -EndIpAddress $endIp - $serverFirewallRule - - # Create a failover group between the servers - $failovergroup = Write-host "Creating a failover group between the primary and secondary server..." 
- New-AzSqlDatabaseFailoverGroup ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -PartnerServerName $drServerName ` - -FailoverGroupName $failoverGroupName ` - -FailoverPolicy Automatic ` - -GracePeriodWithDataLossHours 2 - $failovergroup - - # Add the database to the failover group - Write-host "Adding the database to the failover group..." - Get-AzSqlDatabase ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -DatabaseName $databaseName | ` - Add-AzSqlDatabaseToFailoverGroup ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -FailoverGroupName $failoverGroupName - Write-host "Successfully added the database to the failover group..." - ``` - -This portion of the tutorial uses the following PowerShell cmdlets: - -| Command | Notes | -|---|---| -| [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) | Creates a server in Azure SQL Database that hosts single databases and elastic pools. | -| [New-AzSqlServerFirewallRule](/powershell/module/az.sql/new-azsqlserverfirewallrule) | Creates a firewall rule for a server in Azure SQL Database. | -| [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase) | Creates a new single database in Azure SQL Database. | -| [New-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/new-azsqldatabasefailovergroup) | Creates a new failover group in Azure SQL Database. | -| [Get-AzSqlDatabase](/powershell/module/az.sql/get-azsqldatabase) | Gets one or more databases in Azure SQL Database. | -| [Add-AzSqlDatabaseToFailoverGroup](/powershell/module/az.sql/add-azsqldatabasetofailovergroup) | Adds one or more databases to a failover group in Azure SQL Database. | - -# [Azure CLI](#tab/azure-cli) - -In this step, you create your failover group and add your database to it using the Azure CLI.
 - -### Set additional parameter values - -Set these additional parameter values for use in creating the failover group, in addition to the values defined in the preceding script that created the primary resource group and server. - -Change the failover location as appropriate for your environment. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-single-db-to-failover-group-az-cli.sh" id="SetAdditionalParameterValues"::: - -### Create the secondary server - -Use the [az sql server create](/cli/azure/sql/server#az-sql-server-create) command to create a secondary server. -> [!NOTE] -> The server login and firewall settings must match that of your primary server. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-single-db-to-failover-group-az-cli.sh" id="CreateSecondaryServer"::: - -### Create the failover group - -Use the [az sql failover-group create](/cli/azure/sql/failover-group#az-sql-failover-group-create) command to create a failover group. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-single-db-to-failover-group-az-cli.sh" id="CreateFailoverGroup"::: - -### Azure CLI failover group creation reference - -This portion of the tutorial uses the following Azure CLI cmdlets: - -| Command | Notes | -|---|---| -| [az sql server create](/cli/azure/sql/server#az-sql-server-create) | Creates a server that hosts databases and elastic pools. | -| [az sql failover-group create](/cli/azure/sql/failover-group#az-sql-failover-group-create) | Creates a failover group. | -| [az sql failover-group update](/cli/azure/sql/failover-group#az-sql-failover-group-update) | Updates a failover group.| - ---- - -## 3 - Test failover - -In this step, you will fail your failover group over to the secondary server, and then fail back using the Azure portal. - -# [Azure portal](#tab/azure-portal) - -Test failover using the Azure portal. - -1.
Select **Azure SQL** in the left-hand menu of the [Azure portal](https://portal.azure.com). If **Azure SQL** isn't in the list, select **All services**, then type Azure SQL in the search box. (Optional) Select the star next to **Azure SQL** to favorite it and add it as an item in the left-hand navigation. -1. Select the database created in section 1, such as `mySampleDatabase`. -1. Select the name of the server under **Server name** to open the settings for the server. - - ![Open server for database](./media/failover-group-add-single-database-tutorial/open-sql-db-server.png) - -1. Select **Failover groups** under the **Settings** pane and then choose the failover group you created in section 2. - - ![Select the failover group from the portal](./media/failover-group-add-single-database-tutorial/select-failover-group.png) - -1. Review which server is primary and which server is secondary. -1. Select **Failover** from the task pane to fail over your failover group containing your sample database. -1. Select **Yes** on the warning that notifies you that TDS sessions will be disconnected. - - ![Fail over your failover group containing your database](./media/failover-group-add-single-database-tutorial/failover-sql-db.png) - -1. Review which server is now primary and which server is secondary. If failover succeeded, the two servers should have swapped roles. -1. Select **Failover** again to fail the servers back to their original roles. - -# [PowerShell](#tab/azure-powershell) - -Test failover using PowerShell. - -Check the role of the secondary replica: - - ```powershell-interactive - # Set variables - # $resourceGroupName = "myResourceGroup-$(Get-Random)" - # $serverName = "mysqlserver-$(Get-Random)" - # $failoverGroupName = "failovergrouptutorial-$(Get-Random)" - - # Check role of secondary replica - Write-host "Confirming the secondary replica is secondary...."
- (Get-AzSqlDatabaseFailoverGroup ` - -FailoverGroupName $failoverGroupName ` - -ResourceGroupName $resourceGroupName ` - -ServerName $drServerName).ReplicationRole - ``` - -Fail over to the secondary server: - - ```powershell-interactive - # Set variables - # $resourceGroupName = "myResourceGroup-$(Get-Random)" - # $serverName = "mysqlserver-$(Get-Random)" - # $failoverGroupName = "failovergrouptutorial-$(Get-Random)" - - # Failover to secondary server - Write-host "Failing over failover group to the secondary..." - Switch-AzSqlDatabaseFailoverGroup ` - -ResourceGroupName $resourceGroupName ` - -ServerName $drServerName ` - -FailoverGroupName $failoverGroupName - Write-host "Failed failover group successfully to" $drServerName - ``` - -Revert failover group back to the primary server: - - ```powershell-interactive - # Set variables - # $resourceGroupName = "myResourceGroup-$(Get-Random)" - # $serverName = "mysqlserver-$(Get-Random)" - # $failoverGroupName = "failovergrouptutorial-$(Get-Random)" - - # Revert failover to primary server - Write-host "Failing over failover group to the primary...." - Switch-AzSqlDatabaseFailoverGroup ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -FailoverGroupName $failoverGroupName - Write-host "Failed failover group successfully back to" $serverName - ``` - -This portion of the tutorial uses the following PowerShell cmdlets: - -| Command | Notes | -|---|---| -| [Get-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/get-azsqldatabasefailovergroup) | Gets or lists Azure SQL Database failover groups. | -| [Switch-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/switch-azsqldatabasefailovergroup)| Executes a failover of an Azure SQL Database failover group. | - -# [Azure CLI](#tab/azure-cli) - -Test failover using the Azure CLI. 
- -### Verify the roles of each server - -Use the [az sql failover-group show](/cli/azure/sql/failover-group#az-sql-failover-group-show) command to confirm the roles of each server. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-single-db-to-failover-group-az-cli.sh" id="VerifyRole"::: - -### Fail over to the secondary server - -Use the [az sql failover-group set-primary](/cli/azure/sql/failover-group#az-sql-failover-group-set-primary) to fail over to the secondary server. Use the [az sql failover-group show](/cli/azure/sql/failover-group#az-sql-failover-group-show) command to verify a successful failover. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-single-db-to-failover-group-az-cli.sh" id="FailingOver"::: - -### Revert failover group back to the primary server - -Use the [az sql failover-group set-primary](/cli/azure/sql/failover-group#az-sql-failover-group-set-primary) command to fail back to the primary server. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-single-db-to-failover-group-az-cli.sh" id="FailingBack"::: - -### Azure CLI failover group management reference - -This portion of the tutorial uses the following Azure CLI cmdlets: - -| Command | Notes | -|---|---| -| [az sql failover-group show](/cli/azure/sql/failover-group#az-sql-failover-group-show) | Gets the failover groups in a server. | -| [az sql failover-group set-primary](/cli/azure/sql/failover-group#az-sql-failover-group-set-primary) | Set the primary of the failover group by failing over all databases from the current primary server. | - ---- - -## Clean up resources - -Clean up resources by deleting the resource group. - -# [Azure portal](#tab/azure-portal) - -Delete the resource group using the Azure portal. - -1. Navigate to your resource group in the [Azure portal](https://portal.azure.com). -1. 
Select **Delete resource group** to delete all the resources in the group, as well as the resource group itself. -1. Type the name of the resource group, `myResourceGroup`, in the textbox, and then select **Delete** to delete the resource group. - -# [PowerShell](#tab/azure-powershell) - -Delete the resource group using PowerShell. - - ```powershell-interactive - # Set variables - # $resourceGroupName = "myResourceGroup-$(Get-Random)" - - # Remove the resource group - Write-host "Removing resource group..." - Remove-AzResourceGroup -ResourceGroupName $resourceGroupName - Write-host "Resource group removed =" $resourceGroupName - ``` - -This portion of the tutorial uses the following PowerShell cmdlets: - -| Command | Notes | -|---|---| -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Removes a resource group | - -# [Azure CLI](#tab/azure-cli) - -[!INCLUDE [cli-clean-up-resources.md](../../../includes/cli-clean-up-resources.md)] - - ```azurecli - echo "Cleaning up resources by removing the resource group..." - az group delete --name $resourceGroup -y - - ``` - -This portion of the tutorial uses the following Azure CLI cmdlets: - -| Command | Notes | -|---|---| -| [az group delete](/cli/azure/vm/extension#az-vm-extension-set) | Deletes a resource group including all nested resources. | - ---- - -> [!IMPORTANT] -> If you want to keep the resource group but delete the secondary database, remove it from the failover group before deleting it. Deleting a secondary database before it is removed from the failover group can cause unpredictable behavior. - -## Full scripts - -# [PowerShell](#tab/azure-powershell) - -[!code-powershell-interactive[main](../../../powershell_scripts/sql-database/failover-groups/add-single-db-to-failover-group-az-ps.ps1 "Add database to a failover group")] - -This script uses the following commands. Each command in the table links to command specific documentation. 
- -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates a resource group in which all resources are stored. | -| [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) | Creates a server that hosts single databases and elastic pools in Azure SQL Database. | -| [New-AzSqlServerFirewallRule](/powershell/module/az.sql/new-azsqlserverfirewallrule) | Creates a firewall rule for a server in Azure SQL Database. | -| [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase) | Creates a new database in Azure SQL Database. | -| [New-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/new-azsqldatabasefailovergroup) | Creates a new failover group in Azure SQL Database. | -| [Get-AzSqlDatabase](/powershell/module/az.sql/get-azsqldatabase) | Gets one or more databases in Azure SQL Database. | -| [Add-AzSqlDatabaseToFailoverGroup](/powershell/module/az.sql/add-azsqldatabasetofailovergroup) | Adds one or more databases to a failover group in Azure SQL Database. | -| [Get-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/get-azsqldatabasefailovergroup) | Gets or lists failover groups in Azure SQL Database. | -| [Switch-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/switch-azsqldatabasefailovergroup)| Executes a failover of a failover group in Azure SQL Database. | -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Removes a resource group in Azure SQL Database.| - -# [Azure CLI](#tab/azure-cli) - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-single-db-to-failover-group-az-cli.sh" id="FullScript"::: - -This script uses the following commands. Each command in the table links to command specific documentation. - -| Command | Notes | -|---|---| -| [az account set](/cli/azure/account#az-account-set) | Sets a subscription to be the current active subscription. 
| -| [az group create](/cli/azure/group#az-group-create) | Creates a resource group in which all resources are stored. | -| [az sql server create](/cli/azure/sql/server#az-sql-server-create) | Creates a server that hosts single databases and elastic pools in Azure SQL Database. | -| [az sql server firewall-rule create](/cli/azure/sql/server/firewall-rule) | Creates the server-level IP firewall rules in Azure SQL Database. | -| [az sql db create](/cli/azure/sql/db) | Creates a database in Azure SQL Database. | -| [az sql failover-group create](/cli/azure/sql/failover-group#az-sql-failover-group-create) | Creates a failover group in Azure SQL Database. | -| [az sql failover-group show](/cli/azure/sql/failover-group#az-sql-failover-group-show) | Lists the failover groups in a server in Azure SQL Database. | -| [az sql failover-group set-primary](/cli/azure/sql/failover-group#az-sql-failover-group-set-primary) | Set the primary of the failover group by failing over all databases from the current primary server. | -| [az group delete](/cli/azure/vm/extension#az-vm-extension-set) | Deletes a resource group including all nested resources. | - -# [Azure portal](#tab/azure-portal) - -There are no scripts available for the Azure portal. - ---- - -For additional Azure SQL Database scripts, see: [Azure PowerShell](powershell-script-content-guide.md) and [Azure CLI](az-cli-script-samples-content-guide.md). - -## Next steps - -In this tutorial, you added a database in Azure SQL Database to a failover group, and tested failover. You learned how to: - -> [!div class="checklist"] -> -> - Create a database in Azure SQL Database -> - Create a failover group for the database between two servers. -> - Test failover. - -Advance to the next tutorial on how to add your elastic pool to a failover group. 
- -> [!div class="nextstepaction"] -> [Tutorial: Add an Azure SQL Database elastic pool to a failover group](failover-group-add-elastic-pool-tutorial.md) diff --git a/articles/azure-sql/database/features-comparison.md b/articles/azure-sql/database/features-comparison.md deleted file mode 100644 index d26fea9e36d82..0000000000000 --- a/articles/azure-sql/database/features-comparison.md +++ /dev/null @@ -1,197 +0,0 @@ ---- -title: Compare the database engine features of SQL Database and SQL Managed Instance -titleSuffix: Azure SQL Database & SQL Managed Instance -description: This article compares the database engine features of Azure SQL Database and Azure SQL Managed Instance -services: sql-database -ms.service: sql-db-mi -ms.subservice: service-overview -ms.custom: -ms.devlang: -ms.topic: conceptual -author: danimir -ms.author: danil -ms.reviewer: kendralittle, bonova, mathoma, danil -ms.date: 12/14/2021 ---- - -# Features comparison: Azure SQL Database and Azure SQL Managed Instance - -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Azure SQL Database and SQL Managed Instance share a common code base with the latest stable version of SQL Server. -Most of the standard SQL language, query processing, and database management features are identical. The features that are common between SQL Server and SQL Database or SQL Managed Instance are: - -- Language features - [Control of flow language keywords](/sql/t-sql/language-elements/control-of-flow), [Cursors](/sql/t-sql/language-elements/cursors-transact-sql), [Data types](/sql/t-sql/data-types/data-types-transact-sql), [DML statements](/sql/t-sql/queries/queries), [Predicates](/sql/t-sql/queries/predicates), [Sequence numbers](/sql/relational-databases/sequence-numbers/sequence-numbers), [Stored procedures](/sql/relational-databases/stored-procedures/stored-procedures-database-engine), and [Variables](/sql/t-sql/language-elements/variables-transact-sql). 
-- Database features - [Automatic tuning (plan forcing)](/sql/relational-databases/automatic-tuning/automatic-tuning), [Change tracking](/sql/relational-databases/track-changes/about-change-tracking-sql-server), [Database collation](/sql/relational-databases/collations/set-or-change-the-database-collation), [Contained databases](/sql/relational-databases/databases/contained-databases), [Contained users](/sql/relational-databases/security/contained-database-users-making-your-database-portable), [Data compression](/sql/relational-databases/data-compression/data-compression), [Database configuration settings](/sql/t-sql/statements/alter-database-scoped-configuration-transact-sql), [Online index operations](/sql/relational-databases/indexes/perform-index-operations-online), [Partitioning](/sql/relational-databases/partitions/partitioned-tables-and-indexes), and [Temporal tables](/sql/relational-databases/tables/temporal-tables) ([see getting started guide](../temporal-tables.md)). -- Security features - [Application roles](/sql/relational-databases/security/authentication-access/application-roles), [Dynamic data masking](/sql/relational-databases/security/dynamic-data-masking) ([see getting started guide](dynamic-data-masking-overview.md)), [Row Level Security](/sql/relational-databases/security/row-level-security), and Threat detection - see getting started guides for [SQL Database](threat-detection-configure.md) and [SQL Managed Instance](../managed-instance/threat-detection-configure.md). -- Multi-model capabilities - [Graph processing](/sql/relational-databases/graphs/sql-graph-overview), [JSON data](/sql/relational-databases/json/json-data-sql-server) ([see getting started guide](json-features.md)), [OPENXML](/sql/t-sql/functions/openxml-transact-sql), [Spatial](/sql/relational-databases/spatial/spatial-data-sql-server), [OPENJSON](/sql/t-sql/functions/openjson-transact-sql), and [XML indexes](/sql/t-sql/statements/create-xml-index-transact-sql). 
- -Azure manages your databases and guarantees their high-availability. Some features that might affect high-availability or can't be used in PaaS world have limited functionalities in SQL Database and SQL Managed Instance. These features are described in the tables below. - -If you need more details about the differences, you can find them in the separate pages: -- [Azure SQL Database vs. SQL Server differences](transact-sql-tsql-differences-sql-server.md) -- [Azure SQL Managed Instance vs. SQL Server differences](../managed-instance/transact-sql-tsql-differences-sql-server.md) - -## Features of SQL Database and SQL Managed Instance - -The following table lists the major features of SQL Server and provides information about whether the feature is partially or fully supported in Azure SQL Database and Azure SQL Managed Instance, with a link to more information about the feature. - -| **Feature** | **Azure SQL Database** | **Azure SQL Managed Instance** | -| --- | --- | --- | -| [Always Encrypted](/sql/relational-databases/security/encryption/always-encrypted-database-engine) | Yes - see [Cert store](always-encrypted-certificate-store-configure.md) and [Key vault](always-encrypted-azure-key-vault-configure.md) | Yes - see [Cert store](always-encrypted-certificate-store-configure.md) and [Key vault](always-encrypted-azure-key-vault-configure.md) | -| [Always On Availability Groups](/sql/database-engine/availability-groups/windows/always-on-availability-groups-sql-server) | [99.99-99.995% availability](high-availability-sla.md) is guaranteed for every database. Disaster recovery is discussed in [Overview of business continuity with Azure SQL Database](business-continuity-high-availability-disaster-recover-hadr-overview.md) | [99.99.% availability](high-availability-sla.md) is guaranteed for every database and [can't be managed by user](../managed-instance/transact-sql-tsql-differences-sql-server.md#availability). 
Disaster recovery is discussed in [Overview of business continuity with Azure SQL Database](business-continuity-high-availability-disaster-recover-hadr-overview.md). Use [Auto-failover groups](auto-failover-group-overview.md) to configure a secondary SQL Managed Instance in another region. SQL Server instances and SQL Database can't be used as secondaries for SQL Managed Instance. | -| [Attach a database](/sql/relational-databases/databases/attach-a-database) | No | No | -| [Auditing](/sql/relational-databases/security/auditing/sql-server-audit-database-engine) | [Yes](auditing-overview.md)| [Yes](../managed-instance/auditing-configure.md), with some [differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#auditing) | -| [Azure Active Directory (Azure AD) authentication](authentication-aad-overview.md) | Yes. Azure AD users only. | Yes. Including server-level Azure AD logins. | -| [BACKUP command](/sql/t-sql/statements/backup-transact-sql) | No, only system-initiated automatic backups - see [Automated backups](automated-backups-overview.md) | Yes, user initiated copy-only backups to Azure Blob storage (automatic system backups can't be initiated by user) - see [Backup differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#backup) | -| [Built-in functions](/sql/t-sql/functions/functions) | Most - see individual functions | Yes - see [Stored procedures, functions, triggers differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#stored-procedures-functions-and-triggers) | -| [BULK INSERT statement](/sql/relational-databases/import-export/import-bulk-data-by-using-bulk-insert-or-openrowset-bulk-sql-server) | Yes, but just from Azure Blob storage as a source. | Yes, but just from Azure Blob Storage as a source - see [differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#bulk-insert--openrowset). 
| -| [Certificates and asymmetric keys](/sql/relational-databases/security/sql-server-certificates-and-asymmetric-keys) | Yes, without access to file system for `BACKUP` and `CREATE` operations. | Yes, without access to file system for `BACKUP` and `CREATE` operations - see [certificate differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#certificates). | -| [Change data capture - CDC](/sql/relational-databases/track-changes/about-change-data-capture-sql-server) | Yes (Preview) for S3 tier and above. Basic, S0, S1, S2 are not supported. | Yes | -| [Collation - server/instance](/sql/relational-databases/collations/set-or-change-the-server-collation) | No, default server collation `SQL_Latin1_General_CP1_CI_AS` is always used. | Yes, can be set when the [instance is created](../managed-instance/create-template-quickstart.md) and can't be updated later. | -| [Columnstore indexes](/sql/relational-databases/indexes/columnstore-indexes-overview) | Yes - [Premium tier, Standard tier - S3 and above, General Purpose tier, Business Critical, and Hyperscale tiers](/sql/relational-databases/indexes/columnstore-indexes-overview) |Yes | -| [Common language runtime - CLR](/sql/relational-databases/clr-integration/common-language-runtime-clr-integration-programming-concepts) | No | Yes, but without access to file system in `CREATE ASSEMBLY` statement - see [CLR differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#clr) | -| [Credentials](/sql/relational-databases/security/authentication-access/credentials-database-engine) | Yes, but only [database scoped credentials](/sql/t-sql/statements/create-database-scoped-credential-transact-sql). 
| Yes, but only **Azure Key Vault** and `SHARED ACCESS SIGNATURE` are supported - see [details](../managed-instance/transact-sql-tsql-differences-sql-server.md#credential) | -| [Cross-database/three-part name queries](/sql/relational-databases/linked-servers/linked-servers-database-engine) | No - see [Elastic queries](elastic-query-overview.md) | Yes| -| [Cross-database transactions](/sql/relational-databases/linked-servers/linked-servers-database-engine) | No | Yes, within the instance. See [Linked server differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#linked-servers) for cross-instance queries. | -| [Database mail - DbMail](/sql/relational-databases/database-mail/database-mail) | No | Yes | -| [Database mirroring](/sql/database-engine/database-mirroring/database-mirroring-sql-server) | No | [No](../managed-instance/transact-sql-tsql-differences-sql-server.md#database-mirroring) | -| [Database snapshots](/sql/relational-databases/databases/database-snapshots-sql-server) | No | No | -| [DBCC statements](/sql/t-sql/database-console-commands/dbcc-transact-sql) | Most - see individual statements | Yes - see [DBCC differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#dbcc) | -| [DDL statements](/sql/t-sql/statements/statements) | Most - see individual statements | Yes - see [T-SQL differences](../managed-instance/transact-sql-tsql-differences-sql-server.md) | -| [DDL triggers](/sql/relational-databases/triggers/ddl-triggers) | Database only | Yes | -| [Distributed partition views](/sql/t-sql/statements/create-view-transact-sql#partitioned-views) | No | Yes | -| [Distributed transactions - MS DTC](/sql/relational-databases/native-client-ole-db-transactions/supporting-distributed-transactions) | No - see [Elastic transactions](elastic-transactions-overview.md) | No - see [Elastic transactions](elastic-transactions-overview.md) | -| [DML triggers](/sql/relational-databases/triggers/create-dml-triggers) | Most - see 
individual statements | Yes | -| [DMVs](/sql/relational-databases/system-dynamic-management-views/system-dynamic-management-views) | Most - see individual DMVs | Yes - see [T-SQL differences](../managed-instance/transact-sql-tsql-differences-sql-server.md) | -| [Elastic query](elastic-query-overview.md) (in public preview) | Yes, with required RDBMS type. | No, use native cross-DB queries and Linked Server instead| -| [Event notifications](/sql/relational-databases/service-broker/event-notifications) | No - see [Alerts](alerts-insights-configure-portal.md) | No | -| [Expressions](/sql/t-sql/language-elements/expressions-transact-sql) |Yes | Yes | -| [Extended events (XEvent)](/sql/relational-databases/extended-events/extended-events) | Some - see [Extended events in SQL Database](xevent-db-diff-from-svr.md) | Yes - see [Extended events differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#extended-events) | -| [Extended stored procedures](/sql/relational-databases/extended-stored-procedures-programming/creating-extended-stored-procedures) | No | No | -| [Files and file groups](/sql/relational-databases/databases/database-files-and-filegroups) | Primary file group only | Yes. File paths are automatically assigned and the file location can't be specified in `ALTER DATABASE ADD FILE` [statement](../managed-instance/transact-sql-tsql-differences-sql-server.md#alter-database-statement). 
| -| [Filestream](/sql/relational-databases/blob/filestream-sql-server) | No | [No](../managed-instance/transact-sql-tsql-differences-sql-server.md#filestream-and-filetable) | -| [Full-text search (FTS)](/sql/relational-databases/search/full-text-search) | Yes, but third-party word breakers are not supported | Yes, but [third-party word breakers are not supported](../managed-instance/transact-sql-tsql-differences-sql-server.md#full-text-semantic-search) | -| [Functions](/sql/t-sql/functions/functions) | Most - see individual functions | Yes - see [Stored procedures, functions, triggers differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#stored-procedures-functions-and-triggers) | -| [In-memory optimization](/sql/relational-databases/in-memory-oltp/in-memory-oltp-in-memory-optimization) | Yes in [Premium and Business Critical service tiers](../in-memory-oltp-overview.md).
    Limited support for non-persistent In-Memory OLTP objects such as memory-optimized table variables in [Hyperscale service tier](service-tier-hyperscale.md).| Yes in [Business Critical service tier](../managed-instance/sql-managed-instance-paas-overview.md) | -| [Language elements](/sql/t-sql/language-elements/language-elements-transact-sql) | Most - see individual elements | Yes - see [T-SQL differences](../managed-instance/transact-sql-tsql-differences-sql-server.md) | -| [Ledger](ledger-overview.md) | Yes | No | -| [Linked servers](/sql/relational-databases/linked-servers/linked-servers-database-engine) | No - see [Elastic query](elastic-query-horizontal-partitioning.md) | Yes. Only to [SQL Server and SQL Database](../managed-instance/transact-sql-tsql-differences-sql-server.md#linked-servers) without distributed transactions. | -| [Linked servers](/sql/relational-databases/linked-servers/linked-servers-database-engine) that read from files (CSV, Excel)| No. Use [BULK INSERT](/sql/t-sql/statements/bulk-insert-transact-sql#e-importing-data-from-a-csv-file) or [OPENROWSET](/sql/t-sql/functions/openrowset-transact-sql#g-accessing-data-from-a-csv-file-with-a-format-file) as an alternative for CSV format. | No. Use [BULK INSERT](/sql/t-sql/statements/bulk-insert-transact-sql#e-importing-data-from-a-csv-file) or [OPENROWSET](/sql/t-sql/functions/openrowset-transact-sql#g-accessing-data-from-a-csv-file-with-a-format-file) as an alternative for CSV format. Track these requests on [SQL Managed Instance feedback item](https://feedback.azure.com/d365community/idea/db80cf6e-3425-ec11-b6e6-000d3a4f0f84)| -| [Log shipping](/sql/database-engine/log-shipping/about-log-shipping-sql-server) | [High availability](high-availability-sla.md) is included with every database. Disaster recovery is discussed in [Overview of business continuity](business-continuity-high-availability-disaster-recover-hadr-overview.md). 
| Natively built in as a part of [Azure Data Migration Service (DMS)](../../dms/tutorial-sql-server-to-managed-instance.md) migration process. Natively built for custom data migration projects as an external [Log Replay Service (LRS)](../managed-instance/log-replay-service-migrate.md).
    Not available as High availability solution, because other [High availability](high-availability-sla.md) methods are included with every database and it is not recommended to use Log-shipping as HA alternative. Disaster recovery is discussed in [Overview of business continuity](business-continuity-high-availability-disaster-recover-hadr-overview.md). Not available as a replication mechanism between databases - use secondary replicas on [Business Critical tier](service-tier-business-critical.md), [auto-failover groups](auto-failover-group-overview.md), or [transactional replication](../managed-instance/replication-transactional-overview.md) as the alternatives. | -| [Logins and users](/sql/relational-databases/security/authentication-access/principals-database-engine) | Yes, but `CREATE` and `ALTER` login statements do not offer all the options (no Windows and server-level Azure Active Directory logins). `EXECUTE AS LOGIN` is not supported - use `EXECUTE AS USER` instead. | Yes, with some [differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#logins-and-users). Windows logins are not supported and they should be replaced with Azure Active Directory logins. | -| [Minimal logging in bulk import](/sql/relational-databases/import-export/prerequisites-for-minimal-logging-in-bulk-import) | No, only Full Recovery model is supported. | No, only Full Recovery model is supported. | -| [Modifying system data](/sql/relational-databases/databases/system-databases) | No | Yes | -| [OLE Automation](/sql/database-engine/configure-windows/ole-automation-procedures-server-configuration-option) | No | No | -| [OPENDATASOURCE](/sql/t-sql/functions/opendatasource-transact-sql)|No|Yes, only to SQL Database, SQL Managed Instance and SQL Server. See [T-SQL differences](../managed-instance/transact-sql-tsql-differences-sql-server.md)| -| [OPENQUERY](/sql/t-sql/functions/openquery-transact-sql)|No|Yes, only to SQL Database, SQL Managed Instance and SQL Server. 
See [T-SQL differences](../managed-instance/transact-sql-tsql-differences-sql-server.md)| -| [OPENROWSET](/sql/t-sql/functions/openrowset-transact-sql)|Yes, only to import from Azure Blob storage. |Yes, only to SQL Database, SQL Managed Instance and SQL Server, and to import from Azure Blob storage. See [T-SQL differences](../managed-instance/transact-sql-tsql-differences-sql-server.md)| -| [Operators](/sql/t-sql/language-elements/operators-transact-sql) | Most - see individual operators |Yes - see [T-SQL differences](../managed-instance/transact-sql-tsql-differences-sql-server.md) | -| [Polybase](/sql/relational-databases/polybase/polybase-guide) | No. You can query data in the files placed on Azure Blob Storage using `OPENROWSET` function or use [an external table that references a serverless SQL pool in Synapse Analytics](https://devblogs.microsoft.com/azure-sql/read-azure-storage-files-using-synapse-sql-external-tables/). | No. You can query data in the files placed on Azure Blob Storage using `OPENROWSET` function, a linked server that references [serverless SQL pool in Synapse Analytics](https://devblogs.microsoft.com/azure-sql/linked-server-to-synapse-sql-to-implement-polybase-like-scenarios-in-managed-instance/), [SQL Database](https://techcommunity.microsoft.com/t5/azure-database-support-blog/lesson-learned-63-it-is-possible-to-create-linked-server-in/ba-p/369168), or SQL Server. | -| [Query Notifications](/sql/relational-databases/native-client/features/working-with-query-notifications) | No | Yes | -| [Machine Learning Services](/sql/advanced-analytics/what-is-sql-server-machine-learning) (_Formerly R Services_)| No | Yes, see [Machine Learning Services in Azure SQL Managed Instance](../managed-instance/machine-learning-services-overview.md) | -| [Recovery models](/sql/relational-databases/backup-restore/recovery-models-sql-server) | Only Full Recovery that guarantees high availability is supported. 
Simple and Bulk Logged recovery models are not available. | Only Full Recovery that guarantees high availability is supported. Simple and Bulk Logged recovery models are not available. | -| [Resource governor](/sql/relational-databases/resource-governor/resource-governor) | No | Yes | -| [RESTORE statements](/sql/t-sql/statements/restore-statements-for-restoring-recovering-and-managing-backups-transact-sql) | No | Yes, with mandatory `FROM URL` options for the backups files placed on Azure Blob Storage. See [Restore differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#restore-statement) | -| [Restore database from backup](/sql/relational-databases/backup-restore/back-up-and-restore-of-sql-server-databases#restore-data-backups) | From automated backups only - see [SQL Database recovery](recovery-using-backups.md) | From automated backups - see [SQL Database recovery](recovery-using-backups.md) and from full backups placed on Azure Blob Storage - see [Backup differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#backup) | -| [Restore database to SQL Server](/sql/relational-databases/backup-restore/back-up-and-restore-of-sql-server-databases#restore-data-backups) | No. Use BACPAC or BCP instead of native restore. | No, because SQL Server database engine used in SQL Managed Instance has higher version than any RTM version of SQL Server used on-premises. Use BACPAC, BCP, or Transactional replication instead. | -| [Semantic search](/sql/relational-databases/search/semantic-search-sql-server) | No | No | -| [Service Broker](/sql/database-engine/configure-windows/sql-server-service-broker) | No | Yes, but only within the instance. If you are using remote Service Broker routes, try to consolidate databases from several distributed SQL Server instances into one SQL Managed Instance during migration and use only local routes. 
See [Service Broker differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#service-broker) | -| [Server configuration settings](/sql/database-engine/configure-windows/server-configuration-options-sql-server) | No | Yes - see [T-SQL differences](../managed-instance/transact-sql-tsql-differences-sql-server.md) | -| [Set statements](/sql/t-sql/statements/set-statements-transact-sql) | Most - see individual statements | Yes - see [T-SQL differences](../managed-instance/transact-sql-tsql-differences-sql-server.md)| -| [SQL Server Agent](/sql/ssms/agent/sql-server-agent) | No - see [Elastic jobs (preview)](elastic-jobs-overview.md) | Yes - see [SQL Server Agent differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#sql-server-agent) | -| [SQL Server Auditing](/sql/relational-databases/security/auditing/sql-server-audit-database-engine) | No - see [SQL Database auditing](auditing-overview.md) | Yes - see [Auditing differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#auditing) | -| [System stored functions](/sql/relational-databases/system-functions/system-functions-for-transact-sql) | Most - see individual functions | Yes - see [Stored procedures, functions, triggers differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#stored-procedures-functions-and-triggers) | -| [System stored procedures](/sql/relational-databases/system-stored-procedures/system-stored-procedures-transact-sql) | Some - see individual stored procedures | Yes - see [Stored procedures, functions, triggers differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#stored-procedures-functions-and-triggers) | -| [System tables](/sql/relational-databases/system-tables/system-tables-transact-sql) | Some - see individual tables | Yes - see [T-SQL differences](../managed-instance/transact-sql-tsql-differences-sql-server.md) | -| [System catalog 
views](/sql/relational-databases/system-catalog-views/catalog-views-transact-sql) | Some - see individual views | Yes - see [T-SQL differences](../managed-instance/transact-sql-tsql-differences-sql-server.md) | -| [TempDB](/sql/relational-databases/databases/tempdb-database) | Yes. [32-GB size per core for every database](resource-limits-vcore-single-databases.md). | Yes. [24-GB size per vCore for entire GP tier and limited by instance size on BC tier](../managed-instance/resource-limits.md#service-tier-characteristics) | -| [Temporary tables](/sql/t-sql/statements/create-table-transact-sql#database-scoped-global-temporary-tables-azure-sql-database) | Local and database-scoped global temporary tables | Local and instance-scoped global temporary tables | -| Time zone choice | No | [Yes](../managed-instance/timezones-overview.md), and it must be configured when the SQL Managed Instance is created. | -| [Trace flags](/sql/t-sql/database-console-commands/dbcc-traceon-trace-flags-transact-sql) | No | Yes, but only limited set of global trace flags. See [DBCC differences](../managed-instance/transact-sql-tsql-differences-sql-server.md#dbcc) | -| [Transactional Replication](../managed-instance/replication-transactional-overview.md) | Yes, [Transactional and snapshot replication subscriber only](migrate-to-database-from-sql-server.md) | Yes, in [public preview](/sql/relational-databases/replication/replication-with-sql-database-managed-instance). See the constraints [here](../managed-instance/transact-sql-tsql-differences-sql-server.md#replication). | -| [Transparent data encryption (TDE)](/sql/relational-databases/security/encryption/transparent-data-encryption-tde) | Yes - General Purpose, Business Critical, and Hyperscale (in preview) service tiers only| [Yes](transparent-data-encryption-tde-overview.md) | -| Windows authentication | No | Yes, see [Windows Authentication for Azure Active Directory principals (Preview)](../managed-instance/winauth-azuread-overview.md). 
| -| [Windows Server Failover Clustering](/sql/sql-server/failover-clusters/windows/windows-server-failover-clustering-wsfc-with-sql-server) | No. Other techniques that provide [high availability](high-availability-sla.md) are included with every database. Disaster recovery is discussed in [Overview of business continuity with Azure SQL Database](business-continuity-high-availability-disaster-recover-hadr-overview.md). | No. Other techniques that provide [high availability](high-availability-sla.md) are included with every database. Disaster recovery is discussed in [Overview of business continuity with Azure SQL Database](business-continuity-high-availability-disaster-recover-hadr-overview.md). | - -## Platform capabilities - -The Azure platform provides a number of PaaS capabilities that are added as an additional value to the standard database features. There is a number of external services that can be used with Azure SQL Database. - -| **Platform feature** | **Azure SQL Database** | **Azure SQL Managed Instance** | -| --- | --- | --- | -| [Active geo-replication](active-geo-replication-overview.md) | Yes - all service tiers. Public Preview in Hyperscale. | No, see [Auto-failover groups](auto-failover-group-overview.md) as an alternative. | -| [Auto-failover groups](auto-failover-group-overview.md) | Yes - all service tiers. Public Preview in Hyperscale. | Yes, see [Auto-failover groups](auto-failover-group-overview.md).| -| Auto-scale | Yes, but only in [serverless model](serverless-tier-overview.md). In the non-serverless model, the change of service tier (change of vCore, storage, or DTU) is fast and online. The service tier change requires minimal or no downtime. | No, you need to choose reserved compute and storage. The change of service tier (vCore or max storage) is online and requires minimal or no downtime. | -| [Automatic backups](automated-backups-overview.md) | Yes. 
Full backups are taken every 7 days, differential 12 hours, and log backups every 5-10 min. | Yes. Full backups are taken every 7 days, differential 12 hours, and log backups every 5-10 min. | -| [Automatic tuning (indexes)](/sql/relational-databases/automatic-tuning/automatic-tuning)| [Yes](automatic-tuning-overview.md)| No | -| [Availability Zones](../../availability-zones/az-overview.md) | Yes | No | -| [Azure Resource Health](../../service-health/resource-health-overview.md) | Yes | No | -| Backup retention | Yes. 7 days default, max 35 days. Hyperscale backups are currently limited to a 7 day retention period. | Yes. 7 days default, max 35 days. | -| [Data Migration Service (DMS)](/sql/dma/dma-overview) | Yes | Yes | -| [Elastic jobs](elastic-jobs-overview.md) | Yes - see [Elastic jobs (preview)](elastic-jobs-overview.md) | No ([SQL Agent](../managed-instance/transact-sql-tsql-differences-sql-server.md#sql-server-agent) can be used instead). | -| File system access | No. Use [BULK INSERT](/sql/t-sql/statements/bulk-insert-transact-sql#f-importing-data-from-a-file-in-azure-blob-storage) or [OPENROWSET](/sql/t-sql/functions/openrowset-transact-sql#i-accessing-data-from-a-file-stored-on-azure-blob-storage) to access and load data from Azure Blob Storage as an alternative. | No. Use [BULK INSERT](/sql/t-sql/statements/bulk-insert-transact-sql#f-importing-data-from-a-file-in-azure-blob-storage) or [OPENROWSET](/sql/t-sql/functions/openrowset-transact-sql#i-accessing-data-from-a-file-stored-on-azure-blob-storage) to access and load data from Azure Blob Storage as an alternative. | -| [Geo-restore](recovery-using-backups.md#geo-restore) | Yes | Yes | -| [Hyperscale architecture](service-tier-hyperscale.md) | Yes | No | -| [Long-term backup retention - LTR](long-term-retention-overview.md) | Yes, keep automatically taken backups up to 10 years. Long-term retention policies are not yet supported for Hyperscale databases. 
| Yes, keep automatically taken backups up to 10 years. | -| Pause/resume | Yes, in [serverless model](serverless-tier-overview.md) | No | -| [Policy-based management](/sql/relational-databases/policy-based-management/administer-servers-by-using-policy-based-management) | No | No | -| Public IP address | Yes. The access can be restricted using firewall or service endpoints. | Yes. Needs to be explicitly enabled and port 3342 must be enabled in NSG rules. Public IP can be disabled if needed. See [Public endpoint](../managed-instance/public-endpoint-overview.md) for more details. | -| [Point in time database restore](/sql/relational-databases/backup-restore/restore-a-sql-server-database-to-a-point-in-time-full-recovery-model) | Yes - all service tiers. See [SQL Database recovery](recovery-using-backups.md#point-in-time-restore) | Yes - see [SQL Database recovery](recovery-using-backups.md#point-in-time-restore) | -| Resource pools | Yes, as [Elastic pools](elastic-pool-overview.md) | Yes. A single instance of SQL Managed Instance can have multiple databases that share the same pool of resources. In addition, you can deploy multiple instances of SQL Managed Instance in [instance pools (preview)](../managed-instance/instance-pools-overview.md) that can share the resources. | -| Scaling up or down (online) | Yes, you can either change DTU or reserved vCores or max storage with the minimal downtime. | Yes, you can change reserved vCores or max storage with the minimal downtime. | -| [SQL Alias](/sql/database-engine/configure-windows/create-or-delete-a-server-alias-for-use-by-a-client) | No, use [DNS Alias](dns-alias-overview.md) | No, use [Cliconfg](https://techcommunity.microsoft.com/t5/Azure-Database-Support-Blog/Lesson-Learned-33-How-to-make-quot-cliconfg-quot-to-work-with/ba-p/369022) to set up alias on the client machines. 
| -| [SQL Analytics](../../azure-monitor/insights/azure-sql.md) | Yes | Yes | -| [SQL Data Sync](sql-data-sync-sql-server-configure.md) | Yes | No | -| [SQL Server Analysis Services (SSAS)](/sql/analysis-services/analysis-services) | No, [Azure Analysis Services](https://azure.microsoft.com/services/analysis-services/) is a separate Azure cloud service. | No, [Azure Analysis Services](https://azure.microsoft.com/services/analysis-services/) is a separate Azure cloud service. | -| [SQL Server Integration Services (SSIS)](/sql/integration-services/sql-server-integration-services) | Yes, with a managed SSIS in Azure Data Factory (ADF) environment, where packages are stored in SSISDB hosted by Azure SQL Database and executed on Azure SSIS Integration Runtime (IR), see [Create Azure-SSIS IR in ADF](../../data-factory/create-azure-ssis-integration-runtime.md).

    To compare the SSIS features in SQL Database and SQL Managed Instance, see [Compare SQL Database to SQL Managed Instance](../../data-factory/create-azure-ssis-integration-runtime.md#comparison-of-sql-database-and-sql-managed-instance). | Yes, with a managed SSIS in Azure Data Factory (ADF) environment, where packages are stored in SSISDB hosted by SQL Managed Instance and executed on Azure SSIS Integration Runtime (IR), see [Create Azure-SSIS IR in ADF](../../data-factory/create-azure-ssis-integration-runtime.md).

    To compare the SSIS features in SQL Database and SQL Managed Instance, see [Compare SQL Database to SQL Managed Instance](../../data-factory/create-azure-ssis-integration-runtime.md#comparison-of-sql-database-and-sql-managed-instance). | -| [SQL Server Reporting Services (SSRS)](/sql/reporting-services/create-deploy-and-manage-mobile-and-paginated-reports) | No - [see Power BI](/power-bi/) | No - use [Power BI paginated reports](/power-bi/paginated-reports/paginated-reports-report-builder-power-bi) instead or host SSRS on an Azure VM. While SQL Managed Instance cannot run SSRS as a service, it can host [SSRS catalog databases](/sql/reporting-services/install-windows/ssrs-report-server-create-a-report-server-database#database-server-version-requirements) for a reporting server installed on Azure Virtual Machine, using SQL Server authentication. | -| [Query Performance Insights (QPI)](query-performance-insight-use.md) | Yes | No. Use built-in reports in SQL Server Management Studio and Azure Data Studio. | -| [VNet](../../virtual-network/virtual-networks-overview.md) | Partial, it enables restricted access using [VNet Endpoints](vnet-service-endpoint-rule-overview.md) | Yes, SQL Managed Instance is injected in customer's VNet. See [subnet](../managed-instance/transact-sql-tsql-differences-sql-server.md#subnet) and [VNet](../managed-instance/transact-sql-tsql-differences-sql-server.md#vnet) | -| VNet Service endpoint | [Yes](vnet-service-endpoint-rule-overview.md) | Yes | -| VNet Global peering | Yes, using [Private IP and service endpoints](vnet-service-endpoint-rule-overview.md) | Yes, using [Virtual network peering](https://techcommunity.microsoft.com/t5/azure-sql/new-feature-global-vnet-peering-support-for-azure-sql-managed/ba-p/1746913). | -| [Private connectivity](../../private-link/private-link-overview.md) | Yes, using [Private Link](../../private-link/private-endpoint-overview.md) | Yes, using VNet. 
| - -## Tools - -Azure SQL Database and Azure SQL Managed Instance support various data tools that can help you manage your data. - -| **Tool** | **Azure SQL Database** | **Azure SQL Managed Instance** | -| --- | --- | --- | -| Azure portal | Yes | Yes | -| Azure CLI | Yes | Yes| -| [Azure Data Studio](/sql/azure-data-studio/what-is) | Yes | Yes | -| Azure PowerShell | Yes | Yes | -| [BACPAC file (export)](/sql/relational-databases/data-tier-applications/export-a-data-tier-application) | Yes - see [SQL Database export](database-export.md) | Yes - see [SQL Managed Instance export](database-export.md) | -| [BACPAC file (import)](/sql/relational-databases/data-tier-applications/import-a-bacpac-file-to-create-a-new-user-database) | Yes - see [SQL Database import](database-import.md) | Yes - see [SQL Managed Instance import](database-import.md) | -| [Data Quality Services (DQS)](/sql/data-quality-services/data-quality-services) | No | No | -| [Master Data Services (MDS)](/sql/master-data-services/master-data-services-overview-mds) | No | No | -| [SMO](/sql/relational-databases/server-management-objects-smo/sql-server-management-objects-smo-programming-guide) | [Yes](https://www.nuget.org/packages/Microsoft.SqlServer.SqlManagementObjects) | Yes [version 150](https://www.nuget.org/packages/Microsoft.SqlServer.SqlManagementObjects) | -| [SQL Server Data Tools (SSDT)](/sql/ssdt/download-sql-server-data-tools-ssdt) | Yes | Yes | -| [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms) | Yes | Yes [version 18.0 and higher](/sql/ssms/download-sql-server-management-studio-ssms) | -| [SQL Server PowerShell](/sql/relational-databases/scripting/sql-server-powershell) | Yes | Yes | -| [SQL Server Profiler](/sql/tools/sql-server-profiler/sql-server-profiler) | No - see [Extended events](xevent-db-diff-from-svr.md) | Yes | -| [System Center Operations Manager](/system-center/scom/welcome) | 
[Yes](https://www.microsoft.com/download/details.aspx?id=38829) | [Yes](https://www.microsoft.com/en-us/download/details.aspx?id=101203) | - -## Migration methods - -You can use different migration methods to move your data between SQL Server, Azure SQL Database and Azure SQL Managed Instance. Some methods are **Online** and picking-up all changes that are made on the source while you are running migration, while in **Offline** methods you need to stop your workload that is modifying data on the source while the migration is in progress. - -| **Source** | **Azure SQL Database** | **Azure SQL Managed Instance** | -| --- | --- | --- | -| SQL Server (on-prem, AzureVM, Amazon RDS) | **Online:** [Transactional Replication](../managed-instance/replication-transactional-overview.md)
    **Offline:** [Data Migration Service (DMS)](/sql/dma/dma-overview), [BACPAC file (import)](/sql/relational-databases/data-tier-applications/import-a-bacpac-file-to-create-a-new-user-database), BCP | **Online:** [Data Migration Service (DMS)](/sql/dma/dma-overview), [Transactional Replication](../managed-instance/replication-transactional-overview.md)
    **Offline:** Native backup/restore, [BACPAC file (import)](/sql/relational-databases/data-tier-applications/import-a-bacpac-file-to-create-a-new-user-database), BCP, [Snapshot replication](../managed-instance/replication-transactional-overview.md) | -| Single database | **Offline:** [BACPAC file (import)](/sql/relational-databases/data-tier-applications/import-a-bacpac-file-to-create-a-new-user-database), BCP | **Offline:** [BACPAC file (import)](/sql/relational-databases/data-tier-applications/import-a-bacpac-file-to-create-a-new-user-database), BCP | -| SQL Managed Instance | **Online:** [Transactional Replication](../managed-instance/replication-transactional-overview.md)
    **Offline:** [BACPAC file (import)](/sql/relational-databases/data-tier-applications/import-a-bacpac-file-to-create-a-new-user-database), BCP, [Snapshot replication](../managed-instance/replication-transactional-overview.md) | **Online:** [Transactional Replication](../managed-instance/replication-transactional-overview.md)
    **Offline:** Cross-instance point-in-time restore ([Azure PowerShell](/powershell/module/az.sql/restore-azsqlinstancedatabase#examples) or [Azure CLI](https://techcommunity.microsoft.com/t5/Azure-SQL-Database/Cross-instance-point-in-time-restore-in-Azure-SQL-Database/ba-p/386208)), [Native backup/restore](../managed-instance/restore-sample-database-quickstart.md), [BACPAC file (import)](/sql/relational-databases/data-tier-applications/import-a-bacpac-file-to-create-a-new-user-database), BCP, [Snapshot replication](../managed-instance/replication-transactional-overview.md) | - -## Next steps - -Microsoft continues to add features to Azure SQL Database. Visit the Service Updates webpage for Azure for the newest updates using these filters: - -- Filtered to [Azure SQL Database](https://azure.microsoft.com/updates/?service=sql-database). -- Filtered to [General Availability \(GA\) announcements](https://azure.microsoft.com/updates/?service=sql-database&update-type=general-availability) for SQL Database features. - -For more information about Azure SQL Database and Azure SQL Managed Instance, see: - -- [What is Azure SQL Database?](sql-database-paas-overview.md) -- [What is Azure SQL Managed Instance?](../managed-instance/sql-managed-instance-paas-overview.md) -- [What is an Azure SQL Managed Instance pool?](../managed-instance/instance-pools-overview.md) diff --git a/articles/azure-sql/database/file-space-manage.md b/articles/azure-sql/database/file-space-manage.md deleted file mode 100644 index 7339345803cab..0000000000000 --- a/articles/azure-sql/database/file-space-manage.md +++ /dev/null @@ -1,545 +0,0 @@ ---- -title: Azure SQL Database file space management -description: This page describes how to manage file space with single and pooled databases in Azure SQL Database, and provides code samples for how to determine if you need to shrink a single or a pooled database as well as how to perform a database shrink operation. 
-services: sql-database -ms.service: sql-database -ms.subservice: deployment-configuration -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: -ms.topic: conceptual -author: oslake -ms.author: moslake -ms.reviewer: kendralittle, wiassaf, mathoma -ms.date: 1/4/2022 ---- -# Manage file space for databases in Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article describes different types of storage space for databases in Azure SQL Database, and steps that can be taken when the file space allocated needs to be explicitly managed. - -> [!NOTE] -> This article does not apply to Azure SQL Managed Instance. - -## Overview - -With Azure SQL Database, there are workload patterns where the allocation of underlying data files for databases can become larger than the amount of used data pages. This condition can occur when space used increases and data is subsequently deleted. The reason is because file space allocated is not automatically reclaimed when data is deleted. - -Monitoring file space usage and shrinking data files may be necessary in the following scenarios: - -- Allow data growth in an elastic pool when the file space allocated for its databases reaches the pool max size. -- Allow decreasing the max size of a single database or elastic pool. -- Allow changing a single database or elastic pool to a different service tier or performance tier with a lower max size. - -> [!NOTE] -> Shrink operations should not be considered a regular maintenance operation. Data and log files that grow due to regular, recurring business operations do not require shrink operations. 
- -### Monitoring file space usage - -Most storage space metrics displayed in the following APIs only measure the size of used data pages: - -- Azure Resource Manager based metrics APIs including PowerShell [get-metrics](/powershell/module/az.monitor/get-azmetric) - -However, the following APIs also measure the size of space allocated for databases and elastic pools: - -- T-SQL: [sys.resource_stats](/sql/relational-databases/system-catalog-views/sys-resource-stats-azure-sql-database) -- T-SQL: [sys.elastic_pool_resource_stats](/sql/relational-databases/system-catalog-views/sys-elastic-pool-resource-stats-azure-sql-database) - -## Understanding types of storage space for a database - -Understanding the following storage space quantities are important for managing the file space of a database. - -|Database quantity|Definition|Comments| -|---|---|---| -|**Data space used**|The amount of space used to store database data.|Generally, space used increases (decreases) on inserts (deletes). In some cases, the space used does not change on inserts or deletes depending on the amount and pattern of data involved in the operation and any fragmentation. For example, deleting one row from every data page does not necessarily decrease the space used.| -|**Data space allocated**|The amount of formatted file space made available for storing database data.|The amount of space allocated grows automatically, but never decreases after deletes. 
This behavior ensures that future inserts are faster since space does not need to be reformatted.| -|**Data space allocated but unused**|The difference between the amount of data space allocated and data space used.|This quantity represents the maximum amount of free space that can be reclaimed by shrinking database data files.| -|**Data max size**|The maximum amount of space that can be used for storing database data.|The amount of data space allocated cannot grow beyond the data max size.| - -The following diagram illustrates the relationship between the different types of storage space for a database. - -![storage space types and relationships](./media/file-space-manage/storage-types.png) - -## Query a single database for storage space information - -The following queries can be used to determine storage space quantities for a single database. - -### Database data space used - -Modify the following query to return the amount of database data space used. Units of the query result are in MB. - -```sql --- Connect to master --- Database data space used in MB -SELECT TOP 1 storage_in_megabytes AS DatabaseDataSpaceUsedInMB -FROM sys.resource_stats -WHERE database_name = 'db1' -ORDER BY end_time DESC; -``` - -### Database data space allocated and unused allocated space - -Use the following query to return the amount of database data space allocated and the amount of unused space allocated. Units of the query result are in MB. - -```sql --- Connect to database --- Database data space allocated in MB and database data space allocated unused in MB -SELECT SUM(size/128.0) AS DatabaseDataSpaceAllocatedInMB, -SUM(size/128.0 - CAST(FILEPROPERTY(name, 'SpaceUsed') AS int)/128.0) AS DatabaseDataSpaceAllocatedUnusedInMB -FROM sys.database_files -GROUP BY type_desc -HAVING type_desc = 'ROWS'; -``` - -### Database data max size - -Modify the following query to return the database data max size. Units of the query result are in bytes. 
- -```sql --- Connect to database --- Database data max size in bytes -SELECT DATABASEPROPERTYEX('db1', 'MaxSizeInBytes') AS DatabaseDataMaxSizeInBytes; -``` - -## Understanding types of storage space for an elastic pool - -Understanding the following storage space quantities are important for managing the file space of an elastic pool. - -|Elastic pool quantity|Definition|Comments| -|---|---|---| -|**Data space used**|The summation of data space used by all databases in the elastic pool.|| -|**Data space allocated**|The summation of data space allocated by all databases in the elastic pool.|| -|**Data space allocated but unused**|The difference between the amount of data space allocated and data space used by all databases in the elastic pool.|This quantity represents the maximum amount of space allocated for the elastic pool that can be reclaimed by shrinking database data files.| -|**Data max size**|The maximum amount of data space that can be used by the elastic pool for all of its databases.|The space allocated for the elastic pool should not exceed the elastic pool max size. If this condition occurs, then space allocated that is unused can be reclaimed by shrinking database data files.| - -> [!NOTE] -> The error message "The elastic pool has reached its storage limit" indicates that the database objects have been allocated enough space to meet the elastic pool storage limit, but there may be unused space in the data space allocation. Consider increasing the elastic pool's storage limit, or as a short-term solution, freeing up data space using the [Reclaim unused allocated space](#reclaim-unused-allocated-space) section below. You should also be aware of the potential negative performance impact of shrinking database files, see [Index maintenance after shrink](#rebuild-indexes) section below. - -## Query an elastic pool for storage space information - -The following queries can be used to determine storage space quantities for an elastic pool. 
- -### Elastic pool data space used - -Modify the following query to return the amount of elastic pool data space used. Units of the query result are in MB. - -```sql --- Connect to master --- Elastic pool data space used in MB -SELECT TOP 1 avg_storage_percent / 100.0 * elastic_pool_storage_limit_mb AS ElasticPoolDataSpaceUsedInMB -FROM sys.elastic_pool_resource_stats -WHERE elastic_pool_name = 'ep1' -ORDER BY end_time DESC; -``` - -### Elastic pool data space allocated and unused allocated space - -Modify the following examples to return a table listing the space allocated and unused allocated space for each database in an elastic pool. The table orders databases from those databases with the greatest amount of unused allocated space to the least amount of unused allocated space. Units of the query result are in MB. - -The query results for determining the space allocated for each database in the pool can be added together to determine the total space allocated for the elastic pool. The elastic pool space allocated should not exceed the elastic pool max size. - -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported by Azure SQL Database, but all future development is for the Az.Sql module. The AzureRM module will continue to receive bug fixes until at least December 2020. The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. For more about their compatibility, see [Introducing the new Azure PowerShell Az module](/powershell/azure/new-azureps-module-az). - -The PowerShell script requires SQL Server PowerShell module – see [Download PowerShell module](/sql/powershell/download-sql-server-ps-module) to install. 
- -```powershell -$resourceGroupName = "" -$serverName = "" -$poolName = "" -$userName = "" -$password = "" - -# get list of databases in elastic pool -$databasesInPool = Get-AzSqlElasticPoolDatabase -ResourceGroupName $resourceGroupName ` - -ServerName $serverName -ElasticPoolName $poolName -$databaseStorageMetrics = @() - -# for each database in the elastic pool, get space allocated in MB and space allocated unused in MB -foreach ($database in $databasesInPool) { - $sqlCommand = "SELECT DB_NAME() as DatabaseName, ` - SUM(size/128.0) AS DatabaseDataSpaceAllocatedInMB, ` - SUM(size/128.0 - CAST(FILEPROPERTY(name, 'SpaceUsed') AS int)/128.0) AS DatabaseDataSpaceAllocatedUnusedInMB ` - FROM sys.database_files ` - GROUP BY type_desc ` - HAVING type_desc = 'ROWS'" - $serverFqdn = "tcp:" + $serverName + ".database.windows.net,1433" - $databaseStorageMetrics = $databaseStorageMetrics + - (Invoke-Sqlcmd -ServerInstance $serverFqdn -Database $database.DatabaseName ` - -Username $userName -Password $password -Query $sqlCommand) -} - -# display databases in descending order of space allocated unused -Write-Output "`n" "ElasticPoolName: $poolName" -Write-Output $databaseStorageMetrics | Sort -Property DatabaseDataSpaceAllocatedUnusedInMB -Descending | Format-Table -``` - -The following screenshot is an example of the output of the script: - -![elastic pool allocated space and unused allocated space example](./media/file-space-manage/elastic-pool-allocated-unused.png) - -### Elastic pool data max size - -Modify the following T-SQL query to return the last recorded elastic pool data max size. Units of the query result are in MB. 
- -```sql --- Connect to master --- Elastic pools max size in MB -SELECT TOP 1 elastic_pool_storage_limit_mb AS ElasticPoolMaxSizeInMB -FROM sys.elastic_pool_resource_stats -WHERE elastic_pool_name = 'ep1' -ORDER BY end_time DESC; -``` - -## Reclaim unused allocated space - -> [!IMPORTANT] -> Shrink commands impact database performance while running, and if possible should be run during periods of low usage. - -### Shrink data files - -Because of a potential impact to database performance, Azure SQL Database does not automatically shrink data files. However, customers may shrink data files via self-service at a time of their choosing. This should not be a regularly scheduled operation, but rather, a one-time event in response to a major reduction in data file used space consumption. - -> [!TIP] -> It is not recommended to shrink data files if regular application workload will cause the files to grow to the same allocated size again. - -In Azure SQL Database, to shrink files you can use either `DBCC SHRINKDATABASE` or `DBCC SHRINKFILE` commands: - -- `DBCC SHRINKDATABASE` shrinks all data and log files in a database using a single command. The command shrinks one data file at a time, which can take a long time for larger databases. It also [shrinks the log file](#shrinking-transaction-log-file), which is usually unnecessary because Azure SQL Database shrinks log files automatically as needed. -- `DBCC SHRINKFILE` command supports more advanced scenarios: - - It can target individual files as needed, rather than shrinking all files in the database. - - Each `DBCC SHRINKFILE` command can run in parallel with other `DBCC SHRINKFILE` commands to shrink multiple files at the same time and reduce the total time of shrink, at the expense of higher resource usage and a higher chance of blocking user queries, if they are executing during shrink. 
- - If the tail of the file does not contain data, it can reduce allocated file size much faster by specifying the `TRUNCATEONLY` argument. This does not require data movement within the file. -- For more information about these shrink commands, see [DBCC SHRINKDATABASE](/sql/t-sql/database-console-commands/dbcc-shrinkdatabase-transact-sql) and [DBCC SHRINKFILE](/sql/t-sql/database-console-commands/dbcc-shrinkfile-transact-sql). - -The following examples must be executed while connected to the target user database, not the `master` database. - -To use `DBCC SHRINKDATABASE` to shrink all data and log files in a given database: - -```sql --- Shrink database data space allocated. -DBCC SHRINKDATABASE (N'database_name'); -``` - -In Azure SQL Database, a database may have one or more data files, created automatically as data grows. To determine file layout of your database, including the used and allocated size of each file, query the `sys.database_files` catalog view using the following sample script: - -```sql --- Review file properties, including file_id and name values to reference in shrink commands -SELECT file_id, - name, - CAST(FILEPROPERTY(name, 'SpaceUsed') AS bigint) * 8 / 1024. AS space_used_mb, - CAST(size AS bigint) * 8 / 1024. AS space_allocated_mb, - CAST(max_size AS bigint) * 8 / 1024. AS max_file_size_mb -FROM sys.database_files -WHERE type_desc IN ('ROWS','LOG'); -``` - -You can execute a shrink against one file only via the `DBCC SHRINKFILE` command, for example: - -```sql --- Shrink database data file named 'data_0` by removing all unused at the end of the file, if any. -DBCC SHRINKFILE ('data_0', TRUNCATEONLY); -GO -``` - -Be aware of the potential negative performance impact of shrinking database files, see the [Index maintenance after shrink](#rebuild-indexes) section below. 
- -### Shrinking transaction log file - -Unlike data files, Azure SQL Database automatically shrinks transaction log file to avoid excessive space usage that can lead to out-of-space errors. It is usually not necessary for customers to shrink the transaction log file. - -In Premium and Business Critical service tiers, if the transaction log becomes large, it may significantly contribute to local storage consumption toward the [maximum local storage](resource-limits-logical-server.md#storage-space-governance) limit. If local storage consumption is close to the limit, customers may choose to shrink transaction log using the [DBCC SHRINKFILE](/sql/t-sql/database-console-commands/dbcc-shrinkfile-transact-sql) command as shown in the following example. This releases local storage as soon as the command completes, without waiting for the periodic automatic shrink operation. - -The following example should be executed while connected to the target user database, not the master database. - -```sql --- Shrink the database log file (always file_id 2), by removing all unused space at the end of the file, if any. -DBCC SHRINKFILE (2, TRUNCATEONLY); -``` - -### Auto-shrink - -As an alternative to shrinking data files manually, auto-shrink can be enabled for a database. However, auto shrink can be less effective in reclaiming file space than `DBCC SHRINKDATABASE` and `DBCC SHRINKFILE`. - -By default, auto-shrink is disabled, which is recommended for most databases. If it becomes necessary to enable auto-shrink, it is recommended to disable it once space management goals have been achieved, instead of keeping it enabled permanently. For more information, see [Considerations for AUTO_SHRINK](/troubleshoot/sql/admin/considerations-autogrow-autoshrink#considerations-for-auto_shrink). 
- -For example, auto-shrink can be helpful in the specific scenario where an elastic pool contains many databases that experience significant growth and reduction in data file space used, causing the pool to approach its maximum size limit. This is not a common scenario. - -To enable auto-shrink, execute the following command while connected to your database (not the master database). - -```sql --- Enable auto-shrink for the current database. -ALTER DATABASE CURRENT SET AUTO_SHRINK ON; -``` - -For more information about this command, see [DATABASE SET](/sql/t-sql/statements/alter-database-transact-sql-set-options) options. - -### Index maintenance after shrink - -After a shrink operation is completed against data files, indexes may become fragmented. This reduces their performance optimization effectiveness for certain workloads, such as queries using large scans. If performance degradation occurs after the shrink operation is complete, consider index maintenance to rebuild indexes. Keep in mind that index rebuilds require free space in the database, and hence may cause the allocated space to increase, counteracting the effect of shrink. - -For more information about index maintenance, see [Optimize index maintenance to improve query performance and reduce resource consumption](/sql/relational-databases/indexes/reorganize-and-rebuild-indexes). - -## Shrink large databases - -When database allocated space is in hundreds of gigabytes or higher, shrink may require a significant time to complete, often measured in hours, or days for multi-terabyte databases. There are process optimizations and best practices you can use to make this process more efficient and less impactful to application workloads. - -### Capture space usage baseline - -Before starting shrink, capture the current used and allocated space in each database file by executing the following space usage query: - -```sql -SELECT file_id, - CAST(FILEPROPERTY(name, 'SpaceUsed') AS bigint) * 8 / 1024. 
AS space_used_mb, - CAST(size AS bigint) * 8 / 1024. AS space_allocated_mb, - CAST(max_size AS bigint) * 8 / 1024. AS max_size_mb -FROM sys.database_files -WHERE type_desc = 'ROWS'; -``` - -Once shrink has completed, you can execute this query again and compare the result to the initial baseline. - -### Truncate data files - -It is recommended to first execute shrink for each data file with the `TRUNCATEONLY` parameter. This way, if there is any allocated but unused space at the end of the file, it will be removed quickly and without any data movement. The following sample command truncates data file with file_id 4: - -```sql -DBCC SHRINKFILE (4, TRUNCATEONLY); -``` - -Once this command is executed for every data file, you can rerun the space usage query to see the reduction in allocated space, if any. You can also view allocated space for the database in Azure portal. - -### Evaluate index page density - -If truncating data files did not result in a sufficient reduction in allocated space, you will need to shrink data files. However, as an optional but recommended step, you should first determine average page density for indexes in the database. For the same amount of data, shrink will complete faster if page density is high, because it will have to move fewer pages. If page density is low for some indexes, consider performing maintenance on these indexes to increase page density before shrinking data files. This will also let shrink achieve a deeper reduction in allocated storage space. - -To determine page density for all indexes in the database, use the following query. Page density is reported in the `avg_page_space_used_in_percent` column. 
- -```sql -SELECT OBJECT_SCHEMA_NAME(ips.object_id) AS schema_name, - OBJECT_NAME(ips.object_id) AS object_name, - i.name AS index_name, - i.type_desc AS index_type, - ips.avg_page_space_used_in_percent, - ips.avg_fragmentation_in_percent, - ips.page_count, - ips.alloc_unit_type_desc, - ips.ghost_record_count -FROM sys.dm_db_index_physical_stats(DB_ID(), default, default, default, 'SAMPLED') AS ips -INNER JOIN sys.indexes AS i -ON ips.object_id = i.object_id - AND - ips.index_id = i.index_id -ORDER BY page_count DESC; -``` - -If there are indexes with high page count that have page density lower than 60-70%, consider rebuilding or reorganizing these indexes before shrinking data files. - -> [!NOTE] -> For larger databases, the query to determine page density may take a long time (hours) to complete. Additionally, rebuilding or reorganizing large indexes also requires substantial time and resource usage. There is a tradeoff between spending extra time on increasing page density on one hand, and reducing shrink duration and achieving higher space savings on another. - -Following is a sample command to rebuild an index and increase its page density: - -```sql -ALTER INDEX [index_name] ON [schema_name].[table_name] REBUILD WITH (FILLFACTOR = 100, MAXDOP = 8, ONLINE = ON (WAIT_AT_LOW_PRIORITY (MAX_DURATION = 5 MINUTES, ABORT_AFTER_WAIT = NONE)), RESUMABLE = ON); -``` - -This command initiates an online and resumable index rebuild. This lets concurrent workloads continue using the table while the rebuild is in progress, and lets you resume the rebuild if it gets interrupted for any reason. However, this type of rebuild is slower than an offline rebuild, which blocks access to the table. If no other workloads need to access the table during rebuild, set the `ONLINE` and `RESUMABLE` options to `OFF` and remove the `WAIT_AT_LOW_PRIORITY` clause. 
- -If there are multiple indexes with low page density, you may be able to rebuild them in parallel on multiple database sessions to speed up the process. However, make sure that you are not approaching database resource limits by doing so, and leave sufficient resource headroom for application workloads that may be running. Monitor resource consumption (CPU, Data IO, Log IO) in Azure portal or using the [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database) view, and start additional parallel rebuilds only if resource utilization on each of these dimensions remains substantially lower than 100%. If CPU, Data IO, or Log IO utilization is at 100%, you can scale up the database to have more CPU cores and increase IO throughput. This may enable additional parallel rebuilds to complete the process faster. - -To learn more about index maintenance, see [Optimize index maintenance to improve query performance and reduce resource consumption](/sql/relational-databases/indexes/reorganize-and-rebuild-indexes). - -### Shrink multiple data files - -As noted earlier, shrink with data movement is a long-running process. If the database has multiple data files, you can speed up the process by shrinking multiple data files in parallel. You do this by opening multiple database sessions, and using `DBCC SHRINKFILE` on each session with a different `file_id` value. Similar to rebuilding indexes earlier, make sure you have sufficient resource headroom (CPU, Data IO, Log IO) before starting each new parallel shrink command. 
- -The following sample command shrinks data file with file_id 4, attempting to reduce its allocated size to 52000 MB by moving pages within the file: - -```sql -DBCC SHRINKFILE (4, 52000); -``` - -If you want to reduce allocated space for the file to the minimum possible, execute the statement without specifying the target size: - -```sql -DBCC SHRINKFILE (4); -``` - -If a workload is running concurrently with shrink, it may start using the storage space freed by shrink before shrink completes and truncates the file. In this case, shrink will not be able to reduce allocated space to the specified target. - -You can mitigate this by shrinking each file in smaller steps. This means that in the `DBCC SHRINKFILE` command, you set the target that is slightly smaller than the current allocated space for the file, as seen in the results of [baseline space usage query](#capture-space-usage-baseline). For example, if allocated space for file with file_id 4 is 200,000 MB, and you want to shrink it to 100,000 MB, you can first set the target to 170,000 MB: - -```sql -DBCC SHRINKFILE (4, 170000); -``` - -Once this command completes, it will have truncated the file and reduced its allocated size to 170,000 MB. You can then repeat this command, setting target first to 140,000 MB, then to 110,000 MB, etc., until the file is shrunk to the desired size. If the command completes but the file is not truncated, use smaller steps, for example 15,000 MB rather than 30,000 MB. 
- -To monitor shrink progress for all concurrently running shrink sessions, you can use the following query: - -```sql -SELECT command, - percent_complete, - status, - wait_resource, - session_id, - wait_type, - blocking_session_id, - cpu_time, - reads, - CAST(((DATEDIFF(s,start_time, GETDATE()))/3600) AS varchar) + ' hour(s), ' - + CAST((DATEDIFF(s,start_time, GETDATE())%3600)/60 AS varchar) + 'min, ' - + CAST((DATEDIFF(s,start_time, GETDATE())%60) AS varchar) + ' sec' AS running_time -FROM sys.dm_exec_requests AS r -LEFT JOIN sys.databases AS d -ON r.database_id = d.database_id -WHERE r.command IN ('DbccSpaceReclaim','DbccFilesCompact','DbccLOBCompact','DBCC'); -``` - -> [!NOTE] -> Shrink progress may be non-linear, and the value in the `percent_complete` column may remain virtually unchanged for long periods of time, even though shrink is still in progress. - -Once shrink has completed for all data files, rerun the [space usage query](#capture-space-usage-baseline) (or check in Azure portal) to determine the resulting reduction in allocated storage size. If is is insufficient and there is still a large difference between used space and allocated space, you can [rebuild indexes](#evaluate-index-page-density) as described earlier. This may temporarily increase allocated space further, however shrinking data files again after rebuilding indexes should result in a deeper reduction in allocated space. - -## Transient errors during shrink - -Occasionally, a shrink command may fail with various errors such as timeouts and deadlocks. In general, these errors are transient, and do not occur again if the same command is repeated. If shrink fails with an error, the progress it has made so far in moving data pages is retained, and the same shrink command can be executed again to continue shrinking the file. 
- -The following sample script shows how you can run shrink in a retry loop to automatically retry up to a configurable number of times when a timeout error or a deadlock error occurs. This retry approach is applicable to many other errors that may occur during shrink. - -```sql -DECLARE @RetryCount int = 3; -- adjust to configure desired number of retries -DECLARE @Delay char(12); - --- Retry loop -WHILE @RetryCount >= 0 -BEGIN - -BEGIN TRY - -DBCC SHRINKFILE (1); -- adjust file_id and other shrink parameters - --- Exit retry loop on successful execution -SELECT @RetryCount = -1; - -END TRY -BEGIN CATCH - -- Retry for the declared number of times without raising an error if deadlocked or timed out waiting for a lock - IF ERROR_NUMBER() IN (1205, 49516) AND @RetryCount > 0 - BEGIN - SELECT @RetryCount -= 1; - - PRINT CONCAT('Retry at ', SYSUTCDATETIME()); - - -- Wait for a random period of time between 1 and 10 seconds before retrying - SELECT @Delay = '00:00:0' + CAST(CAST(1 + RAND() * 8.999 AS decimal(5,3)) AS varchar(5)); - WAITFOR DELAY @Delay; - END - ELSE -- Raise error and exit loop - BEGIN - SELECT @RetryCount = -1; - THROW; - END -END CATCH -END; -``` - -In addition to timeouts and deadlocks, shrink may encounter errors due to certain known issues. - -The errors returned and mitigation steps are as follows: - -- **Error number: 49503**, error message: _%.*ls: Page %d:%d could not be moved because it is an off-row persistent version store page. Page holdup reason: %ls. Page holdup timestamp: %I64d._ - -This error occurs when there are long running active transactions that have generated row versions in persistent version store (PVS). The pages containing these row versions cannot be moved by shrink, hence it cannot make progress and fails with this error. - -To mitigate, you have to wait until these long running transactions have completed. 
Alternatively, you can identify and terminate these long running transactions, but this can impact your application if it does not handle transaction failures gracefully. One way to find long running transactions is by running the following query in the database where you ran the shrink command: - -```sql --- Transactions sorted by duration -SELECT st.session_id, - dt.database_transaction_begin_time, - DATEDIFF(second, dt.database_transaction_begin_time, CURRENT_TIMESTAMP) AS transaction_duration_seconds, - dt.database_transaction_log_bytes_used, - dt.database_transaction_log_bytes_reserved, - st.is_user_transaction, - st.open_transaction_count, - ib.event_type, - ib.parameters, - ib.event_info -FROM sys.dm_tran_database_transactions AS dt -INNER JOIN sys.dm_tran_session_transactions AS st -ON dt.transaction_id = st.transaction_id -OUTER APPLY sys.dm_exec_input_buffer(st.session_id, default) AS ib -WHERE dt.database_id = DB_ID() -ORDER BY transaction_duration_seconds DESC; -``` - -You can terminate a transaction by using the `KILL` command and specifying the associated `session_id` value from query result: - -```sql -KILL 4242; -- replace 4242 with the session_id value from query results -``` - -> [!CAUTION] -> Terminating a transaction may negatively impact workloads. - -Once long running transactions have been terminated or have completed, an internal background task will clean up no longer needed row versions after some time. You can monitor PVS size to gauge cleanup progress, using the following query. Run the query in the database where you ran the shrink command: - -```sql -SELECT pvss.persistent_version_store_size_kb / 1024. / 1024 AS persistent_version_store_size_gb, - pvss.online_index_version_store_size_kb / 1024. 
- / 1024 AS online_index_version_store_size_gb, - pvss.current_aborted_transaction_count, - pvss.aborted_version_cleaner_start_time, - pvss.aborted_version_cleaner_end_time, - dt.database_transaction_begin_time AS oldest_transaction_begin_time, - asdt.session_id AS active_transaction_session_id, - asdt.elapsed_time_seconds AS active_transaction_elapsed_time_seconds -FROM sys.dm_tran_persistent_version_store_stats AS pvss -LEFT JOIN sys.dm_tran_database_transactions AS dt -ON pvss.oldest_active_transaction_id = dt.transaction_id - AND - pvss.database_id = dt.database_id -LEFT JOIN sys.dm_tran_active_snapshot_database_transactions AS asdt -ON pvss.min_transaction_timestamp = asdt.transaction_sequence_num - OR - pvss.online_index_min_transaction_timestamp = asdt.transaction_sequence_num -WHERE pvss.database_id = DB_ID(); -``` - -Once PVS size reported in the `persistent_version_store_size_gb` column is substantially reduced compared to its original size, rerunning shrink should succeed. - -- **Error number: 5223**, error message: _%.*ls: Empty page %d:%d could not be deallocated._ - -This error may occur if there are ongoing index maintenance operations such as `ALTER INDEX`. Retry the shrink command after these operations are complete. - -If this error persists, the associated index might have to be rebuilt. To find the index to rebuild, execute the following query in the same database where you ran the shrink command: - -```sql -SELECT OBJECT_SCHEMA_NAME(pg.object_id) AS schema_name, - OBJECT_NAME(pg.object_id) AS object_name, - i.name AS index_name, - p.partition_number -FROM sys.dm_db_page_info(DB_ID(), <file_id>, <page_id>, default) AS pg -INNER JOIN sys.indexes AS i -ON pg.object_id = i.object_id - AND - pg.index_id = i.index_id -INNER JOIN sys.partitions AS p -ON pg.partition_id = p.partition_id; -``` - -Before executing this query, replace the `<file_id>` and `<page_id>` placeholders with the actual values from the error message you received. 
For example, if the message is _Empty page 1:62669 could not be deallocated_, then `<file_id>` is `1` and `<page_id>` is `62669`. - -Rebuild the index identified by the query, and retry the shrink command. - -- **Error number: 5201**, error message: _DBCC SHRINKDATABASE: File ID %d of database ID %d was skipped because the file does not have enough free space to reclaim._ - -This error means that the data file cannot be shrunk further. You can move on to the next data file. - -## Next steps - -- For information about database max sizes, see: - - [Azure SQL Database vCore-based purchasing model limits for a single database](resource-limits-vcore-single-databases.md) - - [Resource limits for single databases using the DTU-based purchasing model](resource-limits-dtu-single-databases.md) - - [Azure SQL Database vCore-based purchasing model limits for elastic pools](resource-limits-vcore-elastic-pools.md) - - [Resources limits for elastic pools using the DTU-based purchasing model](resource-limits-dtu-elastic-pools.md) diff --git a/articles/azure-sql/database/firewall-configure.md b/articles/azure-sql/database/firewall-configure.md deleted file mode 100644 index 046551c4fa15f..0000000000000 --- a/articles/azure-sql/database/firewall-configure.md +++ /dev/null @@ -1,278 +0,0 @@ ---- -title: IP firewall rules -description: Configure server-level IP firewall rules for a database in Azure SQL Database or Azure Synapse Analytics firewall. Manage access and configure database-level IP firewall rules for SQL Database. 
-services: sql-database -ms.service: sql-database -ms.subservice: security -titleSuffix: Azure SQL Database and Azure Synapse Analytics -ms.custom: sqldbrb=1, devx-track-azurecli, devx-track-azurepowershell -ms.topic: conceptual -author: VanMSFT -ms.author: vanto -ms.reviewer: kendralittle, mathoma -ms.date: 03/09/2022 ---- -# Azure SQL Database and Azure Synapse IP firewall rules -[!INCLUDE[appliesto-sqldb-asa](../includes/appliesto-sqldb-asa.md)] - -When you create a new server in Azure SQL Database or Azure Synapse Analytics named *mysqlserver*, for example, a server-level firewall blocks all access to the public endpoint for the server (which is accessible at *mysqlserver.database.windows.net*). For simplicity, *SQL Database* is used to refer to both SQL Database and Azure Synapse Analytics. - -> [!IMPORTANT] -> This article does *not* apply to *Azure SQL Managed Instance*. For information about network configuration, see [Connect your application to Azure SQL Managed Instance](../managed-instance/connect-application-instance.md). -> -> Azure Synapse only supports server-level IP firewall rules. It doesn't support database-level IP firewall rules. - - -## How the firewall works - -Connection attempts from the internet and Azure must pass through the firewall before they reach your server or database, as the following diagram shows. - - ![Firewall configuration diagram][1] - -### Server-level IP firewall rules - -These rules enable clients to access your entire server, that is, all the databases managed by the server. The rules are stored in the *master* database. You can have a maximum of 128 server-level IP firewall rules for a server. If you have the **Allow Azure Services and resources to access this server** setting enabled, this counts as a single firewall rule for the server. - -You can configure server-level IP firewall rules by using the Azure portal, PowerShell, or Transact-SQL statements. 
- -- To use the portal or PowerShell, you must be the subscription owner or a subscription contributor. -- To use Transact-SQL, you must connect to the *master* database as the server-level principal login or as the Azure Active Directory administrator. (A server-level IP firewall rule must first be created by a user who has Azure-level permissions.) - -> [!NOTE] -> By default, during creation of a new logical SQL server from the Azure portal, the **Allow Azure Services and resources to access this server** setting is set to **No**. - -### Database-level IP firewall rules - -Database-level IP firewall rules enable clients to access certain (secure) databases. You create the rules for each database (including the *master* database), and they're stored in the individual database. - -- You can only create and manage database-level IP firewall rules for master and user databases by using Transact-SQL statements and only after you configure the first server-level firewall. -- If you specify an IP address range in the database-level IP firewall rule that's outside the range in the server-level IP firewall rule, only those clients that have IP addresses in the database-level range can access the database. -- You can have a maximum of 128 database-level IP firewall rules for a database. For more information about configuring database-level IP firewall rules, see the example later in this article and see [sp_set_database_firewall_rule (Azure SQL Database)](/sql/relational-databases/system-stored-procedures/sp-set-database-firewall-rule-azure-sql-database). - -### Recommendations for how to set firewall rules - -We recommend that you use database-level IP firewall rules whenever possible. This practice enhances security and makes your database more portable. Use server-level IP firewall rules for administrators. Also use them when you have many databases that have the same access requirements, and you don't want to configure each database individually. 
- -> [!NOTE] -> For information about portable databases in the context of business continuity, see [Authentication requirements for disaster recovery](active-geo-replication-security-configure.md). - -## Server-level versus database-level IP firewall rules - -*Should users of one database be fully isolated from another database?* - -If *yes*, use database-level IP firewall rules to grant access. This method avoids using server-level IP firewall rules, which permit access through the firewall to all databases. That would reduce the depth of your defenses. - -*Do users at the IP addresses need access to all databases?* - -If *yes*, use server-level IP firewall rules to reduce the number of times that you have to configure IP firewall rules. - -*Does the person or team who configures the IP firewall rules only have access through the Azure portal, PowerShell, or the REST API?* - -If so, you must use server-level IP firewall rules. Database-level IP firewall rules can only be configured through Transact-SQL. - -*Is the person or team who configures the IP firewall rules prohibited from having high-level permission at the database level?* - -If so, use server-level IP firewall rules. You need at least *CONTROL DATABASE* permission at the database level to configure database-level IP firewall rules through Transact-SQL. - -*Does the person or team who configures or audits the IP firewall rules centrally manage IP firewall rules for many (perhaps hundreds) of databases?* - -In this scenario, best practices are determined by your needs and environment. Server-level IP firewall rules might be easier to configure, but scripting can configure rules at the database-level. And even if you use server-level IP firewall rules, you might need to audit database-level IP firewall rules to see if users with *CONTROL* permission on the database create database-level IP firewall rules. - -*Can I use a mix of server-level and database-level IP firewall rules?* - -Yes. 
Some users, such as administrators, might need server-level IP firewall rules. Other users, such as users of a database application, might need database-level IP firewall rules. - -### Connections from the internet - -When a computer tries to connect to your server from the internet, the firewall first checks the originating IP address of the request against the database-level IP firewall rules for the database that the connection requests. - -- If the address is within a range that's specified in the database-level IP firewall rules, the connection is granted to the database that contains the rule. -- If the address isn't within a range in the database-level IP firewall rules, the firewall checks the server-level IP firewall rules. If the address is within a range that's in the server-level IP firewall rules, the connection is granted. Server-level IP firewall rules apply to all databases managed by the server. -- If the address isn't within a range that's in any of the database-level or server-level IP firewall rules, the connection request fails. - -> [!NOTE] -> To access Azure SQL Database from your local computer, ensure that the firewall on your network and local computer allow outgoing communication on TCP port 1433. - -### Connections from inside Azure - -To allow applications hosted inside Azure to connect to your SQL server, Azure connections must be enabled. To enable Azure connections, there must be a firewall rule with starting and ending IP addresses set to 0.0.0.0. This recommended rule is only applicable to Azure SQL Database. - -When an application from Azure tries to connect to the server, the firewall checks that Azure connections are allowed by verifying this firewall rule exists. This can be turned on directly from the Azure portal blade by switching the **Allow Azure Services and resources to access this server** to **ON** in the **Firewalls and virtual networks** settings. 
Switching the setting to ON creates an inbound firewall rule for IP 0.0.0.0 - 0.0.0.0 named **AllowAllWindowsAzureIps**. The rule can be viewed in your master database [sys.firewall_rules](/sql/relational-databases/system-catalog-views/sys-firewall-rules-azure-sql-database) view. Use PowerShell or the Azure CLI to create a firewall rule with start and end IP addresses set to 0.0.0.0 if you’re not using the portal. - -> [!IMPORTANT] -> This option configures the firewall to allow all connections from Azure, including connections from the subscriptions of other customers. If you select this option, make sure that your login and user permissions limit access to authorized users only. - -## Permissions - -To be able to create and manage IP firewall rules for the Azure SQL Server, you will need to either be: - -- in the [SQL Server Contributor](../../role-based-access-control/built-in-roles.md#sql-server-contributor) role -- in the [SQL Security Manager](../../role-based-access-control/built-in-roles.md#sql-security-manager) role -- the owner of the resource that contains the Azure SQL Server - -## Create and manage IP firewall rules - -You create the first server-level firewall setting by using the [Azure portal](https://portal.azure.com/) or programmatically by using [Azure PowerShell](/powershell/module/az.sql), [Azure CLI](/cli/azure/sql/server/firewall-rule), or an Azure [REST API](/rest/api/sql/firewallrules/createorupdate). You create and manage additional server-level IP firewall rules by using these methods or Transact-SQL. - -> [!IMPORTANT] -> Database-level IP firewall rules can only be created and managed by using Transact-SQL. - -To improve performance, server-level IP firewall rules are temporarily cached at the database level. To refresh the cache, see [DBCC FLUSHAUTHCACHE](/sql/t-sql/database-console-commands/dbcc-flushauthcache-transact-sql). 
- -> [!TIP] -> You can use [Database Auditing](/azure/azure-sql/database/auditing-overview) to audit server-level and database-level firewall changes. - -### Use the Azure portal to manage server-level IP firewall rules - -To set a server-level IP firewall rule in the Azure portal, go to the overview page for your database or your server. - -> [!TIP] -> For a tutorial, see [Create a database using the Azure portal](single-database-create-quickstart.md). - -#### From the database overview page - -1. To set a server-level IP firewall rule from the database overview page, select **Set server firewall** on the toolbar, as the following image shows. - - ![Server IP firewall rule](./media/firewall-configure/sql-database-server-set-firewall-rule.png) - - The **Firewall settings** page for the server opens. - -2. Select **Add client IP** on the toolbar to add the IP address of the computer that you're using, and then select **Save**. A server-level IP firewall rule is created for your current IP address. - - ![Set server-level IP firewall rule](./media/firewall-configure/sql-database-server-firewall-settings.png) - -#### From the server overview page - -The overview page for your server opens. It shows the fully qualified server name (such as *mynewserver20170403.database.windows.net*) and provides options for further configuration. - -1. To set a server-level rule from this page, select **Firewall** from the **Settings** menu on the left side. - -2. Select **Add client IP** on the toolbar to add the IP address of the computer that you're using, and then select **Save**. A server-level IP firewall rule is created for your current IP address. 
- -### Use Transact-SQL to manage IP firewall rules - -| Catalog view or stored procedure | Level | Description | -| --- | --- | --- | -| [sys.firewall_rules](/sql/relational-databases/system-catalog-views/sys-firewall-rules-azure-sql-database) |Server |Displays the current server-level IP firewall rules | -| [sp_set_firewall_rule](/sql/relational-databases/system-stored-procedures/sp-set-firewall-rule-azure-sql-database) |Server |Creates or updates server-level IP firewall rules | -| [sp_delete_firewall_rule](/sql/relational-databases/system-stored-procedures/sp-delete-firewall-rule-azure-sql-database) |Server |Removes server-level IP firewall rules | -| [sys.database_firewall_rules](/sql/relational-databases/system-catalog-views/sys-database-firewall-rules-azure-sql-database) |Database |Displays the current database-level IP firewall rules | -| [sp_set_database_firewall_rule](/sql/relational-databases/system-stored-procedures/sp-set-database-firewall-rule-azure-sql-database) |Database |Creates or updates the database-level IP firewall rules | -| [sp_delete_database_firewall_rule](/sql/relational-databases/system-stored-procedures/sp-delete-database-firewall-rule-azure-sql-database) |Databases |Removes database-level IP firewall rules | - -The following example reviews the existing rules, enables a range of IP addresses on the server *Contoso*, and deletes an IP firewall rule: - -```sql -SELECT * FROM sys.firewall_rules ORDER BY name; -``` - -Next, add a server-level IP firewall rule. - -```sql -EXECUTE sp_set_firewall_rule @name = N'ContosoFirewallRule', - @start_ip_address = '192.168.1.1', @end_ip_address = '192.168.1.10' -``` - -To delete a server-level IP firewall rule, execute the *sp_delete_firewall_rule* stored procedure. 
The following example deletes the rule *ContosoFirewallRule*: - -```sql -EXECUTE sp_delete_firewall_rule @name = N'ContosoFirewallRule' -``` - -### Use PowerShell to manage server-level IP firewall rules - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported by Azure SQL Database, but all development is now for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az and AzureRm modules are substantially identical. - -| Cmdlet | Level | Description | -| --- | --- | --- | -| [Get-AzSqlServerFirewallRule](/powershell/module/az.sql/get-azsqlserverfirewallrule) |Server |Returns the current server-level firewall rules | -| [New-AzSqlServerFirewallRule](/powershell/module/az.sql/new-azsqlserverfirewallrule) |Server |Creates a new server-level firewall rule | -| [Set-AzSqlServerFirewallRule](/powershell/module/az.sql/set-azsqlserverfirewallrule) |Server |Updates the properties of an existing server-level firewall rule | -| [Remove-AzSqlServerFirewallRule](/powershell/module/az.sql/remove-azsqlserverfirewallrule) |Server |Removes server-level firewall rules | - -The following example uses PowerShell to set a server-level IP firewall rule: - -```powershell -New-AzSqlServerFirewallRule -ResourceGroupName "myResourceGroup" ` - -ServerName $servername ` - -FirewallRuleName "ContosoIPRange" -StartIpAddress "192.168.1.0" -EndIpAddress "192.168.1.255" -``` - -> [!TIP] -> For $servername specify the server name and not the fully qualified DNS name e.g. specify **mysqldbserver** instead of **mysqldbserver.database.windows.net** -> -> For PowerShell examples in the context of a quickstart, see [Create DB - PowerShell](powershell-script-content-guide.md) and [Create a single database and configure a server-level IP firewall rule using PowerShell](scripts/create-and-configure-database-powershell.md). 
- -### Use CLI to manage server-level IP firewall rules - -| Cmdlet | Level | Description | -| --- | --- | --- | -|[az sql server firewall-rule create](/cli/azure/sql/server/firewall-rule#az-sql-server-firewall-rule-create)|Server|Creates a server IP firewall rule| -|[az sql server firewall-rule list](/cli/azure/sql/server/firewall-rule#az-sql-server-firewall-rule-list)|Server|Lists the IP firewall rules on a server| -|[az sql server firewall-rule show](/cli/azure/sql/server/firewall-rule#az-sql-server-firewall-rule-show)|Server|Shows the detail of an IP firewall rule| -|[az sql server firewall-rule update](/cli/azure/sql/server/firewall-rule#az-sql-server-firewall-rule-update)|Server|Updates an IP firewall rule| -|[az sql server firewall-rule delete](/cli/azure/sql/server/firewall-rule#az-sql-server-firewall-rule-delete)|Server|Deletes an IP firewall rule| - -The following example uses CLI to set a server-level IP firewall rule: - -```azurecli-interactive -az sql server firewall-rule create --resource-group myResourceGroup --server $servername \ --n ContosoIPRange --start-ip-address 192.168.1.0 --end-ip-address 192.168.1.255 -``` - -> [!TIP] -> For $servername specify the server name and not the fully qualified DNS name e.g. specify **mysqldbserver** instead of **mysqldbserver.database.windows.net** -> -> For a CLI example in the context of a quickstart, see [Create DB - Azure CLI](az-cli-script-samples-content-guide.md) and [Create a single database and configure a server-level IP firewall rule using the Azure CLI](scripts/create-and-configure-database-cli.md). 
- -### Use a REST API to manage server-level IP firewall rules - -| API | Level | Description | -| --- | --- | --- | -| [List firewall rules](/rest/api/sql/firewallrules/listbyserver) |Server |Displays the current server-level IP firewall rules | -| [Create or update firewall rules](/rest/api/sql/firewallrules/createorupdate) |Server |Creates or updates server-level IP firewall rules | -| [Delete firewall rules](/rest/api/sql/firewallrules/delete) |Server |Removes server-level IP firewall rules | -| [Get firewall rules](/rest/api/sql/firewallrules/get) | Server | Gets server-level IP firewall rules | - -## Troubleshoot the database firewall - -Consider the following points when access to Azure SQL Database doesn't behave as you expect. - -- **Local firewall configuration:** - - Before your computer can access Azure SQL Database, you may need to create a firewall exception on your computer for TCP port 1433. To make connections inside the Azure cloud boundary, you may have to open additional ports. For more information, see the "SQL Database: Outside vs inside" section of [Ports beyond 1433 for ADO.NET 4.5 and Azure SQL Database](adonet-v12-develop-direct-route-ports.md). - -- **Network address translation:** - - Because of network address translation (NAT), the IP address that's used by your computer to connect to Azure SQL Database may be different than the IP address in your computer's IP configuration settings. To view the IP address that your computer is using to connect to Azure: - 1. Sign in to the portal. - 1. Go to the **Configure** tab on the server that hosts your database. - 1. The **Current Client IP Address** is displayed in the **Allowed IP Addresses** section. Select **Add** for **Allowed IP Addresses** to allow this computer to access the server. - -- **Changes to the allow list haven't taken effect yet:** - - There may be up to a five-minute delay for changes to the Azure SQL Database firewall configuration to take effect. 
- -- **The login isn't authorized, or an incorrect password was used:** - - If a login doesn't have permissions on the server or the password is incorrect, the connection to the server is denied. Creating a firewall setting only gives clients an *opportunity* to try to connect to your server. The client must still provide the necessary security credentials. For more information about preparing logins, see [Controlling and granting database access](logins-create-manage.md). - -- **Dynamic IP address:** - - If you have an internet connection that uses dynamic IP addressing and you have trouble getting through the firewall, try one of the following solutions: - - - Ask your internet service provider for the IP address range that's assigned to your client computers that access the server. Add that IP address range as an IP firewall rule. - - Get static IP addressing instead for your client computers. Add the IP addresses as IP firewall rules. - -## Next steps - -- Confirm that your corporate network environment allows inbound communication from the compute IP address ranges (including SQL ranges) that are used by the Azure datacenters. You might have to add those IP addresses to the allow list. See [Microsoft Azure datacenter IP ranges](https://www.microsoft.com/download/details.aspx?id=41653). -- See our quickstart about [creating a single database in Azure SQL Database](single-database-create-quickstart.md). -- For help with connecting to a database in Azure SQL Database from open-source or third-party applications, see [Client quickstart code samples to Azure SQL Database](connect-query-content-reference-guide.md#libraries). -- For information about additional ports that you may need to open, see the "SQL Database: Outside vs inside" section of [Ports beyond 1433 for ADO.NET 4.5 and SQL Database](adonet-v12-develop-direct-route-ports.md) -- For an overview of Azure SQL Database security, see [Securing your database](security-overview.md). 
- - -[1]: ./media/firewall-configure/sqldb-firewall-1.png diff --git a/articles/azure-sql/database/firewall-create-server-level-portal-quickstart.md b/articles/azure-sql/database/firewall-create-server-level-portal-quickstart.md deleted file mode 100644 index fd6b42d43830a..0000000000000 --- a/articles/azure-sql/database/firewall-create-server-level-portal-quickstart.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: Create a server-level firewall rule -description: Create a server-level firewall rule -services: sql-database -ms.service: sql-database -ms.subservice: security -ms.custom: sqldbrb=1, mode-ui -ms.devlang: -ms.topic: quickstart -author: rohitnayakmsft -ms.author: rohitna -ms.reviewer: kendralittle, mathoma, vanto -ms.date: 02/11/2019 ---- -# Quickstart: Create a server-level firewall rule using the Azure portal -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This quickstart walks through how to create a [server-level firewall rule](firewall-configure.md) in Azure SQL Database using the Azure portal to enable you to connect to [logical SQL servers](logical-servers.md), single databases, and elastic pools and their databases. A firewall rule is required to connect from other Azure resources and from on-premises resources. Server-level firewall rules do not apply to Azure SQL Managed Instance. - -## Prerequisites - -This quickstart uses the resources created in [Create a single database using the Azure portal](single-database-create-quickstart.md) as its starting point. - -## Sign in to the Azure portal - -Sign in to the [Azure portal](https://portal.azure.com/). - -## Create a server-level IP firewall rule - - SQL Database creates a firewall at the server level for single and pooled databases. This firewall prevents client applications from connecting to the server or any of its databases unless you create an IP firewall rule to open the firewall. 
For a connection from an IP address outside Azure, create a firewall rule for a specific IP address or range of addresses that you want to be able to connect from. For more information about server-level and database-level IP firewall rules, see [Server-level and database-level IP firewall rules](firewall-configure.md). - -> [!NOTE] -> Azure SQL Database communicates over port 1433. If you're trying to connect from within a corporate network, outbound traffic over port 1433 might not be allowed by your network's firewall. If so, you can't connect to your server unless your IT department opens port 1433. -> [!IMPORTANT] -> A firewall rule of 0.0.0.0 enables all Azure services to pass through the server-level firewall rule and attempt to connect to a database through the server. - -Follow these steps to create a server-level IP firewall rule for your client's IP address and enable external connectivity through the Azure SQL Database firewall for your IP address only. - -1. After the [database](#prerequisites) deployment completes, select **SQL databases** from the left-hand menu and then choose **mySampleDatabase** on the **SQL databases** page. The overview page for your database opens, showing you the fully qualified server name (such as **mynewserver-20170824.database.windows.net**) and provides options for further configuration. - -2. Copy this fully qualified server name to use when connecting to your server and its databases in other quickstarts. - - ![server name](./media/firewall-create-server-level-portal-quickstart/server-name.png) - -3. Select **Set server firewall** on the toolbar. The **Firewall settings** page for the server opens. - - ![server-level IP firewall rule](./media/firewall-create-server-level-portal-quickstart/server-firewall-rule.png) - -4. Choose **Add client IP** on the toolbar to add your current IP address to a new server-level IP firewall rule. 
A server-level IP firewall rule can open port 1433 for a single IP address or a range of IP addresses. - - > [!IMPORTANT] - > By default, access through the Azure SQL Database firewall is disabled for all Azure services. Choose **ON** on this page if you want to enable access for all Azure services. - > - -5. Select **Save**. A server-level IP firewall rule is created for your current IP address opening port 1433 on the server. - -6. Close the **Firewall settings** page. - -Using SQL Server Management Studio or another tool of your choice, you can now connect to the server and its databases from this IP address using the server admin account created previously. - -## Clean up resources - -Save these resources if you want to go to [Next steps](#next-steps) and learn how to connect and query your database using a number of different methods. If, however, you want to delete the resources that you created in this quickstart, use the following steps. - -1. From the left-hand menu in the Azure portal, select **Resource groups** and then select **myResourceGroup**. -2. On your resource group page, select **Delete**, type **myResourceGroup** in the text box, and then select **Delete**. 
- -## Next steps - -- Now that you have a database, you can [connect and query](connect-query-content-reference-guide.md) using one of your favorite tools or languages, including - - [Connect and query using SQL Server Management Studio](connect-query-ssms.md) - - [Connect and query using Azure Data Studio](/sql/azure-data-studio/quickstart-sql-database?toc=/azure/sql-database/toc.json) -- To learn how to design your first database, create tables, and insert data, see one of these tutorials: - - [Design your first single database in Azure SQL Database using SSMS](design-first-database-tutorial.md) - - [Design a single database in Azure SQL Database and connect with C# and ADO.NET](design-first-database-csharp-tutorial.md) diff --git a/articles/azure-sql/database/free-sql-db-free-account-how-to-deploy.md b/articles/azure-sql/database/free-sql-db-free-account-how-to-deploy.md deleted file mode 100644 index 7bf4ecbcd99b3..0000000000000 --- a/articles/azure-sql/database/free-sql-db-free-account-how-to-deploy.md +++ /dev/null @@ -1,167 +0,0 @@ ---- -title: Free SQL Database with Azure free account -description: Guidance on how to deploy an Azure SQL Database for free using an Azure free account. -author: rajeshsetlem -ms.author: rsetlem -ms.service: sql-database -ms.subservice: service-overview -ms.topic: how-to -ms.date: 02/25/2022 -ms.custom: template-how-to ---- - - -# Try Azure SQL Database free with Azure free account - -Azure SQL Database is an intelligent, scalable, relational database service built for the cloud. SQL Database is a fully managed platform as a service (PaaS) database engine that handles most database management functions such as upgrading, patching, backups, and monitoring without user involvement. 
- -Using an Azure free account, you can try Azure SQL Database for **free for 12 months** with the following **monthly limit**: -- **1 S0 database with 10 database transaction units and 250 GB storage** - -This article shows you how to create and use an Azure SQL Database for free using an [Azure free account](https://azure.microsoft.com/free/). - - -## Prerequisites - -To try Azure SQL Database for free, you need: - -- An Azure free account. If you don't have one, [create a free account](https://azure.microsoft.com/free/) before you begin. - - -## Create a database - -This article uses the Azure portal to create a SQL Database with public access. Alternatively, you can create a SQL Database using [PowerShell, the Azure CLI](./single-database-create-quickstart.md) or an [ARM template](./single-database-create-arm-template-quickstart.md). - -To create your database, follow these steps: - -1. Sign in to the [Azure portal](https://portal.azure.com/) with your Azure free account. -1. Search for and select **SQL databases**: - - :::image type="content" source="./media/free-sql-db-free-account-how-to-deploy/search-sql-database.png" alt-text="Screenshot that shows how to search and select SQL database."::: - - Alternatively, you can search for and navigate to **Free Services**, and then select the **Azure SQL Database** tile from the list: - - :::image type="content" source="media/free-sql-db-free-account-how-to-deploy/free-services-sql-database.png" alt-text="Screenshot that shows a list of all free services on the Azure portal."::: - -1. Select **Create**. -1. On the **Basics** tab of the **Create SQL Database** form, under **Project details**, select the free trial Azure **Subscription**. -1. For **Resource group**, select **Create new**, enter *myResourceGroup*, and select **OK**. -1. For **Database name**, enter *mySampleDatabase*. -1. 
For **Server**, select **Create new**, and fill out the **New server** form with the following values: - - **Server name**: Enter *mysqlserver*, and add some characters for uniqueness. We can't provide an exact server name to use because server names must be globally unique for all servers in Azure, not just unique within a subscription. So enter something like mysqlserver12345, and the portal lets you know if it's available or not. - - **Server admin login**: Enter *azureuser*. - - **Password**: Enter a password that meets complexity requirements, and enter it again in the **Confirm password** field. - - **Location**: Select a location from the dropdown list. - - Select **OK**. - -1. Leave **Want to use SQL elastic pool** set to **No**. -1. Under **Compute + storage**, select **Configure database**. -1. For the free trial, under **Service Tier** select **Standard (For workloads with typical performance requirements)**. Set **DTUs** to **10** and **Data max size (GB)** to **250**, and then select **Apply**. - - :::image type="content" source="media/free-sql-db-free-account-how-to-deploy/configure-database.png" alt-text="Screenshot that shows selecting database service tier."::: - -1. Leave **Backup storage redundancy** set to **Geo-redundant backup storage** -1. Select **Next: Networking** at the bottom of the page. - - :::image type="content" source="./media/free-sql-db-free-account-how-to-deploy/create-database-basics-tab.png" alt-text="New SQL database - Basic tab"::: - -1. On the **Networking** tab, for **Connectivity method**, select **Public endpoint**. -1. For **Firewall rules**, set **Allow Azure services and resources to access this server** set to **Yes** and set **Add current client IP address** to **Yes**. -1. Leave **Connection policy** set to **Default**. -1. For **Encrypted Connections**, leave **Minimum TLS version** set to **TLS 1.2**. -1. Select **Next: Security** at the bottom of the page. 
- - :::image type="content" source="./media/free-sql-db-free-account-how-to-deploy/create-database-networking-tab.png" alt-text="Networking tab"::: - -1. Leave the values unchanged on **Security** tab. - - - :::image type="content" source="./media/free-sql-db-free-account-how-to-deploy/create-database-security-tab.png" alt-text="Security tab"::: - -1. Select **Next: Additional settings** at the bottom of the page. -1. On the **Additional settings** tab, in the **Data source** section, for **Use existing data**, select **Sample**. This creates an AdventureWorksLT sample database so there are some tables and data to query and experiment with, as opposed to an empty blank database. -1. Select **Review + create** at the bottom of the page. - - :::image type="content" source="./media/free-sql-db-free-account-how-to-deploy/create-database-additional-settings-tab.png" alt-text="Additional settings"::: - -1. On the **Review + create** page, after reviewing, select **Create**. - - > [!IMPORTANT] - > While creating the SQL Database from your Azure free account, you will still see an **Estimated cost per month** in the **Compute + Storage : Cost Summary** blade and **Review + Create** tab. But, as long as you are using your Azure free account, and your free service usage is within monthly limits, you won't be charged for the service. To view usage information, review [**Monitor and track free services usage**](#monitor-and-track-service-usage) later in this article. - -## Query the database - -Once your database is created, you can use the **Query editor (preview)** in the Azure portal to connect to the database and query data. - -1. In the portal, search for and select **SQL databases**, and then select your database from the list. -1. On the page for your database, select **Query editor (preview)** in the navigation menu. -1. Enter your server admin login information, and select **OK**. 
- - :::image type="content" source="./media/single-database-create-quickstart/query-editor-login.png" alt-text="Sign in to Query editor"::: - -1. Enter the following query in the **Query editor** pane. - - ```sql - SELECT TOP 20 pc.Name as CategoryName, p.name as ProductName - FROM SalesLT.ProductCategory pc - JOIN SalesLT.Product p - ON pc.productcategoryid = p.productcategoryid; - ``` - -1. Select **Run**, and then review the query results in the **Results** pane. - - :::image type="content" source="./media/single-database-create-quickstart/query-editor-results.png" alt-text="Query editor results"::: - -1. Close the **Query editor** page, and select **OK** when prompted to discard your unsaved edits. - -## Monitor and track service usage - -You are not charged for the Azure SQL Database included with your Azure free account unless you exceed the free service limit. To remain within the limit, use the Azure portal to track and monitor your free services usage. - - -To track usage, follow these steps: - -1. In the Azure portal, search for **Subscriptions** and select the free trial subscription. - -1. On the **Overview** page, scroll down to see the tile **Top free services by usage**, and then select **View all free services**. - - :::image type="content" source="media/free-sql-db-free-account-how-to-deploy/free-services-usage-overview.png" alt-text="Screenshot that shows the Free Trial subscription overview page and highlights View all free services."::: - -1. Locate the meters related to **Azure SQL Database** to track usage. - - :::image type="content" source="media/free-sql-db-free-account-how-to-deploy/free-services-tracking.png" alt-text="Screenshot that shows the View and track usage information blade on Azure portal for all free services."::: - -The following table describes the values on the track usage page: - -| **Value**| **Description**| -| ---- | ---------- | -|**Meter** | Identifies the unit of measure for the service being consumed. 
For example, the meter for Azure SQL Database is *SQL Database, Single Standard, S0 DTUs*, which tracks the number of S0 databases used per day, and has a monthly limit of 1. | -| **Usage/limit** | The usage of the meter for the current month, and the limit for the meter. -| **Status**| The current status of your usage of the service defined by the meter. The possible values for status are:
    **Not in use**: You haven't used the meter or the usage for the meter hasn't reached the billing system.
    **Exceeded on \<Date\>**: You've exceeded the limit for the meter on \<Date\>.
    **Unlikely to Exceed**: You're unlikely to exceed the limit for the meter.
    **Exceeds on \<Date\>**: You're likely to exceed the limit for the meter on \<Date\>. | - - ->[!IMPORTANT] -> - With an Azure free account, you also get $200 in credit to use in 30 days. During this time, any usage of the service beyond the free monthly amount is deducted from this credit. -> - At the end of your first 30 days or after you spend your $200 credit (whichever comes first), you'll only pay for what you use beyond the free monthly amount of services. To keep getting free services after 30 days, move to pay-as-you-go pricing. If you don't move to pay as you go, you can't purchase Azure services beyond your $200 credit and eventually your account and services will be disabled. -> - For more information, see [**Azure free account FAQ**](https://azure.microsoft.com/free/free-account-faq/). - -## Clean up resources - -When you're finished using these resources, you can delete the resource group you created, which will also delete the server and single database within it. - -To delete **myResourceGroup** and all its resources using the Azure portal: - -1. In the portal, search for and select **Resource groups**, and then select **myResourceGroup** from the list. -1. On the resource group page, select **Delete resource group**. -1. Under **Type the resource group name**, enter *myResourceGroup*, and then select **Delete**. 
- - -## Next steps - -[Connect and query](connect-query-content-reference-guide.md) your database using different tools and languages: -> [!div class="nextstepaction"] -> [Connect and query using SQL Server Management Studio](connect-query-ssms.md) -> -> [Connect and query using Azure Data Studio](/sql/azure-data-studio/quickstart-sql-database?toc=/azure/sql-database/toc.json) - diff --git a/articles/azure-sql/database/gateway-migration.md b/articles/azure-sql/database/gateway-migration.md deleted file mode 100644 index 0f74b4a8e04ae..0000000000000 --- a/articles/azure-sql/database/gateway-migration.md +++ /dev/null @@ -1,211 +0,0 @@ ---- -title: Gateway traffic migration notice -description: Article provides notice to users about the migration of Azure SQL Database gateway IP addresses -services: sql-database -ms.service: sql-db-mi -ms.subservice: service-overview -ms.custom: sqldbrb=1  -ms.topic: conceptual -author: rohitnayakmsft -ms.author: rohitna -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 04/13/2022 - ---- -# Azure SQL Database traffic migration to newer Gateways -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Microsoft periodically refreshes hardware to optimize the customer experience. During these refreshes, Azure adds gateways built on newer hardware, migrates traffic to them, and eventually decommissions gateways built on older hardware in some regions. - - -To avoid service disruptions during refreshes, allow the communication with SQL Gateway IP subnet ranges for the region. Review [SQL Gateway IP subnet ranges](connectivity-architecture.md#gateway-ip-addresses) and include the ranges for your region. - - -Customers can [use the Azure portal to set up activity log alerts](../../service-health/alerts-activity-log-service-notifications-portal.md). 
- - -## Status updates - -# [In progress](#tab/in-progress-ip) -## August 2021 -New SQL Gateways are being added to the following regions: - -- Norway East: 51.120.104.32, 51.120.208.32 -- Japan East: 40.79.184.32 -- Central India: 40.80.48.32, 20.192.96.32 - -These SQL Gateways shall start accepting customer traffic on 2 August 2021. - -## June 2021 -New SQL Gateways are being added to the following regions: - -- UK West: 51.140.208.96, 51.140.208.97 -- Korea Central: 20.44.24.32, 20.194.64.33 -- Japan East: 13.78.104.32 - -These SQL Gateways shall start accepting customer traffic on 1 June 2021. - -# [Completed](#tab/completed-ip) -The following gateway migrations are complete: - -## May 2021 -New SQL Gateways are being added to the following regions: -- UK South: 51.140.144.36, 51.105.72.32 -- West Central US: 13.71.193.32, 13.71.193.33 - -These SQL Gateways shall start accepting customer traffic on 17 May 2021. - -## April 2021 -New SQL Gateways are being added to the following regions: -- East US 2: 40.70.144.193 - -This SQL Gateway shall start accepting customer traffic on 30 April 2021. - -New SQL Gateways are being added to the following regions: -- Norway East: 51.120.96.33 -- South East Asia: 13.67.16.193 -- South Africa North: 102.133.152.32 -- Korea South: 52.231.151.96 -- North Central US: 52.162.105.9 -- Australia South East: 13.77.49.32 - -These SQL Gateways shall start accepting customer traffic on 5 April 2021. - -## March 2021 -The following SQL Gateways in multiple regions are in the process of being deactivated: -- Brazil South: 104.41.11.5 -- East Asia: 191.234.2.139 -- East US: 191.238.6.43 -- Japan East: 191.237.240.43 -- Japan West: 191.238.68.11 -- North Europe: 191.235.193.75 -- South Central US: 23.98.162.75 -- Southeast Asia: 23.100.117.95 -- West Europe: 191.237.232.75 -- West US: 23.99.34.75 - -No customer impact is anticipated since these Gateways (running on older hardware) are not routing any customer traffic. 
The IP addresses for these Gateways shall be deactivated on 15th March 2021. - -## February 2021 -New SQL Gateways are being added to the following regions: - -- Central US: 13.89.169.20 - -These SQL Gateways shall start accepting customer traffic on 28 February 2021. - -## January 2021 -New SQL Gateways are being added to the following regions: - -- Australia Central: 20.36.104.6 , 20.36.104.7 -- Australia Central 2: 20.36.112.6 -- Brazil South: 191.234.144.16 ,191.234.152.3 -- Canada East: 40.69.105.9 ,40.69.105.10 -- Central India: 104.211.86.30 , 104.211.86.31 -- East Asia: 13.75.32.14 -- France Central: 40.79.137.8, 40.79.145.12 -- France South: 40.79.177.10 ,40.79.177.12 -- Korea Central: 52.231.17.22 ,52.231.17.23 -- West India: 104.211.144.4 - -These SQL Gateways shall start accepting customer traffic on 31 January 2021. - - - -### October 2020 - -New SQL Gateways are being added to the following regions: - -- Germany West Central: 51.116.240.0, 51.116.248.0 - -These SQL Gateways shall start accepting customer traffic on 12 October 2020. - -### September 2020 -New SQL Gateways are being added to the following regions. These SQL Gateways shall start accepting customer traffic on **15 September 2020**: - -- Australia Southeast: 13.77.48.10 -- Canada East: 40.86.226.166, 52.242.30.154 -- UK South: 51.140.184.11, 51.105.64.0 - -Existing SQL Gateways will start accepting traffic in the following regions. 
These SQL Gateways shall start accepting customer traffic on **15 September 2020** : - -- Australia Southeast: 191.239.192.109 and 13.73.109.251 -- Central US: 13.67.215.62, 52.182.137.15, 23.99.160.139, 104.208.16.96, and 104.208.21.1 -- East Asia: 191.234.2.139, 52.175.33.150, and 13.75.32.4 -- East US: 40.121.158.30, 40.79.153.12, 191.238.6.43, and 40.78.225.32 -- East US 2: 40.79.84.180, 52.177.185.181, 52.167.104.0, 191.239.224.107, and 104.208.150.3 -- France Central: 40.79.137.0 and 40.79.129.1 -- Japan West: 104.214.148.156, 40.74.100.192, 191.238.68.11, and 40.74.97.10 -- North Central US: 23.96.178.199, 23.98.55.75, and 52.162.104.33 -- Southeast Asia: 104.43.15.0, 23.100.117.95, and 40.78.232.3 -- West US: 104.42.238.205, 23.99.34.75, and 13.86.216.196 - -New SQL Gateways are being added to the following regions. These SQL Gateways shall start accepting customer traffic on **10 September 2020**: - -- West Central US: 13.78.248.43 -- South Africa North: 102.133.120.2 - -New SQL Gateways are being added to the following regions. These SQL Gateways shall start accepting customer traffic on **1 September 2020**: - -- North Europe: 13.74.104.113 -- West US2: 40.78.248.10 -- West Europe: 52.236.184.163 -- South Central US: 20.45.121.1, 20.49.88.1 - -Existing SQL Gateways will start accepting traffic in the following regions. These SQL Gateways shall start accepting customer traffic on **1 September 2020**: -- Japan East: 40.79.184.8, 40.79.192.5 - - -### August 2020 - -New SQL Gateways are being added to the following regions: - -- Australia East: 13.70.112.9 -- Canada Central: 52.246.152.0, 20.38.144.1 -- West US 2: 40.78.240.8 - -These SQL Gateways shall start accepting customer traffic on 10 August 2020. 
- -### October 2019 -- Brazil South -- West US -- West Europe -- East US -- Central US -- South East Asia -- South Central US -- North Europe -- North Central US -- Japan West -- Japan East -- East US 2 -- East Asia - ---- - -## Impact of this change - -Traffic migration may change the public IP address that DNS resolves for your database in Azure SQL Database. -You may be impacted if you: - -- Hard coded the IP address for any particular gateway in your on-premises firewall -- Have any subnets using Microsoft.SQL as a Service Endpoint but cannot communicate with the gateway IP addresses -- Use the [zone redundant configuration for general purpose tier](high-availability-sla.md#general-purpose-service-tier-zone-redundant-availability) -- Use the [zone redundant configuration for premium & business critical tiers](high-availability-sla.md#premium-and-business-critical-service-tier-zone-redundant-availability) - -You will not be impacted if you have: - -- Redirection as the connection policy -- Connections to SQL Database from inside Azure and using Service Tags -- Connections made using supported versions of JDBC Driver for SQL Server will see no impact. For supported JDBC versions, see [Download Microsoft JDBC Driver for SQL Server](/sql/connect/jdbc/download-microsoft-jdbc-driver-for-sql-server). - -## What to do if you're affected - -We recommend that you allow outbound traffic to IP addresses for all the [gateway IP addresses](connectivity-architecture.md#gateway-ip-addresses) in the region on TCP port 1433. Also, allow port range 11000 through 11999 when connecting from a client located within Azure (for example, an Azure VM) or when your Connection Policy is set to Redirection. This recommendation is applicable to clients connecting from on-premises and also those connecting via Service Endpoints. For more information on port ranges, see [Connection policy](connectivity-architecture.md#connection-policy). 
- -Connections made from applications using Microsoft JDBC Driver below version 4.0 might fail certificate validation. Lower versions of Microsoft JDBC rely on Common Name (CN) in the Subject field of the certificate. The mitigation is to ensure that the hostNameInCertificate property is set to *.database.windows.net. For more information on how to set the hostNameInCertificate property, see [Connecting with Encryption](/sql/connect/jdbc/connecting-with-ssl-encryption). - -If the above mitigation doesn't work, file a support request for SQL Database or SQL Managed Instance using the following URL: https://aka.ms/getazuresupport - -## Next steps - -- Find out more about [Azure SQL Connectivity Architecture](connectivity-architecture.md) diff --git a/articles/azure-sql/database/geo-distributed-application-configure-tutorial.md b/articles/azure-sql/database/geo-distributed-application-configure-tutorial.md deleted file mode 100644 index 49d51ec768e2b..0000000000000 --- a/articles/azure-sql/database/geo-distributed-application-configure-tutorial.md +++ /dev/null @@ -1,377 +0,0 @@ ---- -title: Implement a geo-distributed solution -description: Learn to configure your database in Azure SQL Database and client application for failover to a replicated database, and test failover. -services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: sqldbrb=1, devx-track-azurecli, devx-track-azurepowershell -ms.topic: conceptual -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 03/12/2019 ---- -# Tutorial: Implement a geo-distributed database (Azure SQL Database) -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Configure a database in SQL Database and client application for failover to a remote region and test a failover plan. 
You learn how to: - -> [!div class="checklist"] -> -> - Create a [failover group](auto-failover-group-overview.md) -> - Run a Java application to query a database in SQL Database -> - Test failover - -If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/) before you begin. - -## Prerequisites - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] - -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported by Azure SQL Database, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. - -To complete the tutorial, make sure you've installed the following items: - -- [Azure PowerShell](/powershell/azure/) -- A single database in Azure SQL Database. To create one use, - - [The Azure Portal](single-database-create-quickstart.md) - - [The Azure CLI](az-cli-script-samples-content-guide.md) - - [PowerShell](powershell-script-content-guide.md) - - > [!NOTE] - > The tutorial uses the *AdventureWorksLT* sample database. - -- Java and Maven, see [Build an app using SQL Server](https://www.microsoft.com/sql-server/developer-get-started/), highlight **Java** and select your environment, then follow the steps. - -> [!IMPORTANT] -> Be sure to set up firewall rules to use the public IP address of the computer on which you're performing the steps in this tutorial. Database-level firewall rules will replicate automatically to the secondary server. -> -> For information see [Create a database-level firewall rule](/sql/relational-databases/system-stored-procedures/sp-set-database-firewall-rule-azure-sql-database) or to determine the IP address used for the server-level firewall rule for your computer see [Create a server-level firewall](firewall-create-server-level-portal-quickstart.md). 
- -## Create a failover group - -Using Azure PowerShell, create [failover groups](auto-failover-group-overview.md) between an existing server and a new server in another region. Then add the sample database to the failover group. - -# [PowerShell](#tab/azure-powershell) - -> [!IMPORTANT] -> [!INCLUDE [sample-powershell-install](../../../includes/sample-powershell-install-no-ssh.md)] - -To create a failover group, run the following script: - -```powershell -$admin = "" -$password = "" -$resourceGroup = "" -$location = "" -$server = "" -$database = "" -$drLocation = "" -$drServer = "" -$failoverGroup = "" - -# create a backup server in the failover region -New-AzSqlServer -ResourceGroupName $resourceGroup -ServerName $drServer ` - -Location $drLocation -SqlAdministratorCredentials $(New-Object -TypeName System.Management.Automation.PSCredential ` - -ArgumentList $admin, $(ConvertTo-SecureString -String $password -AsPlainText -Force)) - -# create a failover group between the servers -New-AzSqlDatabaseFailoverGroup –ResourceGroupName $resourceGroup -ServerName $server ` - -PartnerServerName $drServer –FailoverGroupName $failoverGroup –FailoverPolicy Automatic -GracePeriodWithDataLossHours 2 - -# add the database to the failover group -Get-AzSqlDatabase -ResourceGroupName $resourceGroup -ServerName $server -DatabaseName $database | ` - Add-AzSqlDatabaseToFailoverGroup -ResourceGroupName $resourceGroup -ServerName $server -FailoverGroupName $failoverGroup -``` - -# [The Azure CLI](#tab/azure-cli) - -> [!IMPORTANT] -> Run `az login` to sign in to Azure. 
- -```azurecli -$admin = "" -$password = "" -$resourceGroup = "" -$location = "" -$server = "" -$database = "" -$drLocation = "" # must be different then $location -$drServer = "" -$failoverGroup = "" - -# create a backup server in the failover region -az sql server create --admin-password $password --admin-user $admin ` - --name $drServer --resource-group $resourceGroup --location $drLocation - -# create a failover group between the servers -az sql failover-group create --name $failoverGroup --partner-server $drServer ` - --resource-group $resourceGroup --server $server --add-db $database ` - --failover-policy Automatic --grace-period 2 -``` - -* * * - -Geo-replication settings can also be changed in the Azure portal, by selecting your database, then **Settings** > **Geo-Replication**. - -![Geo-replication settings](./media/geo-distributed-application-configure-tutorial/geo-replication.png) - -## Run the sample project - -1. In the console, create a Maven project with the following command: - - ```bash - mvn archetype:generate "-DgroupId=com.sqldbsamples" "-DartifactId=SqlDbSample" "-DarchetypeArtifactId=maven-archetype-quickstart" "-Dversion=1.0.0" - ``` - -1. Type **Y** and press **Enter**. - -1. Change directories to the new project. - - ```bash - cd SqlDbSample - ``` - -1. Using your favorite editor, open the *pom.xml* file in your project folder. - -1. Add the Microsoft JDBC Driver for SQL Server dependency by adding the following `dependency` section. The dependency must be pasted within the larger `dependencies` section. - - ```xml - - com.microsoft.sqlserver - mssql-jdbc - 6.1.0.jre8 - - ``` - -1. Specify the Java version by adding the `properties` section after the `dependencies` section: - - ```xml - - 1.8 - 1.8 - - ``` - -1. Support manifest files by adding the `build` section after the `properties` section: - - ```xml - - - - org.apache.maven.plugins - maven-jar-plugin - 3.0.0 - - - - com.sqldbsamples.App - - - - - - - ``` - -1. 
Save and close the *pom.xml* file. - -1. Open the *App.java* file located in ..\SqlDbSample\src\main\java\com\sqldbsamples and replace the contents with the following code: - - ```java - package com.sqldbsamples; - - import java.sql.Connection; - import java.sql.Statement; - import java.sql.PreparedStatement; - import java.sql.ResultSet; - import java.sql.Timestamp; - import java.sql.DriverManager; - import java.util.Date; - import java.util.concurrent.TimeUnit; - - public class App { - - private static final String FAILOVER_GROUP_NAME = ""; // add failover group name - - private static final String DB_NAME = ""; // add database name - private static final String USER = ""; // add database user - private static final String PASSWORD = ""; // add database password - - private static final String READ_WRITE_URL = String.format("jdbc:" + - "sqlserver://%s.database.windows.net:1433;database=%s;user=%s;password=%s;encrypt=true;" + - "hostNameInCertificate=*.database.windows.net;loginTimeout=30;", - FAILOVER_GROUP_NAME, DB_NAME, USER, PASSWORD); - private static final String READ_ONLY_URL = String.format("jdbc:" + - "sqlserver://%s.secondary.database.windows.net:1433;database=%s;user=%s;password=%s;encrypt=true;" + - "hostNameInCertificate=*.database.windows.net;loginTimeout=30;", - FAILOVER_GROUP_NAME, DB_NAME, USER, PASSWORD); - - public static void main(String[] args) { - System.out.println("#######################################"); - System.out.println("## GEO DISTRIBUTED DATABASE TUTORIAL ##"); - System.out.println("#######################################"); - System.out.println(""); - - int highWaterMark = getHighWaterMarkId(); - - try { - for(int i = 1; i < 1000; i++) { - // loop will run for about 1 hour - System.out.print(i + ": insert on primary " + - (insertData((highWaterMark + i)) ? "successful" : "failed")); - TimeUnit.SECONDS.sleep(1); - System.out.print(", read from secondary " + - (selectData((highWaterMark + i)) ? 
"successful" : "failed") + "\n"); - TimeUnit.SECONDS.sleep(3); - } - } catch(Exception e) { - e.printStackTrace(); - } - } - - private static boolean insertData(int id) { - // Insert data into the product table with a unique product name so we can find the product again - String sql = "INSERT INTO SalesLT.Product " + - "(Name, ProductNumber, Color, StandardCost, ListPrice, SellStartDate) VALUES (?,?,?,?,?,?);"; - - try (Connection connection = DriverManager.getConnection(READ_WRITE_URL); - PreparedStatement pstmt = connection.prepareStatement(sql)) { - pstmt.setString(1, "BrandNewProduct" + id); - pstmt.setInt(2, 200989 + id + 10000); - pstmt.setString(3, "Blue"); - pstmt.setDouble(4, 75.00); - pstmt.setDouble(5, 89.99); - pstmt.setTimestamp(6, new Timestamp(new Date().getTime())); - return (1 == pstmt.executeUpdate()); - } catch (Exception e) { - return false; - } - } - - private static boolean selectData(int id) { - // Query the data previously inserted into the primary database from the geo replicated database - String sql = "SELECT Name, Color, ListPrice FROM SalesLT.Product WHERE Name = ?"; - - try (Connection connection = DriverManager.getConnection(READ_ONLY_URL); - PreparedStatement pstmt = connection.prepareStatement(sql)) { - pstmt.setString(1, "BrandNewProduct" + id); - try (ResultSet resultSet = pstmt.executeQuery()) { - return resultSet.next(); - } - } catch (Exception e) { - return false; - } - } - - private static int getHighWaterMarkId() { - // Query the high water mark id stored in the table to be able to make unique inserts - String sql = "SELECT MAX(ProductId) FROM SalesLT.Product"; - int result = 1; - try (Connection connection = DriverManager.getConnection(READ_WRITE_URL); - Statement stmt = connection.createStatement(); - ResultSet resultSet = stmt.executeQuery(sql)) { - if (resultSet.next()) { - result = resultSet.getInt(1); - } - } catch (Exception e) { - e.printStackTrace(); - } - return result; - } - } - ``` - -1. 
Save and close the *App.java* file. - -1. In the command console, run the following command: - - ```bash - mvn package - ``` - -1. Start the application that will run for about 1 hour until stopped manually, allowing you time to run the failover test. - - ```bash - mvn -q -e exec:java "-Dexec.mainClass=com.sqldbsamples.App" - ``` - - ```output - ####################################### - ## GEO DISTRIBUTED DATABASE TUTORIAL ## - ####################################### - - 1. insert on primary successful, read from secondary successful - 2. insert on primary successful, read from secondary successful - 3. insert on primary successful, read from secondary successful - ... - ``` - -## Test failover - -Run the following scripts to simulate a failover and observe the application results. Notice how some inserts and selects will fail during the database migration. - -# [PowerShell](#tab/azure-powershell) - -You can check the role of the disaster recovery server during the test with the following command: - -```powershell -(Get-AzSqlDatabaseFailoverGroup -FailoverGroupName $failoverGroup ` - -ResourceGroupName $resourceGroup -ServerName $drServer).ReplicationRole -``` - -To test a failover: - -1. Start a manual failover of the failover group: - - ```powershell - Switch-AzSqlDatabaseFailoverGroup -ResourceGroupName $resourceGroup ` - -ServerName $drServer -FailoverGroupName $failoverGroup - ``` - -1. Revert failover group back to the primary server: - - ```powershell - Switch-AzSqlDatabaseFailoverGroup -ResourceGroupName $resourceGroup ` - -ServerName $server -FailoverGroupName $failoverGroup - ``` - -# [The Azure CLI](#tab/azure-cli) - -You can check the role of the disaster recovery server during the test with the following command: - -```azurecli -az sql failover-group show --name $failoverGroup --resource-group $resourceGroup --server $drServer -``` - -To test a failover: - -1. 
Start a manual failover of the failover group: - - ```azurecli - az sql failover-group set-primary --name $failoverGroup --resource-group $resourceGroup --server $drServer - ``` - -1. Revert failover group back to the primary server: - - ```azurecli - az sql failover-group set-primary --name $failoverGroup --resource-group $resourceGroup --server $server - ``` - -* * * - -## Next steps - -In this tutorial, you configured a database in Azure SQL Database and an application for failover to a remote region and tested a failover plan. You learned how to: - -> [!div class="checklist"] -> -> - Create a geo-replication failover group -> - Run a Java application to query a database in SQL Database -> - Test failover - -Advance to the next tutorial on how to add an instance of Azure SQL Managed Instance to a failover group: - -> [!div class="nextstepaction"] -> [Add an instance of Azure SQL Managed Instance to a failover group](../managed-instance/failover-group-add-instance-tutorial.md) diff --git a/articles/azure-sql/database/high-availability-sla.md b/articles/azure-sql/database/high-availability-sla.md deleted file mode 100644 index 94bbf39438876..0000000000000 --- a/articles/azure-sql/database/high-availability-sla.md +++ /dev/null @@ -1,237 +0,0 @@ ---- -title: High availability -titleSuffix: Azure SQL Database and SQL Managed Instance -description: Learn about the Azure SQL Database and SQL Managed Instance service high availability capabilities and features -services: sql-database -ms.service: sql-db-mi -ms.subservice: high-availability -ms.custom: sqldbrb=2, references_regions -ms.devlang: -ms.topic: conceptual -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma, emlisa -ms.date: 04/13/2022 ---- - -# High availability for Azure SQL Database and SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -The goal of the high availability architecture in Azure SQL Database and SQL Managed Instance is to 
guarantee that your database is up and running minimum of 99.99% of time without worrying about the impact of maintenance operations and outages. For more information regarding specific SLA for different tiers, refer to [SLA for Azure SQL Database](https://azure.microsoft.com/support/legal/sla/azure-sql-database) and SLA for [Azure SQL Managed Instance](https://azure.microsoft.com/support/legal/sla/azure-sql-sql-managed-instance/). - -Azure automatically handles critical servicing tasks, such as patching, backups, Windows and Azure SQL upgrades, and unplanned events such as underlying hardware, software, or network failures. When the underlying database in Azure SQL Database is patched or fails over, the downtime is not noticeable if you [employ retry logic](develop-overview.md#resiliency) in your app. SQL Database and SQL Managed Instance can quickly recover even in the most critical circumstances ensuring that your data is always available. - -The high availability solution is designed to ensure that committed data is never lost due to failures, that maintenance operations do not affect your workload, and that the database will not be a single point of failure in your software architecture. There are no maintenance windows or downtimes that should require you to stop the workload while the database is upgraded or maintained. - -There are two high availability architectural models: - -- **Standard availability model** that is based on a separation of compute and storage. It relies on high availability and reliability of the remote storage tier. This architecture targets budget-oriented business applications that can tolerate some performance degradation during maintenance activities. -- **Premium availability model** that is based on a cluster of database engine processes. It relies on the fact that there is always a quorum of available database engine nodes. 
This architecture targets mission-critical applications with high IO performance, high transaction rate and guarantees minimal performance impact to your workload during maintenance activities. - -SQL Database and SQL Managed Instance both run on the latest stable version of the SQL Server database engine and Windows operating system, and most users would not notice that upgrades are performed continuously. - -## Basic, Standard, and General Purpose service tier locally redundant availability - -The Basic, Standard, and General Purpose service tiers use the standard availability architecture for both serverless and provisioned compute. The following figure shows four different nodes with the separated compute and storage layers. - -![Separation of compute and storage](./media/high-availability-sla/general-purpose-service-tier.png) - -The standard availability model includes two layers: - -- A stateless compute layer that runs the `sqlservr.exe` process and contains only transient and cached data, such as TempDB, model databases on the attached SSD, and plan cache, buffer pool, and columnstore pool in memory. This stateless node is operated by Azure Service Fabric that initializes `sqlservr.exe`, controls health of the node, and performs failover to another node if necessary. -- A stateful data layer with the database files (.mdf/.ldf) that are stored in Azure Blob storage. Azure blob storage has built-in data availability and redundancy feature. It guarantees that every record in the log file or page in the data file will be preserved even if `sqlservr.exe` process crashes. - -Whenever the database engine or the operating system is upgraded, or a failure is detected, Azure Service Fabric will move the stateless `sqlservr.exe` process to another stateless compute node with sufficient free capacity. Data in Azure Blob storage is not affected by the move, and the data/log files are attached to the newly initialized `sqlservr.exe` process. 
This process guarantees 99.99% availability, but a heavy workload may experience some performance degradation during the transition since the new `sqlservr.exe` process starts with cold cache. - -## General Purpose service tier zone redundant availability - -Zone-redundant configuration for the general purpose service tier is offered for both serverless and provisioned compute. This configuration utilizes [Azure Availability Zones](../../availability-zones/az-overview.md)  to replicate databases across multiple physical locations within an Azure region. By selecting zone-redundancy, you can make your new and existing serverless and provisioned general purpose single databases and elastic pools resilient to a much larger set of failures, including catastrophic datacenter outages, without any changes of the application logic. - -Zone-redundant configuration for the general purpose tier has two layers: - -- A stateful data layer with the database files (.mdf/.ldf) that are stored in ZRS(zone-redundant storage). Using [ZRS](../../storage/common/storage-redundancy.md) the data and log files are synchronously copied across three physically isolated Azure availability zones. -- A stateless compute layer that runs the sqlservr.exe process and contains only transient and cached data, such as TempDB, model databases on the attached SSD, and plan cache, buffer pool, and columnstore pool in memory. This stateless node is operated by Azure Service Fabric that initializes sqlservr.exe, controls health of the node, and performs failover to another node if necessary. For zone-redundant serverless and provisioned general purpose databases, nodes with spare capacity are readily available in other Availability Zones for failover. 
- -The zone-redundant version of the high availability architecture for the general purpose service tier is illustrated by the following diagram: - -![Zone redundant configuration for general purpose](./media/high-availability-sla/zone-redundant-for-general-purpose.png) - -> [!IMPORTANT] -> For general purpose tier the zone-redundant configuration is Generally Available in the following regions: West Europe, North Europe, West US 2, and France Central. This is in preview in the following regions: East US, East US 2, Southeast Asia, Australia East, Japan East, and UK South. - -> [!NOTE] -> Zone-redundant configuration is not available in SQL Managed Instance. In SQL Database this feature is only available when the Gen5 hardware is selected. - - -## Premium and Business Critical service tier locally redundant availability - -Premium and Business Critical service tiers use the Premium availability model, which integrates compute resources (`sqlservr.exe` process) and storage (locally attached SSD) on a single node. High availability is achieved by replicating both compute and storage to additional nodes creating a three to four-node cluster. - -![Cluster of database engine nodes](./media/high-availability-sla/business-critical-service-tier.png) - -The underlying database files (.mdf/.ldf) are placed on the attached SSD storage to provide very low latency IO to your workload. High availability is implemented using a technology similar to SQL Server [Always On availability groups](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server). The cluster includes a single primary replica that is accessible for read-write customer workloads, and up to three secondary replicas (compute and storage) containing copies of data. The primary node constantly pushes changes to the secondary nodes in order and ensures that the data is persisted to at least one secondary replica before committing each transaction. 
This process guarantees that if the primary node crashes for any reason, there is always a fully synchronized node to fail over to. The failover is initiated by the Azure Service Fabric. Once the secondary replica becomes the new primary node, another secondary replica is created to ensure the cluster has enough nodes (quorum set). Once failover is complete, Azure SQL connections are automatically redirected to the new primary node. - -As an extra benefit, the premium availability model includes the ability to redirect read-only Azure SQL connections to one of the secondary replicas. This feature is called [Read Scale-Out](read-scale-out.md). It provides 100% additional compute capacity at no extra charge to off-load read-only operations, such as analytical workloads, from the primary replica. - -## Premium and Business Critical service tier zone redundant availability - -By default, the cluster of nodes for the premium availability model is created in the same datacenter. With the introduction of [Azure Availability Zones](../../availability-zones/az-overview.md), SQL Database can place different replicas of the Business Critical database to different availability zones in the same region. To eliminate a single point of failure, the control ring is also duplicated across multiple zones as three gateway rings (GW). The routing to a specific gateway ring is controlled by [Azure Traffic Manager](../../traffic-manager/traffic-manager-overview.md) (ATM). Because the zone-redundant configuration in the Premium or Business Critical service tiers does not create additional database redundancy, you can enable it at no extra cost. By selecting a zone-redundant configuration, you can make your Premium or Business Critical databases resilient to a much larger set of failures, including catastrophic datacenter outages, without any changes to the application logic. 
You can also convert any existing Premium or Business Critical databases or pools to the zone-redundant configuration. - -Because the zone-redundant databases have replicas in different datacenters with some distance between them, the increased network latency may increase the commit time and thus impact the performance of some OLTP workloads. You can always return to the single-zone configuration by disabling the zone-redundancy setting. This process is an online operation similar to the regular service tier upgrade. At the end of the process, the database or pool is migrated from a zone-redundant ring to a single zone ring or vice versa. - -> [!IMPORTANT] -> This feature is not available in SQL Managed Instance. In SQL Database, when using the Business Critical tier, zone-redundant configuration is only available when the Gen5 hardware is selected. For up to date information about the regions that support zone-redundant databases, see [Services support by region](../../availability-zones/az-region.md). - -The zone-redundant version of the high availability architecture is illustrated by the following diagram: - -![high availability architecture zone redundant](./media/high-availability-sla/zone-redundant-business-critical-service-tier.png) - - -## Hyperscale service tier locally redundant availability - -The Hyperscale service tier architecture is described in [Distributed functions architecture](./service-tier-hyperscale.md#distributed-functions-architecture) and is only currently available for SQL Database, not SQL Managed Instance. - -![Hyperscale functional architecture](./media/high-availability-sla/hyperscale-architecture.png) - -The availability model in Hyperscale includes four layers: - -- A stateless compute layer that runs the `sqlservr.exe` processes and contains only transient and cached data, such as non-covering RBPEX cache, TempDB, model database, etc. on the attached SSD, and plan cache, buffer pool, and columnstore pool in memory. 
This stateless layer includes the primary compute replica and optionally a number of secondary compute replicas that can serve as failover targets. -- A stateless storage layer formed by page servers. This layer is the distributed storage engine for the `sqlservr.exe` processes running on the compute replicas. Each page server contains only transient and cached data, such as covering RBPEX cache on the attached SSD, and data pages cached in memory. Each page server has a paired page server in an active-active configuration to provide load balancing, redundancy, and high availability. -- A stateful transaction log storage layer formed by the compute node running the Log service process, the transaction log landing zone, and transaction log long-term storage. Landing zone and long-term storage use Azure Storage, which provides availability and [redundancy](../../storage/common/storage-redundancy.md) for transaction log, ensuring data durability for committed transactions. -- A stateful data storage layer with the database files (.mdf/.ndf) that are stored in Azure Storage and are updated by page servers. This layer uses data availability and [redundancy](../../storage/common/storage-redundancy.md) features of Azure Storage. It guarantees that every page in a data file will be preserved even if processes in other layers of Hyperscale architecture crash, or if compute nodes fail. - -Compute nodes in all Hyperscale layers run on Azure Service Fabric, which controls health of each node and performs failovers to available healthy nodes as necessary. - -For more information on high availability in Hyperscale, see [Database High Availability in Hyperscale](./service-tier-hyperscale.md#database-high-availability-in-hyperscale). - -## Hyperscale service tier zone redundant availability (Preview) - -Zone redundancy for the Azure SQL Database Hyperscale service tier is [now in public preview](https://aka.ms/zrhyperscale). 
Enabling this configuration ensures zone-level resiliency through replication across Availability Zones for all Hyperscale layers. By selecting zone-redundancy, you can make your Hyperscale databases resilient to a much larger set of failures, including catastrophic datacenter outages, without any changes to the application logic. - -Consider the following limitations: - -- Currently, only the following Azure regions are supported: UK South, Brazil South, West US 2, Japan East, North Europe, Southeast Asia, Canada Central, Central US, South Central US, France Central, Australia East, Germany West Central, East Asia, Korea Central, Norway East, and West US 3. -- Zone redundant configuration can only be specified during database creation. This setting cannot be modified once the resource is provisioned. Use [Database copy](database-copy.md), [point-in-time restore](recovery-using-backups.md#point-in-time-restore), or create a [geo-replica](active-geo-replication-overview.md) to update the zone redundant configuration for an existing Hyperscale database. When using one of these update options, if the target database is in a different region than the source or if the database backup storage redundancy from the target differs from the source database, the [copy operation](database-copy.md#database-copy-for-azure-sql-hyperscale) will be a size of data operation. Additionally, when using one of these update options the target database will not have the historical backup data from the source database for point-in-time restore. -- Named replicas are not supported. -- Only [zone-redundant backup](automated-backups-overview.md) is supported. -- Only Gen5 hardware is supported. -- [Geo-Restore](recovery-using-backups.md#geo-restore) is not currently supported. -- Zone redundancy cannot currently be specified when migrating an existing database from another Azure SQL Database service tier to Hyperscale. 
- -> [!IMPORTANT] -> At least 1 high availability compute replica and the use of zone-redundant backup storage is required for enabling the zone redundant configuration for Hyperscale. - - -### Create a zone redundant Hyperscale database - -Use [Azure PowerShell](/powershell/azure/install-az-ps) or the [Azure CLI](/cli/azure/update-azure-cli) to create a zone redundant Hyperscale database. Confirm you have the latest version of the API to ensure support for any recent changes. - -# [Azure PowerShell](#tab/azure-powershell) - -Specify the `-ZoneRedundant` parameter to enable zone redundancy for your Hyperscale database by using Azure PowerShell. The database must have at least 1 high availability replica and zone-redundant backup storage must be specified. - -To enable zone redundancy using Azure Powershell, use the following example command: - -```powershell -New-AzSqlDatabase -ResourceGroupName "ResourceGroup01" -ServerName "Server01" -DatabaseName "Database01" ` - -Edition "Hyperscale" -HighAvailabilityReplicaCount 1 -ZoneRedundant -BackupStorageRedundancy Zone -``` - - - -# [Azure CLI](#tab/azure-cli) - -Specify the `-zone-redundant parameter` to enable zone redundancy for your Hyperscale database by using the Azure CLI. The database copy must have at least 1 high availability replica and zone-redundant backup storage. - -To enable zone redundancy using the Azure CLI, use the following example command: - -```azurecli -az sql db create -g mygroup -s myserver -n mydb -e Hyperscale -f Gen5 –ha-replicas 1 –-zone-redundant -–backup-storage-redundancy Zone -``` - -* * * - -### Create a zone redundant Hyperscale database by creating a geo-replica - -To make an existing Hyperscale database zone redundant, use Azure PowerShell or the Azure CLI to create a zone redundant Hyperscale database using active geo-replication. The geo-replica can be in the same or different region as the existing Hyperscale database. 
- -# [Azure PowerShell](#tab/azure-powershell) - -Specify the `-ZoneRedundant` parameter to enable zone redundancy for your Hyperscale database secondary. The secondary database must have at least 1 high availability replica and zone-redundant backup storage must be specified. - -To create your zone redundant database using Azure PowerShell, use the following example command: - -```powershell -New-AzSqlDatabaseSecondary -ResourceGroupName "myResourceGroup" -ServerName $sourceserver -DatabaseName "databaseName" -PartnerResourceGroupName "myPartnerResourceGroup" -PartnerServerName $targetserver -PartnerDatabaseName "zoneRedundantCopyOfMySampleDatabase” -ZoneRedundant -BackupStorageRedundancy Zone -HighAvailabilityReplicaCount 1 -``` - - -# [Azure CLI](#tab/azure-cli) - -Specify the `-zone-redundant parameter` to enable zone redundancy for your Hyperscale database secondary. The secondary database must have at least 1 high availability replica and zone-redundant backup storage. - -To enable zone redundancy using the Azure CLI, use the following example command: - -```azurecli -az sql db replica create -g mygroup -s myserver -n originalDb --partner-server newDb -–ha-replicas 1 -–zone-redundant -–backup-storage-redundancy Zone -``` - -* * * - -### Create a zone redundant Hyperscale database by creating a database copy - -To make an existing Hyperscale database zone redundant, use Azure PowerShell or the Azure CLI to create a zone redundant Hyperscale database using database copy. The database copy can be in the same or different region as the existing Hyperscale database. - -# [Azure PowerShell](#tab/azure-powershell) - -Specify the `-ZoneRedundant` parameter to enable zone redundancy for your Hyperscale database copy. The database copy must have at least 1 high availability replica and zone-redundant backup storage must be specified. 
- -To create your zone redundant database using Azure PowerShell, use the following example command: - -```powershell -New-AzSqlDatabaseCopy -ResourceGroupName "myResourceGroup" -ServerName $sourceserver -DatabaseName "databaseName" -CopyResourceGroupName "myCopyResourceGroup" -CopyServerName $copyserver -CopyDatabaseName "zoneRedundantCopyOfMySampleDatabase” -ZoneRedundant -BackupStorageRedundancy Zone -``` - - -# [Azure CLI](#tab/azure-cli) - -Specify the `-zone-redundant parameter` to enable zone redundancy for your Hyperscale database copy. The database copy must have at least 1 high availability replica and zone-redundant backup storage. - -To enable zone redundancy using the Azure CLI, use the following example command: - -```azurecli -az sql db copy --dest-name "CopyOfMySampleDatabase" --dest-resource-group "myResourceGroup" --dest-server $targetserver --name "" --resource-group "" --server $sourceserver -–ha-replicas 1 -–zone-redundant -–backup-storage-redundancy Zone -``` - -* * * - -## Accelerated Database Recovery (ADR) - -[Accelerated Database Recovery (ADR)](../accelerated-database-recovery.md) is a new database engine feature that greatly improves database availability, especially in the presence of long running transactions. ADR is currently available for Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics. - -## Testing application fault resiliency - -High availability is a fundamental part of the SQL Database and SQL Managed Instance platform that works transparently for your database application. However, we recognize that you may want to test how the automatic failover operations initiated during planned or unplanned events would impact an application before you deploy it to production. You can manually trigger a failover by calling a special API to restart a database, an elastic pool, or a managed instance. 
In the case of a zone-redundant serverless or provisioned General Purpose database or elastic pool, the API call would result in redirecting client connections to the new primary in an Availability Zone different from the Availability Zone of the old primary. So in addition to testing how failover impacts existing database sessions, you can also verify if it changes the end-to-end performance due to changes in network latency. Because the restart operation is intrusive and a large number of them could stress the platform, only one failover call is allowed every 15 minutes for each database, elastic pool, or managed instance. - -A failover can be initiated using PowerShell, REST API, or Azure CLI: - -|Deployment type|PowerShell|REST API| Azure CLI| -|:---|:---|:---|:---| -|Database|[Invoke-AzSqlDatabaseFailover](/powershell/module/az.sql/invoke-azsqldatabasefailover)|[Database failover](/rest/api/sql/databases/failover)|[az rest](/cli/azure/reference-index#az-rest) may be used to invoke a REST API call from Azure CLI| -|Elastic pool|[Invoke-AzSqlElasticPoolFailover](/powershell/module/az.sql/invoke-azsqlelasticpoolfailover)|[Elastic pool failover](/javascript/api/@azure/arm-sql/elasticpools)|[az rest](/cli/azure/reference-index#az-rest) may be used to invoke a REST API call from Azure CLI| -|Managed Instance|[Invoke-AzSqlInstanceFailover](/powershell/module/az.sql/Invoke-AzSqlInstanceFailover/)|[Managed Instances - Failover](/rest/api/sql/managed%20instances%20-%20failover/failover)|[az sql mi failover](/cli/azure/sql/mi/#az-sql-mi-failover) may be used to invoke a REST API call from Azure CLI| - -> [!IMPORTANT] -> The Failover command is not available for readable secondary replicas of Hyperscale databases. - -## Conclusion - -Azure SQL Database and Azure SQL Managed Instance feature a built-in high availability solution, that is deeply integrated with the Azure platform. 
It is dependent on Service Fabric for failure detection and recovery, on Azure Blob storage for data protection, and on Availability Zones for higher fault tolerance (as mentioned earlier in document not applicable to Azure SQL Managed Instance yet). In addition, SQL Database and SQL Managed Instance use the Always On availability group technology from the SQL Server instance for replication and failover. The combination of these technologies enables applications to fully realize the benefits of a mixed storage model and support the most demanding SLAs. - -## Next steps - -- Learn about [Azure Availability Zones](../../availability-zones/az-overview.md) -- Learn about [Service Fabric](../../service-fabric/service-fabric-overview.md) -- Learn about [Azure Traffic Manager](../../traffic-manager/traffic-manager-overview.md) -- Learn [How to initiate a manual failover on SQL Managed Instance](../managed-instance/user-initiated-failover.md) -- For more options for high availability and disaster recovery, see [Business Continuity](business-continuity-high-availability-disaster-recover-hadr-overview.md) diff --git a/articles/azure-sql/database/high-cpu-diagnose-troubleshoot.md b/articles/azure-sql/database/high-cpu-diagnose-troubleshoot.md deleted file mode 100644 index d6f0ea075c1dc..0000000000000 --- a/articles/azure-sql/database/high-cpu-diagnose-troubleshoot.md +++ /dev/null @@ -1,391 +0,0 @@ ---- -title: Diagnose and troubleshoot high CPU -titleSuffix: Azure SQL Database -description: Learn to diagnose and troubleshoot high CPU problems in Azure SQL Database. 
-services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: how-to -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 04/06/2022 ---- -# Diagnose and troubleshoot high CPU on Azure SQL Database - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -[Azure SQL Database](sql-database-paas-overview.md) provides built-in tools to identify the causes of high CPU usage and to optimize workload performance. You can use these tools to troubleshoot high CPU usage while it's occurring, or reactively after the incident has completed. You can also enable [automatic tuning](automatic-tuning-overview.md) to proactively reduce CPU usage over time for your database. This article teaches you to diagnose and troubleshoot high CPU with built-in tools in Azure SQL Database and explains [when to add CPU resources](#when-to-add-cpu-resources). - -## Understand vCore count - -It's helpful to understand the number of virtual cores (vCores) available to your database when diagnosing a high CPU incident. A vCore is equivalent to a logical CPU. The number of vCores helps you understand the CPU resources available to your database. - -### Identify vCore count in the Azure portal - -You can quickly identify the vCore count for a database in the Azure portal if you're using a [vCore-based service tier](service-tiers-vcore.md) with the provisioned compute tier. In this case, the **pricing tier** listed for the database on its **Overview** page will contain the vCore count. For example, a database's pricing tier might be 'General Purpose: Gen5, 16 vCores'. - -For databases in the [serverless](serverless-tier-overview.md) compute tier, vCore count will always be equivalent to the max vCore setting for the database. VCore count will show in the **pricing tier** listed for the database on its **Overview** page. 
For example, a database's pricing tier might be 'General Purpose: Serverless, Gen5, 16 vCores'. - -If you're using a database under the [DTU-based purchasing model](service-tiers-dtu.md), you will need to use Transact-SQL to query the database's vCore count. - -### Identify vCore count with Transact-SQL - -You can identify the current vCore count for any database with Transact-SQL. You can run Transact-SQL against Azure SQL Database with [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms), [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio), or [the Azure portal's query editor (preview)](connect-query-portal.md). - -Connect to your database and run the following query: - -```sql -SELECT - COUNT(*) as vCores -FROM sys.dm_os_schedulers -WHERE status = N'VISIBLE ONLINE'; -GO -``` - -> [!NOTE] -> For databases using Gen4 hardware, the number of visible online schedulers in `sys.dm_os_schedulers` may be double the number of vCores specified at database creation and shown in Azure portal. - -## Identify the causes of high CPU -You can measure and analyze CPU utilization using the Azure portal, Query Store interactive tools in SSMS, and Transact-SQL queries in SSMS and Azure Data Studio. - -The Azure portal and Query Store show execution statistics, such as CPU metrics, for completed queries. If you are experiencing a current high CPU incident that may be caused by one or more ongoing long-running queries, [identify currently running queries with Transact-SQL](#identify-currently-running-queries-with-transact-sql). - -Common causes of new and unusual high CPU utilization are: - -* New queries in the workload that use a large amount of CPU. -* An increase in the frequency of regularly running queries. -* Query plan regression, including regression due to [parameter sensitive plan (PSP) problems](../identify-query-performance-issues.md), resulting in one or more queries consuming more CPU. 
-* A significant increase in compilation or recompilation of query plans. -* Databases where queries use [excessive parallelism](configure-max-degree-of-parallelism.md#excessive-parallelism). - -To understand what is causing your high CPU incident, identify when high CPU utilization is occurring against your database and the top queries using CPU at that time. - -Examine: - -- Are new queries using significant CPU appearing in the workload, or are you seeing an increase in frequency of regularly running queries? Use any of the following methods to investigate. Look for queries with limited history (new queries), and at the frequency of execution for queries with longer history. - - [Review CPU metrics and related top queries in the Azure portal](#review-cpu-usage-metrics-and-related-top-queries-in-the-azure-portal) - - [Query the top recent 15 queries by CPU usage](#query-the-top-recent-15-queries-by-cpu-usage) with Transact-SQL. - - [Use interactive Query Store tools in SSMS to identify top queries by CPU time](#use-interactive-query-store-tools-to-identify-top-queries-by-cpu-time) -- Are some queries in the workload using more CPU per execution than they did in the past? If so, has the query execution plan changed? These queries may [have parameter sensitive plan (PSP) problems](../identify-query-performance-issues.md). Use either of the following techniques to investigate. Look for queries with multiple query execution plans with significant variation in CPU usage: - - [Query the top recent 15 queries by CPU usage](#query-the-top-recent-15-queries-by-cpu-usage) with Transact-SQL. - - [Use interactive Query Store tools in SSMS to identify top queries by CPU time](#use-interactive-query-store-tools-to-identify-top-queries-by-cpu-time) -- Is there evidence of a large amount of compilation or recompilation occurring? 
Query the [most frequently compiled queries by query hash](#query-the-most-frequently-compiled-queries-by-query-hash) and review how frequently they compile. -- Are queries using excessive parallelism? Query your [MAXDOP database scoped configuration](configure-max-degree-of-parallelism.md#maxdop-database-scoped-configuration-1) and review your [vCore count](#understand-vcore-count). Excessive parallelism often occurs in databases where MAXDOP is set to 0 with a core count higher than eight. - -> [!Note] -> Azure SQL Database requires compute resources to implement core service features such as high availability and disaster recovery, database backup and restore, monitoring, Query Store, automatic tuning, etc. Use of these compute resources may be particularly noticeable on databases with low vCore counts or databases in dense [elastic pools](elastic-pool-overview.md). Learn more in [Resource management in Azure SQL Database](resource-limits-logical-server.md#resource-consumption-by-user-workloads-and-internal-processes). - - -### Review CPU usage metrics and related top queries in the Azure portal - -Use the Azure portal to track various CPU metrics, including the percentage of available CPU used by your database over time. The Azure portal combines CPU metrics with information from your database's Query Store, which allows you to identify which queries consumed CPU in your database at a given time. - -Follow these steps to find CPU percentage metrics. - -1. Navigate to the database in the Azure portal. -1. Under **Intelligent Performance** in the left menu, select **Query Performance Insight**. - -The default view of Query Performance Insight shows 24 hours of data. CPU usage is shown as a percentage of total available CPU used for the database. - -The top five queries running in that period are displayed in vertical bars above the CPU usage graph. Select a band of time on the chart or use the **Customize** menu to explore specific time periods. 
You may also increase the number of queries shown. - -:::image type="content" source="./media/high-cpu-troubleshoot/azure-portal-query-performance-insight-cpu-queries.png" lightbox="./media/high-cpu-troubleshoot/azure-portal-query-performance-insight.png" alt-text="Screenshot shows Query Performance Insight in the Azure portal."::: - -Select each query ID exhibiting high CPU to open details for the query. Details include query text along with performance history for the query. Examine if CPU has increased for the query recently. - -Take note of the query ID to further investigate the query plan using Query Store in the following section. - -### Review query plans for top queries identified in the Azure portal - -Follow these steps to use a query ID in SSMS's interactive Query Store tools to examine the query's execution plan over time. - -1. Open SSMS. -1. Connect to your Azure SQL Database in Object Explorer. -1. Expand the database node in Object Explorer. -1. Expand the **Query Store** folder. -1. Open the **Tracked Queries** pane. -1. Enter the query ID in the **Tracking query** box at the top left of the screen and press Enter. -1. If necessary, select **Configure** to adjust the time interval to match the time when high CPU utilization was occurring. - -The page will show the execution plan(s) and related metrics for the query over the most recent 24 hours. - -### Identify currently running queries with Transact-SQL - -Transact-SQL allows you to identify currently running queries with CPU time they have used so far. You can also use Transact-SQL to query recent CPU usage in your database, top queries by CPU, and queries that compiled the most often. - -You can query CPU metrics with [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms), [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio), or [the Azure portal's query editor (preview)](connect-query-portal.md). 
When using SSMS or Azure Data Studio, open a new query window and connect it to your database (not the master database). - -Find currently running queries with CPU usage and execution plans by executing the following query. CPU time is returned in milliseconds. - -```sql -SELECT - req.session_id, - req.status, - req.start_time, - req.cpu_time AS 'cpu_time_ms', - req.logical_reads, - req.dop, - s.login_name, - s.host_name, - s.program_name, - object_name(st.objectid,st.dbid) 'ObjectName', - REPLACE (REPLACE (SUBSTRING (st.text,(req.statement_start_offset/2) + 1, - ((CASE req.statement_end_offset WHEN -1 THEN DATALENGTH(st.text) - ELSE req.statement_end_offset END - req.statement_start_offset)/2) + 1), - CHAR(10), ' '), CHAR(13), ' ') AS statement_text, - qp.query_plan, - qsx.query_plan as query_plan_with_in_flight_statistics -FROM sys.dm_exec_requests as req -JOIN sys.dm_exec_sessions as s on req.session_id=s.session_id -CROSS APPLY sys.dm_exec_sql_text(req.sql_handle) as st -OUTER APPLY sys.dm_exec_query_plan(req.plan_handle) as qp -OUTER APPLY sys.dm_exec_query_statistics_xml(req.session_id) as qsx -ORDER BY req.cpu_time desc; -GO -``` - -This query returns two copies of the execution plan. The column `query_plan` contains the execution plan from [sys.dm_exec_query_plan()](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-query-plan-transact-sql). This version of the query plan contains only estimates of row counts and does not contain any execution statistics. - -If the column `query_plan_with_in_flight_statistics` returns an execution plan, this plan provides more information. The `query_plan_with_in_flight_statistics` column returns data from [sys.dm_exec_query_statistics_xml()](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-query-statistics-xml-transact-sql), which includes "in flight" execution statistics such as the actual number of rows returned so far by a currently running query. 
- -### Review CPU usage metrics for the last hour - -The following query against `sys.dm_db_resource_stats` returns the average CPU usage over 15-second intervals for approximately the last hour. - -```sql -SELECT - end_time, - avg_cpu_percent, - avg_instance_cpu_percent -FROM sys.dm_db_resource_stats -ORDER BY end_time DESC; -GO -``` - -It is important to not focus only on the `avg_cpu_percent` column. The `avg_instance_cpu_percent` column includes CPU used by both user and internal workloads. If `avg_instance_cpu_percent` is close to 100%, CPU resources are saturated. In this case, you should troubleshoot high CPU if app throughput is insufficient or query latency is high. - -Learn more in [Resource management in Azure SQL Database](resource-limits-logical-server.md#resource-consumption-by-user-workloads-and-internal-processes). - -Review the examples in [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database) for more queries. - -### Query the top recent 15 queries by CPU usage - -Query Store tracks execution statistics, including CPU usage, for queries. The following query returns the top 15 queries that have run in the last 2 hours, sorted by CPU usage. CPU time is returned in milliseconds. 
- -```sql -WITH AggregatedCPU AS -  (SELECT -    q.query_hash, -    SUM(count_executions * avg_cpu_time / 1000.0) AS total_cpu_ms, -    SUM(count_executions * avg_cpu_time / 1000.0)/ SUM(count_executions) AS avg_cpu_ms, -    MAX(rs.max_cpu_time / 1000.00) AS max_cpu_ms, -    MAX(max_logical_io_reads) max_logical_reads, -    COUNT(DISTINCT p.plan_id) AS number_of_distinct_plans, -    COUNT(DISTINCT p.query_id) AS number_of_distinct_query_ids, -    SUM(CASE WHEN rs.execution_type_desc='Aborted' THEN count_executions ELSE 0 END) AS aborted_execution_count, -    SUM(CASE WHEN rs.execution_type_desc='Regular' THEN count_executions ELSE 0 END) AS regular_execution_count, -    SUM(CASE WHEN rs.execution_type_desc='Exception' THEN count_executions ELSE 0 END) AS exception_execution_count, -    SUM(count_executions) AS total_executions, -    MIN(qt.query_sql_text) AS sampled_query_text -  FROM sys.query_store_query_text AS qt -    JOIN sys.query_store_query AS q ON qt.query_text_id=q.query_text_id -    JOIN sys.query_store_plan AS p ON q.query_id=p.query_id -    JOIN sys.query_store_runtime_stats AS rs ON rs.plan_id=p.plan_id -    JOIN sys.query_store_runtime_stats_interval AS rsi ON rsi.runtime_stats_interval_id=rs.runtime_stats_interval_id -  WHERE -    rs.execution_type_desc IN ('Regular', 'Aborted', 'Exception') AND -    rsi.start_time>=DATEADD(HOUR, -2, GETUTCDATE()) -  GROUP BY q.query_hash), -OrderedCPU AS -  (SELECT *, -    ROW_NUMBER() OVER (ORDER BY total_cpu_ms DESC, query_hash ASC) AS RN -  FROM AggregatedCPU) -SELECT * -FROM OrderedCPU AS OD -WHERE OD.RN<=15 -ORDER BY total_cpu_ms DESC; -GO -``` - -This query groups by a hashed value of the query. If you find a high value in the `number_of_distinct_query_ids` column, investigate if a frequently run query isn't properly parameterized. Non-parameterized queries may be compiled on each execution, which consumes significant CPU and [affects the performance of Query Store](/sql/relational-databases/performance/best-practice-with-the-query-store#Parameterize). 
- -To learn more about an individual query, note the query hash and use it to [Identify the CPU usage and query plan for a given query hash](#identify-the-cpu-usage-and-query-plan-for-a-given-query-hash). - -### Query the most frequently compiled queries by query hash - -Compiling a query plan is a CPU-intensive process. Azure SQL Database [caches plans in memory for reuse](/sql/relational-databases/query-processing-architecture-guide#execution-plan-caching-and-reuse). Some queries may be frequently compiled if they are not parameterized or if [RECOMPILE hints](/sql/t-sql/queries/hints-transact-sql-query) force recompilation. - -Query Store tracks the number of times queries are compiled. Run the following query to identify the top 20 queries in Query Store by compilation count, along with the average number of compilations per minute: - -```sql -SELECT TOP (20) - query_hash, - MIN(initial_compile_start_time) as initial_compile_start_time, - MAX(last_compile_start_time) as last_compile_start_time, - CASE WHEN DATEDIFF(mi,MIN(initial_compile_start_time), MAX(last_compile_start_time)) > 0 - THEN 1.* SUM(count_compiles) / DATEDIFF(mi,MIN(initial_compile_start_time), - MAX(last_compile_start_time)) - ELSE 0 - END as avg_compiles_minute, - SUM(count_compiles) as count_compiles -FROM sys.query_store_query AS q -GROUP BY query_hash -ORDER BY count_compiles DESC; -GO -``` - -To learn more about an individual query, note the query hash and use it to [Identify the CPU usage and query plan for a given query hash](#identify-the-cpu-usage-and-query-plan-for-a-given-query-hash). - -### Identify the CPU usage and query plan for a given query hash - -Run the following query to find the individual query ID, query text, and query execution plans for a given `query_hash`. CPU time is returned in milliseconds. - -Replace the value for the `@query_hash` variable with a valid `query_hash` for your workload. 
- -```sql -declare @query_hash binary(8); - -SET @query_hash = 0x6557BE7936AA2E91; - -with query_ids as ( - SELECT - q.query_hash, - q.query_id, - p.query_plan_hash, - SUM(qrs.count_executions) * AVG(qrs.avg_cpu_time)/1000. as total_cpu_time_ms, - SUM(qrs.count_executions) AS sum_executions, - AVG(qrs.avg_cpu_time)/1000. AS avg_cpu_time_ms - FROM sys.query_store_query q - JOIN sys.query_store_plan p on q.query_id=p.query_id - JOIN sys.query_store_runtime_stats qrs on p.plan_id = qrs.plan_id - WHERE q.query_hash = @query_hash - GROUP BY q.query_id, q.query_hash, p.query_plan_hash) -SELECT qid.*, - qt.query_sql_text, - p.count_compiles, - TRY_CAST(p.query_plan as XML) as query_plan -FROM query_ids as qid -JOIN sys.query_store_query AS q ON qid.query_id=q.query_id -JOIN sys.query_store_query_text AS qt on q.query_text_id = qt.query_text_id -JOIN sys.query_store_plan AS p ON qid.query_id=p.query_id and qid.query_plan_hash=p.query_plan_hash -ORDER BY total_cpu_time_ms DESC; -GO -``` - -This query returns one row for each variation of an execution plan for the `query_hash` across the entire history of your Query Store. The results are sorted by total CPU time. - -### Use interactive Query Store tools to track historic CPU utilization - -If you prefer to use graphic tools, follow these steps to use the interactive Query Store tools in SSMS. - -1. Open SSMS and connect to your database in Object Explorer. -1. Expand the database node in Object Explorer -1. Expand the **Query Store** folder. -1. Open the **Overall Resource Consumption** pane. - -Total CPU time for your database over the last month in milliseconds is shown in the bottom-left portion of the pane. In the default view, CPU time is aggregated by day. 
- -:::image type="content" source="./media/high-cpu-troubleshoot/ssms-query-store-resources-consumption.png" alt-text="Screenshot shows the Overall Resource Consumption view of Query Store in SSMS."::: - -Select **Configure** in the top right of the pane to select a different time period. You can also change the unit of aggregation. For example, you can choose to see data for a specific date range and aggregate the data by hour. - -### Use interactive Query Store tools to identify top queries by CPU time - -Select a bar in the chart to drill in and see queries running in a specific time period. The **Top Resource Consuming Queries** pane will open. Alternatively, you can open **Top Resource Consuming Queries** from the Query Store node under your database in Object Explorer directly. - -:::image type="content" source="./media/high-cpu-troubleshoot/ssms-query-store-top-resource-consuming-queries.png" alt-text="Screenshot shows the Top Resource Consuming Queries pane for Query Store in S S M S."::: - -In the default view, the **Top Resource Consuming Queries** pane shows queries by **Duration (ms)**. Duration may sometimes be lower than CPU time: queries using parallelism may use much more CPU time than their overall duration. Duration may also be higher than CPU time if waits were significant. To see queries by CPU time, select the **Metric** drop-down at the top left of the pane and select **CPU Time(ms)**. - -Each bar in the top-left quadrant represents a query. Select a bar to see details for that query. The top-right quadrant of the screen shows how many execution plans are in Query Store for that query and maps them according to when they were executed and how much of your selected metric was used. Select each **Plan ID** to control which query execution plan is displayed in the bottom half of the screen. 
- -> [!NOTE] -> For a guide to interpreting Query Store views and the shapes which appear in the Top Resource Consumers view, see [Best practices with Query Store](/sql/relational-databases/performance/best-practice-with-the-query-store#start-with-query-performance-troubleshooting). - -## Reduce CPU usage - -Part of your troubleshooting should include learning more about the queries identified in the previous section. You can reduce CPU usage by tuning indexes, modifying your application patterns, tuning queries, and adjusting CPU-related settings for your database. - -- If you found new queries using significant CPU appearing in the workload, validate that indexes have been optimized for those queries. You can [tune indexes manually](#tune-indexes-manually) or [reduce CPU usage with automatic index tuning](#reduce-cpu-usage-with-automatic-index-tuning). Evaluate if your [max degree of parallelism](#reduce-cpu-usage-by-tuning-the-max-degree-of-parallelism) setting is correct for your increased workload. -- If you found that the overall execution count of queries is higher than it used to be, [tune indexes for your highest CPU consuming queries](#tune-indexes-manually) and consider [automatic index tuning](#reduce-cpu-usage-with-automatic-index-tuning). Evaluate if your [max degree of parallelism](#reduce-cpu-usage-by-tuning-the-max-degree-of-parallelism) setting is correct for your increased workload. -- If you found queries in the workload with [parameter sensitive plan (PSP) problems](../identify-query-performance-issues.md), consider [automatic plan correction (force plan)](#reduce-cpu-usage-with-automatic-plan-correction-force-plan). You can also [manually force a plan in Query Store](/sql/relational-databases/system-stored-procedures/sp-query-store-force-plan-transact-sql) or tune the Transact-SQL for the query to result in a consistently high-performing query plan. 
-- If you found evidence that a large amount of compilation or recompilation is occurring, [tune the queries so that they are properly parameterized or do not require recompile hints](#tune-your-application-queries-and-database-settings). -- If you found that queries are using excessive parallelism, [tune the max degree of parallelism](#reduce-cpu-usage-by-tuning-the-max-degree-of-parallelism). - -Consider the following strategies in this section. - -### Reduce CPU usage with automatic index tuning - -Effective index tuning reduces CPU usage for many queries. Optimized indexes reduce the logical and physical reads for a query, which often results in the query needing to do less work. - -Azure SQL Database offers [automatic index management](automatic-tuning-overview.md#automatic-tuning-options) for workloads on primary replicas. Automatic index management uses machine learning to monitor your workload and optimize rowstore disk-based nonclustered indexes for your database. - -[Review performance recommendations](database-advisor-find-recommendations-portal.md), including index recommendations, in the Azure portal. You can apply these recommendations manually or [enable the CREATE INDEX automatic tuning option](automatic-tuning-enable.md) to create and verify the performance of new indexes in your database. - -### Reduce CPU usage with automatic plan correction (force plan) - -Another common cause of high CPU incidents is [execution plan choice regression](/sql/relational-databases/automatic-tuning/automatic-tuning#what-is-execution-plan-choice-regression). Azure SQL Database offers the [force plan](automatic-tuning-overview.md#automatic-tuning-options) automatic tuning option to identify regressions in query execution plans in workloads on primary replicas. With this automatic tuning feature enabled, Azure SQL Database will test if forcing a query execution plan results in reliable improved performance for queries with execution plan regression. 
- -If your database was created after March 2020, the **force plan** automatic tuning option was automatically enabled. If your database was created prior to this time, you may wish to [enable the force plan automatic tuning option](automatic-tuning-enable.md). - -### Tune indexes manually - -Use the methods described in [Identify the causes of high CPU](#identify-the-causes-of-high-cpu) to identify query plans for your top CPU consuming queries. These execution plans will aid you in [identifying and adding nonclustered indexes](performance-guidance.md#identifying-and-adding-missing-indexes) to speed up your queries. - -Each disk-based [nonclustered index](/sql/relational-databases/indexes/clustered-and-nonclustered-indexes-described) in your database requires storage space and must be maintained by the SQL engine. Modify existing indexes instead of adding new indexes when possible and ensure that new indexes successfully reduce CPU usage. For an overview of nonclustered indexes, see [Nonclustered Index Design Guidelines](/sql/relational-databases/sql-server-index-design-guide#Nonclustered). - -For some workloads, columnstore indexes may be the best choice to reduce CPU of frequent read queries. See [Columnstore indexes - Design guidance](/sql/relational-databases/indexes/columnstore-indexes-design-guidance) for high-level recommendations on scenarios when columnstore indexes may be appropriate. - -### Tune your application, queries, and database settings - -In examining your top queries, you may find [application characteristics to tune](performance-guidance.md#application-characteristics) such as "chatty" behavior, workloads that would benefit from sharding, and suboptimal database access design. For read-heavy workloads, consider [read-only replicas to offload read-only query workloads](read-scale-out.md) and [application-tier caching](performance-guidance.md#application-tier-caching) as long-term strategies to scale out frequently read data. 
- -You may also choose to manually tune the top CPU using queries identified in your workload. Manual tuning options include rewriting Transact-SQL statements, [forcing plans](/sql/relational-databases/system-stored-procedures/sp-query-store-force-plan-transact-sql) in Query Store, and applying [query hints](/sql/t-sql/queries/hints-transact-sql-query). - -If you identify cases where queries sometimes use an execution plan which is not optimal for performance, review the solutions in [queries that have parameter sensitive plan (PSP) problems](../identify-query-performance-issues.md). - -If you identify non-parameterized queries with a high number of plans, consider parameterizing these queries, making sure to fully declare parameter data types, including length and precision. This may be done by modifying the queries, creating a [plan guide to force parameterization](/sql/relational-databases/performance/specify-query-parameterization-behavior-by-using-plan-guides) of a specific query, or by enabling [forced parameterization](/sql/relational-databases/query-processing-architecture-guide#execution-plan-caching-and-reuse) at the database level. - -If you identify queries with high compilation rates, identify what causes the frequent compilation. The most common cause of frequent compilation is [RECOMPILE hints](/sql/t-sql/queries/hints-transact-sql-query). Whenever possible, identify when the `RECOMPILE` hint was added and what problem it was meant to solve. Investigate whether an alternate performance tuning solution can be implemented to provide consistent performance for frequently running queries without a `RECOMPILE` hint. - -### Reduce CPU usage by tuning the max degree of parallelism - -The [max degree of parallelism (MAXDOP)](configure-max-degree-of-parallelism.md#overview) setting controls intra-query parallelism in the database engine. Higher MAXDOP values generally result in more parallel threads per query, and faster query execution. 
- -In some cases, a large number of parallel queries running concurrently can slow down a workload and cause high CPU usage. Excessive parallelism is most likely to occur in databases with a large number of vCores where MAXDOP is set to a high number or to zero. When MAXDOP is set to zero, the database engine sets the number of [schedulers](/sql/relational-databases/thread-and-task-architecture-guide#sql-server-task-scheduling) to be used by parallel threads to the total number of logical cores or 64, whichever is smaller. - -You can identify the max degree of parallelism setting for your database with Transact-SQL. Connect to your database with SSMS or Azure Data Studio and run the following query: - -```sql -SELECT - name, - value, - value_for_secondary, - is_value_default -FROM sys.database_scoped_configurations -WHERE name=N'MAXDOP'; -GO -``` - -Consider experimenting with small changes in the MAXDOP configuration at the database level, or modifying individual problematic queries to use a non-default MAXDOP using a query hint. For more information, see the examples in [configure max degree of parallelism](configure-max-degree-of-parallelism.md). - -## When to add CPU resources - -You may find that your workload's queries and indexes are properly tuned, or that performance tuning requires changes that you cannot make in the short term due to internal processes or other reasons. Adding more CPU resources may be beneficial for these databases. You can [scale database resources with minimal downtime](scale-resources.md). - -You can add more CPU resources to your Azure SQL Database by configuring the vCore count or the [hardware configuration](service-tiers-sql-database-vcore.md#hardware-configuration) for databases using the [vCore purchasing model](service-tiers-sql-database-vcore.md). - -Under the [DTU-based purchasing model](service-tiers-dtu.md), you can raise your service tier and increase the number of database transaction units (DTUs). 
A DTU represents a blended measure of CPU, memory, reads, and writes. One benefit of the vCore purchasing model is that it allows more granular control over the hardware in use and the number of vCores. You can [migrate Azure SQL Database from the DTU-based model to the vCore-based model](migrate-dtu-to-vcore.md) to transition between purchasing models. - -## Next steps - -Learn more about monitoring and performance tuning Azure SQL Database in the following articles: - -* [Monitoring Azure SQL Database and Azure SQL Managed Instance performance using dynamic management views](monitoring-with-dmvs.md) -* [SQL Server index architecture and design guide](/sql/relational-databases/sql-server-index-design-guide) -* [Enable automatic tuning to monitor queries and improve workload performance](automatic-tuning-enable.md) -* [Query processing architecture guide](/sql/relational-databases/query-processing-architecture-guide) -* [Best practices with Query Store](/sql/relational-databases/performance/best-practice-with-the-query-store) -* [Detectable types of query performance bottlenecks in Azure SQL Database](../identify-query-performance-issues.md) -* [Analyze and prevent deadlocks in Azure SQL Database](analyze-prevent-deadlocks.md) \ No newline at end of file diff --git a/articles/azure-sql/database/how-to-content-reference-guide.md b/articles/azure-sql/database/how-to-content-reference-guide.md deleted file mode 100644 index b99347c708af2..0000000000000 --- a/articles/azure-sql/database/how-to-content-reference-guide.md +++ /dev/null @@ -1,133 +0,0 @@ ---- -title: Configure & manage content reference -description: Find a reference of content that teaches you to configure and manage Azure SQL Database. 
-services: sql-database -ms.service: sql-database -ms.subservice: deployment-configuration -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: guide -author: LitKnd -ms.author: kendralittle -ms.date: 01/14/2020 ---- -# Configure and manage content reference - Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this article you can find a content reference of various guides, scripts, and explanations that can help you to manage and configure your Azure SQL Database. - -## Load data - -- [Migrate to SQL Database](migrate-to-database-from-sql-server.md) -- Learn how to [manage SQL Database after migration](manage-data-after-migrating-to-database.md). -- [Copy a database](database-copy.md) -- [Import a DB from a BACPAC](database-import.md) -- [Export a DB to BACPAC](database-export.md) -- [Load data with BCP](../load-from-csv-with-bcp.md) -- [Load data with ADF](../../data-factory/connector-azure-sql-database.md?toc=/azure/sql-database/toc.json) - -## Configure features - -- [Configure Azure Active Directory (Azure AD) auth](authentication-aad-configure.md) -- [Configure Conditional Access](conditional-access-configure.md) -- [Multi-factor Azure AD auth](authentication-mfa-ssms-overview.md) -- [Configure Multi-Factor Authentication](authentication-mfa-ssms-configure.md) -- [Configure backup retention](long-term-backup-retention-configure.md) for a database to keep your backups on Azure Blob Storage. -- [Configure geo-replication](active-geo-replication-overview.md) to keep a replica of your database in another region. -- [Configure auto-failover group](auto-failover-group-configure-sql-db.md) to automatically failover a group of single or pooled databases to a secondary server in another region in the event of a disaster. 
-- [Configure temporal retention policy](temporal-tables-retention-policy.md) -- [Configure TDE with BYOK](transparent-data-encryption-byok-configure.md) -- [Rotate TDE BYOK keys](transparent-data-encryption-byok-key-rotation.md) -- [Remove TDE protector](transparent-data-encryption-byok-remove-tde-protector.md) -- [Configure In-Memory OLTP](../in-memory-oltp-configure.md) -- [Configure Azure Automation](automation-manage.md) -- [Configure transactional replication](replication-to-sql-database.md) to replicate your date between databases. -- [Configure threat detection](threat-detection-configure.md) to let Azure SQL Database identify suspicious activities such as SQL Injection or access from suspicious locations. -- [Configure dynamic data masking](dynamic-data-masking-configure-portal.md) to protect your sensitive data. -- [Configure security for geo-replicas](active-geo-replication-security-configure.md). - -## Monitor and tune your database - -- [Manual tuning](performance-guidance.md) -- [Use DMVs to monitor performance](monitoring-with-dmvs.md) -- [Use Query store to monitor performance](/sql/relational-databases/performance/best-practice-with-the-query-store#Insight) -- [Enable automatic tuning](automatic-tuning-enable.md) to let Azure SQL Database optimize performance of your workload. -- [Enable e-mail notifications for automatic tuning](automatic-tuning-email-notifications-configure.md) to get information about tuning recommendations. -- [Apply performance recommendations](database-advisor-find-recommendations-portal.md) and optimize your database. -- [Create alerts](alerts-insights-configure-portal.md) to get notifications from Azure SQL Database. -- [Troubleshoot connectivity](troubleshoot-common-errors-issues.md) if you notice some connectivity issues between the applications and the database. You can also use [Resource Health for connectivity issues](resource-health-to-troubleshoot-connectivity.md). 
-- [Troubleshoot performance with Intelligent Insights](intelligent-insights-troubleshoot-performance.md) -- [Manage file space](file-space-manage.md) to monitor storage usage in your database. -- [Use Intelligent Insights diagnostics log](intelligent-insights-use-diagnostics-log.md) -- [Monitor In-memory OLTP space](../in-memory-oltp-monitor-space.md) - -### Extended events - -- [Extended events](xevent-db-diff-from-svr.md) -- [Store Extended events into event file](xevent-code-event-file.md) -- [Store Extended events into ring buffer](xevent-code-ring-buffer.md) - -## Query distributed data - -- [Query vertically partitioned data](elastic-query-getting-started-vertical.md) across multiple databases. -- [Report across scaled-out data tier](elastic-query-horizontal-partitioning.md). -- [Query across tables with different schemas](elastic-query-vertical-partitioning.md). - -### Data sync - -- [SQL Data Sync](sql-data-sync-data-sql-server-sql-database.md) -- [Data Sync Agent](sql-data-sync-agent-overview.md) -- [Replicate schema changes](sql-data-sync-update-sync-schema.md) -- [Monitor with OMS](./monitor-tune-overview.md) -- [Best practices for Data Sync](sql-data-sync-best-practices.md) -- [Troubleshoot Data Sync](sql-data-sync-troubleshoot.md) - -## Elastic Database jobs - -- [Create and manage](elastic-jobs-powershell-create.md) Elastic Database Jobs using PowerShell. -- [Create and manage](elastic-jobs-tsql-create-manage.md) Elastic Database Jobs using Transact-SQL. -- [Migrate from old Elastic job](elastic-jobs-migrate.md). - -## Database sharding - -- [Upgrade elastic database client library](elastic-scale-upgrade-client-library.md). -- [Create sharded app](elastic-scale-get-started.md). -- [Query horizontally sharded data](elastic-query-getting-started.md). -- Run [Multi-shard queries](elastic-scale-multishard-querying.md). -- [Move sharded data](elastic-scale-configure-deploy-split-and-merge.md). 
-- [Configure security](elastic-scale-split-merge-security-configuration.md) in database shards. -- [Add a shard](elastic-scale-add-a-shard.md) to the current set of database shards. -- [Fix shard map problems](elastic-database-recovery-manager.md). -- [Migrate sharded DB](elastic-convert-to-use-elastic-tools.md). -- [Create counters](elastic-database-perf-counters.md). -- [Use entity framework](elastic-scale-use-entity-framework-applications-visual-studio.md) to query sharded data. -- [Use Dapper framework](elastic-scale-working-with-dapper.md) to query sharded data. - -## Develop applications - -- [Connectivity](connect-query-content-reference-guide.md#libraries) -- [Use Spark Connector](spark-connector.md) -- [Authenticate app](application-authentication-get-client-id-keys.md) -- [Use batching for better performance](../performance-improve-use-batching.md) -- [Connectivity guidance](troubleshoot-common-connectivity-issues.md) -- [DNS aliases](dns-alias-overview.md) -- [Setup DNS alias PowerShell](dns-alias-powershell-create.md) -- [Ports - ADO.NET](adonet-v12-develop-direct-route-ports.md) -- [C and C ++](develop-cplusplus-simple.md) -- [Excel](connect-excel.md) - -## Design applications - -- [Design for disaster recovery](designing-cloud-solutions-for-disaster-recovery.md) -- [Design for elastic pools](disaster-recovery-strategies-for-applications-with-elastic-pool.md) -- [Design for app upgrades](manage-application-rolling-upgrade.md) - -### Design Multi-tenant software as a service (SaaS) applications - -- [SaaS design patterns](saas-tenancy-app-design-patterns.md) -- [SaaS video indexer](saas-tenancy-video-index-wingtip-brk3120-20171011.md) -- [SaaS app security](saas-tenancy-elastic-tools-multi-tenant-row-level-security.md) - -## Next steps - -- Learn more about [How-to guides for Azure SQL Managed Instance](../managed-instance/how-to-content-reference-guide.md) \ No newline at end of file diff --git a/articles/azure-sql/database/hyperscale-architecture.md 
b/articles/azure-sql/database/hyperscale-architecture.md deleted file mode 100644 index 721a851094af2..0000000000000 --- a/articles/azure-sql/database/hyperscale-architecture.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: Hyperscale distributed functions architecture -description: Learn how Hyperscale databases are architected to scale out storage and compute resources for Azure SQL Database. -services: sql-database -ms.service: sql-database -ms.subservice: service-overview -ms.topic: conceptual -author: dimitri-furman -ms.author: dfurman -ms.reviewer: kendralittle, mathoma -ms.date: 2/17/2022 ---- - -# Hyperscale distributed functions architecture - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -The [Hyperscale service tier](service-tier-hyperscale.md) utilizes an architecture with highly scalable storage and compute performance tiers. This article describes the components that enable customers to quickly scale Hyperscale databases while benefiting from nearly instantaneous backups and highly scalable transaction logging. - -## Hyperscale architecture overview - -Traditional database engines centralize data management functions in a single process: even so called distributed databases in production today have multiple copies of a monolithic data engine. - -Hyperscale databases follow a different approach. Hyperscale separates the query processing engine, where the semantics of various data engines diverge, from the components that provide long-term storage and durability for the data. In this way, storage capacity can be smoothly scaled out as far as needed. The initially supported storage limit is 100 TB. - -High availability and named replicas share the same storage components, so no data copy is required to spin up a new replica. 
- -The following diagram illustrates the different types of nodes in a Hyperscale database: - -:::image type="content" source="./media/service-tier-Hyperscale/Hyperscale-architecture.png" alt-text="Diagram that shows that Hyperscale's compute tier consists of a primary compute note and secondary compute nodes, each with RBPEX data cache. The log service communicates both with compute notes and page servers. Page servers exist in their own tier, and also have RBPEX data cache." lightbox="./media/service-tier-Hyperscale/Hyperscale-architecture.png"::: - -A Hyperscale database contains the following types of components: compute nodes, page servers, the log service, and Azure storage. - -## Compute - -The compute node is where the relational engine lives. The compute node is where language, query, and transaction processing occur. All user interactions with a Hyperscale database happen through compute nodes. - -Compute nodes have SSD-based caches called Resilient Buffer Pool Extension (RBPEX Data Cache). RBPEX Data Cache is a non-covering data cache that minimizes the number of network round trips required to fetch a page of data. - -Hyperscale databases have one primary compute node where the read-write workload and transactions are processed. One or more secondary compute nodes act as hot standby nodes for failover purposes. Secondary compute nodes can serve as read-only compute nodes to offload read workloads when desired. [Named replicas](service-tier-hyperscale-replicas.md#named-replica-in-preview) are secondary compute nodes designed to enable massive OLTP [read-scale out](read-scale-out.md) scenarios and to improve Hybrid Transactional and Analytical Processing (HTAP) workloads. - -The database engine running on Hyperscale compute nodes is the same as in other Azure SQL Database service tiers. 
When users interact with the database engine on Hyperscale compute nodes, the supported surface area and engine behavior are the same as in other service tiers, with the exception of [known limitations](service-tier-hyperscale.md#known-limitations). - -## Page server - -Page servers are systems representing a scaled-out storage engine. Each page server is responsible for a subset of the pages in the database. Nominally, each page server controls either up to 128 GB or up to 1 TB of data. Each page server also has a replica that is kept for redundancy and availability. - -The job of a page server is to serve database pages out to the compute nodes on demand, and to keep the pages updated as transactions update data. Page servers are kept up to date by playing transaction log records from the log service. - -Page servers also maintain covering SSD-based caches to enhance performance. Long-term storage of data pages is kept in Azure Storage for durability. - -## Log service - -The log service accepts transaction log records that correspond to data changes from the primary compute replica. Page servers then receive the log records from the log service and apply the changes to their respective slices of data. Additionally, compute secondary replicas receive log records from the log service and replay only the changes to pages already in their buffer pool or local RBPEX cache. All data changes from the primary compute replica are propagated through the log service to all the secondary compute replicas and page servers. - -Finally, transaction log records are pushed out to long-term storage in Azure Storage, which is a virtually infinite storage repository. This mechanism removes the need for frequent log truncation. The log service has local memory and SSD caches to speed up access to log records. - -The log on Hyperscale is practically infinite, with the restriction that a single transaction cannot generate more than 1 TB of log. 
Additionally, if using [Change Data Capture](/sql/relational-databases/track-changes/about-change-data-capture-sql-server), at most 1 TB of log can be generated since the start of the oldest active transaction. Avoid unnecessarily large transactions to stay below this limit. - -## Azure storage - -Azure Storage contains all data files in a database. Page servers keep data files in Azure Storage up to date. This storage is also used for backup purposes and may be replicated between regions based on choice of storage redundancy. - -Backups are implemented using storage snapshots of data files. Restore operations using snapshots are fast regardless of data size. A database can be restored to any point in time within its backup retention period. - -Hyperscale supports configurable storage redundancy. When creating a Hyperscale database, you can choose read-access geo-redundant storage (RA-GRS), zone-redundant storage (ZRS)(preview), or locally redundant storage (LRS)(preview) Azure standard storage. The selected storage redundancy option will be used for the lifetime of the database for both data storage redundancy and [backup storage redundancy](automated-backups-overview.md#backup-storage-redundancy). 
- -## Next steps - -Learn more about Hyperscale in the following articles: - -- [Hyperscale service tier](service-tier-hyperscale.md) -- [Azure SQL Database Hyperscale FAQ](service-tier-hyperscale-frequently-asked-questions-faq.yml) -- [Quickstart: Create a Hyperscale database in Azure SQL Database](hyperscale-database-create-quickstart.md) -- [Azure SQL Database Hyperscale named replicas FAQ](service-tier-hyperscale-named-replicas-faq.yml) diff --git a/articles/azure-sql/database/hyperscale-database-create-quickstart.md b/articles/azure-sql/database/hyperscale-database-create-quickstart.md deleted file mode 100644 index 3275a08464d2f..0000000000000 --- a/articles/azure-sql/database/hyperscale-database-create-quickstart.md +++ /dev/null @@ -1,401 +0,0 @@ ---- -title: Create a Hyperscale database -description: Create a Hyperscale database in Azure SQL Database using the Azure portal, Transact-SQL, PowerShell, or the Azure CLI. -services: sql-database -ms.service: sql-database -ms.subservice: deployment-configuration -ms.topic: quickstart -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 2/17/2022 ---- -# Quickstart: Create a Hyperscale database in Azure SQL Database - -In this quickstart, you create a [logical server in Azure](logical-servers.md) and a [Hyperscale](service-tier-hyperscale.md) database in Azure SQL Database using the Azure portal, a PowerShell script, or an Azure CLI script, with the option to create one or more [High Availability (HA) replicas](service-tier-hyperscale-replicas.md#high-availability-replica). If you would like to use an existing logical server in Azure, you can also create a Hyperscale database using Transact-SQL. - -## Prerequisites - -- An active Azure subscription. If you don't have one, [create a free account](https://azure.microsoft.com/free/). 
-- The latest version of either [Azure PowerShell](/powershell/azure/install-az-ps) or [Azure CLI](/cli/azure/install-azure-cli-windows), if you would like to follow the quickstart programmatically. Alternately, you can complete the quickstart in the Azure portal. -- An existing [logical server](logical-servers.md) in Azure is required if you would like to create a Hyperscale database with Transact-SQL. For this approach, you will need to install [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms), [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio), or the client of your choice to run Transact-SQL commands ([sqlcmd](/sql/tools/sqlcmd-utility), etc.). - -## Create a Hyperscale database - -This quickstart creates a single database in the [Hyperscale service tier](service-tier-hyperscale.md). - -# [Portal](#tab/azure-portal) - -To create a single database in the Azure portal, this quickstart starts at the Azure SQL page. - -1. Browse to the [Select SQL Deployment option](https://portal.azure.com/#create/Microsoft.AzureSQL) page. -1. Under **SQL databases**, leave **Resource type** set to **Single database**, and select **Create**. - - :::image type="content" source="media/hyperscale-database-create-quickstart/azure-sql-create-resource.png" alt-text="Screenshot of the Azure SQL page in the Azure portal. The page offers the ability to select a deployment option including creating SQL databases, SQL managed instances, and SQL virtual machines." lightbox="media/hyperscale-database-create-quickstart/azure-sql-create-resource.png"::: - -1. On the **Basics** tab of the **Create SQL Database** form, under **Project details**, select the desired Azure **Subscription**. -1. For **Resource group**, select **Create new**, enter *myResourceGroup*, and select **OK**. -1. For **Database name**, enter *mySampleDatabase*. -1. 
For **Server**, select **Create new**, and fill out the **New server** form with the following values: - - **Server name**: Enter *mysqlserver*, and add some characters for uniqueness. We can't provide an exact server name to use because server names must be globally unique for all servers in Azure, not just unique within a subscription. Enter a name such as *mysqlserver12345*, and the portal will let you know if it's available. - - **Server admin login**: Enter *azureuser*. - - **Password**: Enter a password that meets requirements, and enter it again in the **Confirm password** field. - - **Location**: Select a location from the dropdown list. - - Select **OK**. - -1. Under **Compute + storage**, select **Configure database**. -1. This quickstart creates a Hyperscale database. For **Service tier**, select **Hyperscale**. - - :::image type="content" source="media/hyperscale-database-create-quickstart/create-database-select-hyperscale-service-tier.png" alt-text="Screenshot of the service and compute tier configuration page for a new database in Azure SQL Database. The Hyperscale service tier has been selected." lightbox="media/hyperscale-database-create-quickstart/create-database-select-hyperscale-service-tier.png"::: - -1. Under **Compute Hardware**, select **Change configuration**. Review the available hardware configurations and select the most appropriate configuration for your database. For this example, we will select the **Gen5** configuration. -1. Select **OK** to confirm the hardware generation. -1. Under **Save money**, review if you qualify to use Azure Hybrid Benefit for this database. If so, select **Yes** and then confirm you have the required license. -1. Optionally, adjust the **vCores** slider if you would like to increase the number of vCores for your database. For this example, we will select 2 vCores. -1. 
Adjust the **High-Availability Secondary Replicas** slider to create one [High Availability (HA) replica](service-tier-hyperscale-replicas.md#high-availability-replica). -1. Select **Apply**. -1. Carefully consider the configuration option for **Backup storage redundancy** when creating a Hyperscale database. Storage redundancy can only be specified during the database creation process for Hyperscale databases. You may choose locally redundant (preview), zone-redundant (preview), or geo-redundant storage. The selected storage redundancy option will be used for the lifetime of the database for both [data storage redundancy](hyperscale-architecture.md#azure-storage) and [backup storage redundancy](automated-backups-overview.md#backup-storage-redundancy). Existing databases can migrate to different storage redundancy using [database copy](database-copy.md) or point in time restore. - - :::image type="content" source="media/hyperscale-database-create-quickstart/azure-sql-create-database-basics-tab.png" alt-text="Screenshot of the basics tab in the create database process after the Hyperscale service tier has been selected and configured." lightbox="media/hyperscale-database-create-quickstart/azure-sql-create-database-basics-tab.png"::: - - -1. Select **Next: Networking** at the bottom of the page. -1. On the **Networking** tab, for **Connectivity method**, select **Public endpoint**. -1. For **Firewall rules**, set **Add current client IP address** to **Yes**. Leave **Allow Azure services and resources to access this server** set to **No**. -1. Select **Next: Security** at the bottom of the page. - - :::image type="content" source="media/hyperscale-database-create-quickstart/azure-sql-database-configure-network.png" alt-text="Screenshot of the networking configuration page for a new database in Azure SQL Database that enables you to configure endpoints and optionally add a firewall rule for your client IP address." 
lightbox="media/hyperscale-database-create-quickstart/azure-sql-database-configure-network.png"::: - -1. Optionally, enable [Microsoft Defender for SQL](../database/azure-defender-for-sql.md). -1. Select **Next: Additional settings** at the bottom of the page. -1. On the **Additional settings** tab, in the **Data source** section, for **Use existing data**, select **Sample**. This creates an AdventureWorksLT sample database so there's some tables and data to query and experiment with, as opposed to an empty blank database. -1. Select **Review + create** at the bottom of the page: - - :::image type="content" source="media/hyperscale-database-create-quickstart/azure-sql-create-database-sample-data.png" alt-text="Screenshot of the 'Additional Settings' screen to create a database in Azure SQL Database allows you to select sample data." lightbox="media/hyperscale-database-create-quickstart/azure-sql-create-database-sample-data.png"::: - -1. On the **Review + create** page, after reviewing, select **Create**. - -# [Azure CLI](#tab/azure-cli) - -The Azure CLI code blocks in this section create a resource group, server, single database, and server-level IP firewall rule for access to the server. Make sure to record the generated resource group and server names, so you can manage these resources later. - -[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment-h3.md)] - -[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../includes/cli-launch-cloud-shell-sign-in.md)] - -### Set parameter values - -The following values are used in subsequent commands to create the database and required resources. Server names need to be globally unique across all of Azure so the $RANDOM function is used to create the server name. - -Before running the sample code, change the `location` as appropriate for your environment. 
Replace `0.0.0.0` with the IP address range to match your specific environment. Use the public IP address of the computer you're using to restrict access to the server to only your IP address. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/create-and-configure-database/create-and-configure-database.sh" range="4-18"::: - -```azurecli-interactive -let "randomIdentifier=$RANDOM*$RANDOM" -location="East US" -resourceGroupName="myResourceGroup" -tag="create-and-configure-database" -serverName="mysqlserver-$randomIdentifier" -databaseName="mySampleDatabase" -login="azureuser" -password="Pa$$w0rD-$randomIdentifier" -# Specify appropriate IP address values for your environment -# to limit access to the SQL Database server -startIp=0.0.0.0 -endIp=0.0.0.0 - -echo "Using resource group $resourceGroupName with login: $login, password: $password..." - -``` - -### Create a resource group - -Create a resource group with the [az group create](/cli/azure/group) command. An Azure resource group is a logical container into which Azure resources are deployed and managed. The following example creates a resource group in the location specified for the `location` parameter in the prior step: - -```azurecli-interactive -echo "Creating $resourceGroupName in $location..." -az group create --name $resourceGroupName --location "$location" --tag $tag - -``` - -### Create a server - -Create a [logical server](logical-servers.md) with the [az sql server create](/cli/azure/sql/server) command. - -```azurecli-interactive - -echo "Creating $serverName in $location..." -az sql server create --name $serverName --resource-group $resourceGroupName --location "$location" --admin-user $login --admin-password $password - -``` - -### Configure a server-based firewall rule - -Create a firewall rule with the [az sql server firewall-rule create](/cli/azure/sql/server/firewall-rule) command. - -```azurecli-interactive -echo "Configuring firewall..." 
-az sql server firewall-rule create --resource-group $resourceGroupName --server $serverName -n AllowYourIp --start-ip-address $startIp --end-ip-address $endIp - -``` - -### Create a single database - -Create a database in the [Hyperscale service tier](service-tier-hyperscale.md) with the [az sql db create](/cli/azure/sql/db) command. - -When creating a Hyperscale database, carefully consider the setting for `backup-storage-redundancy`. Storage redundancy can only be specified during the database creation process for Hyperscale databases. You may choose locally redundant (preview), zone-redundant (preview), or geo-redundant storage. The selected storage redundancy option will be used for the lifetime of the database for both [data storage redundancy](hyperscale-architecture.md#azure-storage) and [backup storage redundancy](automated-backups-overview.md#backup-storage-redundancy). Existing databases can migrate to different storage redundancy using [database copy](database-copy.md) or point in time restore. Allowed values for the `backup-storage-redundancy` parameter are: `Local`, `Zone`, `Geo`. Unless explicitly specified, databases will be configured to use geo-redundant backup storage. - -Run the following command to create a Hyperscale database populated with AdventureWorksLT sample data. The database uses Gen5 hardware with 2 vCores. Geo-redundant backup storage is used for the database. The command also creates one [High Availability (HA) replica](service-tier-hyperscale-replicas.md#high-availability-replica). - -```azurecli -az sql db create \ - --resource-group $resourceGroupName \ - --server $serverName \ - --name $databaseName \3 - --sample-name AdventureWorksLT \ - --edition Hyperscale \ - --compute-model Provisioned \ - --family Gen5 \ - --capacity 2 \ - --backup-storage-redundancy Geo \ - --ha-replicas 1 - -``` - -# [PowerShell](#tab/azure-powershell) - -You can create a resource group, server, and single database using Azure PowerShell. 
- -### Launch Azure Cloud Shell - -The Azure Cloud Shell is a free interactive shell that you can use to run the steps in this article. It has common Azure tools preinstalled and configured to use with your account. - -To open the Cloud Shell, just select **Try it** from the upper right corner of a code block. You can also launch Cloud Shell in a separate browser tab by going to [https://shell.azure.com](https://shell.azure.com). - -When Cloud Shell opens, verify that **PowerShell** is selected for your environment. Subsequent sessions will use Azure CLI in a Bash environment, Select **Copy** to copy the blocks of code, paste it into the Cloud Shell, and press **Enter** to run it. - -### Set parameter values - -The following values are used in subsequent commands to create the database and required resources. Server names need to be globally unique across all of Azure so the Get-Random cmdlet is used to create the server name. - -Before running the sample code, change the `location` as appropriate for your environment. Replace `0.0.0.0` with the IP address range to match your specific environment. Use the public IP address of the computer you're using to restrict access to the server to only your IP address. - -```azurepowershell-interactive - # Set variables for your server and database - $resourceGroupName = "myResourceGroup" - $location = "eastus" - $adminLogin = "azureuser" - $password = "Pa$$w0rD-$(Get-Random)" - $serverName = "mysqlserver-$(Get-Random)" - $databaseName = "mySampleDatabase" - - # The ip address range that you want to allow to access your server - $startIp = "0.0.0.0" - $endIp = "0.0.0.0" - - # Show randomized variables - Write-host "Resource group name is" $resourceGroupName - Write-host "Server name is" $serverName - Write-host "Password is" $password - -``` - -### Create resource group - -Create an Azure resource group with [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup). 
A resource group is a logical container into which Azure resources are deployed and managed. - -```azurepowershell-interactive - Write-host "Creating resource group..." - $resourceGroup = New-AzResourceGroup -Name $resourceGroupName -Location $location -Tag @{Owner="SQLDB-Samples"} - $resourceGroup - -``` - -### Create a server - -Create a server with the [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) cmdlet. - -```azurepowershell-interactive - Write-host "Creating primary server..." - $server = New-AzSqlServer -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -Location $location ` - -SqlAdministratorCredentials $(New-Object -TypeName System.Management.Automation.PSCredential ` - -ArgumentList $adminLogin, $(ConvertTo-SecureString -String $password -AsPlainText -Force)) - $server - -``` - -### Create a firewall rule - -Create a server firewall rule with the [New-AzSqlServerFirewallRule](/powershell/module/az.sql/new-azsqlserverfirewallrule) cmdlet. - -```azurepowershell-interactive - Write-host "Configuring server firewall rule..." - $serverFirewallRule = New-AzSqlServerFirewallRule -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -FirewallRuleName "AllowedIPs" -StartIpAddress $startIp -EndIpAddress $endIp - $serverFirewallRule - -``` - -### Create a single database - -Create a single database with the [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase) cmdlet. - -When creating a Hyperscale database, carefully consider the setting for `BackupStorageRedundancy`. Storage redundancy can only be specified during the database creation process for Hyperscale databases. You may choose locally redundant (preview), zone-redundant (preview), or geo-redundant storage. 
The selected storage redundancy option will be used for the lifetime of the database for both [data storage redundancy](hyperscale-architecture.md#azure-storage) and [backup storage redundancy](automated-backups-overview.md#backup-storage-redundancy). Existing databases can migrate to different storage redundancy using [database copy](database-copy.md) or point in time restore. Allowed values for the `BackupStorageRedundancy` parameter are: `Local`, `Zone`, `Geo`. Unless explicitly specified, databases will be configured to use geo-redundant backup storage. - -Run the following command to create a Hyperscale database populated with AdventureWorksLT sample data. The database uses Gen5 hardware with 2 vCores. Geo-redundant backup storage is used for the database. The command also creates one [High Availability (HA) replica](service-tier-hyperscale-replicas.md#high-availability-replica). - -```azurepowershell-interactive - Write-host "Creating a gen5 2 vCore Hyperscale database..." - $database = New-AzSqlDatabase -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -DatabaseName $databaseName ` - -Edition Hyperscale ` - -ComputeModel Provisioned ` - -ComputeGeneration Gen5 ` - -VCore 2 ` - -MinimumCapacity 2 ` - -SampleName "AdventureWorksLT" ` - -BackupStorageRedundancy Geo ` - -HighAvailabilityReplicaCount 1 - $database - -``` - -# [Transact-SQL](#tab/t-sql) - -To create a Hyperscale database with Transact-SQL, you must first [create or identify connection information for an existing logical server](logical-servers.md) in Azure. - -Connect to the master database using [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms), [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio), or the client of your choice to run Transact-SQL commands ([sqlcmd](/sql/tools/sqlcmd-utility), etc.). - -When creating a Hyperscale database, carefully consider the setting for `BACKUP_STORAGE_REDUNDANCY`. 
Storage redundancy can only be specified during the database creation process for Hyperscale databases. You may choose locally redundant (preview), zone-redundant (preview), or geo-redundant storage. The selected storage redundancy option will be used for the lifetime of the database for both [data storage redundancy](hyperscale-architecture.md#azure-storage) and [backup storage redundancy](automated-backups-overview.md#backup-storage-redundancy). Existing databases can migrate to different storage redundancy using [database copy](database-copy.md) or point in time restore. Allowed values for the `BackupStorageRedundancy` parameter are: `LOCAL`, `ZONE`, `GEO`. Unless explicitly specified, databases will be configured to use geo-redundant backup storage. - -Run the following Transact-SQL command to create a new Hyperscale database with Gen 5 hardware, 2 vCores, and geo-redundant backup storage. You must specify both the edition and service objective in the `CREATE DATABASE` statement. Refer to the [resource limits](./resource-limits-vcore-single-databases.md#hyperscale---provisioned-compute---gen4) for a list of valid service objectives, such as `HS_Gen5_2`. - -This example code creates an empty database. If you would like to create a database with sample data, use the Azure portal, Azure CLI, or PowerShell examples in this quickstart. - -```sql -CREATE DATABASE [myHyperscaleDatabase] - (EDITION = 'Hyperscale', SERVICE_OBJECTIVE = 'HS_Gen5_2', BACKUP_STORAGE_REDUNDANCY= 'GEO'); -GO -``` - -Refer to [CREATE DATABASE (Transact-SQL)](/sql/t-sql/statements/create-database-transact-sql?view=azuresqldb-current&preserve-view=true) for more parameters and options. 
- -To add one or more [High Availability (HA) replicas](service-tier-hyperscale-replicas.md#high-availability-replica) to your database, use the **Compute and storage** pane for the database in the Azure portal, the [Set-AzSqlDatabase](/powershell/module/az.sql/set-azsqldatabase) PowerShell command, or the [az sql db update](/cli/azure/sql/db#az_sql_db_update) Azure CLI command. - ---- - -## Query the database - -Once your database is created, you can use the **Query editor (preview)** in the Azure portal to connect to the database and query data. If you prefer, you can alternately query the database by [connecting with Azure Data Studio](/sql/azure-data-studio/quickstart-sql-database), [SQL Server Management Studio (SSMS)](connect-query-ssms.md), or the client of your choice to run Transact-SQL commands ([sqlcmd](/sql/tools/sqlcmd-utility), etc.). - -1. In the portal, search for and select **SQL databases**, and then select your database from the list. -1. On the page for your database, select **Query editor (preview)** in the left menu. -1. Enter your server admin login information, and select **OK**. - - :::image type="content" source="media/hyperscale-database-create-quickstart/query-editor-azure-portal-authenticate.png" alt-text="Screenshot of the Query editor (preview) pane in Azure SQL Database gives two options for authentication. In this example, we have filled in Login and Password under SQL server authentication." lightbox="media/hyperscale-database-create-quickstart/query-editor-azure-portal-authenticate.png"::: - -1. If you created your Hyperscale database from the AdventureWorksLT sample database, enter the following query in the **Query editor** pane. 
- - ```sql - SELECT TOP 20 pc.Name as CategoryName, p.name as ProductName - FROM SalesLT.ProductCategory pc - JOIN SalesLT.Product p - ON pc.productcategoryid = p.productcategoryid; - ``` - - If you created an empty database using [the Transact-SQL sample code](?tabs=t-sql#create-a-hyperscale-database), enter another example query in the **Query editor** pane, such as the following: - - ```sql - CREATE TABLE dbo.TestTable( - TestTableID int IDENTITY(1,1) NOT NULL, - TestTime datetime NOT NULL, - TestMessage nvarchar(4000) NOT NULL, - CONSTRAINT PK_TestTable_TestTableID PRIMARY KEY CLUSTERED (TestTableID ASC) - ) - GO - - ALTER TABLE dbo.TestTable ADD CONSTRAINT DF_TestTable_TestTime DEFAULT (getdate()) FOR TestTime - GO - - INSERT dbo.TestTable (TestMessage) - VALUES (N'This is a test'); - GO - - SELECT TestTableID, TestTime, TestMessage - FROM dbo.TestTable; - GO - ``` - -1. Select **Run**, and then review the query results in the **Results** pane. - - :::image type="content" source="media/hyperscale-database-create-quickstart/query-editor-azure-portal-run-query.png" alt-text="Screenshot of the Query editor (preview) pane in Azure SQL Database after a query has been run against AdventureWorks sample data." lightbox="media/hyperscale-database-create-quickstart/query-editor-azure-portal-run-query.png"::: - -1. Close the **Query editor** page, and select **OK** when prompted to discard your unsaved edits. - -## Clean up resources - -Keep the resource group, server, and single database to go on to the next steps, and learn how to connect and query your database with different methods. - -When you're finished using these resources, you can delete the resource group you created, which will also delete the server and single database within it. - -# [Portal](#tab/azure-portal) - -To delete **myResourceGroup** and all its resources using the Azure portal: - -1. In the portal, search for and select **Resource groups**, and then select **myResourceGroup** from the list. -1. 
On the resource group page, select **Delete resource group**. -1. Under **Type the resource group name**, enter *myResourceGroup*, and then select **Delete**. - -# [Azure CLI](#tab/azure-cli) - -Use the following command to remove the resource group and all resources associated with it using the [az group delete](/cli/azure/vm/extension#az_vm_extension_set) command - unless you have an ongoing need for these resources. Some of these resources may take a while to create, and to delete. - -```azurecli-interactive -az group delete --name $resourceGroup - -``` - -# [PowerShell](#tab/azure-powershell) - -To delete the resource group and all its resources, run the following PowerShell cmdlet, using the name of your resource group: - -```azurepowershell-interactive -Remove-AzResourceGroup -Name $resourceGroupName - -``` - -# [Transact-SQL](#tab/t-sql) - -This option deletes only the Hyperscale database. It doesn't remove any logical SQL servers or resource groups that you may have created in addition to the database. - -To delete a Hyperscale database with Transact-SQL, connect to the master database using [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms), [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio), or the client of your choice to run Transact-SQL commands ([sqlcmd](/sql/tools/sqlcmd-utility), etc.). 
- -Run the following Transact-SQL command to drop the database: - -```sql -DROP DATABASE [myHyperscaleDatabase]; -GO -``` - ---- - -## Next steps - -[Connect and query](connect-query-content-reference-guide.md) your database using different tools and languages: -- [Connect and query using SQL Server Management Studio](connect-query-ssms.md) -- [Connect and query using Azure Data Studio](/sql/azure-data-studio/quickstart-sql-database?toc=/azure/sql-database/toc.json) - -Learn more about Hyperscale databases in the following articles: - -- [Hyperscale service tier](service-tier-hyperscale.md) -- [Azure SQL Database Hyperscale FAQ](service-tier-hyperscale-frequently-asked-questions-faq.yml) -- [Hyperscale secondary replicas](service-tier-hyperscale-replicas.md) -- [Azure SQL Database Hyperscale named replicas FAQ](service-tier-hyperscale-named-replicas-faq.yml) diff --git a/articles/azure-sql/database/hyperscale-named-replica-security-configure.md b/articles/azure-sql/database/hyperscale-named-replica-security-configure.md deleted file mode 100644 index 353fd2e97aedc..0000000000000 --- a/articles/azure-sql/database/hyperscale-named-replica-security-configure.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: Configure named replicas security to allow isolated access -description: Learn the security considerations for configuring and managing named replica so that a user can access the named replica but not other replicas. -services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.topic: how-to -author: yorek -ms.author: damauri -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 7/27/2021 ---- -# Configure isolated access to a Hyperscale named replica -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article describes the procedure to grant access to an Azure SQL Hyperscale [named replica](service-tier-hyperscale-replicas.md) without granting access to the primary replica or other named replicas. 
This scenario allows resource and security isolation of a named replica - as the named replica will be running using its own compute node - and it is useful whenever isolated read-only access to an Azure SQL Hyperscale database is needed. Isolated, in this context, means that CPU and memory are not shared between the primary and the named replica, queries running on the named replica do not use compute resources of the primary or of any other replicas, and principals accessing the named replica cannot access other replicas, including the primary. - -## Create a login in the master database on the primary server - -In the `master` database on the logical server hosting the *primary* database, execute the following to create a new login. Use your own strong and unique password. - -```sql -create login [third-party-login] with password = 'Just4STRONG_PAZzW0rd!'; -``` - -Retrieve the SID hexadecimal value for the created login from the `sys.sql_logins` system view: - -```sql -select sid from sys.sql_logins where name = 'third-party-login'; -``` - -Disable the login. This will prevent this login from accessing any database on the server hosting the primary replica. - -```sql -alter login [third-party-login] disable; -``` - -## Create a user in the primary read-write database - -Once the login has been created, connect to the primary read-write replica of your database, for example WideWorldImporters (you can find a sample script to restore it here: [Restore Database in Azure SQL](https://github.com/yorek/azure-sql-db-samples/tree/master/samples/01-restore-database)) and create a database user for that login: - -```sql -create user [third-party-user] from login [third-party-login]; -``` - -As an optional step, once the database user has been created, you can drop the server login created in the previous step if there are concerns about the login getting re-enabled in any way. 
Connect to the master database on the logical server hosting the primary database, and execute the following: - -```sql -drop login [third-party-login]; -``` - -## Create a named replica on a different logical server - -Create a new Azure SQL logical server that will be used to isolate access to the named replica. Follow the instructions available at [Create and manage servers and single databases in Azure SQL Database](single-database-manage.md). To create a named replica, this server must be in the same Azure region as the server hosting the primary replica. - -Using, for example, AZ CLI: - -```azurecli -az sql server create -g MyResourceGroup -n MyNamedReplicaServer -l MyLocation --admin-user MyAdminUser --admin-password MyStrongADM1NPassw0rd! -``` - -Then, create a named replica for the primary database on this server. For example, using AZ CLI: - -```azurecli -az sql db replica create -g MyResourceGroup -n WideWorldImporters -s MyPrimaryServer --secondary-type Named --partner-database WideWorldImporters_NR --partner-server MyNamedReplicaServer -``` - -## Create a login in the master database on the named replica server - -Connect to the `master` database on the logical server hosting the named replica, created in the previous step. Add the login using the SID retrieved from the primary replica: - -```sql -create login [third-party-login] with password = 'Just4STRONG_PAZzW0rd!', sid = 0x0...1234; -``` - -At this point, users and applications using `third-party-login` can connect to the named replica, but not to the primary replica. - -## Grant object-level permissions within the database - -Once you have set up login authentication as described, you can use regular `GRANT`, `DENY` and `REVOKE` statements to manage authorization, or object-level permissions within the database. In these statements, reference the name of the user you created in the database, or a database role that includes this user as a member. 
Remember to execute these commands on the primary replica. The changes will propagate to all secondary replicas, however they will only be effective on the named replica where the server-level login was created. - -Remember that by default a newly created user has a minimal set of permissions granted (for example, it cannot access any user tables). If you want to allow `third-party-user` to read data in a table, you need to explicitly grant the `SELECT` permission: - -```sql -grant select on [Application].[Cities] to [third-party-user]; -``` - -As an alternative to granting permissions individually on every table, you can add the user to the `db_datareader` [database role](/sql/relational-databases/security/authentication-access/database-level-roles) to allow read access to all tables, or you can use [schemas](/sql/relational-databases/security/authentication-access/create-a-database-schema) to [allow access](/sql/t-sql/statements/grant-schema-permissions-transact-sql) to all existing and new tables in a schema. - -## Test access - -You can test this configuration by using any client tool and attempt to connect to the primary and the named replica. For example, using `sqlcmd`, you can try to connect to the primary replica using the `third-party-login` user: - -``` -sqlcmd -S MyPrimaryServer.database.windows.net -U third-party-login -P Just4STRONG_PAZzW0rd! -d WideWorldImporters -``` - -This will result in an error as the user is not allowed to connect to the server: - -``` -Sqlcmd: Error: Microsoft ODBC Driver 13 for SQL Server : Login failed for user 'third-party-login'. Reason: The account is disabled. -``` - -The attempt to connect to the named replica succeeds: - -``` -sqlcmd -S MyNamedReplicaServer.database.windows.net -U third-party-login -P Just4STRONG_PAZzW0rd! -d WideWorldImporters_NR -``` - -No errors are returned, and queries can be executed on the named replica as allowed by granted object-level permissions. 
- -For more information: - -* Azure SQL logical Servers, see [What is a server in Azure SQL Database](logical-servers.md) -* Managing database access and logins, see [SQL Database security: Manage database access and login security](logins-create-manage.md) -* Database engine permissions, see [Permissions](/sql/relational-databases/security/permissions-database-engine) -* Granting object permissions, see [GRANT Object Permissions](/sql/t-sql/statements/grant-object-permissions-transact-sql) - - - diff --git a/articles/azure-sql/database/hyperscale-performance-diagnostics.md b/articles/azure-sql/database/hyperscale-performance-diagnostics.md deleted file mode 100644 index cdbd2d4967ed0..0000000000000 --- a/articles/azure-sql/database/hyperscale-performance-diagnostics.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: Performance diagnostics in Hyperscale -description: This article describes how to troubleshoot Hyperscale performance problems in Azure SQL Database. -services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.custom: seo-lt-2019 sqldbrb=1 -ms.topic: troubleshooting -author: denzilribeiro -ms.author: denzilr -ms.reviewer: kendralittle, mathoma, wiassaf -ms.date: 10/18/2019 ---- - -# SQL Hyperscale performance troubleshooting diagnostics -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -To troubleshoot performance problems in a Hyperscale database, [general performance tuning methodologies](monitor-tune-overview.md) on the Azure SQL Database compute node is the starting point of a performance investigation. However, given the [distributed architecture](service-tier-hyperscale.md#distributed-functions-architecture) of Hyperscale, additional diagnostics have been added to assist. This article describes Hyperscale-specific diagnostic data. 
- -## Log rate throttling waits - -Every Azure SQL Database service level has log generation rate limits enforced via [log rate governance](resource-limits-logical-server.md#transaction-log-rate-governance). In Hyperscale, the log generation limit is currently set to 100 MB/sec, regardless of the service level. However, there are times when the log generation rate on the primary compute replica has to be throttled to maintain recoverability SLAs. This throttling happens when a [page server or another compute replica](service-tier-hyperscale.md#distributed-functions-architecture) is significantly behind applying new log records from the log service. - -The following wait types (in [sys.dm_os_wait_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-os-wait-stats-transact-sql/)) describe the reasons why log rate can be throttled on the primary compute replica: - -|Wait Type |Description | -|------------- |------------------------------------| -|RBIO_RG_STORAGE | Occurs when a Hyperscale database primary compute node log generation rate is being throttled due to delayed log consumption at the page server(s). | -|RBIO_RG_DESTAGE | Occurs when a Hyperscale database compute node log generation rate is being throttled due to delayed log consumption by the long-term log storage. | -|RBIO_RG_REPLICA | Occurs when a Hyperscale database compute node log generation rate is being throttled due to delayed log consumption by the readable secondary replica(s). | -|RBIO_RG_GEOREPLICA | Occurs when a Hyperscale database compute node log generation rate is being throttled due to delayed log consumption by the Geo-secondary replica. | -|RBIO_RG_LOCALDESTAGE | Occurs when a Hyperscale database compute node log generation rate is being throttled due to delayed log consumption by the log service. | - - -## Page server reads - -The compute replicas do not cache a full copy of the database locally. 
The data local to the compute replica is stored in the buffer pool (in memory) and in the local resilient buffer pool extension (RBPEX) cache that is a partial (non-covering) cache of data pages. This local RBPEX cache is sized proportionally to the compute size and is three times the memory of the compute tier. RBPEX is similar to the buffer pool in that it has the most frequently accessed data. Each page server, on the other hand, has a covering RBPEX cache for the portion of the database it maintains. - -When a read is issued on a compute replica, if the data doesn't exist in the buffer pool or local RBPEX cache, a getPage(pageId, LSN) function call is issued, and the page is fetched from the corresponding page server. Reads from page servers are remote reads and are thus slower than reads from the local RBPEX. When troubleshooting IO-related performance problems, we need to be able to tell how many IOs were done via relatively slower remote page server reads. - -Several dynamic management views (DMVs) and extended events have columns and fields that specify the number of remote reads from a page server, which can be compared against the total reads. Query store also captures remote reads as part of the query run time stats. 
- -- Columns to report page server reads are available in execution DMVs and catalog views, such as: - - - [sys.dm_exec_requests](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-requests-transact-sql/) - - [sys.dm_exec_query_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-query-stats-transact-sql/) - - [sys.dm_exec_procedure_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-procedure-stats-transact-sql/) - - [sys.dm_exec_trigger_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-trigger-stats-transact-sql/) - - [sys.query_store_runtime_stats](/sql/relational-databases/system-catalog-views/sys-query-store-runtime-stats-transact-sql/) -- Page server reads are added to the following extended events: - - sql_statement_completed - - sp_statement_completed - - sql_batch_completed - - rpc_completed - - scan_stopped - - query_store_begin_persist_runtime_stat - - query_store_execution_runtime_info -- ActualPageServerReads/ActualPageServerReadAheads are added to query plan XML for actual plans. For example: - -`` - -> [!NOTE] -> To view these attributes in the query plan properties window, SSMS 18.3 or later is required. - -## Virtual file stats and IO accounting - -In Azure SQL Database, the [sys.dm_io_virtual_file_stats()](/sql/relational-databases/system-dynamic-management-views/sys-dm-io-virtual-file-stats-transact-sql/) DMF is the primary way to monitor SQL Database IO. IO characteristics in Hyperscale are different due to its [distributed architecture](service-tier-hyperscale.md#distributed-functions-architecture). In this section, we focus on IO (reads and writes) to data files as seen in this DMF. In Hyperscale, each data file visible in this DMF corresponds to a remote page server. The RBPEX cache mentioned here is a local SSD-based cache, that is a non-covering cache on the compute replica. 
- -### Local RBPEX cache usage - -Local RBPEX cache exists on the compute replica, on local SSD storage. Thus, IO against this cache is faster than IO against remote page servers. Currently, [sys.dm_io_virtual_file_stats()](/sql/relational-databases/system-dynamic-management-views/sys-dm-io-virtual-file-stats-transact-sql/) in a Hyperscale database has a special row reporting the IO against the local RBPEX cache on the compute replica. This row has the value of 0 for both `database_id` and `file_id` columns. For example, the query below returns RBPEX usage statistics since database startup. - -`select * from sys.dm_io_virtual_file_stats(0,NULL);` - -A ratio of reads done on RBPEX to aggregated reads done on all other data files provides RBPEX cache hit ratio. The counter `RBPEX cache hit ratio` is also exposed in the performance counters DMV `sys.dm_os_performance_counters`. - -### Data reads - -- When reads are issued by the SQL Server database engine on a compute replica, they may be served either by the local RBPEX cache, or by remote page servers, or by a combination of the two if reading multiple pages. -- When the compute replica reads some pages from a specific file, for example file_id 1, if this data resides solely on the local RBPEX cache, all IO for this read is accounted against file_id 0 (RBPEX). If some part of that data is in the local RBPEX cache, and some part is on a remote page server, then IO is accounted towards file_id 0 for the part served from RBPEX, and the part served from the remote page server is accounted towards file_id 1. -- When a compute replica requests a page at a particular [LSN](/sql/relational-databases/sql-server-transaction-log-architecture-and-management-guide/) from a page server, if the page server has not caught up to the LSN requested, the read on the compute replica will wait until the page server catches up before the page is returned to the compute replica. 
For any read from a page server on the compute replica, you will see the PAGEIOLATCH_* wait type if it is waiting on that IO. In Hyperscale, this wait time includes both the time to catch up the requested page on the page server to the LSN required, and the time needed to transfer the page from the page server to the compute replica. -- Large reads such as read-ahead are often done using ["Scatter-Gather" Reads](/sql/relational-databases/reading-pages/). This allows reads of up to 4 MB of pages at a time, considered a single read in the SQL Server database engine. However, when data being read is in RBPEX, these reads are accounted as multiple individual 8-KB reads, since the buffer pool and RBPEX always use 8-KB pages. As a result, the number of read IOs seen against RBPEX may be larger than the actual number of IOs performed by the engine. - -### Data writes - -- The primary compute replica does not write directly to page servers. Instead, log records from the log service are replayed on corresponding page servers. -- Writes that happen on the compute replica are predominantly writes to the local RBPEX (file_id 0). For writes on logical files that are larger than 8 KB, in other words those done using [Gather-write](/sql/relational-databases/writing-pages/), each write operation is translated into multiple 8-KB individual writes to RBPEX since the buffer pool and RBPEX always use 8-KB pages. As a result, the number of write IOs seen against RBPEX may be larger than the actual number of IOs performed by the engine. -- Non-RBPEX files, or data files other than file_id 0 that correspond to page servers, also show writes. In the Hyperscale service tier, these writes are simulated, because the compute replicas never write directly to page servers. Write IOPS and throughput are accounted as they occur on the compute replica, but latency for data files other than file_id 0 does not reflect the actual latency of page server writes. 
- -### Log writes - -- On the primary compute, a log write is accounted for in file_id 2 of sys.dm_io_virtual_file_stats. A log write on primary compute is a write to the log Landing Zone. -- Log records are not hardened on the secondary replica on a commit. In Hyperscale, log is applied by the log service to the secondary replicas asynchronously. Because log writes don't actually occur on secondary replicas, any accounting of log IOs on the secondary replicas is for tracking purposes only. - -## Data IO in resource utilization statistics - -In a non-Hyperscale database, combined read and write IOPS against data files, relative to the [resource governance](./resource-limits-logical-server.md#resource-governance) data IOPS limit, are reported in [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database) and [sys.resource_stats](/sql/relational-databases/system-catalog-views/sys-resource-stats-azure-sql-database) views, in the `avg_data_io_percent` column. The same value is reported in the Azure portal as _Data IO Percentage_. - -In a Hyperscale database, this column reports on data IOPS utilization relative to the limit for local storage on compute replica only, specifically IO against RBPEX and `tempdb`. A 100% value in this column indicates that resource governance is limiting local storage IOPS. If this is correlated with a performance problem, tune the workload to generate less IO, or increase database service objective to increase the resource governance _Max Data IOPS_ [limit](resource-limits-vcore-single-databases.md). For resource governance of RBPEX reads and writes, the system counts individual 8-KB IOs, rather than larger IOs that may be issued by the SQL Server database engine. 
- -Data IO against remote page servers is not reported in resource utilization views or in the portal, but is reported in the [sys.dm_io_virtual_file_stats()](/sql/relational-databases/system-dynamic-management-views/sys-dm-io-virtual-file-stats-transact-sql/) DMF, as noted earlier. - -## Additional resources - -- For vCore resource limits for a Hyperscale single database see [Hyperscale service tier vCore Limits](resource-limits-vcore-single-databases.md#hyperscale---provisioned-compute---gen5) -- For monitoring Azure SQL Databases, enable [Azure Monitor SQL Insights (preview)](../../azure-monitor/insights/sql-insights-overview.md) -- For Azure SQL Database performance tuning, see [Query performance in Azure SQL Database](performance-guidance.md) -- For performance tuning using Query Store, see [Performance monitoring using Query store](/sql/relational-databases/performance/monitoring-performance-by-using-the-query-store/) -- For DMV monitoring scripts, see [Monitoring performance Azure SQL Database using dynamic management views](monitoring-with-dmvs.md) \ No newline at end of file diff --git a/articles/azure-sql/database/index.yml b/articles/azure-sql/database/index.yml deleted file mode 100644 index eb76ec8cb6311..0000000000000 --- a/articles/azure-sql/database/index.yml +++ /dev/null @@ -1,197 +0,0 @@ -### YamlMime:Landing - -title: Azure SQL Database documentation -summary: Find concepts, quickstarts, tutorials, and samples for single and pooled databases in Azure SQL Database. - -metadata: - title: Azure SQL Database documentation - description: Find documentation about Azure SQL Database, a platform-as-a-service (PaaS) based on the latest stable version of Microsoft SQL Server. 
- services: sql-database - ms.service: sql-database - ms.subservice: service-overview - ms.topic: landing-page - author: MashaMSFT - ms.author: mathoma - ms.reviewer: kendralittle, mathoma - ms.date: 01/25/2021 - -# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new - -landingContent: -# Cards and links should be based on top customer tasks or top subjects -# Start card title with a verb - - # Card - - title: Azure SQL Database - linkLists: - - linkListType: whats-new - links: - - text: What's new? - url: doc-changes-updates-release-notes-whats-new.md - - linkListType: quickstart - links: - - text: Create SQL Database - url: single-database-create-quickstart.md - - text: Configure firewall - url: firewall-create-server-level-portal-quickstart.md - - linkListType: video - links: - - text: Azure SQL Database overview - url: /shows/Azure-SQL-for-Beginners/Azure-SQL-Database-Overview-7-of-61 - - linkListType: concept - links: - - text: What is SQL Database? 
- url: sql-database-paas-overview.md - - text: Purchasing options - url: purchasing-models.md - - text: Serverless compute - url: serverless-tier-overview.md - - text: Hyperscale service tier - url: service-tier-hyperscale.md - - text: Migrate from SQL Server - url: migrate-to-database-from-sql-server.md - - text: T-SQL differences with SQL Server - url: transact-sql-tsql-differences-sql-server.md - - text: Reserved capacity - url: reserved-capacity-overview.md - - # Card - - title: Business continuity - linkLists: - - linkListType: how-to-guide - links: - - text: Business continuity - url: business-continuity-high-availability-disaster-recover-hadr-overview.md - - text: High availability - url: high-availability-sla.md - - text: Active-geo replication - url: active-geo-replication-overview.md - - text: Auto-failover groups - url: auto-failover-group-overview.md - - text: Automated backups - url: automated-backups-overview.md - - text: Recover with backup - url: recovery-using-backups.md - - text: Long-term backup retention - url: long-term-retention-overview.md - - - # Card - - title: Advanced security - linkLists: - - linkListType: concept - links: - - text: Security capabilities - url: security-overview.md - - text: Security best practices - url: security-best-practice.md - - text: Logins, user accounts, roles, and permissions - url: logins-create-manage.md - - text: Azure Active Directory - url: authentication-aad-overview.md - - text: Auditing - url: auditing-overview.md - - text: Transparent Data Encryption (TDE) - url: transparent-data-encryption-tde-overview.md - - text: Dynamic Data Masking - url: dynamic-data-masking-overview.md - - # Card - - title: Elastic pools - linkLists: - - linkListType: concept - links: - - text: What is an elastic pool? 
- url: elastic-pool-overview.md - - text: Manage - url: elastic-pool-manage.md - - text: Scale - url: elastic-pool-scale.md - - text: Resource management - url: elastic-pool-resource-management.md - - # Card - - title: Learn Azure SQL - linkLists: - - linkListType: learn - links: - - text: Azure SQL for beginners - url: https://aka.ms/azuresql4beginners - - text: Azure SQL fundamentals - url: /learn/paths/azure-sql-fundamentals/ - - text: Azure SQL hands-on labs - url: https://aka.ms/asqlworkshop - - text: Azure SQL bootcamp - url: https://aka.ms/azuresqlbootcamp - - text: Educational SQL resources - url: /sql/sql-server/educational-sql-resources - - # Card - - title: Reference - linkLists: - - linkListType: deploy - links: - - text: Azure CLI samples - url: az-cli-script-samples-content-guide.md - - text: PowerShell samples - url: powershell-script-content-guide.md - - text: ARM template samples - url: arm-templates-content-guide.md - - linkListType: download - links: - - text: SQL Server Management Studio (SSMS) - url: /sql/ssms/download-sql-server-management-studio-ssms - - text: Azure Data Studio - url: /sql/azure-data-studio/download-azure-data-studio - - text: SQL Server Data Tools - url: /sql/ssdt/download-sql-server-data-tools-ssdt - - text: Visual Studio 2019 - url: https://visualstudio.microsoft.com/downloads/ - - linkListType: reference - links: - - text: Migration guide - url: https://datamigration.microsoft.com/ - - text: Transact-SQL (T-SQL) - url: /sql/t-sql/language-reference - - text: Azure CLI - url: /cli/azure/azure-cli-reference-for-sql#sql-database-references - - text: PowerShell - url: /powershell/module/az.sql - - text: REST API - url: /rest/api/sql/ - - # Card - - title: Scalability - linkLists: - - linkListType: how-to-guide - links: - - text: Dynamically scale up or down - url: scale-resources.md - - text: Read Scale-Out - url: read-scale-out.md - - text: Cross database jobs - url: elastic-jobs-overview.md - - text: Elastic scale - url: 
elastic-scale-introduction.md - - text: Elastic query - url: elastic-query-overview.md - - # Card - - title: Planned maintenance - linkLists: - - linkListType: concept - links: - - text: Plan for Azure maintenance events - url: planned-maintenance.md - - text: Maintenance window - url: maintenance-window.md - - text: Resource health - url: resource-health-to-troubleshoot-connectivity.md - - - linkListType: how-to-guide - links: - - text: Configure maintenance window - url: maintenance-window-configure.md - - text: Maintenance window notifications - url: advance-notifications.md - \ No newline at end of file diff --git a/articles/azure-sql/database/intelligent-insights-overview.md b/articles/azure-sql/database/intelligent-insights-overview.md deleted file mode 100644 index d75222aaa8236..0000000000000 --- a/articles/azure-sql/database/intelligent-insights-overview.md +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: Monitor database performance with Intelligent Insights -description: Intelligent Insights in Azure SQL Database and Azure SQL Managed Instance uses built-in intelligence to continuously monitor database usage through artificial intelligence and detect disruptive events that cause poor performance. -services: sql-database -ms.service: sql-db-mi -ms.subservice: performance -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: conceptual -author: dimitri-furman -ms.author: dfurman -ms.reviewer: kendralittle, mathoma, wiassaf -ms.date: 01/31/2022 ---- -# Intelligent Insights using AI to monitor and troubleshoot database performance (preview) -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Intelligent Insights in Azure SQL Database and Azure SQL Managed Instance lets you know what is happening with your database performance. - -Intelligent Insights uses built-in intelligence to continuously monitor database usage through artificial intelligence and detect disruptive events that cause poor performance. 
Once detected, a detailed analysis is performed that generates an Intelligent Insights resource log called SQLInsights (unrelated to [Azure Monitor SQL Insights (preview)](../../azure-sql/database/monitoring-sql-database-azure-monitor.md)) with an [intelligent assessment of the issues](intelligent-insights-troubleshoot-performance.md). This assessment consists of a root cause analysis of the database performance issue and, where possible, recommendations for performance improvements. - -## What can Intelligent Insights do for you? - -Intelligent Insights is a unique capability of Azure built-in intelligence that provides the following value: - -- Proactive monitoring -- Tailored performance insights -- Early detection of database performance degradation -- Root cause analysis of issues detected -- Performance improvement recommendations -- Scale out capability on hundreds of thousands of databases -- Positive impact to DevOps resources and the total cost of ownership - -## How does Intelligent Insights work - -Intelligent Insights analyzes database performance by comparing the database workload from the last hour with the past seven-day baseline workload. Database workload is composed of queries determined to be the most significant to the database performance, such as the most repeated and largest queries. Because each database is unique based on its structure, data, usage, and application, each workload baseline that is generated is specific and unique to that workload. Intelligent Insights, independent of the workload baseline, also monitors absolute operational thresholds and detects issues with excessive wait times, critical exceptions, and issues with query parameterizations that might affect performance. - -After a performance degradation issue is detected from multiple observed metrics by using artificial intelligence, analysis is performed. A diagnostics log is generated with an intelligent insight on what is happening with your database. 
Intelligent Insights makes it easy to track the database performance issue from its first appearance until resolution. Each detected issue is tracked through its lifecycle from initial issue detection and verification of performance improvement to its completion. - -![Database performance analysis workflow](./media/intelligent-insights-overview/intelligent-insights-concept.png) - -The metrics used to measure and detect database performance issues are based on query duration, timeout requests, excessive wait times, and errored requests. For more information on metrics, see [Detection metrics](#detection-metrics). - -Identified database performance degradations are recorded in the Intelligent Insights SQLInsights log with intelligent entries that consist of the following properties: - -| Property | Details | -| :------------------- | ------------------- | -| Database information | Metadata about a database on which an insight was detected, such as a resource URI. | -| Observed time range | Start and end time for the period of the detected insight. | -| Impacted metrics | Metrics that caused an insight to be generated:
    • Query duration increase [seconds].
    • Excessive waiting [seconds].
    • Timed-out requests [percentage].
    • Errored-out requests [percentage].
    | -| Impact value | Value of a metric measured. | -| Impacted queries and error codes | Query hash or error code. These can be used to easily correlate to affected queries. Metrics that consist of either query duration increase, waiting time, timeout counts, or error codes are provided. | -| Detections | Detection identified at the database during the time of an event. There are 15 detection patterns. For more information, see [Troubleshoot database performance issues with Intelligent Insights](intelligent-insights-troubleshoot-performance.md). | -| Root cause analysis | Root cause analysis of the issue identified in a human-readable format. Some insights might contain a performance improvement recommendation where possible. | - - -Intelligent Insights shines in discovering and troubleshooting database performance issues. In order to use Intelligent Insights to troubleshoot database performance issues, see [Troubleshoot performance issues with Intelligent Insights](intelligent-insights-troubleshoot-performance.md). - -## Intelligent Insights options - -Intelligent Insights options available are: - -| Intelligent Insights option | Azure SQL Database support | Azure SQL Managed Instance support | -| :----------------------------- | ----- | ----- | -| **Configure Intelligent Insights** - Configure Intelligent Insights analysis for your databases. | Yes | Yes | -| **Stream insights to Azure SQL Analytics** - Stream insights to Azure SQL Analytics. | Yes | Yes | -| **Stream insights to Azure Event Hubs** - Stream insights to Event Hubs for further custom integrations. | Yes | Yes | -| **Stream insights to Azure Storage** - Stream insights to Azure Storage for further analysis and long-term archival. | Yes | Yes | - -> [!NOTE] -> Intelligent Insights is a preview feature, not available in the following regions: West Europe, North Europe, West US 1 and East US 1. 
- -## Configure the export of the Intelligent Insights log - -Output of Intelligent Insights can be streamed to one of several destinations for analysis: - -- Output streamed to a Log Analytics workspace can be used with [Azure SQL Analytics](../../azure-monitor/insights/azure-sql.md) to view insights through the user interface of the Azure portal. This is the integrated Azure solution, and the most typical way to view insights. -- Output streamed to Azure Event Hubs can be used for development of custom monitoring and alerting scenarios. -- Output streamed to Azure Storage can be used for custom application development, such as custom reporting, long-term data archival, and so forth. - -Integration of Azure SQL Analytics, Azure Event Hubs, Azure Storage, or third-party products for consumption is performed through first enabling Intelligent Insights logging (the "SQLInsights" log) in the Diagnostic settings blade of a database, and then configuring Intelligent Insights log data to be streamed into one of these destinations. - -For more information on how to enable Intelligent Insights logging and to configure metric and resource log data to be streamed to a consuming product, see [Metrics and diagnostics logging](metrics-diagnostic-telemetry-logging-streaming-export-configure.md). - -### Set up with Azure SQL Analytics - -The Azure SQL Analytics solution provides graphical user interface, reporting, and alerting capabilities on database performance, using the Intelligent Insights resource log data. 
- -To add Azure SQL Analytics to your Azure portal dashboard from the marketplace and to create a workspace, see [Configure Azure SQL Analytics](../../azure-monitor/insights/azure-sql.md#configuration). - -To use Intelligent Insights with Azure SQL Analytics, configure Intelligent Insights log data to be streamed to the Azure SQL Analytics workspace you created in the previous step, see [Metrics and diagnostics logging](metrics-diagnostic-telemetry-logging-streaming-export-configure.md). - -The following example shows an intelligent insight viewed through Azure SQL Analytics: - -![Intelligent Insights report](./media/intelligent-insights-overview/intelligent-insights-azure-sql-analytics.png) - -### Set up with Event Hubs - -To use Intelligent Insights with Event Hubs, configure Intelligent Insights log data to be streamed to Event Hubs, see [Metrics and diagnostics logging](metrics-diagnostic-telemetry-logging-streaming-export-configure.md) and [Stream Azure diagnostics logs to Event Hubs](../../azure-monitor/essentials/resource-logs.md#send-to-azure-event-hubs). - -To use Event Hubs to set up custom monitoring and alerting, see [What to do with metrics and diagnostics logs in Event Hubs](metrics-diagnostic-telemetry-logging-streaming-export-configure.md#what-to-do-with-metrics-and-resource-logs-in-event-hubs). - -### Set up with Azure Storage - -To use Intelligent Insights with Storage, configure Intelligent Insights log data to be streamed to Storage, see [Metrics and diagnostics logging](metrics-diagnostic-telemetry-logging-streaming-export-configure.md) and [Stream into Azure Storage](metrics-diagnostic-telemetry-logging-streaming-export-configure.md#stream-into-azure-storage). - -### Custom integrations of Intelligent Insights log - -To use Intelligent Insights with third-party tools, or for custom alerting and monitoring development, see [Use the Intelligent Insights database performance diagnostics log](intelligent-insights-use-diagnostics-log.md). 
- -## Detection metrics - -Metrics used for detection models that generate Intelligent Insights are based on monitoring: - -- Query duration -- Timeout requests -- Excessive wait time -- Errored out requests - -Query duration and timeout requests are used as primary models in detecting issues with database workload performance. They're used because they directly measure what is happening with the workload. To detect all possible cases of workload performance degradation, excessive wait time and errored-out requests are used as additional models to indicate issues that affect the workload performance. - -The system automatically considers changes to the workload and changes in the number of query requests made to the database to dynamically determine normal and out-of-the-ordinary database performance thresholds. - -All of the metrics are considered together in various relationships through a scientifically derived data model that categorizes each performance issue detected. Information provided through an intelligent insight includes: - -- Details of the performance issue detected. -- A root cause analysis of the issue detected. -- Recommendations on how to improve the performance of the monitored database, where possible. - -## Query duration - -The query duration degradation model analyzes individual queries and detects the increase in the time it takes to compile and execute a query compared to the performance baseline. - -If built-in intelligence detects a significant increase in query compile or query execution time that affects workload performance, these queries are flagged as query duration performance degradation issues. - -The Intelligent Insights diagnostics log outputs the query hash of the query degraded in performance. The query hash indicates whether the performance degradation was related to query compile or execution time increase, which increased query duration time. 
- -## Timeout requests - -The timeout requests degradation model analyzes individual queries and detects any increase in timeouts at the query execution level and the overall request timeouts at the database level compared to the performance baseline period. - -Some of the queries might time out even before they reach the execution stage. Through the means of aborted workers vs. requests made, built-in intelligence measures and analyzes all queries that reached the database whether they got to the execution stage or not. - -After the number of timeouts for executed queries or the number of aborted request workers crosses the system-managed threshold, a diagnostics log is populated with intelligent insights. - -The insights generated contain the number of timed-out requests and the number of timed-out queries. An indication of whether the performance degradation is related to a timeout increase at the execution stage or at the overall database level is provided. When the increase in timeouts is deemed significant to database performance, these queries are flagged as timeout performance degradation issues. - -## Excessive wait times - -The excessive wait time model monitors individual database queries. It detects unusually high query wait stats that crossed the system-managed absolute thresholds. The following query excessive wait-time metrics are observed by using [Query Store Wait Stats (sys.query_store_wait_stats)](/sql/relational-databases/system-catalog-views/sys-query-store-wait-stats-transact-sql): - -- Reaching resource limits -- Reaching elastic pool resource limits -- Excessive number of worker or session threads -- Excessive database locking -- Memory pressure -- Other wait stats - -Reaching resource limits or elastic pool resource limits denote that consumption of available resources on a subscription or in the elastic pool crossed absolute thresholds. These stats indicate workload performance degradation. 
An excessive number of worker or session threads denotes a condition in which the number of worker threads or sessions initiated crossed absolute thresholds. These stats indicate workload performance degradation. - -Excessive database locking denotes a condition in which the count of locks on a database has crossed absolute thresholds. This stat indicates a workload performance degradation. Memory pressure is a condition in which the number of threads requesting memory grants crossed an absolute threshold. This stat indicates a workload performance degradation. - -Other wait stats detection indicates a condition in which miscellaneous metrics measured through the Query Store Wait Stats crossed an absolute threshold. These stats indicate workload performance degradation. - -After excessive wait times are detected, depending on the data available, the Intelligent Insights diagnostics log outputs hashes of the affecting and affected queries degraded in performance, details of the metrics that cause queries to wait in execution, and measured wait time. - -## Errored requests - -The errored requests degradation model monitors individual queries and detects an increase in the number of queries that errored out compared to the baseline period. This model also monitors critical exceptions that crossed absolute thresholds managed by built-in intelligence. The system automatically considers the number of query requests made to the database and accounts for any workload changes in the monitored period. - -When the measured increase in errored requests relative to the overall number of requests made is deemed significant to workload performance, affected queries are flagged as errored requests performance degradation issues. - -The Intelligent Insights log outputs the count of errored requests. 
It indicates whether the performance degradation was related to an increase in errored requests or to crossing a monitored critical exception threshold and measured time of the performance degradation. - -If any of the monitored critical exceptions cross the absolute thresholds managed by the system, an intelligent insight is generated with critical exception details. - -## Next steps - -- Learn how to [Monitor databases by using SQL Analytics](../../azure-monitor/insights/azure-sql.md). -- Learn how to [Troubleshoot performance issues with Intelligent Insights](intelligent-insights-troubleshoot-performance.md). \ No newline at end of file diff --git a/articles/azure-sql/database/intelligent-insights-troubleshoot-performance.md b/articles/azure-sql/database/intelligent-insights-troubleshoot-performance.md deleted file mode 100644 index 1de8e8966c906..0000000000000 --- a/articles/azure-sql/database/intelligent-insights-troubleshoot-performance.md +++ /dev/null @@ -1,332 +0,0 @@ ---- -title: Troubleshoot performance issues with Intelligent Insights -description: Intelligent Insights helps you troubleshoot Azure SQL Database and Azure SQL Managed Instance performance issues. -services: sql-database -ms.service: sql-db-mi -ms.subservice: performance -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: troubleshooting -author: AlainDormehlMSFT -ms.author: aldorme -ms.reviewer: kendralittle, mathoma, wiassaf -ms.date: 11/04/2021 ---- -# Troubleshoot Azure SQL Database and Azure SQL Managed Instance performance issues with Intelligent Insights -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -This page provides information on Azure SQL Database and Azure SQL Managed Instance performance issues detected through the [Intelligent Insights](intelligent-insights-overview.md) resource log. 
Metrics and resource logs can be streamed to [Azure Monitor logs](../../azure-monitor/insights/azure-sql.md), [Azure Event Hubs](../../azure-monitor/essentials/resource-logs.md#send-to-azure-event-hubs), [Azure Storage](metrics-diagnostic-telemetry-logging-streaming-export-configure.md#stream-into-azure-storage), or a third-party solution for custom DevOps alerting and reporting capabilities. - -> [!NOTE] -> For a quick performance troubleshooting guide using Intelligent Insights, see the [Recommended troubleshooting flow](intelligent-insights-troubleshoot-performance.md#recommended-troubleshooting-flow) flowchart in this document. -> -> Intelligent insights is a preview feature, not available in the following regions: West Europe, North Europe, West US 1 and East US 1. - -## Detectable database performance patterns - -Intelligent Insights automatically detects performance issues based on query execution wait times, errors, or time-outs. Intelligent Insights outputs detected performance patterns to the resource log. Detectable performance patterns are summarized in the table below. - -| Detectable performance patterns | Azure SQL Database | Azure SQL Managed Instance | -| :------------------- | ------------------- | ------------------- | -| [Reaching resource limits](intelligent-insights-troubleshoot-performance.md#reaching-resource-limits) | Consumption of available resources (DTUs), database worker threads, or database login sessions available on the monitored subscription has reached its resource limits. This is affecting performance. | Consumption of CPU resources is reaching its resource limits. This is affecting the database performance. | -| [Workload increase](intelligent-insights-troubleshoot-performance.md#workload-increase) | Workload increase or continuous accumulation of workload on the database was detected. This is affecting performance. | Workload increase has been detected. This is affecting the database performance. 
| -| [Memory pressure](intelligent-insights-troubleshoot-performance.md#memory-pressure) | Workers that requested memory grants have to wait for memory allocations for statistically significant amounts of time, or an increased accumulation of workers that requested memory grants exist. This is affecting performance. | Workers that have requested memory grants are waiting for memory allocations for a statistically significant amount of time. This is affecting the database performance. | -| [Locking](intelligent-insights-troubleshoot-performance.md#locking) | Excessive database locking was detected affecting performance. | Excessive database locking was detected affecting the database performance. | -| [Increased MAXDOP](intelligent-insights-troubleshoot-performance.md#increased-maxdop) | The maximum degree of parallelism option (MAXDOP) has changed affecting the query execution efficiency. This is affecting performance. | The maximum degree of parallelism option (MAXDOP) has changed affecting the query execution efficiency. This is affecting performance. | -| [Pagelatch contention](intelligent-insights-troubleshoot-performance.md#pagelatch-contention) | Multiple threads are concurrently attempting to access the same in-memory data buffer pages resulting in increased wait times and causing pagelatch contention. This is affecting performance. | Multiple threads are concurrently attempting to access the same in-memory data buffer pages resulting in increased wait times and causing pagelatch contention. This is affecting database the performance. | -| [Missing Index](intelligent-insights-troubleshoot-performance.md#missing-index) | Missing index was detected affecting performance. | Missing index was detected affecting the database performance. | -| [New Query](intelligent-insights-troubleshoot-performance.md#new-query) | New query was detected affecting the overall performance. | New query was detected affecting the overall database performance. 
| -| [Increased Wait Statistic](intelligent-insights-troubleshoot-performance.md#increased-wait-statistic) | Increased database wait times were detected affecting performance. | Increased database wait times were detected affecting the database performance. | -| [TempDB Contention](intelligent-insights-troubleshoot-performance.md#tempdb-contention) | Multiple threads are trying to access the same TempDB resource causing a bottleneck. This is affecting performance. | Multiple threads are trying to access the same TempDB resource causing a bottleneck. This is affecting the database performance. | -| [Elastic pool DTU shortage](intelligent-insights-troubleshoot-performance.md#elastic-pool-dtu-shortage) | Shortage of available eDTUs in the elastic pool is affecting performance. | Not available for Azure SQL Managed Instance as it uses the vCore model. | -| [Plan Regression](intelligent-insights-troubleshoot-performance.md#plan-regression) | New plan, or a change in the workload of an existing plan was detected. This is affecting performance. | New plan, or a change in the workload of an existing plan was detected. This is affecting the database performance. | -| [Database-scoped configuration value change](intelligent-insights-troubleshoot-performance.md#database-scoped-configuration-value-change) | Configuration change on the database was detected affecting the database performance. | Configuration change on the database was detected affecting the database performance. | -| [Slow client](intelligent-insights-troubleshoot-performance.md#slow-client) | Slow application client is unable to consume output from the database fast enough. This is affecting performance. | Slow application client is unable to consume output from the database fast enough. This is affecting the database performance. | -| [Pricing tier downgrade](intelligent-insights-troubleshoot-performance.md#pricing-tier-downgrade) | Pricing tier downgrade action decreased available resources. 
This is affecting performance. | Pricing tier downgrade action decreased available resources. This is affecting the database performance. | - -> [!TIP] -> For continuous performance optimization of databases, enable [automatic tuning](automatic-tuning-overview.md). This built-in intelligence feature continuously monitors your database, automatically tunes indexes, and applies query execution plan corrections. -> - -The following section describes detectable performance patterns in more detail. - -## Reaching resource limits - -### What is happening - -This detectable performance pattern combines performance issues that are related to reaching available resource limits, worker limits, and session limits. After this performance issue is detected, a description field of the diagnostics log indicates whether the performance issue is related to resource, worker, or session limits. - -Resources on Azure SQL Database are typically referred to [DTU](service-tiers-dtu.md) or [vCore](service-tiers-vcore.md) resources, and resources on Azure SQL Managed Instance are referred to as vCore resources. The pattern of reaching resource limits is recognized when detected query performance degradation is caused by reaching any of the measured resource limits. - -The session limits resource denotes the number of available concurrent logins to the database. This performance pattern is recognized when applications that are connected to the databases have reached the number of available concurrent logins to the database. If applications attempt to use more sessions than are available on a database, the query performance is affected. - -Reaching worker limits is a specific case of reaching resource limits because available workers aren't counted in the DTU or vCore usage. Reaching worker limits on a database can cause the rise of resource-specific wait times, which results in query performance degradation. 
- -### Troubleshooting - -The diagnostics log outputs query hashes of queries that affected the performance and resource consumption percentages. You can use this information as a starting point for optimizing your database workload. In particular, you can optimize the queries that affect the performance degradation by adding indexes. Or you can optimize applications with a more even workload distribution. If you're unable to reduce workloads or make optimizations, consider increasing the pricing tier of your database subscription to increase the amount of resources available. - -If you have reached the available session limits, you can optimize your applications by reducing the number of logins made to the database. If you're unable to reduce the number of logins from your applications to the database, consider increasing the pricing tier of your database subscription. Or you can split and move your database into multiple databases for a more balanced workload distribution. - -For more suggestions on resolving session limits, see [How to deal with the limits of maximum logins](/archive/blogs/latam/how-to-deal-with-the-limits-of-azure-sql-database-maximum-logins). See [Overview of resource limits on a server](resource-limits-logical-server.md) for information about limits at the server and subscription levels. - -## Workload increase - -### What is happening - -This performance pattern identifies issues caused by a workload increase or, in its more severe form, a workload pile-up. - -This detection is made through a combination of several metrics. The basic metric measured is detecting an increase in workload compared with the past workload baseline. The other form of detection is based on measuring a large increase in active worker threads that is large enough to affect the query performance. - -In its more severe form, the workload might continuously pile up due to the inability of a database to handle the workload. 
The result is a continuously growing workload size, which is the workload pile-up condition. Due to this condition, the time that the workload waits for execution grows. This condition represents one of the most severe database performance issues. This issue is detected through monitoring the increase in the number of aborted worker threads. - -### Troubleshooting - -The diagnostics log outputs the number of queries whose execution has increased and the query hash of the query with the largest contribution to the workload increase. You can use this information as a starting point for optimizing the workload. The query identified as the largest contributor to the workload increase is especially useful as your starting point. - -You might consider distributing the workloads more evenly to the database. Consider optimizing the query that is affecting the performance by adding indexes. You also might distribute your workload among multiple databases. If these solutions aren't possible, consider increasing the pricing tier of your database subscription to increase the amount of resources available. - -## Memory pressure - -### What is happening - -This performance pattern indicates degradation in the current database performance caused by memory pressure, or in its more severe form a memory pile-up condition, compared to the past seven-day performance baseline. - -Memory pressure denotes a performance condition in which there is a large number of worker threads requesting memory grants. The high volume causes a high memory utilization condition in which the database is unable to efficiently allocate memory to all workers that request it. One of the most common reasons for this issue is related to the amount of memory available to the database on one hand. On the other hand, an increase in workload causes the increase in worker threads and the memory pressure. - -The more severe form of memory pressure is the memory pile-up condition. 
This condition indicates that a higher number of worker threads are requesting memory grants than there are queries releasing the memory. This number of worker threads requesting memory grants also might be continuously increasing (piling up) because the database engine is unable to allocate memory efficiently enough to meet the demand. The memory pile-up condition represents one of the most severe database performance issues. - -### Troubleshooting - -The diagnostics log outputs the memory object store details with the clerk (that is, worker thread) marked as the highest reason for high memory usage and relevant time stamps. You can use this information as the basis for troubleshooting. - -You can optimize or remove queries related to the clerks with the highest memory usage. You also can make sure that you aren't querying data that you don't plan to use. Good practice is to always use a WHERE clause in your queries. In addition, we recommend that you create nonclustered indexes to seek the data rather than scan it. - -You also can reduce the workload by optimizing or distributing it over multiple databases. Or you can distribute your workload among multiple databases. If these solutions aren't possible, consider increasing the pricing tier of your database to increase the amount of memory resources available to the database. - -For additional troubleshooting suggestions, see [Memory grants meditation: The mysterious SQL Server memory consumer with many names](https://techcommunity.microsoft.com/t5/sql-server-support/memory-grants-meditation-the-mysterious-sql-server-memory/ba-p/333994). For more information on out of memory errors in Azure SQL Database, see [Troubleshoot out of memory errors with Azure SQL Database](troubleshoot-memory-errors-issues.md). 
- -## Locking - -### What is happening - -This performance pattern indicates degradation in the current database performance in which excessive database locking is detected compared to the past seven-day performance baseline. - -In modern RDBMS, locking is essential for implementing multithreaded systems in which performance is maximized by running multiple simultaneous workers and parallel database transactions where possible. Locking in this context refers to the built-in access mechanism in which only a single transaction can exclusively access the rows, pages, tables, and files that are required and not compete with another transaction for resources. When the transaction that locked the resources for use is done with them, the lock on those resources is released, which allows other transactions to access required resources. For more information on locking, see [Lock in the database engine](/previous-versions/sql/sql-server-2008-r2/ms190615(v=sql.105)). - -If transactions executed by the SQL engine are waiting for prolonged periods of time to access resources locked for use, this wait time causes the slowdown of the workload execution performance. - -### Troubleshooting - -The diagnostics log outputs locking details that you can use as the basis for troubleshooting. You can analyze the reported blocking queries, that is, the queries that introduce the locking performance degradation, and remove them. In some cases, you might be successful in optimizing the blocking queries. - -The simplest and safest way to mitigate the issue is to keep transactions short and to reduce the lock footprint of the most expensive queries. You can break up a large batch of operations into smaller operations. Good practice is to reduce the query lock footprint by making the query as efficient as possible. Reduce large scans because they increase chances of deadlocks and adversely affect overall database performance. 
For identified queries that cause locking, you can create new indexes or add columns to the existing index to avoid the table scans. - -For more suggestions, see: -- [Understand and resolve Azure SQL blocking problems](understand-resolve-blocking.md) -- [How to resolve blocking problems that are caused by lock escalation in SQL Server](https://support.microsoft.com/help/323630/how-to-resolve-blocking-problems-that-are-caused-by-lock-escalation-in) - -## Increased MAXDOP - -### What is happening - -This detectable performance pattern indicates a condition in which a chosen query execution plan was parallelized more than it should have been. The query optimizer can enhance the workload performance by executing queries in parallel to speed up things where possible. In some cases, parallel workers processing a query spend more time waiting on each other to synchronize and merge results compared to executing the same query with fewer parallel workers, or even in some cases compared to a single worker thread. - -The expert system analyzes the current database performance compared to the baseline period. It determines if a previously running query is running slower than before because the query execution plan is more parallelized than it should be. - -The MAXDOP server configuration option is used to control how many CPU cores can be used to execute the same query in parallel. - -### Troubleshooting - -The diagnostics log outputs query hashes related to queries for which the duration of execution increased because they were parallelized more than they should have been. The log also outputs CXP wait times. This time represents the time a single organizer/coordinator thread (thread 0) is waiting for all other threads to finish before merging the results and moving ahead. In addition, the diagnostics log outputs the wait times that the poor-performing queries were waiting in execution overall. You can use this information as the basis for troubleshooting. 
- -First, optimize or simplify complex queries. Good practice is to break up long batch jobs into smaller ones. In addition, ensure that you created indexes to support your queries. You can also manually enforce the maximum degree of parallelism (MAXDOP) for a query that was flagged as poor performing. To configure this operation by using T-SQL, see [Configure the MAXDOP server configuration option](/sql/database-engine/configure-windows/configure-the-max-degree-of-parallelism-server-configuration-option). - -Setting the MAXDOP server configuration option to zero (0) as a default value denotes that database can use all available CPU cores to parallelize threads for executing a single query. Setting MAXDOP to one (1) denotes that only one core can be used for a single query execution. In practical terms, this means that parallelism is turned off. Depending on the case-per-case basis, available cores to the database, and diagnostics log information, you can tune the MAXDOP option to the number of cores used for parallel query execution that might resolve the issue in your case. - -## Pagelatch contention - -### What is happening - -This performance pattern indicates the current database workload performance degradation due to pagelatch contention compared to the past seven-day workload baseline. - -Latches are lightweight synchronization mechanisms used to enable multithreading. They guarantee consistency of in-memory structures that include indices, data pages, and other internal structures. - -There are many types of latches available. For simplicity purposes, buffer latches are used to protect in-memory pages in the buffer pool. IO latches are used to protect pages not yet loaded into the buffer pool. Whenever data is written to or read from a page in the buffer pool, a worker thread needs to acquire a buffer latch for the page first. 
Whenever a worker thread attempts to access a page that isn't already available in the in-memory buffer pool, an IO request is made to load the required information from the storage. This sequence of events indicates a more severe form of performance degradation. - -Contention on the page latches occurs when multiple threads concurrently attempt to acquire latches on the same in-memory structure, which introduces an increased wait time to query execution. In the case of pagelatch IO contention, when data needs to be accessed from storage, this wait time is even larger. It can affect workload performance considerably. Pagelatch contention is the most common scenario of threads waiting on each other and competing for resources on multiple CPU systems. - -### Troubleshooting - -The diagnostics log outputs pagelatch contention details. You can use this information as the basis for troubleshooting. - -Because pagelatches are an internal control mechanism, the database engine automatically determines when to use them. Application decisions, including schema design, can affect pagelatch behavior due to the deterministic behavior of latches. - -One method for handling latch contention is to replace a sequential index key with a nonsequential key to evenly distribute inserts across an index range. Typically, a leading column in the index distributes the workload proportionally. Another method to consider is table partitioning. Creating a hash partitioning scheme with a computed column on a partitioned table is a common approach for mitigating excessive latch contention. In the case of pagelatch IO contention, introducing indexes helps to mitigate this performance issue. - -For more information, see [Diagnose and resolve latch contention on SQL Server](http://databaser.net/moniwiki/pds/PerformanceTuning/SQLServerLatchContention.pdf) (PDF download). 
- -## Missing index - -### What is happening - -This performance pattern indicates the current database workload performance degradation compared to the past seven-day baseline due to a missing index. - -An index is used to speed up the performance of queries. It provides quick access to table data by reducing the number of dataset pages that need to be visited or scanned. - -Specific queries that caused performance degradation are identified through this detection for which creating indexes would be beneficial to the performance. - -### Troubleshooting - -The diagnostics log outputs query hashes for the queries that were identified to affect the workload performance. You can build indexes for these queries. You also can optimize or remove these queries if they aren't required. A good performance practice is to avoid querying data that you don't use. - -> [!TIP] -> Did you know that built-in intelligence can automatically manage the best-performing indexes for your databases? -> -> For continuous performance optimization, we recommend that you enable [automatic tuning](automatic-tuning-overview.md). This unique built-in intelligence feature continuously monitors your database and automatically tunes and creates indexes for your databases. -> - -## New query - -### What is happening - -This performance pattern indicates that a new query is detected that is performing poorly and affecting the workload performance compared to the seven-day performance baseline. - -Writing a good-performing query sometimes can be a challenging task. For more information on writing queries, see [Writing SQL queries](/previous-versions/sql/sql-server-2005/express-administrator/bb264565(v=sql.90)). To optimize existing query performance, see [Query tuning](/previous-versions/sql/sql-server-2008-r2/ms176005(v=sql.105)). - -### Troubleshooting - -The diagnostics log outputs information up to two new most CPU-consuming queries, including their query hashes. 
Because the detected query affects the workload performance, you can optimize your query. Good practice is to retrieve only data you need to use. We also recommend using queries with a WHERE clause. We also recommend that you simplify complex queries and break them up into smaller queries. Another good practice is to break down large batch queries into smaller batch queries. Introducing indexes for new queries is typically a good practice to mitigate this performance issue. - -In Azure SQL Database, consider using [Query Performance Insight](query-performance-insight-use.md). - -## Increased wait statistic - -### What is happening - -This detectable performance pattern indicates a workload performance degradation in which poor-performing queries are identified compared to the past seven-day workload baseline. - -In this case, the system can't classify the poor-performing queries under any other standard detectable performance categories, but it detected the wait statistic responsible for the regression. Therefore, it considers them as queries with *increased wait statistics*, where the wait statistic responsible for the regression is also exposed. - -### Troubleshooting - -The diagnostics log outputs information on increased wait time details and query hashes of the affected queries. - -Because the system couldn't successfully identify the root cause for the poor-performing queries, the diagnostics information is a good starting point for manual troubleshooting. You can optimize the performance of these queries. A good practice is to fetch only data you need to use and to simplify and break down complex queries into smaller ones. - -For more information on optimizing query performance, see [Query tuning](/previous-versions/sql/sql-server-2008-r2/ms176005(v=sql.105)). 
- -## TempDB contention - -### What is happening - -This detectable performance pattern indicates a database performance condition in which a bottleneck of threads trying to access tempDB resources exists. (This condition isn't IO-related.) The typical scenario for this performance issue is hundreds of concurrent queries that all create, use, and then drop small tempDB tables. The system detected that the number of concurrent queries using the same tempDB tables increased with sufficient statistical significance to affect database performance compared to the past seven-day performance baseline. - -### Troubleshooting - -The diagnostics log outputs tempDB contention details. You can use the information as the starting point for troubleshooting. There are two things you can pursue to alleviate this kind of contention and increase the throughput of the overall workload: You can stop using the temporary tables. You also can use memory-optimized tables. - -For more information, see [Introduction to memory-optimized tables](/sql/relational-databases/in-memory-oltp/introduction-to-memory-optimized-tables). - -## Elastic pool DTU shortage - -### What is happening - -This detectable performance pattern indicates a degradation in the current database workload performance compared to the past seven-day baseline. It's due to the shortage of available DTUs in the elastic pool of your subscription. - -[Azure elastic pool resources](elastic-pool-overview.md) are used as a pool of available resources shared between multiple databases for scaling purposes. When available eDTU resources in your elastic pool aren't sufficiently large to support all the databases in the pool, an elastic pool DTU shortage performance issue is detected by the system. - -### Troubleshooting - -The diagnostics log outputs information on the elastic pool, lists the top DTU-consuming databases, and provides a percentage of the pool's DTU used by the top-consuming database. 
 - -Because this performance condition is related to multiple databases using the same pool of eDTUs in the elastic pool, the troubleshooting steps focus on the top DTU-consuming databases. You can reduce the workload on the top-consuming databases, which includes optimization of the top-consuming queries on those databases. You also can ensure that you aren't querying data that you don't use. Another approach is to optimize applications by using the top DTU-consuming databases and redistribute the workload among multiple databases. - -If reduction and optimization of the current workload on your top DTU-consuming databases aren't possible, consider increasing your elastic pool pricing tier. Such an increase results in more available DTUs in the elastic pool. - -## Plan regression - -### What is happening - -This detectable performance pattern denotes a condition in which the database utilizes a suboptimal query execution plan. The suboptimal plan typically causes increased query execution, which leads to longer wait times for the current and other queries. - -The database engine determines the query execution plan with the least cost to a query execution. As the type of queries and workloads change, sometimes the existing plans are no longer efficient, or perhaps the database engine didn't make a good assessment. As a matter of correction, query execution plans can be manually forced. - -This detectable performance pattern combines three different cases of plan regression: new plan regression, old plan regression, and existing plans changed workload. The particular type of plan regression that occurred is provided in the *details* property in the diagnostics log. - -The new plan regression condition refers to a state in which the database engine starts executing a new query execution plan that isn't as efficient as the old plan. 
The old plan regression condition refers to the state when the database engine switches from using a new, more efficient plan to the old plan, which isn't as efficient as the new plan. The existing plans changed workload regression refers to the state in which the old and the new plans continuously alternate, with the balance going more toward the poor-performing plan. - -For more information on plan regressions, see [What is plan regression in SQL Server?](/archive/blogs/sqlserverstorageengine/what-is-plan-regression-in-sql-server). - -### Troubleshooting - -The diagnostics log outputs the query hashes, good plan ID, bad plan ID, and query IDs. You can use this information as the basis for troubleshooting. - -You can analyze which plan is better performing for your specific queries that you can identify with the query hashes provided. After you determine which plan works better for your queries, you can manually force it. - -For more information, see [Learn how SQL Server prevents plan regressions](/archive/blogs/sqlserverstorageengine/you-shall-not-regress-how-sql-server-2017-prevents-plan-regressions). - -> [!TIP] -> Did you know that the built-in intelligence feature can automatically manage the best-performing query execution plans for your databases? -> -> For continuous performance optimization, we recommend that you enable [automatic tuning](automatic-tuning-overview.md). This built-in intelligence feature continuously monitors your database and automatically tunes and creates best-performing query execution plans for your databases. - -## Database-scoped configuration value change - -### What is happening - -This detectable performance pattern indicates a condition in which a change in the database-scoped configuration causes performance regression that is detected compared to the past seven-day database workload behavior. 
This pattern denotes that a recent change made to the database-scoped configuration doesn't seem to be beneficial to your database performance. - -Database-scoped configuration changes can be set for each individual database. This configuration is used on a case-by-case basis to optimize the individual performance of your database. The following options can be configured for each individual database: MAXDOP, LEGACY_CARDINALITY_ESTIMATION, PARAMETER_SNIFFING, QUERY_OPTIMIZER_HOTFIXES, and CLEAR PROCEDURE_CACHE. - -### Troubleshooting - -The diagnostics log outputs database-scoped configuration changes that were made recently that caused performance degradation compared to the previous seven-day workload behavior. You can revert the configuration changes to the previous values. You also can tune value by value until the desired performance level is reached. You can copy database-scope configuration values from a similar database with satisfactory performance. If you're unable to troubleshoot the performance, revert to the default values and attempt to fine-tune starting from this baseline. - -For more information on optimizing database-scoped configuration and T-SQL syntax on changing the configuration, see [Alter database-scoped configuration (Transact-SQL)](/sql/t-sql/statements/alter-database-scoped-configuration-transact-sql). - -## Slow client - -### What is happening - -This detectable performance pattern indicates a condition in which the client using the database can't consume the output from the database as fast as the database sends the results. Because the database isn't storing results of the executed queries in a buffer, it slows down and waits for the client to consume the transmitted query outputs before proceeding. This condition also might be related to a network that isn't fast enough to transmit outputs from the database to the consuming client. 
- -This condition is generated only if a performance regression is detected compared to the past seven-day database workload behavior. This performance issue is detected only if a statistically significant performance degradation occurs compared to previous performance behavior. - -### Troubleshooting - -This detectable performance pattern indicates a client-side condition. Troubleshooting is required at the client-side application or client-side network. The diagnostics log outputs the query hashes and wait times that seem to be waiting the most for the client to consume them within the past two hours. You can use this information as the basis for troubleshooting. - -You can optimize performance of your application for consumption of these queries. You also can consider possible network latency issues. Because the performance degradation issue was based on change in the last seven-day performance baseline, you can investigate whether recent application or network condition changes caused this performance regression event. - -## Pricing tier downgrade - -### What is happening - -This detectable performance pattern indicates a condition in which the pricing tier of your database subscription was downgraded. Because of reduction of resources (DTUs) available to the database, the system detected a drop in the current database performance compared to the past seven-day baseline. - -In addition, there could be a condition in which the pricing tier of your database subscription was downgraded and then upgraded to a higher tier within a short period of time. Detection of this temporary performance degradation is outputted in the details section of the diagnostics log as a pricing tier downgrade and upgrade. - -### Troubleshooting - -If you reduced your pricing tier, and therefore the DTUs available, and you're satisfied with the performance, there's nothing you need to do. 
If you reduced your pricing tier and you're unsatisfied with your database performance, reduce your database workloads or consider increasing the pricing tier to a higher level. - -## Recommended troubleshooting flow - - Follow the flowchart for a recommended approach to troubleshoot performance issues by using Intelligent Insights. - -Access Intelligent Insights through the Azure portal by going to Azure SQL Analytics. Attempt to locate the incoming performance alert, and select it. Identify what is happening on the detections page. Observe the provided root cause analysis of the issue, query text, query time trends, and incident evolution. Attempt to resolve the issue by using the Intelligent Insights recommendation for mitigating the performance issue. - -[![Troubleshooting flow chart](./media/intelligent-insights-troubleshoot-performance/intelligent-insights-troubleshooting-flowchart.png)](https://github.com/Microsoft/sql-server-samples/blob/master/samples/features/intelligent-insight/Troubleshoot%20Azure%20SQL%20Database%20performance%20issues%20using%20Intelligent%20Insight.pdf) - -> [!TIP] -> Select the flowchart to download a PDF version. - -Intelligent Insights usually needs one hour of time to perform the root cause analysis of the performance issue. If you can't locate your issue in Intelligent Insights and it's critical to you, use the Query Store to manually identify the root cause of the performance issue. (Typically, these issues are less than one hour old.) For more information, see [Monitor performance by using the Query Store](/sql/relational-databases/performance/monitoring-performance-by-using-the-query-store). - -## Next steps - -- Learn [Intelligent Insights](intelligent-insights-overview.md) concepts. -- Use the [Intelligent Insights performance diagnostics log](intelligent-insights-use-diagnostics-log.md). -- Monitor using [Azure SQL Analytics](../../azure-monitor/insights/azure-sql.md). 
-- Learn to [collect and consume log data from your Azure resources](../../azure-monitor/essentials/platform-logs-overview.md). \ No newline at end of file diff --git a/articles/azure-sql/database/intelligent-insights-use-diagnostics-log.md b/articles/azure-sql/database/intelligent-insights-use-diagnostics-log.md deleted file mode 100644 index 84544e137be78..0000000000000 --- a/articles/azure-sql/database/intelligent-insights-use-diagnostics-log.md +++ /dev/null @@ -1,143 +0,0 @@ ---- -title: Intelligent Insights performance diagnostics log -description: Intelligent Insights provides a diagnostics log of Azure SQL Database and Azure SQL Managed Instance performance issues -services: sql-database -ms.service: sql-db-mi -ms.subservice: performance -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: conceptual -author: dimitri-furman -ms.author: dfurman -ms.reviewer: kendralittle, mathoma, wiassaf -ms.date: 01/31/2022 ---- - -# Use the Intelligent Insights performance diagnostics log of Azure SQL Database and Azure SQL Managed Instance performance issues -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -This page provides information on how to use the performance diagnostics log generated by [Intelligent Insights](intelligent-insights-overview.md) of Azure SQL Database and Azure SQL Managed Instance performance issues, its format, and the data it contains for your custom development needs. You can send this diagnostics log to [Azure Monitor logs](../../azure-monitor/insights/azure-sql.md), [Azure Event Hubs](../../azure-monitor/essentials/resource-logs.md#send-to-azure-event-hubs), [Azure Storage](metrics-diagnostic-telemetry-logging-streaming-export-configure.md#stream-into-azure-storage), or a third-party solution for custom DevOps alerting and reporting capabilities. - -> [!NOTE] -> Intelligent insights is a preview feature, not available in the following regions: West Europe, North Europe, West US 1 and East US 1. 
- -## Log header - -The diagnostics log uses JSON standard format to output Intelligent Insights findings. The exact category property for accessing an Intelligent Insights log is the fixed value "SQLInsights", unrelated to [Monitoring Azure SQL Database with Azure Monitor SQL Insights (preview)](../../azure-sql/database/monitoring-sql-database-azure-monitor.md). - -The header of the log is common and consists of the time stamp (TimeGenerated) that shows when an entry was created. It also includes a resource ID (ResourceId) that refers to the particular database the entry relates to. The category (Category), level (Level), and operation name (OperationName) are fixed properties whose values do not change. They indicate that the log entry is informational and that it comes from Intelligent Insights (SQLInsights). - -```json -"TimeGenerated" : "2017-9-25 11:00:00", // time stamp of the log entry -"ResourceId" : "database identifier", // value points to a database resource -"Category": "SQLInsights", // fixed property -"Level" : "Informational", // fixed property -"OperationName" : "Insight", // fixed property -``` - -## Issue ID and database affected - -The issue identification property (issueId_d) provides a way of uniquely tracking performance issues until resolved. Multiple event records in the log reporting status of the same issue will share the same issue ID. - -Along with the issue ID, the diagnostics log reports the start (intervalStartTime_t) and end (intervalEndTme_t) time stamps of the particular event related to an issue that's reported in the diagnostics log. - -The elastic pool (elasticPoolName_s) property indicates which elastic pool the database with an issue belongs to. If the database isn't part of an elastic pool, this property has no value. The database in which an issue was detected is disclosed in the database name (databaseName_s) property. 
- -```json -"intervalStartTime_t": "2017-9-25 11:00", // start of the issue reported time stamp -"intervalEndTme_t":"2017-9-25 12:00", // end of the issue reported time stamp -"elasticPoolName_s" : "", // resource elastic pool (if applicable) -"databaseName_s" : "db_name", // database name -"issueId_d" : 1525, // unique ID of the issue detected -"status_s" : "Active" // status of the issue – possible values: "Active", "Verifying", and "Complete" -``` - -## Detected issues - -The next section of the Intelligent Insights performance log contains performance issues that were detected through built-in artificial intelligence. Detections are disclosed in properties within the JSON diagnostics log. These detections consist of the category of an issue, the impact of the issue, the queries affected, and the metrics. The detections properties might contain multiple performance issues that were detected. - -Detected performance issues are reported with the following detections property structure: - -```json -"detections_s" : [{ -"impact" : 1 to 3, // impact of the issue detected, possible values 1-3 (1 low, 2 moderate, 3 high impact) -"category" : "Detectable performance pattern", // performance issue detected, see the table -"details":
    // details of an issue (see the table) -}] -``` - -Detectable performance patterns and the details that are outputted to the diagnostics log are provided in the following table. - -### Detection category - -The category (category) property describes the category of detectable performance patterns. See the following table for all possible categories of detectable performance patterns. For more information, see [Troubleshoot database performance issues with Intelligent Insights](intelligent-insights-troubleshoot-performance.md). - -Depending on the performance issue detected, the details outputted in the diagnostics log file differ accordingly. - -| Detectable performance patterns | Details outputted | -| :------------------- | ------------------- | -| Reaching resource limits |
  • Resources affected
  • Query hashes
  • Resource consumption percentage
  • | -| Workload Increase |
  • Number of queries whose execution increased
  • Query hashes of queries with the largest contribution to the workload increase
  • | -| Memory Pressure |
  • Memory clerk
  • | -| Locking |
  • Affected query hashes
  • Blocking query hashes
  • | -| Increased MAXDOP |
  • Query hashes
  • CXP wait times
  • Wait times
  • | -| Pagelatch Contention |
  • Query hashes of queries causing contention
  • | -| Missing Index |
  • Query hashes
  • | -| New Query |
  • Query hash of the new queries
  • | -| Unusual Wait Statistic |
  • Unusual wait types
  • Query hashes
  • Query wait times
  • | -| TempDB Contention |
  • Query hashes of queries causing contention
  • Query attribution to the overall database pagelatch contention wait time [%]
  • | -| Elastic pool DTU Shortage |
  • Elastic pool
  • Top DTU-consuming database
  • Percent of pool DTU used by the top consumer
  • | -| Plan Regression |
  • Query hashes
  • Good plan IDs
  • Bad plan IDs
  • | -| Database-Scoped Configuration Value Change |
  • Database-scoped configuration changes compared to the default values
  • | -| Slow Client |
  • Query hashes
  • Wait times
  • | -| Pricing Tier Downgrade |
  • Text notification
  • | - -### Impact - -The impact (impact) property describes how much a detected behavior contributed to the problem that a database is having. Impacts range from 1 to 3, with 3 as the highest contribution, 2 as moderate, and 1 as the lowest contribution. The impact value might be used as an input for custom alerting automation, depending on your specific needs. The property queries impacted (QueryHashes) provide a list of the query hashes that were affected by a particular detection. - -### Impacted queries - -The next section of the Intelligent Insights log provides information about particular queries that were affected by the detected performance issues. This information is disclosed as an array of objects embedded in the impact_s property. The impact property consists of entities and metrics. Entities refer to a particular query (Type: Query). The unique query hash is disclosed under the value (Value) property. In addition, each of the queries disclosed is followed by a metric and a value, which indicate a detected performance issue. - -In the following log example, the query with the hash 0x9102EXZ4 was detected to have an increased duration of execution (Metric: DurationIncreaseSeconds). The value of 110 seconds indicates that this particular query took 110 seconds longer to execute. Because multiple queries can be detected, this particular log section might include multiple query entries. - -```json -"impact" : [{ -"entity" : { -"Type" : "Query", // type of entity - query -"Value" : "query hash value", // for example "0x9102EXZ4" query hash value }, -"Metric" : "DurationIncreaseSeconds", // measured metric and the measurement unit (in this case seconds) -"Value" : 110 // value of the measured metric (in this case seconds) -}] -``` - -### Metrics - -The unit of measurement for each metric reported is provided under the metric (metric) property with the possible values of seconds, number, and percentage. 
The value of a measured metric is reported in the value (value) property. - -The DurationIncreaseSeconds property provides the unit of measurement in seconds. The CriticalErrorCount unit of measurement is a number that represents an error count. - -```json -"metric" : "DurationIncreaseSeconds", // issue metric type – possible values: DurationIncreaseSeconds, CriticalErrorCount, WaitingSeconds -"value" : 102 // value of the measured metric (in this case seconds) -``` - -## Root cause analysis and improvement recommendations - -The last part of the Intelligent Insights performance log pertains to the automated root cause analysis of the identified performance degradation issue. The information appears in human-friendly verbiage in the root cause analysis (rootCauseAnalysis_s) property. Improvement recommendations are included in the log where possible. - -```json -// example of reported root cause analysis of the detected performance issue, in a human-readable format - -"rootCauseAnalysis_s" : "High data IO caused performance to degrade. It seems that this database is missing some indexes that could help." -``` - -You can use the Intelligent Insights performance log with [Azure Monitor logs](../../azure-monitor/insights/azure-sql.md) or a third-party solution for custom DevOps alerting and reporting capabilities. - -## Next steps - -- Learn about [Intelligent Insights](intelligent-insights-overview.md) concepts. -- Learn how to [Troubleshoot performance issues with Intelligent Insights](intelligent-insights-troubleshoot-performance.md). -- Learn how to [Monitor performance issues by using Azure SQL Analytics](../../azure-monitor/insights/azure-sql.md). -- Learn how to [collect and consume log data from your Azure resources](../../azure-monitor/essentials/platform-logs-overview.md). 
\ No newline at end of file diff --git a/articles/azure-sql/database/job-automation-overview.md b/articles/azure-sql/database/job-automation-overview.md deleted file mode 100644 index 0b128d144aadc..0000000000000 --- a/articles/azure-sql/database/job-automation-overview.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: Job automation overview with Elastic Jobs -description: 'Use Elastic Jobs for Job Automation to run Transact-SQL (T-SQL) scripts across a set of one or more databases' -services: sql-database -ms.service: sql-database -ms.subservice: elastic-pools -ms.custom: sqldbrb=1, contperf-fy21q3 -ms.devlang: -dev_langs: - - "TSQL" -ms.topic: conceptual -author: williamdassafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, mathoma -ms.date: 2/1/2021 ---- -# Automate management tasks using elastic jobs (preview) - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -You can create and schedule elastic jobs that could be periodically executed against one or many Azure SQL databases to run Transact-SQL (T-SQL) queries and perform maintenance tasks. - -You can define target database or groups of databases where the job will be executed, and also define schedules for running a job. -A job handles the task of logging in to the target database. You also define, maintain, and persist Transact-SQL scripts to be executed across a group of databases. - -Every job logs the status of execution and also automatically retries the operations if any failure occurs. - -## When to use elastic jobs - -There are several scenarios when you could use elastic job automation: - -- Automate management tasks and schedule them to run every weekday, after hours, etc. - - Deploy schema changes, credentials management, performance data collection or tenant (customer) telemetry collection. - - Update reference data (information common across all databases), load data from Azure Blob storage. 
-- Configure jobs to execute across a collection of databases on a recurring basis, such as during off-peak hours. - - Collect query results from a set of databases into a central table on an on-going basis. Performance queries can be continually executed and configured to trigger additional tasks to be executed. -- Collect data for reporting - - Aggregate data from a collection of databases into a single destination table. - - Execute longer running data processing queries across a large set of databases, for example the collection of customer telemetry. Results are collected into a single destination table for further analysis. -- Data movements - -### Automation on other platforms - -Consider the following job scheduling technologies on different platforms: - -- **Elastic Jobs** are Job Scheduling services that execute custom jobs on one or many databases in Azure SQL Database. -- **SQL Agent Jobs** are executed by the SQL Agent service that continues to be used for task automation in SQL Server and is also included with Azure SQL Managed Instances. SQL Agent Jobs are not available in Azure SQL Database. - -Elastic Jobs can target [Azure SQL Databases](sql-database-paas-overview.md), [Azure SQL Database elastic pools](elastic-pool-overview.md), and Azure SQL Databases in [shard maps](elastic-scale-shard-map-management.md). - -- For T-SQL script job automation in SQL Server and Azure SQL Managed Instance, consider [SQL Agent](../managed-instance/job-automation-managed-instance.md). - -- For T-SQL script job automation in Azure Synapse Analytics, consider [pipelines with recurring triggers](../../synapse-analytics/data-integration/concepts-data-factory-differences.md), which are [based on Azure Data Factory](../../synapse-analytics/data-integration/concepts-data-factory-differences.md). 
- -It is worth noting differences between SQL Agent (available in SQL Server and as part of SQL Managed Instance), and the Database Elastic Job agent (which can execute T-SQL on Azure SQL Databases or databases in SQL Server and Azure SQL Managed Instance, Azure Synapse Analytics). - -| |Elastic Jobs |SQL Agent | -|---------|---------|---------| -|**Scope** | Any number of databases in Azure SQL Database and/or data warehouses in the same Azure cloud as the job agent. Targets can be in different servers, subscriptions, and/or regions.

    Target groups can be composed of individual databases or data warehouses, or all databases in a server, pool, or shard map (dynamically enumerated at job runtime). | Any individual database in the same instance as the SQL agent. The Multi Server Administration feature of SQL Server Agent allows for master/target instances to coordinate job execution, though this feature is not available in SQL managed instance. | -|**Supported APIs and Tools** | Portal, PowerShell, T-SQL, Azure Resource Manager | T-SQL, SQL Server Management Studio (SSMS) | - -## Elastic job targets - -**Elastic Jobs** provide the ability to run one or more T-SQL scripts in parallel, across a large number of databases, on a schedule or on-demand. - -You can run scheduled jobs against any combination of databases: one or more individual databases, all databases on a server, all databases in an elastic pool, or shard map, with the added flexibility to include or exclude any specific database. Jobs can run across multiple servers, multiple pools, and can even run against databases in different subscriptions. Servers and pools are dynamically enumerated at runtime, so jobs run against all databases that exist in the target group at the time of execution. - -The following image shows a job agent executing jobs across the different types of target groups: - -![Elastic Job agent conceptual model](./media/job-automation-overview/conceptual-diagram.png) - -### Elastic job components - -|Component | Description (additional details are below the table) | -|---------|---------| -|[**Elastic Job agent**](#elastic-job-agent) | The Azure resource you create to run and manage Jobs. | -|[**Job database**](#elastic-job-database) | A database in Azure SQL Database that the job agent uses to store job related data, job definitions, etc. | -|[**Target group**](#target-group) | The set of servers, pools, databases, and shard maps to run a job against. 
| -|[**Job**](#elastic-jobs-and-job-steps) | A job is a unit of work that is composed of one or more job steps. Job steps specify the T-SQL script to run, as well as other details required to execute the script. | - -#### Elastic job agent - -An Elastic Job agent is the Azure resource for creating, running, and managing jobs. The Elastic Job agent is an Azure resource you create in the portal ([PowerShell](elastic-jobs-powershell-create.md) and REST are also supported). - -Creating an **Elastic Job agent** requires an existing database in Azure SQL Database. The agent configures this existing Azure SQL Database as the [*Job database*](#elastic-job-database). - -The Elastic Job agent is free. The job database is billed at the same rate as any database in Azure SQL Database. - -#### Elastic job database - -The *Job database* is used for defining jobs and tracking the status and history of job executions. The *Job database* is also used to store agent metadata, logs, results, job definitions, and also contains many useful stored procedures and other database objects for creating, running, and managing jobs using T-SQL. - -For the current preview, an existing database in Azure SQL Database (S0 or higher) is required to create an Elastic Job agent. - -The *Job database* should be a clean, empty, S0 or higher service objective Azure SQL Database. The recommended service objective of the *Job database* is S1 or higher, but the optimal choice depends on the performance needs of your job(s): the number of job steps, the number of job targets, and how frequently jobs are run. 
- -If operations against the job database are slower than expected, [monitor](monitor-tune-overview.md#azure-sql-database-and-azure-sql-managed-instance-resource-monitoring) database performance and the resource utilization in the job database during periods of slowness using Azure portal or the [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database) DMV. If utilization of a resource, such as CPU, Data IO, or Log Write approaches 100% and correlates with periods of slowness, consider incrementally scaling the database to higher service objectives (either in the [DTU model](service-tiers-dtu.md) or in the [vCore model](service-tiers-vcore.md)) until job database performance is sufficiently improved. - -##### Elastic job database permissions - -During job agent creation, a schema, tables, and a role called *jobs_reader* are created in the *Job database*. The role is created with the following permission and is designed to give administrators finer access control for job monitoring: - -|Role name |'jobs' schema permissions |'jobs_internal' schema permissions | -|---------|---------|---------| -|**jobs_reader** | SELECT | None | - -> [!IMPORTANT] -> Consider the security implications before granting access to the *Job database* as a database administrator. A malicious user with permissions to create or edit jobs could create or edit a job that uses a stored credential to connect to a database under the malicious user's control, which could allow the malicious user to determine the credential's password. - -#### Target group - -A *target group* defines the set of databases a job step will execute on. A target group can contain any number and combination of the following: - -- **Logical SQL server** - if a server is specified, all databases that exist in the server at the time of the job execution are part of the group. 
The master database credential must be provided so that the group can be enumerated and updated prior to job execution. For more information on logical servers, see [What is a server in Azure SQL Database and Azure Synapse Analytics?](logical-servers.md). -- **Elastic pool** - if an elastic pool is specified, all databases that are in the elastic pool at the time of the job execution are part of the group. As for a server, the master database credential must be provided so that the group can be updated prior to the job execution. -- **Single database** - specify one or more individual databases to be part of the group. -- **Shard map** - databases of a shard map. - -> [!TIP] -> At the moment of job execution, *dynamic enumeration* re-evaluates the set of databases in target groups that include servers or pools. Dynamic enumeration ensures that **jobs run across all databases that exist in the server or pool at the time of job execution**. Re-evaluating the list of databases at runtime is specifically useful for scenarios where pool or server membership changes frequently. - -Pools and single databases can be specified as included or excluded from the group. This enables creating a target group with any combination of databases. For example, you can add a server to a target group, but exclude specific databases in an elastic pool (or exclude an entire pool). - -A target group can include databases in multiple subscriptions, and across multiple regions. Note that cross-region executions have higher latency than executions within the same region. - -The following examples show how different target group definitions are dynamically enumerated at the moment of job execution to determine which databases the job will run: - -![Target group examples](./media/job-automation-overview/targetgroup-examples1.png) - -**Example 1** shows a target group that consists of a list of individual databases. 
When a job step is executed using this target group, the job step's action will be executed in each of those databases.
    -**Example 2** shows a target group that contains a server as a target. When a job step is executed using this target group, the server is dynamically enumerated to determine the list of databases that are currently in the server. The job step's action will be executed in each of those databases.
    -**Example 3** shows a similar target group as *Example 2*, but an individual database is specifically excluded. The job step's action will *not* be executed in the excluded database.
    -**Example 4** shows a target group that contains an elastic pool as a target. Similar to *Example 2*, the pool will be dynamically enumerated at job run time to determine the list of databases in the pool. -

    - -![Additional target group examples](./media/job-automation-overview/targetgroup-examples2.png) - -**Example 5** and **Example 6** show advanced scenarios where servers, elastic pools, and databases can be combined using include and exclude rules.
    -**Example 7** shows that the shards in a shard map can also be evaluated at job run time. - -> [!NOTE] -> The Job database itself can be the target of a job. In this scenario, the Job database is treated just like any other target database. The job user must be created and granted sufficient permissions in the Job database, and the database scoped credential for the job user must also exist in the Job database, just like it does for any other target database. - -#### Elastic jobs and job steps - -A *job* is a unit of work that is executed on a schedule or as a one-time job. A job consists of one or more *job steps*. - -Each job step specifies a T-SQL script to execute, one or more target groups to run the T-SQL script against, and the credentials the job agent needs to connect to the target database. Each job step has customizable timeout and retry policies, and can optionally specify output parameters. - -#### Job output - -The outcome of a job's steps on each target database are recorded in detail, and script output can be captured to a specified table. You can specify a database to save any data returned from a job. - -#### Job history - -View Elastic Job execution history in the *Job database* by [querying the table jobs.job_executions](elastic-jobs-tsql-create-manage.md#monitor-job-execution-status). A system cleanup job purges execution history that is older than 45 days. To remove history less than 45 days old, call the `sp_purge_jobhistory` stored procedure in the *Job database*. - -#### Job status - -You can monitor Elastic Job executions in the *Job database* by [querying the table jobs.job_executions](elastic-jobs-tsql-create-manage.md#monitor-job-execution-status). - -### Agent performance, capacity, and limitations - -Elastic Jobs use minimal compute resources while waiting for long-running jobs to complete. 
- -Depending on the size of the target group of databases and the desired execution time for a job (number of concurrent workers), the agent requires different amounts of compute and performance of the *Job database* (the more targets and the higher number of jobs, the higher the amount of compute required). - -Currently, the limit is 100 concurrent jobs. - -#### Prevent jobs from reducing target database performance - -To ensure resources aren't overburdened when running jobs against databases in a SQL elastic pool, jobs can be configured to limit the number of databases a job can run against at the same time. - -## Next steps - -- [How to create and manage elastic jobs](elastic-jobs-overview.md) -- [Create and manage Elastic Jobs using PowerShell](elastic-jobs-powershell-create.md) -- [Create and manage Elastic Jobs using Transact-SQL (T-SQL)](elastic-jobs-tsql-create-manage.md) diff --git a/articles/azure-sql/database/json-features.md b/articles/azure-sql/database/json-features.md deleted file mode 100644 index ad4bf0b597b05..0000000000000 --- a/articles/azure-sql/database/json-features.md +++ /dev/null @@ -1,187 +0,0 @@ ---- -title: Working with JSON data -description: Azure SQL Database and Azure SQL Managed Instance enable you to parse, query, and format data in JavaScript Object Notation (JSON) notation. -services: sql-database -ms.service: sql-db-mi -ms.subservice: development -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: how-to -author: uc-msft -ms.author: umajay -ms.reviewer: kendralittle, mathoma -ms.date: 10/18/2021 ---- -# Getting started with JSON features in Azure SQL Database and Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Azure SQL Database and Azure SQL Managed Instance let you parse and query data represented in JavaScript Object Notation [(JSON)](https://www.json.org/) format, and export your relational data as JSON text. 
The following JSON scenarios are available: - -- [Formatting relational data in JSON format](#formatting-relational-data-in-json-format) using `FOR JSON` clause. -- [Working with JSON data](#working-with-json-data) -- [Querying JSON data](#querying-json-data) using JSON scalar functions. -- [Transforming JSON into tabular format](#transforming-json-into-tabular-format) using `OPENJSON` function. - -## Formatting relational data in JSON format - -If you have a web service that takes data from the database layer and provides a response in JSON format, or client-side JavaScript frameworks or libraries that accept data formatted as JSON, you can format your database content as JSON directly in a SQL query. You no longer have to write application code that formats results from Azure SQL Database or Azure SQL Managed Instance as JSON, or include some JSON serialization library to convert tabular query results and then serialize objects to JSON format. Instead, you can use the FOR JSON clause to format SQL query results as JSON and use it directly in your application. - -In the following example, rows from the `Sales.Customer` table are formatted as JSON by using the FOR JSON clause: - -```sql -select CustomerName, PhoneNumber, FaxNumber -from Sales.Customers -FOR JSON PATH -``` - -The FOR JSON PATH clause formats the results of the query as JSON text. Column names are used as keys, while the cell values are generated as JSON values: - -```json -[ -{"CustomerName":"Eric Torres","PhoneNumber":"(307) 555-0100","FaxNumber":"(307) 555-0101"}, -{"CustomerName":"Cosmina Vlad","PhoneNumber":"(505) 555-0100","FaxNumber":"(505) 555-0101"}, -{"CustomerName":"Bala Dixit","PhoneNumber":"(209) 555-0100","FaxNumber":"(209) 555-0101"} -] -``` - -The result set is formatted as a JSON array where each row is formatted as a separate JSON object. - -PATH indicates that you can customize the output format of your JSON result by using dot notation in column aliases. 
The following query changes the name of the "CustomerName" key in the output JSON format, and puts phone and fax numbers in the "Contact" sub-object: - -```sql -select CustomerName as Name, PhoneNumber as [Contact.Phone], FaxNumber as [Contact.Fax] -from Sales.Customers -where CustomerID = 931 -FOR JSON PATH, WITHOUT_ARRAY_WRAPPER -``` - -The output of this query looks like this: - -```json -{ - "Name":"Nada Jovanovic", - "Contact":{ - "Phone":"(215) 555-0100", - "Fax":"(215) 555-0101" - } -} -``` - -In this example, we returned a single JSON object instead of an array by specifying the [WITHOUT_ARRAY_WRAPPER](/sql/relational-databases/json/remove-square-brackets-from-json-without-array-wrapper-option) option. You can use this option if you know that you are returning a single object as a result of query. - -The main value of the FOR JSON clause is that it lets you return complex hierarchical data from your database formatted as nested JSON objects or arrays. The following example shows how to include the rows from the `Orders` table that belong to the `Customer` as a nested array of `Orders`: - -```sql -select CustomerName as Name, PhoneNumber as Phone, FaxNumber as Fax, - Orders.OrderID, Orders.OrderDate, Orders.ExpectedDeliveryDate -from Sales.Customers Customer - join Sales.Orders Orders - on Customer.CustomerID = Orders.CustomerID -where Customer.CustomerID = 931 -FOR JSON AUTO, WITHOUT_ARRAY_WRAPPER -``` - -Instead of sending separate queries to get Customer data and then to fetch a list of related Orders, you can get all the necessary data with a single query, as shown in the following sample output: - -```json -{ - "Name":"Nada Jovanovic", - "Phone":"(215) 555-0100", - "Fax":"(215) 555-0101", - "Orders":[ - {"OrderID":382,"OrderDate":"2013-01-07","ExpectedDeliveryDate":"2013-01-08"}, - {"OrderID":395,"OrderDate":"2013-01-07","ExpectedDeliveryDate":"2013-01-08"}, - {"OrderID":1657,"OrderDate":"2013-01-31","ExpectedDeliveryDate":"2013-02-01"} - ] -} -``` - 
-## Working with JSON data - -If you don't have strictly structured data, if you have complex sub-objects, arrays, or hierarchical data, or if your data structures evolve over time, the JSON format can help you to represent any complex data structure. - -JSON is a textual format that can be used like any other string type in Azure SQL Database and Azure SQL Managed Instance. You can send or store JSON data as a standard NVARCHAR: - -```sql -CREATE TABLE Products ( - Id int identity primary key, - Title nvarchar(200), - Data nvarchar(max) -) -go -CREATE PROCEDURE InsertProduct(@title nvarchar(200), @json nvarchar(max)) -AS BEGIN - insert into Products(Title, Data) - values(@title, @json) -END -``` - -The JSON data used in this example is represented by using the NVARCHAR(MAX) type. JSON can be inserted into this table or provided as an argument of the stored procedure using standard Transact-SQL syntax as shown in the following example: - -```sql -EXEC InsertProduct 'Toy car', '{"Price":50,"Color":"White","tags":["toy","children","games"]}' -``` - -Any client-side language or library that works with string data in Azure SQL Database and Azure SQL Managed Instance will also work with JSON data. JSON can be stored in any table that supports the NVARCHAR type, such as a Memory-optimized table or a System-versioned table. JSON does not introduce any constraint either in the client-side code or in the database layer. - -## Querying JSON data - -If you have data formatted as JSON stored in Azure SQL tables, JSON functions let you use this data in any SQL query. - -JSON functions that are available in Azure SQL Database and Azure SQL Managed Instance let you treat data formatted as JSON as any other SQL data type. 
You can easily extract values from the JSON text, and use JSON data in any query: - -```sql -select Id, Title, JSON_VALUE(Data, '$.Color'), JSON_QUERY(Data, '$.tags') -from Products -where JSON_VALUE(Data, '$.Color') = 'White' - -update Products -set Data = JSON_MODIFY(Data, '$.Price', 60) -where Id = 1 -``` - -The JSON_VALUE function extracts a value from JSON text stored in the Data column. This function uses a JavaScript-like path to reference a value in JSON text to extract. The extracted value can be used in any part of SQL query. - -The JSON_QUERY function is similar to JSON_VALUE. Unlike JSON_VALUE, this function extracts complex sub-object such as arrays or objects that are placed in JSON text. - -The JSON_MODIFY function lets you specify the path of the value in the JSON text that should be updated, as well as a new value that will overwrite the old one. This way you can easily update JSON text without reparsing the entire structure. - -Since JSON is stored in a standard text, there are no guarantees that the values stored in text columns are properly formatted. You can verify that text stored in JSON column is properly formatted by using standard Azure SQL Database check constraints and the ISJSON function: - -```sql -ALTER TABLE Products - ADD CONSTRAINT [Data should be formatted as JSON] - CHECK (ISJSON(Data) > 0) -``` - -If the input text is properly formatted JSON, the ISJSON function returns the value 1. On every insert or update of JSON column, this constraint will verify that new text value is not malformed JSON. - -## Transforming JSON into tabular format - -Azure SQL Database and Azure SQL Managed Instance also let you transform JSON collections into tabular format and load or query JSON data. - -OPENJSON is a table-value function that parses JSON text, locates an array of JSON objects, iterates through the elements of the array, and returns one row in the output result for each element of the array. 
- -![JSON tabular](./media/json-features/image_2.png) - -In the example above, we can specify where to locate the JSON array that should be opened (in the $.Orders path), what columns should be returned as result, and where to find the JSON values that will be returned as cells. - -We can transform a JSON array in the @orders variable into a set of rows, analyze this result set, or insert rows into a standard table: - -```sql -CREATE PROCEDURE InsertOrders(@orders nvarchar(max)) -AS BEGIN - - insert into Orders(Number, Date, Customer, Quantity) - select Number, Date, Customer, Quantity - FROM OPENJSON (@orders) - WITH ( - Number varchar(200), - Date datetime, - Customer varchar(200), - Quantity int - ) -END -``` - -The collection of orders formatted as a JSON array and provided as a parameter to the stored procedure can be parsed and inserted into the Orders table. diff --git a/articles/azure-sql/database/ledger-append-only-ledger-tables.md b/articles/azure-sql/database/ledger-append-only-ledger-tables.md deleted file mode 100644 index 1fdee636ac1f6..0000000000000 --- a/articles/azure-sql/database/ledger-append-only-ledger-tables.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: "Azure SQL Database append-only ledger tables" -description: This article provides information on append-only ledger table schema and views in Azure SQL Database. -ms.date: "09/09/2021" -ms.service: sql-database -ms.subservice: security -ms.reviewer: kendralittle, mathoma -ms.topic: conceptual -author: VanMSFT -ms.author: vanto ---- - -# Azure SQL Database append-only ledger tables - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!NOTE] -> Azure SQL Database ledger is currently in public preview. - -Append-only ledger tables allow only `INSERT` operations on your tables, which ensures that privileged users such as database administrators can't alter data through traditional [Data Manipulation Language](/sql/t-sql/queries/queries) operations. 
Append-only ledger tables are ideal for systems that don't update or delete records, such as security information event and management systems or blockchain systems where data needs to be replicated from the blockchain to a database. Because there are no `UPDATE` or `DELETE` operations on an append-only table, there's no need for a corresponding history table as there is with [updatable ledger tables](ledger-updatable-ledger-tables.md). - -:::image type="content" source="media/ledger/ledger-table-architecture-append-only.png" alt-text="Diagram that shows architecture of ledger tables."::: - -You can create an append-only ledger table by specifying the `LEDGER = ON` argument in your [CREATE TABLE (Transact-SQL)](/sql/t-sql/statements/create-table-transact-sql) statement and specifying the `APPEND_ONLY = ON` option. - -> [!IMPORTANT] -> After a table is created as a ledger table, it can't be reverted to a table that doesn't have ledger functionality. As a result, an attacker can't temporarily remove ledger capabilities, make changes to the table, and then reenable ledger functionality. - -### Append-only ledger table schema - -An append-only table needs to have the following [GENERATED ALWAYS](/sql/t-sql/statements/create-table-transact-sql#generate-always-columns) columns that contain metadata noting which transactions made changes to the table and the order of operations by which rows were updated by the transaction. When you create an append-only ledger table, `GENERATED ALWAYS` columns will be created in your ledger table. This data is useful for forensics purposes in understanding how data was inserted over time. - -If you don't specify the definitions of the `GENERATED ALWAYS` columns in the [CREATE TABLE](/sql/t-sql/statements/create-table-transact-sql) statement, the system automatically adds them by using the following default names. 
- -| Default column name | Data type | Description | -|--|--|--| -| ledger_start_transaction_id | bigint | The ID of the transaction that created a row version | -| ledger_start_sequence_number | bigint | The sequence number of an operation within a transaction that created a row version | - -## Ledger view - -For every append-only ledger table, the system automatically generates a view, called the ledger view. The ledger view reports all row inserts that have occurred on the table. The ledger view is primarily helpful for [updatable ledger tables](ledger-updatable-ledger-tables.md), rather than append-only ledger tables, because append-only ledger tables don't have any `UPDATE` or `DELETE` capabilities. The ledger view for append-only ledger tables is available for consistency between both updatable and append-only ledger tables. - -### Ledger view schema - -> [!NOTE] -> The ledger view column names can be customized when you create the table by using the `` parameter with the [CREATE TABLE (Transact-SQL)](/sql/t-sql/statements/create-table-transact-sql?view=azuresqldb-current&preserve-view=true) statement. For more information, see [ledger view options](/sql/t-sql/statements/create-table-transact-sql?view=azuresqldb-current&preserve-view=true#ledger-view-options) and the corresponding examples in [CREATE TABLE (Transact-SQL)](/sql/t-sql/statements/create-table-transact-sql?view=azuresqldb-current&preserve-view=true). - -| Default column name | Data type | Description | -| --- | --- | --- | -| ledger_transaction_id | bigint | The ID of the transaction that created or deleted a row version. | -| ledger_sequence_number | bigint | The sequence number of a row-level operation within the transaction on the table. | -| ledger_operation_type | tinyint | Contains `1` (**INSERT**) or `2` (**DELETE**). Inserting a row into the ledger table produces a new row in the ledger view that contains `1` in this column. 
Deleting a row from the ledger table produces a new row in the ledger view that contains `2` in this column. Updating a row in the ledger table produces two new rows in the ledger view. One row contains `2` (**DELETE**), and the other row contains `1` (**INSERT**) in this column. A DELETE shouldn't occur on an append-only ledger table. | -| ledger_operation_type_desc | nvarchar(128) | Contains `INSERT` or `DELETE`. For more information, see the preceding row. | - -## Next steps - -- [Create and use append-only ledger tables](ledger-how-to-append-only-ledger-tables.md) -- [Create and use updatable ledger tables](ledger-how-to-updatable-ledger-tables.md) diff --git a/articles/azure-sql/database/ledger-audit.md b/articles/azure-sql/database/ledger-audit.md deleted file mode 100644 index 32f5b7c6d017a..0000000000000 --- a/articles/azure-sql/database/ledger-audit.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -title: "Azure SQL Database audit events with ledger-enabled tables" -description: Overview of Azure SQL Database ledger auditing capabilities -ms.date: "09/09/2021" -ms.service: sql-database -ms.subservice: security -ms.reviewer: kendralittle, mathoma -ms.topic: conceptual -author: VanMSFT -ms.author: vanto ---- - -# Azure SQL Database audit events with ledger-enabled tables - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!NOTE] -> Azure SQL Database ledger is currently in public preview. - -When you perform forensics activities with ledger-enabled tables, data is captured in the ledger view and database ledger. Other action IDs are added to the SQL audit logs, too. The following tables outline these new audit logging events. The conditions that trigger the events follow each table. 
- -## Enable ledger - -| Column | Value | -|--|--| -| **action_id** | ENLR | -| **name** | ENABLE LEDGER | -| **class_desc** | OBJECT | -| **covering_action_desc** | NULL | -| **parent_class_desc** | DATABASE | -| **covering_parent_action_name** | LEDGER_OPERATION_GROUP | -| **configuration_level** | NULL | -| **configuration_group_name** | LEDGER_OPERATION_GROUP | -| **action_in_log** | 1 | - -**Conditions that trigger the event**: When you create a new ledger table or convert a regular table to a ledger table. - -## Alter ledger - -| Column | Value | -|--|--| -| **action_id** | ALLR | -| **name** | ALTER LEDGER | -| **class_desc** | OBJECT | -| **covering_action_desc** | NULL | -| **parent_class_desc** | DATABASE | -| **covering_parent_action_name** | LEDGER_OPERATION_GROUP | -| **configuration_level** | NULL | -| **configuration_group_name** | LEDGER_OPERATION_GROUP | -| **action_in_log** | 1 | - -**Conditions that trigger the event**: When you drop or rename a ledger table, convert a ledger table to a normal table, and add, drop, or rename a column in a ledger table. - - -## Generate ledger digest - -| Column | Value | -|--|--| -| **action_id** | GDLR | -| **name** | GENERATE LEDGER DIGEST | -| **class_desc** | DATABASE | -| **covering_action_desc** | LEDGER_OPERATION_GROUP | -| **parent_class_desc** | SERVER | -| **covering_parent_action_name** | LEDGER_OPERATION_GROUP | -| **configuration_level** | NULL | -| **configuration_group_name** | LEDGER_OPERATION_GROUP | -| **action_in_log** | 1 | - -**Condition that triggers the event**: When you generate a ledger digest. 
- -## Verify ledger - -| Column | Value | -|--|--| -| **action_id** | VFLR | -| **name** | VERIFY LEDGER | -| **class_desc** | DATABASE | -| **covering_action_desc** | LEDGER_OPERATION_GROUP | -| **parent_class_desc** | SERVER | -| **covering_parent_action_name** | LEDGER_OPERATION_GROUP | -| **configuration_level** | NULL | -| **configuration_group_name** | LEDGER_OPERATION_GROUP | -| **action_in_log** | 1 | - -**Condition that triggers the event**: When you verify a ledger digest. - -## Ledger operation group - -| Column | Value | -|--|--| -| **action_id** | OPLR | -| **name** | LEDGER_OPERATION_GROUP | -| **class_desc** | DATABASE | -| **covering_action_desc** | NULL | -| **parent_class_desc** | SERVER | -| **covering_parent_action_name** | NULL | -| **configuration_level** | GROUP | -| **configuration_group_name** | LEDGER_OPERATION_GROUP | -| **action_in_log** | 0 | - -**Condition that triggers the event**: N/A - -| Column | Value | -|--|--| -| **action_id** | OPLR | -| **name** | LEDGER_OPERATION_GROUP | -| **class_desc** | SERVER | -| **covering_action_desc** | NULL | -| **parent_class_desc** | NULL | -| **covering_parent_action_name** | NULL | -| **configuration_level** | GROUP | -| **configuration_group_name** | LEDGER_OPERATION_GROUP | -| **action_in_log** | 0 | - -**Condition that triggers the event**: N/A - -## Next steps - -- [Auditing for Azure SQL Database and Azure Synapse Analytics](auditing-overview.md) -- [Azure SQL Database ledger overview](ledger-overview.md) -- [Quickstart: Create a database in Azure SQL Database with ledger enabled](ledger-create-a-single-database-with-ledger-enabled.md) diff --git a/articles/azure-sql/database/ledger-create-a-single-database-with-ledger-enabled.md b/articles/azure-sql/database/ledger-create-a-single-database-with-ledger-enabled.md deleted file mode 100644 index a75783ce6d186..0000000000000 --- a/articles/azure-sql/database/ledger-create-a-single-database-with-ledger-enabled.md +++ /dev/null @@ -1,492 +0,0 @@ 
---- -title: Create a single database with ledger enabled -description: Create a single database in Azure SQL Database with ledger enabled by using the Azure portal. -ms.service: sql-database -ms.subservice: security -ms.topic: quickstart -author: VanMSFT -ms.author: vanto -ms.reviewer: kendralittle, mathoma -ms.date: "01/20/2022" -ms.custom: mode-other ---- - -# Quickstart: Create a database in Azure SQL Database with ledger enabled - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!NOTE] -> Azure SQL Database ledger is currently in public preview. - -In this quickstart, you create a [ledger database](ledger-overview.md#ledger-database) in Azure SQL Database and configure [automatic digest storage with Azure Blob Storage](ledger-digest-management-and-database-verification.md#automatic-generation-and-storage-of-database-digests) by using the Azure portal. For more information about ledger, see [Azure SQL Database ledger](ledger-overview.md). - -## Prerequisite - -You need an active Azure subscription. If you don't have one, [create a free account](https://azure.microsoft.com/free/). - -## Create a ledger database and configure digest storage - -Create a single ledger database in the [serverless compute tier](serverless-tier-overview.md), and configure uploading ledger digests to an Azure Storage account. - -# [Portal](#tab/azure-portal) - -To create a single database in the Azure portal, this quickstart starts at the Azure SQL page. - -1. Browse to the [Select SQL Deployment option](https://portal.azure.com/#create/Microsoft.AzureSQL) page. - -1. Under **SQL databases**, leave **Resource type** set to **Single database**, and select **Create**. - - ![Screenshot that shows adding to Azure SQL.](./media/single-database-create-quickstart/select-deployment.png) - -1. On the **Basics** tab of the **Create SQL Database** form, under **Project details**, select the Azure subscription you want to use. - -1. 
For **Resource group**, select **Create new**, enter **myResourceGroup**, and select **OK**. - -1. For **Database name**, enter **demo**. - -1. For **Server**, select **Create new**. Fill out the **New server** form with the following values: - - **Server name**: Enter **mysqlserver**, and add some characters for uniqueness. We can't provide an exact server name to use because server names must be globally unique for all servers in Azure, not just unique within a subscription. Enter something like **mysqlserver12345**, and the portal lets you know if it's available or not. - - **Server admin login**: Enter **azureuser**. - - **Password**: Enter a password that meets requirements. Enter it again in the **Confirm password** box. - - **Location**: Select a location from the dropdown list. - - **Allow Azure services to access this server**: Select this option to enable access to digest storage. - - Select **OK**. - -1. Leave **Want to use SQL elastic pool** set to **No**. - -1. Under **Compute + storage**, select **Configure database**. - -1. This quickstart uses a serverless database, so select **Serverless**, and then select **Apply**. - - ![Screenshot that shows configuring a serverless database.](./media/single-database-create-quickstart/configure-database.png) - -1. On the **Networking** tab, for **Connectivity method**, select **Public endpoint**. -1. For **Firewall rules**, set **Add current client IP address** to **Yes**. Leave **Allow Azure services and resources to access this server** set to **No**. -1. Select **Next: Security** at the bottom of the page. - - :::image type="content" source="media/ledger/ledger-create-database-networking-tab.png" alt-text="Screenshot that shows the Networking tab of the Create SQL Database screen in the Azure portal."::: - -1. On the **Security** tab, in the **Ledger** section, select the **Configure ledger** option. 
- - :::image type="content" source="media/ledger/ledger-configure-ledger-security-tab.png" alt-text="Screenshot that shows configuring a ledger on the Security tab of the Azure portal."::: - -1. On the **Configure ledger** pane, in the **Ledger** section, select the **Enable for all future tables in this database** checkbox. This setting ensures that all future tables in the database will be ledger tables. For this reason, all data in the database will show any evidence of tampering. By default, new tables will be created as updatable ledger tables, even if you don't specify `LEDGER = ON` in [CREATE TABLE](/sql/t-sql/statements/create-table-transact-sql). You can also leave this option unselected. You're then required to enable ledger functionality on a per-table basis when you create new tables by using Transact-SQL. - -1. In the **Digest Storage** section, **Enable automatic digest storage** is automatically selected. Then, a new Azure Storage account and container where your digests are stored is created. - -1. Select **Apply**. - - :::image type="content" source="media/ledger/ledger-configure-ledger-pane.png" alt-text="Screenshot that shows the Configure ledger (preview) pane in the Azure portal."::: - -1. Select **Review + create** at the bottom of the page. - - :::image type="content" source="media/ledger/ledger-review-security-tab.png" alt-text="Screenshot that shows reviewing and creating a ledger database on the Security tab of the Azure portal."::: - -1. On the **Review + create** page, after you review, select **Create**. - -# [The Azure CLI](#tab/azure-cli) - -You'll create a resource group, a logical database server, a single ledger database, and configure uploading ledger digests using The Azure CLI. - -## Launch Azure Cloud Shell - -The Azure Cloud Shell is a free interactive shell that you can use to run the steps in this article. It has common Azure tools preinstalled and configured to use with your account. 
- -To open the Cloud Shell, just select **Try it** from the upper right corner of a code block. You can also launch Cloud Shell in a separate browser tab by going to [https://shell.azure.com](https://shell.azure.com). Select **Copy** to copy the blocks of code, paste it into the Cloud Shell, and press **Enter** to run it. - -### Set parameter values - -The following values are used in subsequent commands to create the database and required resources. Server names and storage account names need to be globally unique across all of Azure so the $RANDOM function is used to create the server name and the storage account name. - -The resource name must be unique in your subscription. Replace `` with a unique name, and `` with your Subscription ID. - -Replace the 0.0.0.0 values in the ip address range to match your specific environment. - -Replace **westeurope** with your preferred Azure region name. - -```azurecli-interactive -resourceGroupName="" -location="westeurope" -serverName="mysqlserver"-$RANDOM -databaseName="myLedgerDatabase" -storageAccountName="mystorage"$RANDOM -subscription="" -adminLogin=azureuser -adminPassword=Azure1234567! 
-serverResourceId="/subscriptions/$subscription/resourceGroups/$resourceGroupName/providers/Microsoft.Sql/servers/$serverName" - -# The ip address range that you want to allow to access your server -startIP=0.0.0.0 -endIP=0.0.0.0 - -# Set variables for your digest storage location -storageAccountName="mystorage"$RANDOM -storageAccountURL1="https://" -storageAccountURL3=".blob.core.windows.net" -storageAccountURL=$storageAccountURL1$storageAccountName$storageAccountURL3 -storageAccountResourceId="/subscriptions/$subscription/resourceGroups/$resourceGroupName/providers/Microsoft.Storage/storageAccounts/$storageAccountName" - -# Show resource names -echo "Resource group name is" $resourceGroupName -echo "Server name is" $serverName -echo "Database name is" $databaseName -echo "Storage account name is" $storageAccountName -``` - -### Create a resource group - -Create a resource group with the [az group create](/cli/azure/group) command. An Azure resource group is a logical container into which Azure resources are deployed and managed. - -```azurecli-interactive -az group create --name $resourceGroupName --location $location -``` - -### Create a server with a managed identity - -Create a server with the [az sql server create](/cli/azure/sql/server) command. The command creates the server with a managed identity assigned. - -```azurecli-interactive -az sql server create \ - --name $serverName \ - --resource-group $resourceGroupName \ - --location $location \ - --admin-user $adminLogin \ - --admin-password $adminPassword \ - --assign-identity -``` - -This command stores the ID in a variable, which will later be used to grant the server permissions to upload ledger digests. 
- -```azurecli-interactive -# Retrieves the assigned identity to be used when granting the server access to the storage account -principalId=`az sql server show \ - --name $serverName \ - --resource-group $resourceGroupName \ - --query identity.principalId \ - --output tsv` -``` - -### Configure a firewall rule for the server - -Create a firewall rule with the [az sql server firewall-rule create](/cli/azure/sql/server/firewall-rule) command. - -```azurecli-interactive -az sql server firewall-rule create \ - --resource-group $resourceGroupName \ - --server $serverName \ - -n AllowYourIp \ - --start-ip-address $startIP \ - --end-ip-address $endIP -``` - -### Create a single ledger database - -Create a ledger database with the [az sql db create](/cli/azure/sql/db) command. The following command creates a serverless database with ledger enabled. - -```azurecli-interactive -az sql db create \ - --resource-group $resourceGroupName \ - --server $serverName \ - --name $databaseName \ - --edition GeneralPurpose \ - --family Gen5 \ - --capacity 2 \ - --compute-model Serverless \ - --ledger-on -``` - -### Create a storage account - -Create a storage account to store ledger digests with the [az storage account create](/cli/azure/sql/db) command. - -```azurecli-interactive -az storage account create \ - --name $storageAccountName \ - --resource-group $resourceGroupName \ - --location $location \ - --sku Standard_GRS \ - --kind StorageV2 -``` - -### Grant the server permissions to write ledger digests - -Assign the managed identity of the server to the [Storage Blob Data Contributor](../../role-based-access-control/built-in-roles.md#storage-blob-data-contributor) role with the [az role assignment create](/cli/azure/sql/db) command. This gives the SQL server the appropriate permissions to publish database digests to the storage account. 
- -```azurecli-interactive -az role assignment create \ - --assignee-object-id $principalId \ - --assignee-principal-type "ServicePrincipal" \ - --role "Storage Blob Data Contributor" \ - --scope $storageAccountResourceId -``` - -### Enable database digest uploads - -Update the database to start uploading ledger digests to the storage account by using the [az sql db ledger-digest-uploads enable](/cli/azure/sql/db) command. - -```azurecli-interactive -az sql db ledger-digest-uploads enable \ - --name $databaseName \ - --resource-group $resourceGroupName \ - --server $serverName \ - --endpoint $storageAccountURL -``` - -### Configure a time-based retention policy - -To protect the digests from being deleted or updated, it is recommended you configure a time-based retention policy on the **sqldbledgerdigests** container by using the [az storage container immutability-policy create](/cli/azure/sql/db) and [az storage container immutability-policy lock](/cli/azure/sql/db) commands. The policy must allow protected append blobs writes. This ensures the database server can add blocks containing new digests to an existing blob, while deleting or updating the digests is disabled for the specified immutability period. - -> [!IMPORTANT] -> The below example uses the immutability period value of 1 day. In a production environment, you should use a much larger value. - -> [!NOTE] -> Once database digests begin to be uploaded to the storage account, you will not be able to delete the storage account until the immutability policy expires. Setting the immutability policy can be skipped if you plan to clean-up resources immediatly after this QuickStart. - -For more information about time-based retention policy for containers, see [Configure immutability policies for containers](../../storage/blobs/immutable-policy-configure-container-scope.md). 
- -```azurecli-interactive -az storage container immutability-policy create \ - --resource-group $resourceGroupName \ - --account-name $storageAccountName \ - --container-name sqldbledgerdigests \ - --period 1 \ - --allow-protected-append-writes true -``` - -```azurecli-interactive -# Retrieves the etag value of the policy to be used when the policy is locked -etag=`az storage container immutability-policy show \ - --account-name $storageAccountName \ - --container-name sqldbledgerdigests \ - --query etag \ - --output tsv` -etag="${etag/$'\r'/}" -``` - -```azurecli-interactive -az storage container immutability-policy lock \ - --resource-group $resourceGroupName \ - --account-name $storageAccountName \ - --container-name sqldbledgerdigests \ - --if-match $etag -``` - -# [PowerShell](#tab/azure-powershell) - -You'll create a resource group, a logical database server, a single ledger database, and configure uploading ledger digests using Windows PowerShell. - -### Launch Azure Cloud Shell - -The Azure Cloud Shell is a free interactive shell that you can use to run the steps in this article. It has common Azure tools preinstalled and configured to use with your account. - -To open the Cloud Shell, just select **Try it** from the upper right corner of a code block. You can also launch Cloud Shell in a separate browser tab by going to [https://shell.azure.com](https://shell.azure.com). Select **Copy** to copy the blocks of code, paste it into the Cloud Shell, and press **Enter** to run it. - -### Set parameter values - -The following values are used in subsequent commands to create the database and required resources. Server names and storage account names need to be globally unique across all of Azure so the Get-Random cmdlet is used to create the server name and the storage account name. - -The resource name must be unique in your subscription. Replace `` with a unique name. - -Replace the 0.0.0.0 values in the ip address range to match your specific environment. 
- -Replace **westeurope** with your preferred Azure region name. - -```azurepowershell-interactive -# Set variables for your server and database -$resourceGroupName = "" -$location = "westeurope" -$serverName = "mysqlserver-$(Get-Random)" -$databaseName = "myLedgerDatabase" -$storageAccountName = "mystorage$(Get-Random)" - -# The ip address range that you want to allow to access your server -$startIP = "0.0.0.0" -$endIP = "0.0.0.0" - -# Show resource names -Write-host "Resource group name is" $resourceGroupName -Write-host "Server name is" $serverName -Write-host "Storage account name is" $storageAccountName -``` - -### Create a resource group - -Create an Azure resource group with [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup). A resource group is a logical container into which Azure resources are deployed and managed. - -```azurepowershell-interactive -Write-host "Creating resource group..." -$resourceGroup = New-AzResourceGroup ` - -Name $resourceGroupName ` - -Location $location -$resourceGroup -``` - -### Create a server - -Create a server with the [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) cmdlet. - -The cmdlet creates the server with a managed identity assigned, which you will need later to grant the server permissions to upload ledger digests. - -When prompted, enter your SQL administrator username and a password. - -```azurepowershell-interactive -Write-host "Creating primary server..." -$server = New-AzSqlServer ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -Location $location ` - -AssignIdentity ` - -SqlAdministratorCredentials (Get-Credential) -$server -``` - -### Create a firewall rule - -Create a server firewall rule with the [New-AzSqlServerFirewallRule](/powershell/module/az.sql/new-azsqlserverfirewallrule) cmdlet. - -```azurepowershell-interactive -Write-host "Configuring server firewall rule..." 
-$serverFirewallRule = New-AzSqlServerFirewallRule -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -FirewallRuleName "AllowedIPs" -StartIpAddress $startIP -EndIpAddress $endIP -$serverFirewallRule -``` - -### Create a single ledger database - -Create a single ledger database with the [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase) cmdlet. - -The below example creates a serverless database. - -```azurepowershell-interactive -Write-host "Creating a gen5 2 vCore serverless ledger database..." -$database = New-AzSqlDatabase -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -DatabaseName $databaseName ` - -Edition GeneralPurpose ` - -ComputeModel Serverless ` - -ComputeGeneration Gen5 ` - -VCore 2 ` - -MinimumCapacity 2 ` - -EnableLedger -$database -``` - -### Create a storage account - -Create a storage account to store ledger digests with the [New-AzStorageAccount](/powershell/module/az.storage/new-azstorageaccount) cmdlet. - -```azurepowershell-interactive -Write-host "Creating a storage account for ledger digests..." -$storage = New-AzStorageAccount -ResourceGroupName $resourceGroupName ` - -Name $storageAccountName ` - -Location $location ` - -SkuName Standard_RAGRS ` - -Kind StorageV2 ` - -AccessTier Hot -$storage -``` - -### Grant the server permissions to write ledger digests - -Assign the managed identity of the server to the [Storage Blob Data Contributor](../../role-based-access-control/built-in-roles.md#storage-blob-data-contributor) role with the [New-AzRoleAssignment](/powershell/module/az.Resources/New-azRoleAssignment) cmdlet. This gives the SQL server the appropriate permissions to publish database digests to the storage account. - -```azurepowershell-interactive -Write-host "Granting the server access to the storage account..." 
-$assignment = New-AzRoleAssignment ` - -ObjectId $server.Identity.PrincipalId ` - -RoleDefinitionName "Storage Blob Data Contributor" ` - -ResourceGroupName $resourceGroupName ` - -ResourceType "Microsoft.Storage/storageAccounts" ` - -ResourceName $storageAccountName -$assignment -``` - -### Enable database digest uploads - -Update the database to start uploading ledger digests to the storage account, by using the [Enable-AzSqlDatabaseLedgerDigestUpload](/powershell/module/az.sql/enable-azsqldatabaseledgerdigestupload) cmdlet. The database server will create a new container, named **sqldbledgerdigests**, within the storage account and it will start writing ledger digests to the container. - -```azurepowershell-interactive -Write-host "Enabling ledger digest upload..." -$ledgerDigestUploadConfig = Enable-AzSqlDatabaseLedgerDigestUpload ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -DatabaseName $databaseName ` - -Endpoint $storage.PrimaryEndpoints.Blob -$ledgerDigestUploadConfig -``` - -### Configure a time-based retention policy - -To protect the digests from being deleted or updated, it is recommended you configure a time-based retention policy on the **sqldbledgerdigests** container by using the [Set-AzRmStorageContainerImmutabilityPolicy](/powershell/module/az.storage/set-azrmstoragecontainerimmutabilitypolicy) and [Lock-AzRmStorageContainerImmutabilityPolicy](/powershell/module/az.storage/lock-azrmstoragecontainerimmutabilitypolicy) cmdlets. The policy must allow protected append blobs writes. This ensures the database server can add blocks containing new digests to an existing blob, while deleting or updating the digests is disabled for the specified immutability period. - -> [!IMPORTANT] -> The below example uses the immutability period value of 1 day. In a production environment, you should use a much larger value. 
- -> [!NOTE] -> You will not be able to delete the container or the storage account during the specified immutability period. - -For more information about time-based retention policy for containers, see [Configure immutability policies for containers](../../storage/blobs/immutable-policy-configure-container-scope.md). - -```azurepowershell-interactive -Write-host "Configuring a time-based retention policy..." -$immutabilityPerdiod = 1 -$containerName = "sqldbledgerdigests" -$policy = Set-AzRmStorageContainerImmutabilityPolicy ` - -ResourceGroupName $resourceGroupName ` - -StorageAccountName $storageAccountName ` - -ContainerName $containerName ` - -AllowProtectedAppendWrite $true ` - -ImmutabilityPeriod $immutabilityPerdiod - -Lock-AzRmStorageContainerImmutabilityPolicy ` - -ResourceGroupName $resourceGroupName ` - -StorageAccountName $storageAccountName ` - -ContainerName $containerName ` - -Etag $policy.Etag -``` - ---- - -## Clean up resources - -Keep the resource group, server, and single database for the next steps. You'll learn how to use the ledger feature of your database with different methods. - -When you're finished using these resources, delete the resource group you created. This action also deletes the server and single database within it, and the storage account. - -> [!NOTE] -> If you've configured and locked a time-based retention policy on the container, you need to wait until the specified immutability period ends before you can delete the storage account. - -# [Portal](#tab/azure-portal) - -To delete **myResourceGroup** and all its resources by using the Azure portal: - -1. In the portal, search for and select **Resource groups**. Then select **myResourceGroup** from the list. -1. On the resource group page, select **Delete resource group**. -1. Under **Type the resource group name**, enter **myResourceGroup**, and then select **Delete**. 
- -# [The Azure CLI](#tab/azure-cli) - -To delete the resource group and all its resources, run the following Azure CLI cmdlet, using the name of your resource group: - -```azurecli-interactive -az group delete -n resourceGroupName -``` - -# [PowerShell](#tab/azure-powershell) - -To delete the resource group and all its resources, run the following PowerShell cmdlet, using the name of your resource group: - -```azurepowershell-interactive -Remove-AzResourceGroup -Name $resourceGroupName -``` - ---- - -## Next steps - -Connect and query your database by using different tools and languages: - -- [Create and use updatable ledger tables](ledger-how-to-updatable-ledger-tables.md) -- [Create and use append-only ledger tables](ledger-how-to-append-only-ledger-tables.md) \ No newline at end of file diff --git a/articles/azure-sql/database/ledger-database-ledger.md b/articles/azure-sql/database/ledger-database-ledger.md deleted file mode 100644 index c57e202903d16..0000000000000 --- a/articles/azure-sql/database/ledger-database-ledger.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: "Database ledger" -description: This article provides information on ledger database tables and associated views in Azure SQL Database. -ms.date: "09/09/2021" -ms.service: sql-database -ms.subservice: security -ms.reviewer: kendralittle, mathoma -ms.topic: conceptual -author: VanMSFT -ms.author: vanto ---- - -# What is the database ledger? - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!NOTE] -> Azure SQL Database ledger is currently in public preview. - -The database ledger is part of the ledger feature of Azure SQL Database. The database ledger incrementally captures the state of a database as the database evolves over time, while updates occur on ledger tables. It logically uses a blockchain and [Merkle tree data structures](/archive/msdn-magazine/2018/march/blockchain-blockchain-fundamentals). 
- -To capture the state of the database, the database ledger stores an entry for every transaction. It captures metadata about the transaction, such as its commit timestamp and the identity of the user who executed it. It also captures the Merkle tree root of the rows updated in each ledger table. These entries are then appended to a tamper-evident data structure to allow verification of integrity in the future. - -:::image type="content" source="media/ledger/merkle-tree.png" alt-text="Diagram that shows a Merkle tree for the ledger feature."::: - -For more information on how Azure SQL Database ledger provides data integrity, see [Digest management and database verification](ledger-digest-management-and-database-verification.md). - -## Where are database transaction and block data stored? - -The data for transactions and blocks is physically stored as rows in two system catalog views: - -- [sys.database_ledger_transactions](/sql/relational-databases/system-catalog-views/sys-database-ledger-transactions-transact-sql): Maintains a row with the information of each transaction in the database ledger. The information includes the ID of the block where this transaction belongs and the ordinal of the transaction within the block. -- [sys.database_ledger_blocks](/sql/relational-databases/system-catalog-views/sys-database-ledger-blocks-transact-sql): Maintains a row for every block in the ledger, including the root of the Merkle tree over the transactions within the block and the hash of the previous block to form a blockchain. - -To view the database ledger, run the following T-SQL statements in [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) or [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio). - -> [!IMPORTANT] -> Viewing the database ledger requires the **VIEW LEDGER CONTENT** permission. 
For details on permissions related to ledger tables, see [Permissions](/sql/relational-databases/security/permissions-database-engine#asdbpermissions). - -```sql -SELECT * FROM sys.database_ledger_transactions -GO - -SELECT * FROM sys.database_ledger_blocks -GO -``` - -The following example of a ledger table consists of four transactions that made up one block in the blockchain of the database ledger: - -:::image type="content" source="media/ledger/database-ledger-1.png" alt-text="Screenshot of an example ledger table."::: - -A block is closed every 30 seconds, or when the user manually generates a database digest by running the [sys.sp_generate_database_ledger_digest](/sql/relational-databases/system-stored-procedures/sys-sp-generate-database-ledger-digest-transact-sql) stored procedure. - -When a block is closed, new transactions will be inserted in a new block. The block generation process then: - -1. Retrieves all transactions that belong to the *closed* block from both the in-memory queue and the [sys.database_ledger_transactions](/sql/relational-databases/system-catalog-views/sys-database-ledger-transactions-transact-sql) system catalog view. -1. Computes the Merkle tree root over these transactions and the hash of the previous block. -1. Persists the closed block in the [sys.database_ledger_blocks](/sql/relational-databases/system-catalog-views/sys-database-ledger-blocks-transact-sql) system catalog view. - -Because this is a regular table update, the system automatically guarantees its durability. To maintain the single chain of blocks, this operation is single-threaded. But it's also efficient, because it only computes the hashes over the transaction information and happens asynchronously. It doesn't affect the transaction performance. 
- -## Next steps - -- [Azure SQL Database ledger overview](ledger-overview.md) -- [Security catalog views (Transact-SQL)](/sql/relational-databases/system-catalog-views/security-catalog-views-transact-sql) diff --git a/articles/azure-sql/database/ledger-digest-management-and-database-verification.md b/articles/azure-sql/database/ledger-digest-management-and-database-verification.md deleted file mode 100644 index bf5f007be5e22..0000000000000 --- a/articles/azure-sql/database/ledger-digest-management-and-database-verification.md +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: "Digest management and database verification" -description: This article provides information on digest management and database verification for a ledger database in Azure SQL Database. -ms.date: "09/09/2021" -ms.service: sql-database -ms.subservice: security -ms.reviewer: kendralittle, mathoma -ms.topic: conceptual -author: VanMSFT -ms.author: vanto ---- - -# Digest management and database verification - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!NOTE] -> Azure SQL Database ledger is currently in public preview. - -Azure SQL Database ledger provides a form of data integrity called *forward integrity*, which provides evidence of data tampering on data in your ledger tables. For example, if a banking transaction occurs on a ledger table where a balance has been updated to value `x`, and an attacker later modifies the data by changing the balance from `x` to `y`, database verification will detect this tampering activity. - -The database verification process takes as input one or more previously generated database digests. It then recomputes the hashes stored in the database ledger based on the current state of the ledger tables. If the computed hashes don't match the input digests, the verification fails. The failure indicates that the data has been tampered with. The verification process reports all inconsistencies that it detects. 
- -## Database digests - -The hash of the latest block in the database ledger is called the *database digest*. It represents the state of all ledger tables in the database at the time when the block was generated. Generating a database digest is efficient, because it involves computing only the hashes of the blocks that were recently appended. - -Database digests can be generated either automatically by the system or manually by the user. You can use them later to verify the integrity of the database. - -Database digests are generated in the form of a JSON document that contains the hash of the latest block, together with metadata for the block ID. The metadata includes the time that the digest was generated and the commit time stamp of the last transaction in this block. - -The verification process and the integrity of the database depend on the integrity of the input digests. For this purpose, database digests that are extracted from the database need to be stored in trusted storage that the high-privileged users or attackers of the Azure SQL Database server can't tamper with. - -### Automatic generation and storage of database digests - -Azure SQL Database ledger integrates with the [immutable storage feature of Azure Blob Storage](../../storage/blobs/immutable-storage-overview.md) and [Azure Confidential Ledger](../../confidential-ledger/index.yml). This integration provides secure storage services in Azure to help protect the database digests from potential tampering. This integration provides a simple and cost-effective way for users to automate digest management without having to worry about their availability and geographic replication. - -You can configure automatic generation and storage of database digests through the Azure portal, PowerShell, or the Azure CLI. When you configure automatic generation and storage, database digests are generated on a predefined interval of 30 seconds and uploaded to the selected storage service. 
If no transactions occur in the system in the 30-second interval, a database digest won't be generated and uploaded. This mechanism ensures that database digests are generated only when data has been updated in your database. - -:::image type="content" source="media/ledger/automatic-digest-management.png" alt-text="Screenshot that shows the selections for enabling digest storage."::: - -> [!IMPORTANT] -> Configure an [immutability policy](../../storage/blobs/immutable-policy-configure-version-scope.md) on your container after provisioning to ensure that database digests are protected from tampering. - -### Manual generation and storage of database digests - -You can also use Azure SQL Database ledger to generate a database digest on demand so that you can manually store the digest in any service or device that you consider a trusted storage destination. For example, you might choose an on-premises write once, read many (WORM) device as a destination. You manually generate a database digest by running the [sys.sp_generate_database_ledger_digest](/sql/relational-databases/system-stored-procedures/sys-sp-generate-database-ledger-digest-transact-sql) stored procedure in either [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) or [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio). - -> [!IMPORTANT] -> Generating database digests requires the **GENERATE LEDGER DIGEST** permission. For details on permissions related to ledger tables, see [Permissions](/sql/relational-databases/security/permissions-database-engine#asdbpermissions). - -```sql -EXECUTE sp_generate_database_ledger_digest -``` - -The returned result set is a single row of data. 
It should be saved to the trusted storage location as a JSON document as follows: - -```json - { - "database_name": "ledgerdb", - "block_id": 0, - "hash": "0xDC160697D823C51377F97020796486A59047EBDBF77C3E8F94EEE0FFF7B38A6A", - "last_transaction_commit_time": "2020-11-12T18:01:56.6200000", - "digest_time": "2020-11-12T18:39:27.7385724" - } -``` - -## Database verification - -The verification process scans all ledger and history tables. It recomputes the SHA-256 hashes of their rows and compares them against the database digest files passed to the verification stored procedure. - -For large ledger tables, database verification can be a resource-intensive process. You should use it only when you need to verify the integrity of a database. - -The verification process can be executed hourly or daily for cases where the integrity of the database needs to be frequently monitored. Or it can be executed only when the organization that's hosting the data goes through an audit and needs to provide cryptographic evidence about the integrity of the data. To reduce the cost of verification, ledger exposes options to verify individual ledger tables or only a subset of the ledger tables. - -You accomplish database verification through two stored procedures, depending on whether you [use automatic digest storage](#database-verification-that-uses-automatic-digest-storage) or you [manually manage digests](#database-verification-that-uses-manual-digest-storage). - -> [!IMPORTANT] -> Database verification requires the *View Ledger Content* permission. For details on permissions related to ledger tables, see [Permissions](/sql/relational-databases/security/permissions-database-engine#asdbpermissions). 
- -### Database verification that uses automatic digest storage - -When you're using automatic digest storage for generating and storing database digests, the location of the digest storage is in the system catalog view [sys.database_ledger_digest_locations](/sql/relational-databases/system-catalog-views/sys-database-ledger-digest-locations-transact-sql) as JSON objects. Running database verification consists of executing the [sp_verify_database_ledger_from_digest_storage](/sql/relational-databases/system-stored-procedures/sys-sp-verify-database-ledger-from-digest-storage-transact-sql) system stored procedure. Specify the JSON objects from the [sys.database_ledger_digest_locations](/sql/relational-databases/system-catalog-views/sys-database-ledger-digest-locations-transact-sql) system catalog view where database digests are configured to be stored. - -When you use automatic digest storage, you can change storage locations throughout the lifecycle of the ledger tables. For example, if you start by using Azure immutable storage to store your digest files, but later you want to use Azure Confidential Ledger instead, you can do so. This change in location is stored in [sys.database_ledger_digest_locations](/sql/relational-databases/system-catalog-views/sys-database-ledger-digest-locations-transact-sql). - -To simplify running verification when you use multiple digest storage locations, the following script will fetch the locations of the digests and execute verification by using those locations. - -```sql -DECLARE @digest_locations NVARCHAR(MAX) = (SELECT * FROM sys.database_ledger_digest_locations FOR JSON AUTO, INCLUDE_NULL_VALUES); -SELECT @digest_locations as digest_locations; -BEGIN TRY - EXEC sys.sp_verify_database_ledger_from_digest_storage @digest_locations; - SELECT 'Ledger verification succeeded.' 
AS Result; -END TRY -BEGIN CATCH - THROW; -END CATCH -``` - -### Database verification that uses manual digest storage - -When you're using manual digest storage for generating and storing database digests, the following stored procedure is used to verify the ledger database. The JSON content of the digest is appended in the stored procedure. When you're running database verification, you can choose to verify all tables in the database or verify specific tables. - -Here's the syntax for the [sp_verify_database_ledger](/sql/relational-databases/system-stored-procedures/sys-sp-verify-database-ledger-transact-sql) stored procedure: - -```sql -sp_verify_database_ledger , -``` - -The following code is an example of running the [sp_verify_database_ledger](/sql/relational-databases/system-stored-procedures/sys-sp-verify-database-ledger-transact-sql) stored procedure by passing two digests for verification: - -```sql -EXECUTE sp_verify_database_ledger N' -[ - { - "database_name": "ledgerdb", - "block_id": 0, - "hash": "0xDC160697D823C51377F97020796486A59047EBDBF77C3E8F94EEE0FFF7B38A6A", - "last_transaction_commit_time": "2020-11-12T18:01:56.6200000", - "digest_time": "2020-11-12T18:39:27.7385724" - }, - { - "database_name": "ledgerdb", - "block_id": 1, - "hash": "0xE5BE97FDFFA4A16ADF7301C8B2BEBC4BAE5895CD76785D699B815ED2653D9EF8", - "last_transaction_commit_time": "2020-11-12T18:39:35.6633333", - "digest_time": "2020-11-12T18:43:30.4701575" - } -] -' -``` - -Return codes for `sp_verify_database_ledger` and `sp_verify_database_ledger_from_digest_storage` are `0` (success) or `1` (failure). 
- -## Next steps - -- [Azure SQL Database ledger overview](ledger-overview.md) -- [Updatable ledger tables](ledger-updatable-ledger-tables.md) -- [Append-only ledger tables](ledger-append-only-ledger-tables.md) -- [Database ledger](ledger-database-ledger.md) \ No newline at end of file diff --git a/articles/azure-sql/database/ledger-how-to-access-acl-digest.md b/articles/azure-sql/database/ledger-how-to-access-acl-digest.md deleted file mode 100644 index 60812c576225b..0000000000000 --- a/articles/azure-sql/database/ledger-how-to-access-acl-digest.md +++ /dev/null @@ -1,136 +0,0 @@ ---- -title: "Access the digests stored in Azure Confidential Ledger" -description: Access the digests stored in Azure Confidential Ledger with an Azure SQL Database ledger. -ms.date: "09/09/2021" -ms.service: sql-database -ms.subservice: security -ms.reviewer: kendralittle, mathoma -ms.topic: how-to -author: VanMSFT -ms.author: vanto ---- - -# Access the digests stored in Confidential Ledger - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!NOTE] -> Azure SQL Database ledger is currently in public preview. - -This article shows you how to access an [Azure SQL Database ledger](ledger-overview.md) digest stored in [Azure Confidential Ledger](../../confidential-ledger/index.yml) to get end-to-end security and integrity guarantees. Throughout this article, we'll explain how to access and verify integrity of the stored information. - -## Prerequisites - -- Python 2.7, 3.5.3, or later. -- Azure SQL Database with ledger enabled. If you haven't already created a database in SQL Database, see [Quickstart: Create a database in SQL Database with ledger enabled](ledger-create-a-single-database-with-ledger-enabled.md). -- [Azure Confidential Ledger client library for Python](https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/confidentialledger/azure-confidentialledger). -- A running instance of [Confidential Ledger](../../confidential-ledger/index.yml). 
- -## How does the integration work? - -Azure SQL Server calculates the digests of the [ledger databases](ledger-overview.md#ledger-database) periodically and stores them in Confidential Ledger. At any time, you can validate the integrity of the data. Download the digests from Confidential Ledger and compare them to the digests stored in a SQL Database ledger. The following steps explain the process. - -## 1. Find the digest location - -> [!NOTE] -> The query returns more than one row if multiple Confidential Ledger instances were used to store the digest. For each row, repeat steps 2 through 6 to download the digests from all instances of Confidential Ledger. - -Use [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) to run the following query. The output shows the endpoint of the Confidential Ledger instance where the digests are stored. - -```sql -SELECT * FROM sys.database_ledger_digest_locations WHERE path like '%.confidential-ledger.azure.com%' -``` - -## 2. Determine the subledgerid - -We're interested in the value in the path column from the query output. It consists of two parts, namely, the `host name` and the `subledgerid`. As an example, in the URL `https://contoso-ledger.confidential-ledger.azure.com/sqldbledgerdigests/ledgersvr2/ledgerdb/2021-04-13T21:20:51.0000000`, the `host name` is `https://contoso-ledger.confidential-ledger.azure.com` and the `subledgerid` is `sqldbledgerdigests/ledgersvr2/ledgerdb/2021-04-13T21:20:51.0000000`. We'll use it in step 4 to download the digests. - -## 3. Obtain an Azure AD token - -The Confidential Ledger API accepts an Azure Active Directory (Azure AD) bearer token as the caller identity. This identity needs access to Confidential Ledger via Azure Resource Manager during provisioning. When you enable ledger in SQL Database, you're automatically given administrator access to Confidential Ledger. 
To obtain a token, you need to authenticate by using the [Azure CLI](/cli/azure/install-azure-cli) with the same account that was used with the Azure portal. After you've authenticated, you can use [AzureCliCredential](/python/api/azure-identity/azure.identity.azureclicredential) to retrieve a bearer token and call the Confidential Ledger API. - -Sign in to Azure AD by using the identity with access to Confidential Ledger. - -```azure-cli -az login -``` - -Retrieve the bearer token. - -```python -from azure.identity import AzureCliCredential -credential = AzureCliCredential() -``` - -## 4. Download the digests from Confidential Ledger - -The following Python script downloads the digests from Confidential Ledger. The script uses the [Confidential Ledger client library for Python](https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/confidentialledger/azure-confidentialledger). - -```python -from azure.identity import AzureCliCredential -from azure.confidentialledger import ConfidentialLedgerClient -from azure.confidentialledger.identity_service import ConfidentialLedgerIdentityServiceClient - -ledger_id = "contoso-ledger" -identity_server_url = "https://identity.confidential-ledger.core.azure.com" -sub_ledger_id = "sqldbledgerdigests/ledgersvr2/ledgerdb/2021-04-13T21:20:51.0000000" -ledger_host_url = f"https://{ledger_id}.confidential-ledger.azure.com" -initial_path = f"/app/transactions?api-version=0.1-preview&subLedgerId={sub_ledger_id}" - -identity_client = ConfidentialLedgerIdentityServiceClient(identity_server_url) -network_identity = identity_client.get_ledger_identity( - ledger_id=ledger_id -) - -ledger_tls_cert_file_name = f"{ledger_id}_certificate.pem" -with open(ledger_tls_cert_file_name, "w") as cert_file: - cert_file.write(network_identity.ledger_tls_certificate) - -credential = AzureCliCredential() -ledger_client = ConfidentialLedgerClient( - endpoint=ledger_host_url, - credential=credential, - ledger_certificate_path=ledger_tls_cert_file_name 
-) - -ranged_result = ledger_client.get_ledger_entries( - sub_ledger_id=sub_ledger_id -) - -entries = 0 - -for entry in ranged_result: - entries += 1 - print(f"\nTransaction id {entry.transaction_id} contents: {entry.contents}") - -if entries == 0: - print("\n***No digests are found for the supplied SubledgerID.") -else: - print("\n***No more digests were found for the supplied SubledgerID.") -``` - -## 5. Download the digests from the SQL server - -> [!NOTE] -> This step is a way to confirm that the hashes stored in the SQL Database ledger haven't changed over time. For a complete audit of the integrity of the SQL Database ledger, see [Verify a ledger table to detect tampering](ledger-verify-database.md). - -Use [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) to run the following query. The query returns the digests of the blocks from Genesis. - -```sql -SELECT * FROM sys.database_ledger_blocks -``` - -## 6. Comparison - -Compare the digest retrieved from Confidential Ledger to the digest returned from your database in SQL Database by using `block_id` as the key. For example, the digest of `block_id` = `1` is the value of the `previous_block_hash` column in the `block_id`= `2` row. Similarly, for `block_id` = `3`, it's the value of the `previous_block_id` column in the `block_id` = `4` row. A mismatch in the hash value is an indicator of potential data tampering. - -If you suspect data tampering, see [Verify a ledger table to detect tampering](ledger-verify-database.md) to perform a full audit of the SQL Database ledger. 
- -## Next steps - -- [Azure SQL Database ledger overview](ledger-overview.md) -- [Database ledger](ledger-database-ledger.md) -- [Digest management and database verification](ledger-digest-management-and-database-verification.md) -- [Append-only ledger tables](ledger-append-only-ledger-tables.md) -- [Updatable ledger tables](ledger-updatable-ledger-tables.md) -- [Verify a ledger table to detect tampering](ledger-verify-database.md) diff --git a/articles/azure-sql/database/ledger-how-to-append-only-ledger-tables.md b/articles/azure-sql/database/ledger-how-to-append-only-ledger-tables.md deleted file mode 100644 index a70ea9e09b69c..0000000000000 --- a/articles/azure-sql/database/ledger-how-to-append-only-ledger-tables.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: "Create and use append-only ledger tables" -description: Learn how to create and use append-only ledger tables in Azure SQL Database. -ms.date: "09/09/2021" -ms.service: sql-database -ms.subservice: security -ms.reviewer: kendralittle, mathoma -ms.topic: how-to -author: VanMSFT -ms.author: vanto ---- - -# Create and use append-only ledger tables - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!NOTE] -> Azure SQL Database ledger is currently in public preview. - -This article shows you how to create an [append-only ledger table](ledger-append-only-ledger-tables.md) in Azure SQL Database. Next, you'll insert values in your append-only ledger table and then attempt to make updates to the data. Finally, you'll view the results by using the ledger view. We'll use an example of a card key access system for a facility, which is an append-only system pattern. Our example will give you a practical look at the relationship between the append-only ledger table and its corresponding ledger view. - -For more information, see [Append-only ledger tables](ledger-append-only-ledger-tables.md). - -## Prerequisites - -- Azure SQL Database with ledger enabled. 
If you haven't already created a database in SQL Database, see [Quickstart: Create a database in Azure SQL Database with ledger enabled](ledger-create-a-single-database-with-ledger-enabled.md). -- [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) or [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio). - -## Create an append-only ledger table - -We'll create a `KeyCardEvents` table with the following schema. - -| Column name | Data type | Description | -|--|--|--| -| EmployeeID | int | The unique ID of the employee accessing the building | -| AccessOperationDescription | nvarchar (MAX) | The access operation of the employee | -| Timestamp | datetime2 | The date and time the employee accessed the building | - -> [!IMPORTANT] -> Creating append-only ledger tables requires the **ENABLE LEDGER** permission. For more information on permissions related to ledger tables, see [Permissions](/sql/relational-databases/security/permissions-database-engine#asdbpermissions). - -1. Use [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) or [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio) to create a new schema and table called `[AccessControl].[KeyCardEvents]`. - - ```sql - CREATE SCHEMA [AccessControl] - CREATE TABLE [AccessControl].[KeyCardEvents] - ( - [EmployeeID] INT NOT NULL, - [AccessOperationDescription] NVARCHAR (MAX) NOT NULL, - [Timestamp] Datetime2 NOT NULL - ) - WITH ( - LEDGER = ON ( - APPEND_ONLY = ON - ) - ); - ``` - -1. Add a new building access event in the `[AccessControl].[KeyCardEvents]` table with the following values. - - ```sql - INSERT INTO [AccessControl].[KeyCardEvents] - VALUES ('43869', 'Building42', '2020-05-02T19:58:47.1234567') - ``` - -1. 
View the contents of your KeyCardEvents table, and specify the [GENERATED ALWAYS](/sql/t-sql/statements/create-table-transact-sql#generate-always-columns) columns that are added to your [append-only ledger table](ledger-append-only-ledger-tables.md). - - ```sql - SELECT * - ,[ledger_start_transaction_id] - ,[ledger_start_sequence_number] - FROM [AccessControl].[KeyCardEvents] - ``` - - :::image type="content" source="media/ledger/append-only-how-to-keycardevent-table.png" alt-text="Screenshot that shows results from querying the KeyCardEvents table."::: - -1. Try to update the `KeyCardEvents` table by changing the `EmployeeID` from `43869` to `34184.` - - ```sql - UPDATE [AccessControl].[KeyCardEvents] SET [EmployeeID] = 34184 - ``` - - You'll receive an error message that states the updates aren't allowed for your append-only ledger table. - - :::image type="content" source="media/ledger/append-only-how-to-1.png" alt-text="Screenshot that shows the append-only error message."::: - -## Next steps - -- [Database ledger](ledger-database-ledger.md) -- [Digest management and database verification](ledger-digest-management-and-database-verification.md) -- [Append-only ledger tables](ledger-append-only-ledger-tables.md) -- [Updatable ledger tables](ledger-updatable-ledger-tables.md) -- [Create and use updatable ledger tables](ledger-how-to-updatable-ledger-tables.md) -- [Access the digests stored in Azure Confidential Ledger (ACL)](ledger-how-to-access-acl-digest.md) -- [Verify a ledger table to detect tampering](ledger-verify-database.md) diff --git a/articles/azure-sql/database/ledger-how-to-updatable-ledger-tables.md b/articles/azure-sql/database/ledger-how-to-updatable-ledger-tables.md deleted file mode 100644 index f12ad500eb235..0000000000000 --- a/articles/azure-sql/database/ledger-how-to-updatable-ledger-tables.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -title: "Create and use updatable ledger tables" -description: Learn how to create and use updatable ledger tables 
in Azure SQL Database. -ms.date: "09/09/2021" -ms.service: sql-database -ms.subservice: security -ms.reviewer: kendralittle, mathoma -ms.topic: how-to -author: VanMSFT -ms.author: vanto ---- - -# Create and use updatable ledger tables - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!NOTE] -> Azure SQL Database ledger is currently in public preview. - -This article shows you how to create an [updatable ledger table](ledger-updatable-ledger-tables.md) in Azure SQL Database. Next, you'll insert values in your updatable ledger table and then make updates to the data. Finally, you'll view the results by using the ledger view. We'll use an example of a banking application that tracks banking customers' balances in their accounts. Our example will give you a practical look at the relationship between the updatable ledger table and its corresponding history table and ledger view. - -## Prerequisites - -- Azure SQL Database with ledger enabled. If you haven't already created a database in SQL Database, see [Quickstart: Create a database in Azure SQL Database with ledger enabled](ledger-create-a-single-database-with-ledger-enabled.md). -- [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) or [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio). - -## Create an updatable ledger table - -We'll create an account balance table with the following schema. - -| Column name | Data type | Description | -| ----------- | -------------- | ----------------------------------- | -| CustomerID | int | Customer ID - Primary key clustered | -| LastName | varchar (50) | Customer last name | -| FirstName | varchar (50) | Customer first name | -| Balance | decimal (10,2) | Account balance | - -> [!IMPORTANT] -> Creating updatable ledger tables requires the **ENABLE LEDGER** permission. 
For more information on permissions related to ledger tables, see [Permissions](/sql/relational-databases/security/permissions-database-engine#asdbpermissions). - -1. Use [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) or [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio) to create a new schema and table called `[Account].[Balance]`. - - ```sql - CREATE SCHEMA [Account] - GO - - CREATE TABLE [Account].[Balance] - ( - [CustomerID] INT NOT NULL PRIMARY KEY CLUSTERED, - [LastName] VARCHAR (50) NOT NULL, - [FirstName] VARCHAR (50) NOT NULL, - [Balance] DECIMAL (10,2) NOT NULL - ) - WITH - ( - SYSTEM_VERSIONING = ON, - LEDGER = ON - ); - GO - ``` - - > [!NOTE] - > Specifying the `LEDGER = ON` argument is optional if you enabled a ledger database when you created your database in SQL Database. - > - > In the preceding example, the system generates the names of the [GENERATED ALWAYS](/sql/t-sql/statements/create-table-transact-sql#generate-always-columns) columns in the table, the name of the [ledger view](ledger-updatable-ledger-tables.md#ledger-view), and the names of the [ledger view columns](ledger-updatable-ledger-tables.md#ledger-view-schema). - > - > The ledger view column names can be customized when you create the table by using the `` parameter with the [CREATE TABLE (Transact-SQL)](/sql/t-sql/statements/create-table-transact-sql?view=azuresqldb-current&preserve-view=true) statement. The `GENERATED ALWAYS` columns and the [history table](ledger-updatable-ledger-tables.md#history-table) name can be customized. For more information, see [ledger view options](/sql/t-sql/statements/create-table-transact-sql?view=azuresqldb-current&preserve-view=true#ledger-view-options) and the corresponding examples in [CREATE TABLE (Transact-SQL)](/sql/t-sql/statements/create-table-transact-sql?view=azuresqldb-current&preserve-view=true##x-creating-a-updatable-ledger-table). - -1. 
When your [updatable ledger table](ledger-updatable-ledger-tables.md) is created, the corresponding history table and ledger view are also created. Run the following T-SQL commands to see the new table and the new view. - - ```sql - SELECT - ts.[name] + '.' + t.[name] AS [ledger_table_name] - , hs.[name] + '.' + h.[name] AS [history_table_name] - , vs.[name] + '.' + v.[name] AS [ledger_view_name] - FROM sys.tables AS t - JOIN sys.tables AS h ON (h.[object_id] = t.[history_table_id]) - JOIN sys.views v ON (v.[object_id] = t.[ledger_view_id]) - JOIN sys.schemas ts ON (ts.[schema_id] = t.[schema_id]) - JOIN sys.schemas hs ON (hs.[schema_id] = h.[schema_id]) - JOIN sys.schemas vs ON (vs.[schema_id] = v.[schema_id]) - ``` - - :::image type="content" source="media/ledger/ledger-updatable-how-to-new-tables.png" alt-text="Screenshot that shows querying new ledger tables."::: - -1. Insert the name `Nick Jones` as a new customer with an opening balance of $50. - - ```sql - INSERT INTO [Account].[Balance] - VALUES (1, 'Jones', 'Nick', 50) - ``` - -1. Insert the names `John Smith`, `Joe Smith`, and `Mary Michaels` as new customers with opening balances of $500, $30, and $200, respectively. - - ```sql - INSERT INTO [Account].[Balance] - VALUES (2, 'Smith', 'John', 500), - (3, 'Smith', 'Joe', 30), - (4, 'Michaels', 'Mary', 200) - ``` - -1. View the `[Account].[Balance]` updatable ledger table, and specify the [GENERATED ALWAYS](/sql/t-sql/statements/create-table-transact-sql#generate-always-columns) columns added to the table. - - ```sql - SELECT * - ,[ledger_start_transaction_id] - ,[ledger_end_transaction_id] - ,[ledger_start_sequence_number] - ,[ledger_end_sequence_number] - FROM [Account].[Balance] - ``` - - In the results window, you'll first see the values inserted by your T-SQL commands, along with the system metadata that's used for data lineage purposes. 
- - - The `ledger_start_transaction_id` column notes the unique transaction ID associated with the transaction that inserted the data. Because `John`, `Joe`, and `Mary` were inserted by using the same transaction, they share the same transaction ID. - - The `ledger_start_sequence_number` column notes the order by which values were inserted by the transaction. - - :::image type="content" source="media/ledger/sql-updatable-how-to-1.png" alt-text="Screenshot that shows ledger table example 1."::: - -1. Update `Nick`'s balance from `50` to `100`. - - ```sql - UPDATE [Account].[Balance] SET [Balance] = 100 - WHERE [CustomerID] = 1 - ``` - -1. Copy the unique name of your history table. You'll need this information for the next step. - - ```sql - SELECT - ts.[name] + '.' + t.[name] AS [ledger_table_name] - , hs.[name] + '.' + h.[name] AS [history_table_name] - , vs.[name] + '.' + v.[name] AS [ledger_view_name] - FROM sys.tables AS t - JOIN sys.tables AS h ON (h.[object_id] = t.[history_table_id]) - JOIN sys.views v ON (v.[object_id] = t.[ledger_view_id]) - JOIN sys.schemas ts ON (ts.[schema_id] = t.[schema_id]) - JOIN sys.schemas hs ON (hs.[schema_id] = h.[schema_id]) - JOIN sys.schemas vs ON (vs.[schema_id] = v.[schema_id]) - ``` - - :::image type="content" source="media/ledger/sql-updatable-how-to-2.png" alt-text="Screenshot that shows ledger table example 2."::: - -1. View the `[Account].[Balance]` updatable ledger table, along with its corresponding history table and ledger view. - - > [!IMPORTANT] - > Replace `` with the name you copied in the previous step. 
- - ```sql - SELECT * - ,[ledger_start_transaction_id] - ,[ledger_end_transaction_id] - ,[ledger_start_sequence_number] - ,[ledger_end_sequence_number] - FROM [Account].[Balance] - GO - - SELECT * FROM [] - GO - - SELECT * FROM Account.Balance_Ledger - ORDER BY ledger_transaction_id - GO - ``` - - > [!TIP] - > We recommend that you query the history of changes through the [ledger view](ledger-updatable-ledger-tables.md#ledger-view) and not the [history table](ledger-updatable-ledger-tables.md#history-table). - -1. `Nick`'s account balance was successfully updated in the updatable ledger table to `100`. -1. The history table now shows the previous balance of `50` for `Nick`. -1. The ledger view shows that updating the ledger table is a `DELETE` of the original row with `50`. The balance with a corresponding `INSERT` of a new row with `100` shows the new balance for `Nick`. - - :::image type="content" source="media/ledger/sql-updatable-how-to-3.png" alt-text="Screenshot that shows ledger table example 3."::: - - -## Next steps - -- [Database ledger](ledger-database-ledger.md) -- [Digest management and database verification](ledger-digest-management-and-database-verification.md) -- [Updatable ledger tables](ledger-updatable-ledger-tables.md) -- [Append-only ledger tables](ledger-append-only-ledger-tables.md) -- [Create and use append-only ledger tables](ledger-how-to-append-only-ledger-tables.md) -- [Access the digests stored in Azure Confidential Ledger (ACL)](ledger-how-to-access-acl-digest.md) -- [Verify a ledger table to detect tampering](ledger-verify-database.md) diff --git a/articles/azure-sql/database/ledger-landing.yml b/articles/azure-sql/database/ledger-landing.yml deleted file mode 100644 index 0c0d8dfaf9625..0000000000000 --- a/articles/azure-sql/database/ledger-landing.yml +++ /dev/null @@ -1,120 +0,0 @@ -### YamlMime:Landing - -title: Azure SQL Database ledger documentation -summary: "Find documentation about Azure SQL Database ledger" - -metadata: - 
title: Azure SQL Database ledger documentation - description: "Find ledger documentation for Azure SQL Database" - ms.service: sql-database - ms.subservice: security - ms.tgt_pltfrm: na - ms.devlang: - ms.topic: landing-page - author: VanMSFT - ms.author: vanto - ms.reviewer: kendralittle, mathoma - ms.date: 05/25/2021 - -# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new - -landingContent: -# Cards and links should be based on top customer tasks or top subjects -# Start card title with a verb - - # Card - - title: Azure SQL Database ledger overview - linkLists: - - linkListType: overview - links: - - text: What is Azure SQL Database ledger? - url: ledger-overview.md - - - # Card - - title: Ledger concepts - linkLists: - - linkListType: concept - links: - - text: What is the database ledger? - url: ledger-database-ledger.md - - text: Append-only ledger tables - url: ledger-append-only-ledger-tables.md - - text: Updatable ledger tables - url: ledger-updatable-ledger-tables.md - - - # Card - - title: Ledger quickstarts - linkLists: - - linkListType: quickstart - links: - - text: Create an Azure SQL Database with ledger enabled - url: ledger-create-a-single-database-with-ledger-enabled.md - - - # Card - - title: Using ledger - linkLists: - - linkListType: how-to-guide - links: - - text: Create append-only ledger tables - url: ledger-how-to-append-only-ledger-tables.md - - text: Create updatable ledger tables - url: ledger-how-to-updatable-ledger-tables.md - - text: How to access the digests stored in Azure Confidential Ledger (ACL) - url: ledger-how-to-access-acl-digest.md - - text: How to verify a ledger table to detect tampering - url: ledger-verify-database.md - - - # Card - - title: Ledger management, verification, and storage - linkLists: - - linkListType: concept - links: - - text: Digest management and database verification - url: 
ledger-digest-management-and-database-verification.md - - text: Ledger auditing - url: ledger-audit.md - - - # Card - - title: Ledger references - linkLists: - - linkListType: reference - links: - - text: Current ledger limitations - url: ledger-limits.md - - text: Azure SQL Database ledger whitepaper - url: https://aka.ms/sql-ledger-whitepaper - - - # Card - - title: Ledger System Catalog Views - linkLists: - - linkListType: reference - links: - - text: sys.database_ledger_blocks (Transact-SQL) - url: /sql/relational-databases/system-catalog-views/sys-database-ledger-blocks-transact-sql - - text: sys.database_ledger_transactions (Transact-SQL) - url: /sql/relational-databases/system-catalog-views/sys-database-ledger-transactions-transact-sql - - text: sys.database_ledger_digest_locations (Transact-SQL) - url: /sql/relational-databases/system-catalog-views/sys-database-ledger-digest-locations-transact-sql - - text: sys.ledger_table_history (Transact-SQL) - url: /sql/relational-databases/system-catalog-views/sys-ledger-table-history-transact-sql - - text: sys.ledger_column_history (Transact-SQL) - url: /sql/relational-databases/system-catalog-views/sys-ledger-column-history-transact-sql - - - # Card - - title: Ledger Store Procedures - linkLists: - - linkListType: reference - links: - - text: sys.sp_generate_database_ledger_digest (Transact-SQL) - url: /sql/relational-databases/system-stored-procedures/sys-sp-generate-database-ledger-digest-transact-sql - - text: sys.sp_verify_database_ledger (Transact-SQL) - url: /sql/relational-databases/system-stored-procedures/sys-sp-verify-database-ledger-transact-sql - - text: sys.sp_verify_database_ledger_from_digest_storage (Transact-SQL) - url: /sql/relational-databases/system-stored-procedures/sys-sp-verify-database-ledger-from-digest-storage-transact-sql diff --git a/articles/azure-sql/database/ledger-limits.md b/articles/azure-sql/database/ledger-limits.md deleted file mode 100644 index c59ff32b29ef1..0000000000000 --- 
a/articles/azure-sql/database/ledger-limits.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: "Limitations for Azure SQL Database ledger" -description: Limitations of the ledger feature in Azure SQL Database -ms.date: "09/09/2021" -ms.service: sql-database -ms.subservice: security -ms.reviewer: kendralittle, mathoma -ms.topic: conceptual -author: VanMSFT -ms.author: vanto ---- - -# Limitations for Azure SQL Database ledger - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!NOTE] -> Azure SQL Database ledger is currently in public preview. - -This article provides an overview of the limitations of ledger tables used with Azure SQL Database. - -## Limitations - -| Function | Limitation | -| :--- | :--- | -| Disabling [ledger database](ledger-database-ledger.md) | After a ledger database is enabled, it can't be disabled. | -| Maximum number of columns | When an [updatable ledger table](ledger-updatable-ledger-tables.md) is created, it adds four [GENERATED ALWAYS](/sql/t-sql/statements/create-table-transact-sql#generate-always-columns) columns to the ledger table. An [append-only ledger table](ledger-append-only-ledger-tables.md) adds two columns to the ledger table. These new columns count against the maximum supported number of columns in SQL Database (1,024). | -| Restricted data types | XML, SqlVariant, User-defined type, and FILESTREAM data types aren't supported. | -| In-memory tables | In-memory tables aren't supported. | -| Sparse column sets | Sparse column sets aren't supported. | -| Ledger truncation | Deleting older data in [append-only ledger tables](ledger-append-only-ledger-tables.md) or the history table of [updatable ledger tables](ledger-updatable-ledger-tables.md) isn't supported. | -| Converting existing tables to ledger tables | Existing tables in a database that aren't ledger-enabled can't be converted to ledger tables. 
| -|Locally redundant storage (LRS) support for [automated digest management](ledger-digest-management-and-database-verification.md) | Automated digest management with ledger tables by using [Azure Storage immutable blobs](../../storage/blobs/immutable-storage-overview.md) doesn't offer the ability for users to use [LRS](../../storage/common/storage-redundancy.md#locally-redundant-storage) accounts.| - -## Remarks - -- When a ledger database is created, all new tables created by default (without specifying the `APPEND_ONLY = ON` clause) in the database will be [updatable ledger tables](ledger-updatable-ledger-tables.md). To create [append-only ledger tables](ledger-append-only-ledger-tables.md), use [CREATE TABLE (Transact-SQL)](/sql/t-sql/statements/create-table-transact-sql) statements. -- Ledger tables can't be a FILETABLE. -- Ledger tables can't have full-text indexes. -- Ledger tables can't be renamed. -- Ledger tables can't be moved to a different schema. -- Only nullable columns can be added to ledger tables, and when they aren't specified WITH VALUES. -- Columns in ledger tables can't be dropped. -- Only deterministic-computed columns are allowed for ledger tables. -- Existing columns can't be altered in a way that modifies the format for this column. - - We allow changing: - - Nullability. - - Collation for nvarchar/ntext columns and when the code page isn't changing for char/text columns. - - The length of variable length columns. - - Sparseness. -- SWITCH IN/OUT isn't allowed for ledger tables. -- Long-term backups (LTR) aren't supported for databases that have `LEDGER = ON`. -- Versioning that's `LEDGER` or `SYSTEM_VERSIONING` can't be disabled for ledger tables. -- The `UPDATETEXT` and `WRITETEXT` APIs can't be used on ledger tables. -- A transaction can update up to 200 ledger tables. -- For updatable ledger tables, we inherit all of the limitations of temporal tables. -- Change tracking isn't allowed on ledger tables. 
-- Ledger tables can't have a rowstore non-clustered index when they have a clustered columnstore index. - -## Next steps - -- [Updatable ledger tables](ledger-updatable-ledger-tables.md) -- [Append-only ledger tables](ledger-append-only-ledger-tables.md) -- [Database ledger](ledger-database-ledger.md) -- [Digest management and database verification](ledger-digest-management-and-database-verification.md) \ No newline at end of file diff --git a/articles/azure-sql/database/ledger-overview.md b/articles/azure-sql/database/ledger-overview.md deleted file mode 100644 index 6f5457bcb5f1b..0000000000000 --- a/articles/azure-sql/database/ledger-overview.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: "Azure SQL Database ledger overview" -description: Learn the basics of the Azure SQL Database ledger feature. -ms.date: "09/09/2021" -ms.service: sql-database -ms.subservice: security -ms.reviewer: kendralittle, mathoma -ms.topic: conceptual -author: VanMSFT -ms.author: vanto ---- - -# Azure SQL Database ledger - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!NOTE] -> Azure SQL Database ledger is currently in public preview. - -Establishing trust around the integrity of data stored in database systems has been a longstanding problem for all organizations that manage financial, medical, or other sensitive data. The ledger feature of [Azure SQL Database](sql-database-paas-overview.md) provides tamper-evidence capabilities in your database. You can cryptographically attest to other parties, such as auditors or other business parties, that your data hasn't been tampered with. - -Ledger helps protect data from any attacker or high-privileged user, including database administrators (DBAs), system administrators, and cloud administrators. As with a traditional ledger, the feature preserves historical data. If a row is updated in the database, its previous value is maintained and protected in a history table. 
Ledger provides a chronicle of all changes made to the database over time. - -Ledger and the historical data are managed transparently, offering protection without any application changes. The feature maintains historical data in a relational form to support SQL queries for auditing, forensics, and other purposes. It provides guarantees of cryptographic data integrity while maintaining the power, flexibility, and performance of Azure SQL Database. - -:::image type="content" source="media/ledger/ledger-table-architecture.png" alt-text="Diagram of the ledger table architecture."::: - -## Use cases for Azure SQL Database ledger - -### Streamlining audits - -Any production system's value is based on the ability to trust the data that the system is consuming and producing. If a malicious user has tampered with the data in your database, that can have disastrous results in the business processes relying on that data. - -Maintaining trust in your data requires a combination of enabling the proper security controls to reduce potential attacks, backup and restore practices, and thorough disaster recovery procedures. Audits by external parties ensure that these practices are put in place. - -Audit processes are highly time-intensive activities. Auditing requires on-site inspection of implemented practices such as reviewing audit logs, inspecting authentication, and inspecting access controls. Although these manual processes can expose potential gaps in security, they can't provide attestable proof that the data hasn't been maliciously altered. - -Ledger provides the cryptographic proof of data integrity to auditors. This proof can help streamline the auditing process. It also provides nonrepudiation regarding the integrity of the system's data. - -### Multiple-party business processes - -In some systems, such as supply-chain management systems, multiple organizations must share state from a business process with one another. 
These systems struggle with the challenge of how to share and trust data. Many organizations are turning to traditional blockchains, such as Ethereum or Hyperledger Fabric, to digitally transform their multiple-party business processes. - -Blockchain is a great solution for multiple-party networks where trust is low between parties that participate on the network. Many of these networks are fundamentally centralized solutions where trust is important, but a fully decentralized infrastructure is a heavyweight solution. - -Ledger provides a solution for these networks. Participants can verify the integrity of the centrally housed data, without the complexity and performance implications that network consensus introduces in a blockchain network. - -### Trusted off-chain storage for blockchain - -When a blockchain network is necessary for a multiple-party business process, the ability to query the data on the blockchain without sacrificing performance is a challenge. - -Typical patterns for solving this problem involve replicating data from the blockchain to an off-chain store, such as a database. But after the data is replicated to the database from the blockchain, the data integrity guarantees that a blockchain offer is lost. Ledger provides data integrity for off-chain storage of blockchain networks, which helps ensure complete data trust through the entire system. - -## How it works - -Each transaction that the database receives is cryptographically hashed (SHA-256). The hash function uses the value of the transaction, along with the hash of the previous transaction, as input to the hash function. (The value includes hashes of the rows contained in the transaction.) The function cryptographically links all transactions together, like a blockchain. - -Cryptographically hashed [database digests](#database-digests) represent the state of the database. They're periodically generated and stored outside Azure SQL Database in a tamper-proof storage location. 
An example of a storage location is the [immutable storage feature of Azure Blob Storage](../../storage/blobs/immutable-storage-overview.md) or [Azure Confidential Ledger](../../confidential-ledger/index.yml). Database digests are later used to verify the integrity of the database by comparing the value of the hash in the digest against the calculated hashes in database. - -Ledger functionality is introduced to tables in Azure SQL Database in two forms: - -- [Updatable ledger tables](#updatable-ledger-tables), which allow you to update and delete rows in your tables. -- [Append-only ledger tables](#append-only-ledger-tables), which only allow insertions to your tables. - -Both updatable ledger tables and append-only ledger tables provide tamper-evidence and digital forensics capabilities. Understanding which transactions submitted by which users resulted in changes to the database is important if you're remediating potential tampering events or proving to third parties that authorized users submitted transactions to the system. - -The ledger feature enables users, their partners, or auditors to analyze all historical operations and detect potential tampering. Each row operation is accompanied by the ID of the transaction that performed it. The ID enables users to get more information about the time that the transaction happened and the identity of the user who executed it. Users can then correlate the ID to other operations that the transaction has performed. - -For details about limitations of ledger tables, see [Limitations for Azure SQL Database ledger](ledger-limits.md). - -### Ledger database - -In a ledger database, all user data is tamper evident and stored in ledger tables. A ledger database can contain only ledger tables. Each table is, by default, created as an updatable ledger table. Ledger databases provide an easy-to-use solution for applications that require the integrity of all data to be protected. 
- -### Updatable ledger tables - -[Updatable ledger tables](ledger-updatable-ledger-tables.md) are ideal for application patterns that expect to issue updates and deletions to tables in your database, such as system of record (SOR) applications. Existing data patterns for your application don't need to change to enable ledger functionality. - -Updatable ledger tables track the history of changes to any rows in your database when transactions that perform updates or deletions occur. An updatable ledger table is a system-versioned table that contains a reference to another table with a mirrored schema. - -The other table is called the *history table*. The system uses this table to automatically store the previous version of the row each time a row in the ledger table is updated or deleted. The history table is automatically created when you create an updatable ledger table. - -The values in the updatable ledger table and its corresponding history table provide a chronicle of the values of your database over time. A system-generated ledger view joins the updatable ledger table and the history table so that you can easily query this chronicle of your database. - -For more information on updatable ledger tables, see [Create and use updatable ledger tables](ledger-how-to-updatable-ledger-tables.md). - -### Append-only ledger tables - -[Append-only ledger tables](ledger-append-only-ledger-tables.md) are ideal for application patterns that are insert-only, such as security information and event management (SIEM) applications. Append-only ledger tables block updates and deletions at the API level. This blocking provides more tampering protection from privileged users such as system administrators and DBAs. - -Because only insertions are allowed into the system, append-only ledger tables don't have a corresponding history table because there's no history to capture. 
As with updatable ledger tables, a ledger view provides insights into the transaction that inserted rows into the append-only table, and the user that performed the insertion. - -For more information on append-only ledger tables, see [Create and use append-only ledger tables](ledger-how-to-append-only-ledger-tables.md). - -### Database ledger - -The [database ledger](ledger-database-ledger.md) consists of system tables that store the cryptographic hashes of transactions processed in the system. Because transactions are the unit of [atomicity](/windows/win32/cossdk/acid-properties) for the database engine, this is the unit of work that the database ledger captures. - -Specifically, when a transaction commits, the SHA-256 hash of any rows modified by the transaction in the ledger table is appended as a *transaction entry* in the database ledger. The transaction entry also includes some metadata for the transaction, such as the identity of the user who executed it and its commit time stamp. - -Every 30 seconds, the transactions that the database processes are SHA-256 hashed together through a Merkle tree data structure. The result is a root hash that forms a block. The block is then SHA-256 hashed through the root hash of the block, along with the root hash of the previous block as input to the hash function. That hashing forms a blockchain. - -### Database digests - -The hash of the latest block in the database ledger is called the [database digest](ledger-digest-management-and-database-verification.md). It represents the state of all ledger tables in the database at the time that the block was generated. - -When a block is formed, its associated database digest is published and stored outside Azure SQL Database in tamper-proof storage. Because database digests represent the state of the database at the time that they were generated, protecting the digests from tampering is paramount. An attacker who has access to modify the digests would be able to: - -1. 
Tamper with the data in the database. -2. Generate the hashes that represent the database with those changes. -3. Modify the digests to represent the updated hash of the transactions in the block. - -Ledger provides the ability to automatically generate and store the database digests in [immutable storage](../../storage/blobs/immutable-storage-overview.md) or [Azure Confidential Ledger](../../confidential-ledger/index.yml), to prevent tampering. Alternatively, users can manually generate database digests and store them in the location of their choice. Database digests are used for later verifying that the data stored in ledger tables has not been tampered with. - -### Ledger verification - -The ledger feature doesn't allow users to modify its content. However, an attacker or system administrator who has control of the machine can bypass all system checks and directly tamper with the data. For example, an attacker or system administrator can edit the database files in storage. Ledger can't prevent such attacks but guarantees that any tampering will be detected when the ledger data is verified. - -The [ledger verification](ledger-digest-management-and-database-verification.md) process takes as input one or more previously generated database digests and recomputes the hashes stored in the database ledger based on the current state of the ledger tables. If the computed hashes don't match the input digests, the verification fails, indicating that the data has been tampered with. Ledger then reports all inconsistencies that it has detected. - -Because the ledger verification recomputes all of the hashes for transactions in the database, it can be a resource-intensive process for databases with large amounts of data. Users should run the ledger verification only when they need to verify the integrity of their database, rather than running it continuously. 
- -Ideally, users should run ledger verification only when the organization that's hosting the data goes through an audit and needs to provide cryptographic evidence about the integrity of the data to another party. To reduce the cost of verification, the feature exposes options to verify individual ledger tables or only a subset of the ledger tables. - -## Next steps - -- [Quickstart: Create a SQL database with ledger enabled](ledger-create-a-single-database-with-ledger-enabled.md) -- [Access the digests stored in Azure Confidential Ledger](ledger-how-to-access-acl-digest.md) -- [Verify a ledger table to detect tampering](ledger-verify-database.md) diff --git a/articles/azure-sql/database/ledger-updatable-ledger-tables.md b/articles/azure-sql/database/ledger-updatable-ledger-tables.md deleted file mode 100644 index 57dd035ccc751..0000000000000 --- a/articles/azure-sql/database/ledger-updatable-ledger-tables.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: "Azure SQL Database updatable ledger tables" -description: This article provides information on updatable ledger tables, ledger schema, and ledger views in Azure SQL Database. -ms.date: "09/09/2021" -ms.service: sql-database -ms.subservice: security -ms.reviewer: kendralittle, mathoma -ms.topic: conceptual -author: VanMSFT -ms.author: vanto ---- - -# Azure SQL Database updatable ledger tables - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!NOTE] -> Azure SQL Database ledger is currently in public preview. - -Updatable ledger tables are system-versioned tables on which users can perform updates and deletes while also providing tamper-evidence capabilities. When updates or deletes occur, all earlier versions of a row are preserved in a secondary table, known as the history table. The history table mirrors the schema of the updatable ledger table. 
When a row is updated, the latest version of the row remains in the ledger table, while its earlier version is inserted into the history table by the system, transparently to the application. - -:::image type="content" source="media/ledger/ledger-table-architecture.png" alt-text="Diagram that shows ledger table architecture."::: - -## Updatable ledger tables vs. temporal tables - -Both updatable ledger tables and [temporal tables](/sql/relational-databases/tables/temporal-tables) are system-versioned tables, for which the Database Engine captures historical row versions in secondary history tables. Either technology provides unique benefits. Updatable ledger tables make both the current and historical data tamper evident. Temporal tables support querying the data stored at any point in time instead of only the data that's correct at the current moment in time. - -You can use both technologies together by creating tables that are both updatable ledger tables and temporal tables. -An updatable ledger table can be created in two ways: - -- When you create a new database in the Azure portal by selecting **Enable ledger on all future tables in this database** during ledger configuration, or through specifying the `LEDGER = ON` argument in your [CREATE DATABASE (Transact-SQL)](/sql/t-sql/statements/create-database-transact-sql) statement. This action creates a ledger database and ensures that all future tables created in your database are updatable ledger tables by default. -- When you create a new table on a database where ledger isn't enabled at the database level by specifying the `LEDGER = ON` argument in your [CREATE TABLE (Transact-SQL)](/sql/t-sql/statements/create-table-transact-sql) statement. - -For information on options available when you specify the `LEDGER` argument in your T-SQL statement, see [CREATE TABLE (Transact-SQL)](/sql/t-sql/statements/create-table-transact-sql). 
- -> [!IMPORTANT] -> After a ledger table is created, it can't be reverted to a table that isn't a ledger table. As a result, an attacker can't temporarily remove ledger capabilities on a ledger table, make changes, and then reenable ledger functionality. - -### Updatable ledger table schema - -An updatable ledger table needs to have the following [GENERATED ALWAYS](/sql/t-sql/statements/create-table-transact-sql#generate-always-columns) columns that contain metadata noting which transactions made changes to the table and the order of operations by which rows were updated by the transaction. This data is useful for forensics purposes in understanding how data was inserted over time. - -> [!NOTE] -> If you don't specify the required `GENERATED ALWAYS` columns of the ledger table and ledger history table in the [CREATE TABLE (Transact-SQL)](/sql/t-sql/statements/create-table-transact-sql?view=azuresqldb-current&preserve-view=true) statement, the system automatically adds the columns and uses the following default names. For more information, see examples in [Creating an updatable ledger table](/sql/t-sql/statements/create-table-transact-sql?view=azuresqldb-current&preserve-view=true#x-creating-a-updatable-ledger-table). - -| Default column name | Data type | Description | -| --- | --- | --- | -| ledger_start_transaction_id | bigint | The ID of the transaction that created a row version | -| ledger_end_transaction_id | bigint | The ID of the transaction that deleted a row version | -| ledger_start_sequence_number | bigint | The sequence number of an operation within a transaction that created a row version | -| ledger_end_sequence_number | bigint | The sequence number of an operation within a transaction that deleted a row version | - -## History table - -The history table is automatically created when an updatable ledger table is created. The history table captures the historical values of rows changed because of updates and deletes in the updatable ledger table. 
The schema of the history table mirrors that of the updatable ledger table it's associated with. - -When you create an updatable ledger table, you can either specify the name of the schema to contain your history table and the name of the history table or you have the system generate the name of the history table and add it to the same schema as the ledger table. History tables with system-generated names are called anonymous history tables. The naming convention for an anonymous history table is ``.``.MSSQL_LedgerHistoryFor_``. - -## Ledger view - -For every updatable ledger table, the system automatically generates a view, called the ledger view. The ledger view is a join of the updatable ledger table and its associated history table. The ledger view reports all row modifications that have occurred on the updatable ledger table by joining the historical data in the history table. This view enables users, their partners, or auditors to analyze all historical operations and detect potential tampering. Each row operation is accompanied by the ID of the acting transaction, along with whether the operation was a `DELETE` or an `INSERT`. Users can retrieve more information about the time the transaction was executed and the identity of the user who executed it and correlate it to other operations performed by this transaction. - -For example, if you want to track transaction history for a banking scenario, the ledger view provides a chronicle of transactions over time. By using the ledger view, you don't have to independently view the updatable ledger table and history tables or construct your own view to do so. - -For an example of using the ledger view, see [Create and use updatable ledger tables](ledger-how-to-updatable-ledger-tables.md). 
- -The ledger view's schema mirrors the columns defined in the updatable ledger and history table, but the [GENERATED ALWAYS](/sql/t-sql/statements/create-table-transact-sql#generate-always-columns) columns are different than those of the updatable ledger and history tables. - -### Ledger view schema - -> [!NOTE] -> The ledger view column names can be customized when you create the table by using the `` parameter with the [CREATE TABLE (Transact-SQL)](/sql/t-sql/statements/create-table-transact-sql?view=azuresqldb-current&preserve-view=true) statement. For more information, see [ledger view options](/sql/t-sql/statements/create-table-transact-sql?view=azuresqldb-current&preserve-view=true#ledger-view-options) and the corresponding examples in [CREATE TABLE (Transact-SQL)](/sql/t-sql/statements/create-table-transact-sql?view=azuresqldb-current&preserve-view=true). - -| Default column name | Data type | Description | -| --- | --- | --- | -| ledger_transaction_id | bigint | The ID of the transaction that created or deleted a row version. | -| ledger_sequence_number | bigint | The sequence number of a row-level operation within the transaction on the table. | -| ledger_operation_type | tinyint | Contains `1` (**INSERT**) or `2` (**DELETE**). Inserting a row into the ledger table produces a new row in the ledger view that contains `1` in this column. Deleting a row from the ledger table produces a new row in the ledger view that contains `2` in this column. Updating a row in the ledger table produces two new rows in the ledger view. One row contains `2` (**DELETE**), and the other row contains `1` (**INSERT**) in this column. | -| ledger_operation_type_desc | nvarchar(128) | Contains `INSERT` or `DELETE`. For more information, see the preceding row. 
| - -## Next steps - -- [Create and use updatable ledger tables](ledger-how-to-updatable-ledger-tables.md) -- [Create and use append-only ledger tables](ledger-how-to-append-only-ledger-tables.md) diff --git a/articles/azure-sql/database/ledger-verify-database.md b/articles/azure-sql/database/ledger-verify-database.md deleted file mode 100644 index b63c711a3554d..0000000000000 --- a/articles/azure-sql/database/ledger-verify-database.md +++ /dev/null @@ -1,200 +0,0 @@ ---- -title: Verify a ledger table to detect tampering -description: This article discusses how to verify if an Azure SQL Database table was tampered with. -ms.service: sql-database -ms.subservice: security -ms.devlang: -ms.topic: how-to -author: VanMSFT -ms.author: vanto -ms.reviewer: kendralittle, mathoma -ms.date: "09/09/2021" ---- - -# Verify a ledger table to detect tampering - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!NOTE] -> Azure SQL Database ledger is currently in public preview. - -In this article, you'll verify the integrity of the data in your Azure SQL Database ledger tables. If you selected **Enable automatic digest storage** when you [created your database in SQL Database](ledger-create-a-single-database-with-ledger-enabled.md), follow the Azure portal instructions to automatically generate the Transact-SQL (T-SQL) script needed to verify the database ledger in the [query editor](connect-query-portal.md). Otherwise, follow the T-SQL instructions by using [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) or [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio). - -## Prerequisites - -- Have an active Azure subscription. If you don't have one, [create a free account](https://azure.microsoft.com/free/). -- [Create a database in SQL Database with ledger enabled](ledger-create-a-single-database-with-ledger-enabled.md). 
-- [Create and use updatable ledger tables](ledger-how-to-updatable-ledger-tables.md) or [create and use append-only ledger tables](ledger-how-to-append-only-ledger-tables.md). - -## Run ledger verification for SQL Database - -# [Portal](#tab/azure-portal) - -1. Open the [Azure portal](https://portal.azure.com/), select **All resources**, and locate the database you want to verify. Select that database in SQL Database. - - :::image type="content" source="media/ledger/ledger-portal-all-resources.png" alt-text="Screenshot that shows the Azure portal with the All resources tab selected."::: - -1. In **Security**, select the **Ledger** option. - - :::image type="content" source="media/ledger/ledger-portal-manage-ledger.png" alt-text="Screenshot that shows the Azure portal with the Security Ledger tab selected."::: - -1. In the **Ledger** pane, select ** Verify database**, and select the **copy** icon in the pre-populated text in the window. - - :::image type="content" source="media/ledger/ledger-portal-verify.png" alt-text="Azure portal verify database button"::: - - > > [!IMPORTANT] - > If you haven't configured automatic digest storage for your database digests and are instead manually managing digests, don't copy this script. Continue to step 6. - -1. Open **Query editor** in the left menu. - - :::image type="content" source="media/ledger/ledger-portal-open-query-editor.png" alt-text="Screenshot that shows the Azure portal Query editor menu option."::: - -1. In the query editor, paste the T-SQL script you copied in step 3, and select **Run**. Continue to step 8. - - :::image type="content" source="media/ledger/ledger-portal-run-query-editor.png" alt-text="Screenshot that shows the Azure portal Run query editor to verify the database."::: - -1. If you're using manual digest storage, enter the following T-SQL into the query editor to retrieve your latest database digest. Copy the digest from the results returned for the next step. 
- - ```sql - EXECUTE sp_generate_database_ledger_digest - ``` - -1. In the query editor, paste the following T-SQL, replacing `` with the digest you copied in step 6, and select **Run**. - - ```sql - EXECUTE sp_verify_database_ledger N'' - ``` - -1. Verification returns the following messages in the **Results** window. - - - If there was no tampering in your database, the message is: - - ```output - Ledger verification successful - ``` - - - If there was tampering in your database, the following error appears in the **Messages** window. - - ```output - Failed to execute query. Error: The hash of block xxxx in the database ledger does not match the hash provided in the digest for this block. - ``` - -# [T-SQL using automatic digest storage](#tab/t-sql-automatic) - -1. Connect to your database by using [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) or [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio). - -1. Create a new query with the following T-SQL statement: - - ```sql - DECLARE @digest_locations NVARCHAR(MAX) = (SELECT * FROM sys.database_ledger_digest_locations FOR JSON AUTO, INCLUDE_NULL_VALUES);SELECT @digest_locations as digest_locations; - BEGIN TRY - EXEC sys.sp_verify_database_ledger_from_digest_storage @digest_locations; - SELECT 'Ledger verification succeeded.' AS Result; - END TRY - BEGIN CATCH - THROW; - END CATCH - ``` - -1. Execute the query. You'll see that **digest_locations** returns the current location of where your database digests are stored and any previous locations. **Result** returns the success or failure of ledger verification. - - :::image type="content" source="media/ledger/verification_script_exectution.png" alt-text="Screenshot of running ledger verification by using Azure Data Studio."::: - -1. Open the **digest_locations** result set to view the locations of your digests. 
The following example shows two digest storage locations for this database: - - - **path** indicates the location of the digests. - - **last_digest_block_id** indicates the block ID of the last digest stored in the **path** location. - - **is_current** indicates whether the location in **path** is the current (true) or previous (false) one. - - ```json - [ - { - "path": "https:\/\/digest1.blob.core.windows.net\/sqldbledgerdigests\/janderstestportal2server\/jandersnewdb\/2021-05-20T04:39:47.6570000", - "last_digest_block_id": 10016, - "is_current": true - }, - { - "path": "https:\/\/jandersneweracl.confidential-ledger.azure.com\/sqldbledgerdigests\/janderstestportal2server\/jandersnewdb\/2021-05-20T04:39:47.6570000", - "last_digest_block_id": 1704, - "is_current": false - } - ] - ``` - - > [!IMPORTANT] - > When you run ledger verification, inspect the location of **digest_locations** to ensure digests used in verification are retrieved from the locations you expect. You want to make sure that a privileged user hasn't changed locations of digest storage to an unprotected storage location, such as Azure Storage, without a configured and locked immutability policy. - -1. Verification returns the following message in the **Results** window. - - - If there was no tampering in your database, the message is: - - ```output - Ledger verification successful - ``` - - - If there was tampering in your database, the following error appears in the **Messages** window: - - ```output - Failed to execute query. Error: The hash of block xxxx in the database ledger doesn't match the hash provided in the digest for this block. - ``` - -# [T-SQL using manual digest storage](#tab/t-sql-manual) - -1. Connect to your database by using [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) or [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio). -1. 
Create a new query with the following T-SQL statement: - - ```sql - /****** This will retrieve the latest digest file ******/ - EXECUTE sp_generate_database_ledger_digest - ``` - -1. Execute the query. The results contain the latest database digest and represent the hash of the database at the current point in time. Copy the contents of the results to be used in the next step. - - :::image type="content" source="media/ledger/ledger-retrieve-digest.png" alt-text="Screenshot that shows retrieving digest results by using Azure Data Studio."::: - -1. Create a new query with the following T-SQL statement. Replace `` with the digest you copied in the previous step. - - ``` - /****** Verifies the integrity of the ledger using the referenced digest ******/ - EXECUTE sp_verify_database_ledger N' - - ' - ``` - -1. Execute the query. The **Messages** window contains the following success message. - - :::image type="content" source="media/ledger/ledger-verify-message.png" alt-text="Screenshot that shows the message after running T-SQL query for ledger verification by using Azure Data Studio."::: - - > [!TIP] - > Running ledger verification with the latest digest will only verify the database from the time the digest was generated until the time the verification was run. To verify that the historical data in your database wasn't tampered with, run verification by using multiple database digest files. Start with the point in time for which you want to verify the database. An example of a verification passing multiple digests would look similar to the following query. 
- - ``` - EXECUTE sp_verify_database_ledger N' - [ - { - "database_name": "ledgerdb", - "block_id": 0, - "hash": "0xDC160697D823C51377F97020796486A59047EBDBF77C3E8F94EEE0FFF7B38A6A", - "last_transaction_commit_time": "2020-11-12T18:01:56.6200000", - "digest_time": "2020-11-12T18:39:27.7385724" - }, - { - "database_name": "ledgerdb", - "block_id": 1, - "hash": "0xE5BE97FDFFA4A16ADF7301C8B2BEBC4BAE5895CD76785D699B815ED2653D9EF8", - "last_transaction_commit_time": "2020-11-12T18:39:35.6633333", - "digest_time": "2020-11-12T18:43:30.4701575" - } - ] - ``` - ---- - -## Next steps - -- [Azure SQL Database ledger overview](ledger-overview.md) -- [SQL Database ledger](ledger-database-ledger.md) -- [Digest management and database verification](ledger-digest-management-and-database-verification.md) -- [Append-only ledger tables](ledger-append-only-ledger-tables.md) -- [Updatable ledger tables](ledger-updatable-ledger-tables.md) -- [Access the digests stored in Azure Confidential Ledger](ledger-how-to-access-acl-digest.md) diff --git a/articles/azure-sql/database/logical-servers.md b/articles/azure-sql/database/logical-servers.md deleted file mode 100644 index c7cab2be51806..0000000000000 --- a/articles/azure-sql/database/logical-servers.md +++ /dev/null @@ -1,189 +0,0 @@ ---- -title: What is a server in Azure SQL Database and Azure Synapse Analytics? -titleSuffix: "" -description: Learn about logical SQL servers used by Azure SQL Database and Azure Synapse Analytics, and how to manage them. -services: sql-database -ms.service: sql-database -ms.subservice: service-overview -ms.custom: devx-track-azurecli -ms.topic: conceptual -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 03/12/2019 ---- -# What is a logical SQL server in Azure SQL Database and Azure Synapse? 
-[!INCLUDE[appliesto-sqldb-asa](../includes/appliesto-sqldb-asa.md)] - -In Azure SQL Database and Azure Synapse Analytics, a server is a logical construct that acts as a central administrative point for a collection of databases. At the server level, you can administer [logins](logins-create-manage.md), [firewall rules](firewall-configure.md), [auditing rules](/azure/azure-sql/database/auditing-overview), [threat detection policies](threat-detection-configure.md), and [auto-failover groups](auto-failover-group-overview.md). A server can be in a different region than its resource group. The server must exist before you can create a database in Azure SQL Database or a data warehouse database in Azure Synapse Analytics. All databases managed by a single server are created within the same region as the server. - -This server is distinct from a SQL Server instance that you may be familiar with in the on-premises world. Specifically, there are no guarantees regarding location of the databases or data warehouse database in relation to the server that manages them. Furthermore, neither Azure SQL Database nor Azure Synapse expose any instance-level access or features. In contrast, the instance databases in a managed instance are all physically co-located - in the same way that you are familiar with SQL Server in the on-premises or virtual machine world. - -When you create a server, you provide a server login account and password that has administrative rights to the master database on that server and all databases created on that server. This initial account is a SQL login account. Azure SQL Database and Azure Synapse Analytics support SQL authentication and Azure Active Directory Authentication for authentication. For information about logins and authentication, see [Managing Databases and Logins in Azure SQL Database](logins-create-manage.md). Windows Authentication is not supported. 
- -A server in SQL Database and Azure Synapse: - -- Is created within an Azure subscription, but can be moved with its contained resources to another subscription -- Is the parent resource for databases, elastic pools, and data warehouses -- Provides a namespace for databases, elastic pools, and data warehouse database -- Is a logical container with strong lifetime semantics - delete a server and it deletes its databases, elastic pools, and SQL pools -- Participates in [Azure role-based access control (Azure RBAC)](../../role-based-access-control/overview.md) - databases, elastic pools, and data warehouse database within a server inherit access rights from the server -- Is a high-order element of the identity of databases, elastic pools, and data warehouse database for Azure resource management purposes (see the URL scheme for databases and pools) -- Collocates resources in a region -- Provides a connection endpoint for database access (`<servername>`.database.windows.net) -- Provides access to metadata regarding contained resources via DMVs by connecting to a master database -- Provides the scope for management policies that apply to its databases - logins, firewall, audit, threat detection, and such -- Is restricted by a quota within the parent subscription (six servers per subscription by default - [see Subscription limits here](../../azure-resource-manager/management/azure-subscription-service-limits.md)) -- Provides the scope for database quota and DTU or vCore quota for the resources it contains (such as 45,000 DTU) -- Is the versioning scope for capabilities enabled on contained resources -- Server-level principal logins can manage all databases on a server -- Can contain logins similar to those in instances of SQL Server in your on-premises environment that are granted access to one or more databases on the server, and can be granted limited administrative rights. For more information, see [Logins](logins-create-manage.md). 
-- The default collation for all databases created on a server is `SQL_LATIN1_GENERAL_CP1_CI_AS`, where `LATIN1_GENERAL` is English (United States), `CP1` is code page 1252, `CI` is case-insensitive, and `AS` is accent-sensitive. - -## Manage servers, databases, and firewalls using the Azure portal - -You can create the resource group for a server ahead of time or while creating the server itself. There are multiple methods for getting to a new SQL server form, either by creating a new SQL server or as part of creating a new database. - -### Create a blank server - -To create a server (without a database, elastic pool, or data warehouse database) using the [Azure portal](https://portal.azure.com), navigate to a blank SQL server (logical SQL server) form. - -### Create a blank or sample database in Azure SQL Database - -To create a database in SQL Database using the [Azure portal](https://portal.azure.com), navigate to a blank SQL Database form and provide the requested information. You can create the resource group and server ahead of time or while creating the database itself. You can create a blank database or create a sample database based on Adventure Works LT. - - ![create database-1](./media/logical-servers/create-database-1.png) - -> [!IMPORTANT] -> For information on selecting the pricing tier for your database, see [DTU-based purchasing model](service-tiers-dtu.md) and [vCore-based purchasing model](service-tiers-vcore.md). - -To create a managed instance, see [Create a managed instance](../managed-instance/instance-create-quickstart.md) - -### Manage an existing server - -To manage an existing server, navigate to the server using a number of methods - such as from specific database page, the **SQL servers** page, or the **All resources** page. - -To manage an existing database, navigate to the **SQL databases** page and click the database you wish to manage. 
The following screenshot shows how to begin setting a server-level firewall for a database from the **Overview** page for a database. - - ![server firewall rule](./media/single-database-create-quickstart/server-firewall-rule.png) - -> [!IMPORTANT] -> To configure performance properties for a database, see [DTU-based purchasing model](service-tiers-dtu.md) and [vCore-based purchasing model](service-tiers-vcore.md). -> [!TIP] -> For an Azure portal quickstart, see [Create a database in SQL Database in the Azure portal](single-database-create-quickstart.md). - -## Manage servers, databases, and firewalls using PowerShell - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. - -To create and manage servers, databases, and firewalls with Azure PowerShell, use the following PowerShell cmdlets. If you need to install or upgrade PowerShell, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). For creating and managing elastic pools, see [Elastic pools](elastic-pool-overview.md). 
- -| Cmdlet | Description | -| --- | --- | -|[New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase)|Creates a database | -|[Get-AzSqlDatabase](/powershell/module/az.sql/get-azsqldatabase)|Gets one or more databases| -|[Set-AzSqlDatabase](/powershell/module/az.sql/set-azsqldatabase)|Sets properties for a database, or moves an existing database into an elastic pool| -|[Remove-AzSqlDatabase](/powershell/module/az.sql/remove-azsqldatabase)|Removes a database| -|[New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup)|Creates a resource group| -|[New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver)|Creates a server| -|[Get-AzSqlServer](/powershell/module/az.sql/get-azsqlserver)|Returns information about servers| -|[Set-AzSqlServer](/powershell/module/az.sql/set-azsqlserver)|Modifies properties of a server| -|[Remove-AzSqlServer](/powershell/module/az.sql/remove-azsqlserver)|Removes a server| -|[New-AzSqlServerFirewallRule](/powershell/module/az.sql/new-azsqlserverfirewallrule)|Creates a server-level firewall rule | -|[Get-AzSqlServerFirewallRule](/powershell/module/az.sql/get-azsqlserverfirewallrule)|Gets firewall rules for a server| -|[Set-AzSqlServerFirewallRule](/powershell/module/az.sql/set-azsqlserverfirewallrule)|Modifies a firewall rule in a server| -|[Remove-AzSqlServerFirewallRule](/powershell/module/az.sql/remove-azsqlserverfirewallrule)|Deletes a firewall rule from a server.| -| New-AzSqlServerVirtualNetworkRule | Creates a [*virtual network rule*](vnet-service-endpoint-rule-overview.md), based on a subnet that is a Virtual Network service endpoint. | - -> [!TIP] -> For a PowerShell quickstart, see [Create a database in Azure SQL Database using PowerShell](single-database-create-quickstart.md). 
For PowerShell example scripts, see [Use PowerShell to create a database in Azure SQL Database and configure a firewall rule](scripts/create-and-configure-database-powershell.md) and [Monitor and scale a database in Azure SQL Database using PowerShell](scripts/monitor-and-scale-database-powershell.md). -> - -## Manage servers, databases, and firewalls using the Azure CLI - -To create and manage servers, databases, and firewalls with the [Azure CLI](/cli/azure), use the following [Azure CLI SQL Database](/cli/azure/sql/db) commands. Use the [Cloud Shell](../../cloud-shell/overview.md) to run the CLI in your browser, or [install](/cli/azure/install-azure-cli) it on macOS, Linux, or Windows. For creating and managing elastic pools, see [Elastic pools](elastic-pool-overview.md). - -| Cmdlet | Description | -| --- | --- | -|[az sql db create](/cli/azure/sql/db#az-sql-db-create) |Creates a database| -|[az sql db list](/cli/azure/sql/db#az-sql-db-list)|Lists all databases managed by a server, or all databases in an elastic pool| -|[az sql db list-editions](/cli/azure/sql/db#az-sql-db-list-editions)|Lists available service objectives and storage limits| -|[az sql db list-usages](/cli/azure/sql/db#az-sql-db-list-usages)|Returns database usages| -|[az sql db show](/cli/azure/sql/db#az-sql-db-show)|Gets a database -|[az sql db update](/cli/azure/sql/db#az-sql-db-update)|Updates a database| -|[az sql db delete](/cli/azure/sql/db#az-sql-db-delete)|Removes a database| -|[az group create](/cli/azure/group#az-group-create)|Creates a resource group| -|[az sql server create](/cli/azure/sql/server#az-sql-server-create)|Creates a server| -|[az sql server list](/cli/azure/sql/server#az-sql-server-list)|Lists servers| -|[az sql server list-usages](/cli/azure/sql/server#az-sql-server-list-usages)|Returns server usages| -|[az sql server show](/cli/azure/sql/server#az-sql-server-show)|Gets a server| -|[az sql server update](/cli/azure/sql/server#az-sql-server-update)|Updates a server| 
-|[az sql server delete](/cli/azure/sql/server#az-sql-server-delete)|Deletes a server| -|[az sql server firewall-rule create](/cli/azure/sql/server/firewall-rule#az-sql-server-firewall-rule-create)|Creates a server firewall rule| -|[az sql server firewall-rule list](/cli/azure/sql/server/firewall-rule#az-sql-server-firewall-rule-list)|Lists the firewall rules on a server| -|[az sql server firewall-rule show](/cli/azure/sql/server/firewall-rule#az-sql-server-firewall-rule-show)|Shows the detail of a firewall rule| -|[az sql server firewall-rule update](/cli/azure/sql/server/firewall-rule#az-sql-server-firewall-rule-update)|Updates a firewall rule| -|[az sql server firewall-rule delete](/cli/azure/sql/server/firewall-rule#az-sql-server-firewall-rule-delete)|Deletes a firewall rule| - -> [!TIP] -> For an Azure CLI quickstart, see [Create a database in Azure SQL Database using the Azure CLI](az-cli-script-samples-content-guide.md). For Azure CLI example scripts, see [Use the CLI to create a database in Azure SQL Database and configure a firewall rule](scripts/create-and-configure-database-cli.md) and [Use Azure CLI to monitor and scale a database in Azure SQL Database](scripts/monitor-and-scale-database-cli.md). -> - -## Manage servers, databases, and firewalls using Transact-SQL - -To create and manage servers, databases, and firewalls with Transact-SQL, use the following T-SQL commands. You can issue these commands using the Azure portal, [SQL Server Management Studio](/sql/ssms/use-sql-server-management-studio), [Visual Studio Code](https://code.visualstudio.com/docs), or any other program that can connect to a server and pass Transact-SQL commands. For managing elastic pools, see [Elastic pools](elastic-pool-overview.md). - -> [!IMPORTANT] -> You cannot create or delete a server using Transact-SQL. 
- -| Command | Description | -| --- | --- | -|[CREATE DATABASE (Azure SQL Database)](/sql/t-sql/statements/create-database-transact-sql?view=azuresqldb-current&preserve-view=true) | Creates a new database in Azure SQL Database. You must be connected to the master database to create a new database.| -|[CREATE DATABASE (Azure Synapse)](/sql/t-sql/statements/create-database-transact-sql?view=azure-sqldw-latest&preserve-view=true) | Creates a new data warehouse database in Azure Synapse. You must be connected to the master database to create a new database.| -| [ALTER DATABASE (Azure SQL Database)](/sql/t-sql/statements/alter-database-transact-sql?view=azuresqldb-current&preserve-view=true) |Modifies database or elastic pool. | -|[ALTER DATABASE (Azure Synapse Analytics)](/sql/t-sql/statements/alter-database-transact-sql?view=azure-sqldw-latest&preserve-view=true&tabs=sqlpool)|Modifies a data warehouse database in Azure Synapse.| -|[DROP DATABASE (Transact-SQL)](/sql/t-sql/statements/drop-database-transact-sql)|Deletes a database.| -|[sys.database_service_objectives (Azure SQL Database)](/sql/relational-databases/system-catalog-views/sys-database-service-objectives-azure-sql-database)|Returns the edition (service tier), service objective (pricing tier), and elastic pool name, if any, for a database. If logged on to the master database for a server, returns information on all databases. For Azure Synapse, you must be connected to the master database.| -|[sys.dm_db_resource_stats (Azure SQL Database)](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database)| Returns CPU, IO, and memory consumption for a database in Azure SQL Database. One row exists for every 15 seconds, even if there is no activity in the database.| -|[sys.resource_stats (Azure SQL Database)](/sql/relational-databases/system-catalog-views/sys-resource-stats-azure-sql-database)|Returns CPU usage and storage data for a database in Azure SQL Database. 
The data is collected and aggregated within five-minute intervals.| -|[sys.database_connection_stats (Azure SQL Database)](/sql/relational-databases/system-catalog-views/sys-database-connection-stats-azure-sql-database)|Contains statistics for database connectivity events for Azure SQL Database, providing an overview of database connection successes and failures. | -|[sys.event_log (Azure SQL Database)](/sql/relational-databases/system-catalog-views/sys-event-log-azure-sql-database)|Returns successful database connections and connection failures for Azure SQL Database. You can use this information to track or troubleshoot your database activity.| -|[sp_set_firewall_rule (Azure SQL Database)](/sql/relational-databases/system-stored-procedures/sp-set-firewall-rule-azure-sql-database)|Creates or updates the server-level firewall settings for your server. This stored procedure is only available in the master database to the server-level principal login. A server-level firewall rule can only be created using Transact-SQL after the first server-level firewall rule has been created by a user with Azure-level permissions| -|[sys.firewall_rules (Azure SQL Database)](/sql/relational-databases/system-catalog-views/sys-firewall-rules-azure-sql-database)|Returns information about the server-level firewall settings associated with a server.| -|[sp_delete_firewall_rule (Azure SQL Database)](/sql/relational-databases/system-stored-procedures/sp-delete-firewall-rule-azure-sql-database)|Removes server-level firewall settings from a server. This stored procedure is only available in the master database to the server-level principal login.| -|[sp_set_database_firewall_rule (Azure SQL Database)](/sql/relational-databases/system-stored-procedures/sp-set-database-firewall-rule-azure-sql-database)|Creates or updates the database-level firewall rules for a database in Azure SQL Database. 
Database firewall rules can be configured for the master database, and for user databases in SQL Database. Database firewall rules are useful when using contained database users. Database firewall rules are not supported in Azure Synapse.| -|[sys.database_firewall_rules (Azure SQL Database)](/sql/relational-databases/system-catalog-views/sys-database-firewall-rules-azure-sql-database)|Returns information about the database-level firewall settings for a database in Azure SQL Database. | -|[sp_delete_database_firewall_rule (Azure SQL Database)](/sql/relational-databases/system-stored-procedures/sp-delete-database-firewall-rule-azure-sql-database)|Removes database-level firewall setting for a database of yours in Azure SQL Database. | - -> [!TIP] -> For a quickstart using SQL Server Management Studio on Microsoft Windows, see [Azure SQL Database: Use SQL Server Management Studio to connect and query data](connect-query-ssms.md). For a quickstart using Visual Studio Code on the macOS, Linux, or Windows, see [Azure SQL Database: Use Visual Studio Code to connect and query data](connect-query-vscode.md). - -## Manage servers, databases, and firewalls using the REST API - -To create and manage servers, databases, and firewalls, use these REST API requests. 
- - | Command | Description | - | --- | --- | -|[Servers - Create or update](/rest/api/sql/servers/createorupdate)|Creates or updates a new server.| -|[Servers - Delete](/rest/api/sql/servers/delete)|Deletes a server.| -|[Servers - Get](/rest/api/sql/servers/get)|Gets a server.| -|[Servers - List](/rest/api/sql/servers/list)|Returns a list of servers.| -|[Servers - List by resource group](/rest/api/sql/servers/listbyresourcegroup)|Returns a list of servers in a resource group.| -|[Servers - Update](/rest/api/sql/servers/update)|Updates an existing server.| -|[Databases - Create or update](/rest/api/sql/databases/createorupdate)|Creates a new database or updates an existing database.| -|[Databases - Delete](/rest/api/sql/databases/delete)|Deletes a database.| -|[Databases - Get](/rest/api/sql/databases/get)|Gets a database.| -|[Databases - List by elastic pool](/rest/api/sql/databases/listbyelasticpool)|Returns a list of databases in an elastic pool.| -|[Databases - List by server](/rest/api/sql/databases/listbyserver)|Returns a list of databases in a server.| -|[Databases - Update](/rest/api/sql/databases/update)|Updates an existing database.| -|[Firewall rules - Create or update](/rest/api/sql/firewallrules/createorupdate)|Creates or updates a firewall rule.| -|[Firewall rules - Delete](/rest/api/sql/firewallrules/delete)|Deletes a firewall rule.| -|[Firewall rules - Get](/rest/api/sql/firewallrules/get)|Gets a firewall rule.| -|[Firewall rules - List by server](/rest/api/sql/firewallrules/listbyserver)|Returns a list of firewall rules.| - -## Next steps - -- To learn about migrating a SQL Server database to Azure SQL Database, see [Migrate to Azure SQL Database](migrate-to-database-from-sql-server.md). -- For information about supported features, see [Features](features-comparison.md). 
diff --git a/articles/azure-sql/database/logins-create-manage.md b/articles/azure-sql/database/logins-create-manage.md deleted file mode 100644 index 16bd1d5cb8b6d..0000000000000 --- a/articles/azure-sql/database/logins-create-manage.md +++ /dev/null @@ -1,168 +0,0 @@ ---- -title: Authorize server and database access using logins and user accounts -titleSuffix: Azure SQL Database & SQL Managed Instance & Azure Synapse Analytics -description: Learn about how Azure SQL Database, SQL Managed Instance, and Azure Synapse authenticate users for access using logins and user accounts. Also learn how to grant database roles and explicit permissions to authorize logins and users to perform actions and query data. -keywords: sql database security,database security management,login security,database security,database access -services: sql-database -ms.service: sql-db-mi -ms.subservice: security -ms.custom: sqldbrb=3 -ms.devlang: -ms.topic: conceptual -author: AndreasWolter -ms.author: anwolter -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 03/23/2020 ---- -# Authorize database access to SQL Database, SQL Managed Instance, and Azure Synapse Analytics -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -In this article, you learn about: - -- Options for configuring Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics to enable users to perform administrative tasks and to access the data stored in these databases. -- The access and authorization configuration after initially creating a new server. -- How to add logins and user accounts in the master database and user accounts and then grant these accounts administrative permissions. -- How to add user accounts in user databases, either associated with logins or as contained user accounts. -- Configure user accounts with permissions in user databases by using database roles and explicit permissions. 
- -> [!IMPORTANT] -> Databases in Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse are referred to collectively in the remainder of this article as databases, and the server is referring to the [server](logical-servers.md) that manages databases for Azure SQL Database and Azure Synapse. - -## Authentication and authorization - -[**Authentication**](security-overview.md#authentication) is the process of proving the user is who they claim to be. A user connects to a database using a user account. -When a user attempts to connect to a database, they provide a user account and authentication information. The user is authenticated using one of the following two authentication methods: - -- [SQL authentication](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-sql-server-authentication). - - With this authentication method, the user submits a user account name and associated password to establish a connection. This password is stored in the master database for user accounts linked to a login or stored in the database containing the user accounts *not* linked to a login. -- [Azure Active Directory Authentication](authentication-aad-overview.md) - - With this authentication method, the user submits a user account name and requests that the service use the credential information stored in Azure Active Directory (Azure AD). - -**Logins and users**: A user account in a database can be associated with a login that is stored in the master database or can be a user name that is stored in an individual database. - -- A **login** is an individual account in the master database, to which a user account in one or more databases can be linked. With a login, the credential information for the user account is stored with the login. -- A **user account** is an individual account in any database that may be, but does not have to be, linked to a login. 
With a user account that is not linked to a login, the credential information is stored with the user account. - -[**Authorization**](security-overview.md#authorization) to access data and perform various actions are managed using database roles and explicit permissions. Authorization refers to the permissions assigned to a user, and determines what that user is allowed to do. Authorization is controlled by your user account's database [role memberships](/sql/relational-databases/security/authentication-access/database-level-roles) and [object-level permissions](/sql/relational-databases/security/permissions-database-engine). As a best practice, you should grant users the least privileges necessary. - -## Existing logins and user accounts after creating a new database - -When you first deploy Azure SQL, you specify an admin login and an associated password for that login. This administrative account is called **Server admin**. The following configuration of logins and users in the master and user databases occurs during deployment: - -- A SQL login with administrative privileges is created using the login name you specified. A [login](/sql/relational-databases/security/authentication-access/principals-database-engine#sa-login) is an individual user account for logging in to SQL Database, SQL Managed Instance, and Azure Synapse. -- This login is granted full administrative permissions on all databases as a [server-level principal](/sql/relational-databases/security/authentication-access/principals-database-engine). The login has all available permissions and can't be limited. In a SQL Managed Instance, this login is added to the [sysadmin fixed server role](/sql/relational-databases/security/authentication-access/server-level-roles) (this role does not exist in Azure SQL Database). 
-- A [user account](/sql/relational-databases/security/authentication-access/getting-started-with-database-engine-permissions#database-users) called `dbo` is created for this login in each user database. The [dbo](/sql/relational-databases/security/authentication-access/principals-database-engine) user has all database permissions in the database and is mapped to the `db_owner` fixed database role. Additional fixed database roles are discussed later in this article. - -To identify the administrator accounts for a database, open the Azure portal, and navigate to the **Properties** tab of your server or managed instance. - -![SQL Server Admins](./media/logins-create-manage/sql-admins.png) - -![Screenshot that highlights the Properties menu option.](./media/logins-create-manage/sql-admins2.png) - -> [!IMPORTANT] -> The admin login name can't be changed after it has been created. To reset the password for the server admin, go to the [Azure portal](https://portal.azure.com), click **SQL Servers**, select the server from the list, and then click **Reset Password**. To reset the password for the SQL Managed Instance, go to the Azure portal, click the instance, and click **Reset password**. You can also use PowerShell or the Azure CLI. - -## Create additional logins and users having administrative permissions - -At this point, your server or managed instance is only configured for access using a single SQL login and user account. To create additional logins with full or partial administrative permissions, you have the following options (depending on your deployment mode): - -- **Create an Azure Active Directory administrator account with full administrative permissions** - - Enable Azure Active Directory authentication and create an Azure AD administrator login. One Azure Active Directory account can be configured as an administrator of the Azure SQL deployment with full administrative permissions. This account can be either an individual or security group account. 
An Azure AD administrator **must** be configured if you want to use Azure AD accounts to connect to SQL Database, SQL Managed Instance, or Azure Synapse. For detailed information on enabling Azure AD authentication for all Azure SQL deployment types, see the following articles: - - - [Use Azure Active Directory authentication for authentication with SQL](authentication-aad-overview.md) - - [Configure and manage Azure Active Directory authentication with SQL](authentication-aad-configure.md) - -- **In SQL Managed Instance, create SQL logins with full administrative permissions** - - - Create an additional SQL login in the master database. - - Add the login to the [sysadmin fixed server role](/sql/relational-databases/security/authentication-access/server-level-roles) using the [ALTER SERVER ROLE](/sql/t-sql/statements/alter-server-role-transact-sql) statement. This login will have full administrative permissions. - - Alternatively, create an [Azure AD login](authentication-aad-configure.md#provision-azure-ad-admin-sql-managed-instance) using the [CREATE LOGIN](/sql/t-sql/statements/create-login-transact-sql?view=azuresqldb-mi-current&preserve-view=true) syntax. - -- **In SQL Database, create SQL logins with limited administrative permissions** - - - Create an additional SQL login in the master database. - - Create a user account in the master database associated with this new login. - - Add the user account to the `dbmanager`, the `loginmanager` role, or both in the `master` database using the [ALTER ROLE](/sql/t-sql/statements/alter-role-transact-sql) statement (for Azure Synapse, use the [sp_addrolemember](/sql/relational-databases/system-stored-procedures/sp-addrolemember-transact-sql) statement). - - > [!NOTE] - > `dbmanager` and `loginmanager` roles do **not** pertain to SQL Managed Instance deployments. 
- - Members of these [special master database roles](/sql/relational-databases/security/authentication-access/database-level-roles#special-roles-for--and-) for Azure SQL Database have authority to create and manage databases or to create and manage logins. In databases created by a user that is a member of the `dbmanager` role, the member is mapped to the `db_owner` fixed database role and can log into and manage that database using the `dbo` user account. These roles have no explicit permissions outside of the master database. - - > [!IMPORTANT] - > You can't create an additional SQL login with full administrative permissions in SQL Database. - -## Create accounts for non-administrator users - -You can create accounts for non-administrative users using one of two methods: - -- **Create a login** - - Create a SQL login in the master database. Then create a user account in each database to which that user needs access and associate the user account with that login. This approach is preferred when the user must access multiple databases and you wish to keep the passwords synchronized. However, this approach has complexities when used with geo-replication as the login must be created on both the primary server and the secondary server(s). For more information, see [Configure and manage Azure SQL Database security for geo-restore or failover](active-geo-replication-security-configure.md). -- **Create a user account** - - Create a user account in the database to which a user needs access (also called a [contained user](/sql/relational-databases/security/contained-database-users-making-your-database-portable)). - - - With SQL Database, you can always create this type of user account. 
- - With SQL Managed Instance supporting [Azure AD server principals](authentication-aad-configure.md#create-contained-users-mapped-to-azure-ad-identities), you can create user accounts to authenticate to the SQL Managed Instance without requiring database users to be created as a contained database user. - - With this approach, the user authentication information is stored in each database, and replicated to geo-replicated databases automatically. However, if the same account exists in multiple databases and you are using Azure SQL Authentication, you must keep the passwords synchronized manually. Additionally, if a user has an account in different databases with different passwords, remembering those passwords can become a problem. - -> [!IMPORTANT] -> To create contained users mapped to Azure AD identities, you must be logged in using an Azure AD account in the database in Azure SQL Database. In SQL Managed Instance, a SQL login with `sysadmin` permissions can also create an Azure AD login or user. - -For examples showing how to create logins and users, see: - -- [Create login for Azure SQL Database](/sql/t-sql/statements/create-login-transact-sql?view=azuresqldb-current&preserve-view=true#examples-1) -- [Create login for Azure SQL Managed Instance](/sql/t-sql/statements/create-login-transact-sql?view=azuresqldb-mi-current&preserve-view=true#examples-2) -- [Create login for Azure Synapse](/sql/t-sql/statements/create-login-transact-sql?view=azure-sqldw-latest&preserve-view=true#examples-3) -- [Create user](/sql/t-sql/statements/create-user-transact-sql#examples) -- [Creating Azure AD contained users](authentication-aad-configure.md#create-contained-users-mapped-to-azure-ad-identities) - -> [!TIP] -> For a security tutorial that includes creating users in Azure SQL Database, see [Tutorial: Secure Azure SQL Database](secure-database-tutorial.md). 
- -## Using fixed and custom database roles - -After creating a user account in a database, either based on a login or as a contained user, you can authorize that user to perform various actions and to access data in a particular database. You can use the following methods to authorize access: - -- **Fixed database roles** - - Add the user account to a [fixed database role](/sql/relational-databases/security/authentication-access/database-level-roles). There are 9 fixed database roles, each with a defined set of permissions. The most common fixed database roles are: **db_owner**, **db_ddladmin**, **db_datawriter**, **db_datareader**, **db_denydatawriter**, and **db_denydatareader**. **db_owner** is commonly used to grant full permission to only a few users. The other fixed database roles are useful for getting a simple database in development quickly, but are not recommended for most production databases. For example, the **db_datareader** fixed database role grants read access to every table in the database, which is more than is strictly necessary. - - - To add a user to a fixed database role: - - - In Azure SQL Database, use the [ALTER ROLE](/sql/t-sql/statements/alter-role-transact-sql) statement. For examples, see [ALTER ROLE examples](/sql/t-sql/statements/alter-role-transact-sql#examples). - - In Azure Synapse, use the [sp_addrolemember](/sql/relational-databases/system-stored-procedures/sp-addrolemember-transact-sql) statement. For examples, see [sp_addrolemember examples](/sql/relational-databases/system-stored-procedures/sp-addrolemember-transact-sql#examples). - -- **Custom database role** - - Create a custom database role using the [CREATE ROLE](/sql/t-sql/statements/create-role-transact-sql) statement. A custom role enables you to create your own user-defined database roles and carefully grant each role the least permissions necessary for the business need. You can then add users to the custom role. 
When a user is a member of multiple roles, they aggregate the permissions of them all. -- **Grant permissions directly** - - Grant the user account [permissions](/sql/relational-databases/security/permissions-database-engine) directly. There are over 100 permissions that can be individually granted or denied in SQL Database. Many of these permissions are nested. For example, the `UPDATE` permission on a schema includes the `UPDATE` permission on each table within that schema. As in most permission systems, the denial of a permission overrides a grant. Because of the nested nature and the number of permissions, it can take careful study to design an appropriate permission system to properly protect your database. Start with the list of permissions at [Permissions (Database Engine)](/sql/relational-databases/security/permissions-database-engine) and review the [poster size graphic](/sql/relational-databases/security/media/database-engine-permissions.png) of the permissions. - -## Using groups - -Efficient access management uses permissions assigned to Active Directory security groups and fixed or custom roles instead of to individual users. - -- When using Azure Active Directory authentication, put Azure Active Directory users into an Azure Active Directory security group. Create a contained database user for the group. Add one or more database users as a member to custom or builtin database roles with the specific permissions appropriate to that group of users. - -- When using SQL authentication, create contained database users in the database. Place one or more database users into a custom database role with specific permissions appropriate to that group of users. - - > [!NOTE] - > You can also use groups for non-contained database users. 
- -You should familiarize yourself with the following features that can be used to limit or elevate permissions: - -- [Impersonation](/dotnet/framework/data/adonet/sql/customizing-permissions-with-impersonation-in-sql-server) and [module-signing](/dotnet/framework/data/adonet/sql/signing-stored-procedures-in-sql-server) can be used to securely elevate permissions temporarily. -- [Row-Level Security](/sql/relational-databases/security/row-level-security) can be used to limit which rows a user can access. -- [Data Masking](dynamic-data-masking-overview.md) can be used to limit exposure of sensitive data. -- [Stored procedures](/sql/relational-databases/stored-procedures/stored-procedures-database-engine) can be used to limit the actions that can be taken on the database. - -## Next steps - -For an overview of all Azure SQL Database and SQL Managed Instance security features, see [Security overview](security-overview.md). diff --git a/articles/azure-sql/database/long-term-backup-retention-configure.md b/articles/azure-sql/database/long-term-backup-retention-configure.md deleted file mode 100644 index 35d3d1ad738ed..0000000000000 --- a/articles/azure-sql/database/long-term-backup-retention-configure.md +++ /dev/null @@ -1,326 +0,0 @@ ---- -title: "Azure SQL Database: Manage long-term backup retention" -description: Learn how to store and restore automated backups for Azure SQL Database in Azure storage (for up to 10 years) using the Azure portal, Azure CLI, and PowerShell. 
-services: sql-database -ms.service: sql-db-mi -ms.subservice: backup-restore -ms.custom: devx-track-azurepowershell, devx-track-azurecli -ms.topic: how-to -author: SudhirRaparla -ms.author: nvraparl -ms.reviewer: kendralittle, mathoma -ms.date: 12/16/2020 ---- - -# Manage Azure SQL Database long-term backup retention -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -With Azure SQL Database, you can set a [long-term backup retention](long-term-retention-overview.md) policy (LTR) to automatically retain backups in separate Azure Blob storage containers for up to 10 years. You can then recover a database using these backups using the Azure portal, Azure CLI, or PowerShell. Long-term retention policies are also supported for [Azure SQL Managed Instance](../managed-instance/long-term-backup-retention-configure.md). - -## Prerequisites - -# [Portal](#tab/portal) - -An active Azure subscription. - -# [Azure CLI](#tab/azure-cli) - -Prepare your environment for the Azure CLI. - -[!INCLUDE[azure-cli-prepare-your-environment-no-header](../../../includes/azure-cli-prepare-your-environment-no-header.md)] - -# [PowerShell](#tab/powershell) - -Prepare your environment for PowerShell. - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] - -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported by Azure SQL Database, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. 
- -For **Get-AzSqlDatabaseLongTermRetentionBackup** and **Restore-AzSqlDatabase**, you will need to have one of the following roles: - -- Subscription Owner role or -- SQL Server Contributor role or -- Custom role with the following permissions: - - Microsoft.Sql/locations/longTermRetentionBackups/read - Microsoft.Sql/locations/longTermRetentionServers/longTermRetentionBackups/read - Microsoft.Sql/locations/longTermRetentionServers/longTermRetentionDatabases/longTermRetentionBackups/read - -For **Remove-AzSqlDatabaseLongTermRetentionBackup**, you will need to have one of the following roles: - -- Subscription Owner role or -- Custom role with the following permission: - - Microsoft.Sql/locations/longTermRetentionServers/longTermRetentionDatabases/longTermRetentionBackups/delete - -> [!NOTE] -> The SQL Server Contributor role does not have permission to delete LTR backups. - -Azure RBAC permissions could be granted in either *subscription* or *resource group* scope. However, to access LTR backups that belong to a dropped server, the permission must be granted in the *subscription* scope of that server. - -- Microsoft.Sql/locations/longTermRetentionServers/longTermRetentionDatabases/longTermRetentionBackups/delete - ---- - -## Create long-term retention policies - -# [Portal](#tab/portal) - -You can configure SQL Database to [retain automated backups](long-term-retention-overview.md) for a period longer than the retention period for your service tier. - -1. In the Azure portal, navigate to your server and then select **Backups**. Select the **Retention policies** tab to modify your backup retention settings. - - ![retention policies experience](./media/long-term-backup-retention-configure/ltr-policies-tab.png) - -2. On the Retention policies tab, select the database(s) on which you want to set or modify long-term backup retention policies. Unselected databases will not be affected. 
- - ![select database to configure backup retention policies](./media/long-term-backup-retention-configure/ltr-policies-tab-configure.png) - -3. In the **Configure policies** pane, specify your desired retention period for weekly, monthly, or yearly backups. Choose a retention period of '0' to indicate that no long-term backup retention should be set. - - ![configure policies pane](./media/long-term-backup-retention-configure/ltr-configure-policies.png) - -4. Select **Apply** to apply the chosen retention settings to all selected databases. - -> [!IMPORTANT] -> When you enable a long-term backup retention policy, it may take up to 7 days for the first backup to become visible and available to restore. For details of the LTR backup cadence, see [long-term backup retention](long-term-retention-overview.md). - -# [Azure CLI](#tab/azure-cli) - -Run the [az sql db ltr-policy set](/cli/azure/sql/db/ltr-policy#az-sql-db-ltr-policy-set) command to create an LTR policy. The following example sets a long-term retention policy for 12 weeks for the weekly backup. - -```azurecli -az sql db ltr-policy set \ - --resource-group mygroup \ - --server myserver \ - --name mydb \ - --weekly-retention "P12W" -``` - -This example sets a retention policy for 12 weeks for the weekly backup, 5 years for the yearly backup, and the week of April 15 in which to take the yearly LTR backup. - -```azurecli -az sql db ltr-policy set \ - --resource-group mygroup \ - --server myserver \ - --name mydb \ - --weekly-retention "P12W" \ - --yearly-retention "P5Y" \ - --week-of-year 16 -``` - -# [PowerShell](#tab/powershell) - -```powershell -# get the SQL server -$subId = "" -$serverName = "" -$resourceGroup = "" -$dbName = "" - -Connect-AzAccount -Select-AzSubscription -SubscriptionId $subId - -$server = Get-AzSqlServer -ServerName $serverName -ResourceGroupName $resourceGroup - -# create LTR policy with WeeklyRetention = 12 weeks. MonthlyRetention and YearlyRetention = 0 by default. 
-Set-AzSqlDatabaseBackupLongTermRetentionPolicy -ServerName $serverName -DatabaseName $dbName ` - -ResourceGroupName $resourceGroup -WeeklyRetention P12W - -# create LTR policy with WeeklyRetention = 12 weeks, YearlyRetention = 5 years and WeekOfYear = 16 (week of April 15). MonthlyRetention = 0 by default. -Set-AzSqlDatabaseBackupLongTermRetentionPolicy -ServerName $serverName -DatabaseName $dbName ` - -ResourceGroupName $resourceGroup -WeeklyRetention P12W -YearlyRetention P5Y -WeekOfYear 16 -``` - ---- - -## View backups and restore from a backup - -View the backups that are retained for a specific database with an LTR policy, and restore from those backups. - -# [Portal](#tab/portal) - -1. In the Azure portal, navigate to your server and then select **Backups**. To view the available LTR backups for a specific database, select **Manage** under the Available LTR backups column. A pane will appear with a list of the available LTR backups for the selected database. - - ![available backups experience](./media/long-term-backup-retention-configure/ltr-available-backups-tab.png) - -1. In the **Available LTR backups** pane that appears, review the available backups. You may select a backup to restore from or to delete. - - ![view available LTR backups](./media/long-term-backup-retention-configure/ltr-available-backups-manage.png) - -1. To restore from an available LTR backup, select the backup from which you want to restore, and then select **Restore**. - - ![restore from available LTR backup](./media/long-term-backup-retention-configure/ltr-available-backups-restore.png) - -1. Choose a name for your new database, then select **Review + Create** to review the details of your Restore. Select **Create** to restore your database from the chosen backup. - - ![configure restore details](./media/long-term-backup-retention-configure/restore-ltr.png) - -1. On the toolbar, select the notification icon to view the status of the restore job. 
- - ![restore job progress](./media/long-term-backup-retention-configure/restore-job-progress-long-term.png) - -1. When the restore job is completed, open the **SQL databases** page to view the newly restored database. - -> [!NOTE] -> From here, you can connect to the restored database using SQL Server Management Studio to perform needed tasks, such as to [extract a bit of data from the restored database to copy into the existing database or to delete the existing database and rename the restored database to the existing database name](recovery-using-backups.md#point-in-time-restore). - -# [Azure CLI](#tab/azure-cli) - -### View LTR policies - -Run the [az sql db ltr-policy show](/cli/azure/sql/db/ltr-policy#az-sql-db-ltr-policy-show) command to view the LTR policy for a single database on your server. - -```azurecli -az sql db ltr-policy show \ - --resource-group mygroup \ - --server myserver \ - --name mydb -``` - -### View LTR backups - -Use the [az sql db ltr-backup list](/cli/azure/sql/db/ltr-backup#az-sql-db-ltr-backup-list) command to list the LTR backups for a database. You can use this command to find the `name` parameter for use in other commands. - -```azurecli -az sql db ltr-backup list \ - --location eastus2 \ - --server myserver \ - --database mydb -``` - -### Delete LTR backups - -Run the [az sql db ltr-backup delete](/cli/azure/sql/db/ltr-backup#az-sql-db-ltr-backup-delete) command to remove an LTR backup. You can use [az sql db ltr-backup list](/cli/azure/sql/db/ltr-backup#az-sql-db-ltr-backup-list) to find the backup `name`. - -```azurecli -az sql db ltr-backup delete \ - --location eastus2 \ - --server myserver \ - --database mydb \ - --name "3214b3fb-fba9-43e7-96a3-09e35ffcb336;132292152080000000" -``` - -> [!IMPORTANT] -> Deleting LTR backup is non-reversible. To delete an LTR backup after the server has been deleted you must have Subscription scope permission. 
You can set up notifications about each delete in Azure Monitor by filtering for operation 'Deletes a long term retention backup'. The activity log contains information on who made the request and when. See [Create activity log alerts](../../azure-monitor/alerts/alerts-activity-log.md) for detailed instructions. - -### Restore from LTR backups - -Run the [az sql db ltr-backup restore](/cli/azure/sql/db/ltr-backup#az-sql-db-ltr-backup-restore) command to restore your database from an LTR backup. You can run [az sql db ltr-backup show](/cli/azure/sql/db/ltr-backup#az-sql-db-ltr-backup-show) to get the `backup-id`. - -1. Create a variable for the `backup-id` with the command `az sql db ltr-backup show` for future use. - - ```azurecli - get_backup_id=$(az sql db ltr-backup show \ - --location eastus2 \ - --server myserver \ - --database mydb \ - --name "3214b3fb-fba9-43e7-96a3-09e35ffcb336;132292152080000000" \ - --query 'id' \ - --output tsv) - ``` - -2. Restore your database from the LTR backup. - - ```azurecli - az sql db ltr-backup restore \ - --dest-database targetdb \ - --dest-server myserver \ - --dest-resource-group mygroup \ - --backup-id $get_backup_id - ``` - -> [!IMPORTANT] -> To restore from an LTR backup after the server or resource group has been deleted, you must have permissions scoped to the server's subscription and that subscription must be active. You must also omit the optional -ResourceGroupName parameter. - -> [!NOTE] -> From here, you can connect to the restored database using SQL Server Management Studio to perform needed tasks, like database swapping. See [point in time restore](recovery-using-backups.md#point-in-time-restore). - -# [PowerShell](#tab/powershell) - -### View LTR policies - -This example shows how to list the LTR policies within a server. 
- -```powershell -# get all LTR policies within a server -$ltrPolicies = Get-AzSqlDatabase -ResourceGroupName $resourceGroup -ServerName $serverName | ` - Get-AzSqlDatabaseLongTermRetentionPolicy - -# get the LTR policy of a specific database -$ltrPolicies = Get-AzSqlDatabaseBackupLongTermRetentionPolicy -ServerName $serverName -DatabaseName $dbName ` - -ResourceGroupName $resourceGroup -``` - -### Clear an LTR policy - -This example shows how to clear an LTR policy from a database. - -```powershell -Set-AzSqlDatabaseBackupLongTermRetentionPolicy -ServerName $serverName -DatabaseName $dbName ` - -ResourceGroupName $resourceGroup -RemovePolicy -``` - -### View LTR backups - -This example shows how to list the LTR backups within a server. - -```powershell -# get the list of all LTR backups in a specific Azure region -# backups are grouped by the logical database id, within each group they are ordered by the timestamp, the earliest backup first -$ltrBackups = Get-AzSqlDatabaseLongTermRetentionBackup -Location $server.Location - -# get the list of LTR backups from the Azure region under the named server -$ltrBackups = Get-AzSqlDatabaseLongTermRetentionBackup -Location $server.Location -ServerName $serverName - -# get the LTR backups for a specific database from the Azure region under the named server -$ltrBackups = Get-AzSqlDatabaseLongTermRetentionBackup -Location $server.Location -ServerName $serverName -DatabaseName $dbName - -# list LTR backups only from live databases (you have option to choose All/Live/Deleted) -$ltrBackups = Get-AzSqlDatabaseLongTermRetentionBackup -Location $server.Location -DatabaseState Live - -# only list the latest LTR backup for each database -$ltrBackups = Get-AzSqlDatabaseLongTermRetentionBackup -Location $server.Location -ServerName $serverName -OnlyLatestPerDatabase -``` - -### Delete LTR backups - -This example shows how to delete an LTR backup from the list of backups. 
- -```powershell -# remove the earliest backup -$ltrBackup = $ltrBackups[0] -Remove-AzSqlDatabaseLongTermRetentionBackup -ResourceId $ltrBackup.ResourceId -``` - -> [!IMPORTANT] -> Deleting LTR backup is non-reversible. To delete an LTR backup after the server has been deleted you must have Subscription scope permission. You can set up notifications about each delete in Azure Monitor by filtering for operation 'Deletes a long term retention backup'. The activity log contains information on who made the request and when. See [Create activity log alerts](../../azure-monitor/alerts/alerts-activity-log.md) for detailed instructions. - -### Restore from LTR backups - -This example shows how to restore from an LTR backup. Note: this interface did not change, but the resource ID parameter now requires the LTR backup resource ID. - -```powershell -# restore a specific LTR backup as a P1 database on the server $serverName of the resource group $resourceGroup -Restore-AzSqlDatabase -FromLongTermRetentionBackup -ResourceId $ltrBackup.ResourceId -ServerName $serverName -ResourceGroupName $resourceGroup ` - -TargetDatabaseName $dbName -ServiceObjectiveName P1 -``` - -> [!IMPORTANT] -> To restore from an LTR backup after the server or resource group has been deleted, you must have permissions scoped to the server's subscription and that subscription must be active. You must also omit the optional -ResourceGroupName parameter. - -> [!NOTE] -> From here, you can connect to the restored database using SQL Server Management Studio to perform needed tasks, such as to extract a bit of data from the restored database to copy into the existing database or to delete the existing database and rename the restored database to the existing database name. See [point in time restore](recovery-using-backups.md#point-in-time-restore). - ---- - -## Limitations -- When restoring from an LTR backup, the read scale property is disabled. 
To enable read scale on the restored database, update the database after it has been created. -- You need to specify the target service level objective when restoring from an LTR backup that was created when the database was in an elastic pool. - -## Next steps - -- To learn about service-generated automatic backups, see [automatic backups](automated-backups-overview.md) -- To learn about long-term backup retention, see [long-term backup retention](long-term-retention-overview.md) diff --git a/articles/azure-sql/database/long-term-retention-overview.md b/articles/azure-sql/database/long-term-retention-overview.md deleted file mode 100644 index 0fd6250727aaf..0000000000000 --- a/articles/azure-sql/database/long-term-retention-overview.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: "Long-term backup retention" -titleSuffix: Azure SQL Database & Azure SQL Managed Instance -description: Learn how Azure SQL Database & Azure SQL Managed Instance support storing full database backups for up to 10 years via the long-term retention policy. -services: sql-database -ms.service: sql-database -ms.subservice: backup-restore -ms.custom: -ms.devlang: -ms.topic: conceptual -author: SudhirRaparla -ms.author: nvraparl -ms.reviewer: kendralittle, mathoma -ms.date: 07/13/2021 ---- -# Long-term retention - Azure SQL Database and Azure SQL Managed Instance - -Many applications have regulatory, compliance, or other business purposes that require you to retain database backups beyond the 7-35 days provided by Azure SQL Database and Azure SQL Managed Instance [automatic backups](automated-backups-overview.md). By using the long-term retention (LTR) feature, you can store specified SQL Database and SQL Managed Instance full backups in Azure Blob storage with [configured redundancy](automated-backups-overview.md#backup-storage-redundancy) for up to 10 years. LTR backups can then be restored as a new database. 
- -Long-term retention can be enabled for Azure SQL Database and for Azure SQL Managed Instance. This article provides a conceptual overview of long-term retention. To configure long-term retention, see [Configure Azure SQL Database LTR](long-term-backup-retention-configure.md) and [Configure Azure SQL Managed Instance LTR](../managed-instance/long-term-backup-retention-configure.md). - -> [!NOTE] -> You can use SQL Agent jobs to schedule [copy-only database backups](/sql/relational-databases/backup-restore/copy-only-backups-sql-server) as an alternative to LTR beyond 35 days. - - - -## How long-term retention works - -Long-term backup retention (LTR) leverages the full database backups that are [automatically created](automated-backups-overview.md) to enable point in time restore (PITR). If an LTR policy is configured, these backups are copied to different blobs for long-term storage. The copy is a background job that has no performance impact on the database workload. The LTR policy for each database in SQL Database can also specify how frequently the LTR backups are created. - -To enable LTR, you can define a policy using a combination of four parameters: weekly backup retention (W), monthly backup retention (M), yearly backup retention (Y), and week of year (WeekOfYear). If you specify W, one backup every week will be copied to the long-term storage. If you specify M, the first backup of each month will be copied to the long-term storage. If you specify Y, one backup during the week specified by WeekOfYear will be copied to the long-term storage. If the specified WeekOfYear is in the past when the policy is configured, the first LTR backup will be created in the following year. Each backup will be kept in the long-term storage according to the policy parameters that are configured when the LTR backup is created. - -> [!NOTE] -> Any change to the LTR policy applies only to future backups. 
For example, if weekly backup retention (W), monthly backup retention (M), or yearly backup retention (Y) is modified, the new retention setting will only apply to new backups. The retention of existing backups will not be modified. If your intention is to delete old LTR backups before their retention period expires, you will need to [manually delete the backups](./long-term-backup-retention-configure.md#delete-ltr-backups). -> - -Examples of the LTR policy: - -- W=0, M=0, Y=5, WeekOfYear=3 - - The third full backup of each year will be kept for five years. - -- W=0, M=3, Y=0 - - The first full backup of each month will be kept for three months. - -- W=12, M=0, Y=0 - - Each weekly full backup will be kept for 12 weeks. - -- W=6, M=12, Y=10, WeekOfYear=20 - - Each weekly full backup will be kept for six weeks. Except first full backup of each month, which will be kept for 12 months. Except the full backup taken on 20th week of year, which will be kept for 10 years. - -The following table illustrates the cadence and expiration of the long-term backups for the following policy: - -W=12 weeks (84 days), M=12 months (365 days), Y=10 years (3650 days), WeekOfYear=20 (week after May 13) - - ![ltr example](./media/long-term-retention-overview/ltr-example.png) - - -If you modify the above policy and set W=0 (no weekly backups), Azure only retains the monthly and yearly backups. No weekly backups are stored under the LTR policy. The storage amount needed to keep these backups reduces accordingly. - -> [!IMPORTANT] -> The timing of individual LTR backups is controlled by Azure. You cannot manually create an LTR backup or control the timing of the backup creation. After configuring an LTR policy, it may take up to 7 days before the first LTR backup will show up on the list of available backups. -> -> If you delete a server or a managed instance, all databases on that server or managed instance are also deleted and can't be recovered. 
You can't restore a deleted server or managed instance. However, if you had configured LTR for a database or managed instance, LTR backups are not deleted, and they can be used to restore databases on a different server or managed instance in the same subscription, to a point in time when an LTR backup was taken. - - -## Geo-replication and long-term backup retention - -If you're using active geo-replication or failover groups as your business continuity solution, you should prepare for eventual failovers and configure the same LTR policy on the secondary database or instance. Your LTR storage cost won't increase as backups aren't generated from the secondaries. The backups are only created when the secondary becomes primary. It ensures non-interrupted generation of the LTR backups when the failover is triggered and the primary moves to the secondary region. - -> [!NOTE] -> When the original primary database recovers from an outage that caused the failover, it will become a new secondary. Therefore, the backup creation will not resume and the existing LTR policy will not take effect until it becomes the primary again. - - -## Configure long-term backup retention - -You can configure long-term backup retention using the Azure portal and PowerShell for Azure SQL Database and Azure SQL Managed Instance. To restore a database from the LTR storage, you can select a specific backup based on its timestamp. The database can be restored to any existing server or managed instance under the same subscription as the original database. - -To learn how to configure long-term retention or restore a database from backup for SQL Database using the Azure portal or PowerShell, see [Manage Azure SQL Database long-term backup retention](long-term-backup-retention-configure.md). 
- -To learn how to configure long-term retention or restore a database from backup for SQL Managed Instance using the Azure portal or PowerShell, see [Manage Azure SQL Managed Instance long-term backup retention](../managed-instance/long-term-backup-retention-configure.md). - -## Next steps - -Because database backups protect data from accidental corruption or deletion, they're an essential part of any business continuity and disaster recovery strategy. - -- To learn about the other SQL Database business-continuity solutions, see [Business continuity overview](business-continuity-high-availability-disaster-recover-hadr-overview.md). -- To learn about service-generated automatic backups, see [automatic backups](../database/automated-backups-overview.md) diff --git a/articles/azure-sql/database/maintenance-window-configure.md b/articles/azure-sql/database/maintenance-window-configure.md deleted file mode 100644 index 7b9582eaf95a9..0000000000000 --- a/articles/azure-sql/database/maintenance-window-configure.md +++ /dev/null @@ -1,420 +0,0 @@ ---- -title: Configure maintenance window -description: Learn how to set the time when planned maintenance should be performed on your Azure SQL databases, elastic pools, and managed instance databases. -services: sql-database -ms.service: sql-db-mi -ms.subservice: deployment-configuration -ms.topic: how-to -author: scott-kim-sql -ms.author: scottkim -ms.reviewer: kendralittle, mathoma -ms.date: 03/07/2022 ---- -# Configure maintenance window - -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Configure the [maintenance window](maintenance-window.md) for an Azure SQL database, elastic pool, or Azure SQL Managed Instance database during resource creation, or anytime after a resource is created. - -The *System default* maintenance window is 5PM to 8AM daily (local time of the Azure region the resource is located) to avoid peak business hours interruptions. 
If the *System default* maintenance window is not the best time, select one of the other available maintenance windows. - -The ability to change to a different maintenance window is not available for every service level or in every region. For details on feature availability, see [Maintenance window availability](maintenance-window.md#feature-availability). - -> [!Important] -> Configuring maintenance window is a long running asynchronous operation, similar to changing the service tier of the Azure SQL resource. The resource is available during the operation, except a short reconfiguration that happens at the end of the operation and typically lasts up to 8 seconds even in case of interrupted long-running transactions. To minimize the impact of the reconfiguration you should perform the operation outside of the peak hours. - -## Configure maintenance window during database creation - -# [Portal](#tab/azure-portal) - -To configure the maintenance window when you create a database, elastic pool, or managed instance, set the desired **Maintenance window** on the **Additional settings** page. - -### Set the maintenance window while creating a single database or elastic pool - -For step-by-step information on creating a new database or pool, see [Create an Azure SQL Database single database](single-database-create-quickstart.md). - - :::image type="content" source="media/maintenance-window-configure/additional-settings.png" alt-text="Create database additional settings tab"::: - -### Set the maintenance window while creating a managed instance - -For step-by-step information on creating a new managed instance, see [Create an Azure SQL Managed Instance](../managed-instance/instance-create-quickstart.md). 
- - :::image type="content" source="media/maintenance-window-configure/additional-settings-mi.png" alt-text="Create managed instance additional settings tab"::: - -# [PowerShell](#tab/azure-powershell) - -The following examples show how to configure the maintenance window using Azure PowerShell. You can [install Azure PowerShell](/powershell/azure/install-az-ps), or use the Azure Cloud Shell. - -### Launch Azure Cloud Shell - -The Azure Cloud Shell is a free interactive shell that you can use to run the steps in this article. It has common Azure tools preinstalled and configured to use with your account. - -To open the Cloud Shell, just select **Try it** from the upper right corner of a code block. You can also launch Cloud Shell in a separate browser tab by going to [https://shell.azure.com](https://shell.azure.com). - -When Cloud Shell opens, verify that **PowerShell** is selected for your environment. Subsequent sessions will use Azure CLI in a Bash environment, Select **Copy** to copy the blocks of code, paste it into the Cloud Shell, and press **Enter** to run it. - -### Discover available maintenance windows - -When setting the maintenance window, each region has its own maintenance window options that correspond to the timezone for the region the database or pool is located. - -#### Discover SQL Database and elastic pool maintenance windows - -The following example returns the available maintenance windows for the *eastus2* region using the [Get-AzMaintenancePublicConfiguration](/powershell/module/az.maintenance/get-azmaintenancepublicconfiguration) cmdlet. For databases and elastic pools, set `MaintenanceScope` to `SQLDB`. 
- - ```powershell-interactive - $location = "eastus2" - - Write-Host "Available maintenance schedules in ${location}:" - $configurations = Get-AzMaintenancePublicConfiguration - $configurations | ?{ $_.Location -eq $location -and $_.MaintenanceScope -eq "SQLDB"} - ``` - -#### Discover SQL Managed Instance maintenance windows - -The following example returns the available maintenance windows for the *eastus2* region using the [Get-AzMaintenancePublicConfiguration](/powershell/module/az.maintenance/get-azmaintenancepublicconfiguration) cmdlet. For managed instances, set `MaintenanceScope` to `SQLManagedInstance`. - - ```powershell-interactive - $location = "eastus2" - - Write-Host "Available maintenance schedules in ${location}:" - $configurations = Get-AzMaintenancePublicConfiguration - $configurations | ?{ $_.Location -eq $location -and $_.MaintenanceScope -eq "SQLManagedInstance"} - ``` - -### Set the maintenance window while creating a single database - -The following example creates a new database and sets the maintenance window using the [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase) cmdlet. The `-MaintenanceConfigurationId` must be set to a valid value for your database's region. To get valid values for your region, see [Discover available maintenance windows](#discover-available-maintenance-windows). - - ```powershell-interactive - # Set variables for your database - $resourceGroupName = "your_resource_group_name" - $serverName = "your_server_name" - $databaseName = "your_db_name" - - # Set selected maintenance window - $maintenanceConfig = "SQL_EastUS2_DB_1" - - Write-host "Creating a gen5 2 vCore database with maintenance window ${maintenanceConfig} ..." 
- $database = New-AzSqlDatabase ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -DatabaseName $databaseName ` - -Edition GeneralPurpose ` - -ComputeGeneration Gen5 ` - -VCore 2 ` - -MaintenanceConfigurationId $maintenanceConfig - $database - ``` - -### Set the maintenance window while creating an elastic pool - -The following example creates a new elastic pool and sets the maintenance window using the [New-AzSqlElasticPool](/powershell/module/az.sql/new-azsqlelasticpool) cmdlet. The maintenance window is set on the elastic pool, so all databases in the pool have the pool's maintenance window schedule. The `-MaintenanceConfigurationId` must be set to a valid value for your pool's region. To get valid values for your region, see [Discover available maintenance windows](#discover-available-maintenance-windows). - - ```powershell-interactive - # Set variables for your pool - $resourceGroupName = "your_resource_group_name" - $serverName = "your_server_name" - $poolName = "your_pool_name" - - # Set selected maintenance window - $maintenanceConfig = "SQL_EastUS2_DB_2" - - Write-host "Creating a Standard 50 pool with maintenance window ${maintenanceConfig} ..." - $pool = New-AzSqlElasticPool ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -ElasticPoolName $poolName ` - -Edition "Standard" ` - -Dtu 50 ` - -DatabaseDtuMin 10 ` - -DatabaseDtuMax 20 ` - -MaintenanceConfigurationId $maintenanceConfig - $pool - ``` - -### Set the maintenance window while creating a managed instance - -The following example creates a new managed instance and sets the maintenance window using the [New-AzSqlInstance](/powershell/module/az.sql/new-azsqlinstance) cmdlet. The maintenance window is set on the instance, so all databases in the instance have the instance's maintenance window schedule. For `-MaintenanceConfigurationId`, the *MaintenanceConfigName* must be a valid value for your instance's region. 
To get valid values for your region, see [Discover available maintenance windows](#discover-available-maintenance-windows). - - ```powershell - New-AzSqlInstance -Name "your_mi_name" ` - -ResourceGroupName "your_resource_group_name" ` - -Location "your_mi_location" ` - -SubnetId /subscriptions/{SubID}/resourceGroups/{ResourceGroup}/providers/Microsoft.Network/virtualNetworks/{VNETName}/subnets/{SubnetName} ` - -MaintenanceConfigurationId "/subscriptions/{SubID}/providers/Microsoft.Maintenance/publicMaintenanceConfigurations/SQL_{Region}_{MaintenanceConfigName}" ` - -AsJob - ``` - -# [CLI](#tab/azure-cli) - -The following examples show how to configure the maintenance window using Azure CLI. You can [install Azure CLI](/cli/azure/install-azure-cli), or use the Azure Cloud Shell. - -Configuring the maintenance window with Azure CLI is only available for SQL Managed Instance. - -### Launch Azure Cloud Shell - -The Azure Cloud Shell is a free interactive shell that you can use to run the steps in this article. It has common Azure tools preinstalled and configured to use with your account. - -To open the Cloud Shell, just select **Try it** from the upper right corner of a code block. You can also launch Cloud Shell in a separate browser tab by going to [https://shell.azure.com](https://shell.azure.com). - -When Cloud Shell opens, verify that **Bash** is selected for your environment. Subsequent sessions will use Azure CLI in a Bash environment, Select **Copy** to copy the blocks of code, paste it into the Cloud Shell, and press **Enter** to run it. - -### Sign in to Azure - -Cloud Shell is automatically authenticated under the initial account signed-in with. Use the following script to sign in using a different subscription, replacing `` with your Azure Subscription ID. 
[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] - -```azurecli-interactive -subscription="" # add subscription here - -az account set -s $subscription # ...or use 'az login' -``` - -For more information, see [set active subscription](/cli/azure/account#az-account-set) or [log in interactively](/cli/azure/reference-index#az-login) - -### Discover available maintenance windows - -When setting the maintenance window, each region has its own maintenance window options that correspond to the timezone for the region the database or pool is located. - -#### Discover SQL Database and elastic pool maintenance windows - -The following example returns the available maintenance windows for the *eastus2* region using the [az maintenance public-configuration list -](/cli/azure/maintenance/public-configuration#az-maintenance-public-configuration-list) command. For databases and elastic pools, set `maintenanceScope` to `SQLDB`. - - ```azurecli - location="eastus2" - - az maintenance public-configuration list --query "[?location=='$location'&&contains(maintenanceScope,'SQLDB')]" - ``` - -#### Discover SQL Managed Instance maintenance windows - -The following example returns the available maintenance windows for the *eastus2* region using the [az maintenance public-configuration list -](/cli/azure/maintenance/public-configuration#az-maintenance-public-configuration-list) command. For managed instances, set `maintenanceScope` to `SQLManagedInstance`. - - ```azurecli - az maintenance public-configuration list --query "[?location=='eastus2'&&contains(maintenanceScope,'SQLManagedInstance')]" - ``` - -### Set the maintenance window while creating a single database - -The following example creates a new database and sets the maintenance window using the [az sql db create](/cli/azure/sql/db#az-sql-db-create) command. The `--maint-config-id` (or `-m`) must be set to a valid value for your database's region. 
To get valid values for your region, see [Discover available maintenance windows](#discover-available-maintenance-windows). - - ```azurecli - # Set variables for your database - resourceGroupName="your_resource_group_name" - serverName="your_server_name" - databaseName="your_db_name" - - # Set selected maintenance window - maintenanceConfig="SQL_EastUS2_DB_1" - - # Create database - az sql db create \ - --resource-group $resourceGroupName \ - --server $serverName \ - --name $databaseName \ - --edition GeneralPurpose \ - --family Gen5 \ - --capacity 2 \ - --maint-config-id $maintenanceConfig - ``` - -### Set the maintenance window while creating an elastic pool - -The following example creates a new elastic pool and sets the maintenance window using the [az sql elastic-pool create](/cli/azure/sql/elastic-pool#az-sql-elastic-pool-create) cmdlet. The maintenance window is set on the elastic pool, so all databases in the pool have the pool's maintenance window schedule. The `--maint-config-id` (or `-m`) must be set to a valid value for your pool's region. To get valid values for your region, see [Discover available maintenance windows](#discover-available-maintenance-windows). - - ```azurecli - # Set variables for your pool - resourceGroupName="your_resource_group_name" - serverName="your_server_name" - poolName="your_pool_name" - - # Set selected maintenance window - maintenanceConfig="SQL_EastUS2_DB_2" - - # Create elastic pool - az sql elastic-pool create \ - --resource-group $resourceGroupName \ - --server $serverName \ - --name $poolName \ - --edition GeneralPurpose \ - --family Gen5 \ - --capacity 2 \ - --maint-config-id $maintenanceConfig - ``` - -### Set the maintenance window while creating a managed instance - -The following example creates a new managed instance and sets the maintenance window using [az sql mi create](/cli/azure/sql/mi#az-sql-mi-create). 
The maintenance window is set on the instance, so all databases in the instance have the instance's maintenance window schedule. *MaintenanceConfigName* must be a valid value for your instance's region. To get valid values for your region, see [Discover available maintenance windows](#discover-available-maintenance-windows). - - ```azurecli - az sql mi create -g mygroup -n myinstance -l mylocation -i -u myusername -p mypassword --subnet /subscriptions/{SubID}/resourceGroups/{ResourceGroup}/providers/Microsoft.Network/virtualNetworks/{VNETName}/subnets/{SubnetName} -m /subscriptions/{SubID}/providers/Microsoft.Maintenance/publicMaintenanceConfigurations/SQL_{Region}_{MaintenanceConfigName} - ``` - ------ - -## Configure maintenance window for existing databases - -When applying a maintenance window selection to a database, a brief reconfiguration (several seconds) may be experienced in some cases as Azure applies the required changes. - -# [Portal](#tab/azure-portal) - -The following steps set the maintenance window on an existing database, elastic pool, or managed instance using the Azure portal: - -### Set the maintenance window for an existing database or elastic pool - -1. Navigate to the SQL database or elastic pool you want to set the maintenance window for. -1. In the **Settings** menu select **Maintenance**, then select the desired maintenance window. - - :::image type="content" source="media/maintenance-window-configure/maintenance.png" alt-text="SQL database Maintenance page"::: - -### Set the maintenance window for an existing managed instance - -1. Navigate to the managed instance you want to set the maintenance window for. -1. In the **Settings** menu select **Maintenance**, then select the desired maintenance window. 
- - :::image type="content" source="media/maintenance-window-configure/maintenance-mi.png" alt-text="SQL managed instance Maintenance page"::: - -# [PowerShell](#tab/azure-powershell) - -### Set the maintenance window for an existing database - -The following example sets the maintenance window on an existing database using the [Set-AzSqlDatabase](/powershell/module/az.sql/set-azsqldatabase) cmdlet. -The `-MaintenanceConfigurationId` must be set to a valid value for your database's region. To get valid values for your region, see [Discover available maintenance windows](#discover-available-maintenance-windows). - - ```powershell-interactive - # Select different maintenance window - $maintenanceConfig = "SQL_EastUS2_DB_2" - - Write-host "Changing database maintenance window to ${maintenanceConfig} ..." - $database = Set-AzSqlDatabase ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -DatabaseName $databaseName ` - -MaintenanceConfigurationId $maintenanceConfig - $database - ``` - -### Set the maintenance window on an existing elastic pool - -The following example sets the maintenance window on an existing elastic pool using the [Set-AzSqlElasticPool](/powershell/module/az.sql/set-azsqlelasticpool) cmdlet. -It's important to make sure that the `$maintenanceConfig` value is a valid value for your pool's region. To get valid values for a region, see [Discover available maintenance windows](#discover-available-maintenance-windows). - - ```powershell-interactive - # Select different maintenance window - $maintenanceConfig = "SQL_EastUS2_DB_1" - - Write-host "Changing pool maintenance window to ${maintenanceConfig} ..." 
- $pool = Set-AzSqlElasticPool ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -ElasticPoolName $poolName ` - -MaintenanceConfigurationId $maintenanceConfig - $pool - ``` - -### Set the maintenance window on an existing managed instance - -The following example sets the maintenance window on an existing managed instance using the [Set-AzSqlInstance](/powershell/module/az.sql/set-azsqlinstance) cmdlet. -It's important to make sure that the `$maintenanceConfig` value must be a valid value for your instance's region. To get valid values for a region, see [Discover available maintenance windows](#discover-available-maintenance-windows). - - ```powershell-interactive - Set-AzSqlInstance -Name "your_mi_name" ` - -ResourceGroupName "your_resource_group_name" ` - -MaintenanceConfigurationId "/subscriptions/{SubID}/providers/Microsoft.Maintenance/publicMaintenanceConfigurations/SQL_{Region}_{MaintenanceConfigName}" ` - -AsJob - ``` - -# [CLI](#tab/azure-cli) - -The following examples show how to configure the maintenance window using Azure CLI. You can [install Azure CLI](/cli/azure/install-azure-cli), or use the Azure Cloud Shell. - -### Set the maintenance window for an existing database - -The following example sets the maintenance window on an existing database using the [az sql db update](/cli/azure/sql/db#az-sql-db-update) command. The `--maint-config-id` (or `-m`) must be set to a valid value for your database's region. To get valid values for your region, see [Discover available maintenance windows](#discover-available-maintenance-windows). 
- - ```azurecli - # Select different maintenance window - maintenanceConfig="SQL_EastUS2_DB_2" - - # Update database - az sql db update \ - --resource-group $resourceGroupName \ - --server $serverName \ - --name $databaseName \ - --maint-config-id $maintenanceConfig - ``` - -### Set the maintenance window on an existing elastic pool - -The following example sets the maintenance window on an existing elastic pool using the [az sql elastic-pool update](/cli/azure/sql/elastic-pool#az-sql-elastic-pool-update) command. -It's important to make sure that the `maintenanceConfig` value is a valid value for your pool's region. To get valid values for a region, see [Discover available maintenance windows](#discover-available-maintenance-windows). - - ```azurecli - # Select different maintenance window - maintenanceConfig="SQL_EastUS2_DB_1" - - # Update pool - az sql elastic-pool update \ - --resource-group $resourceGroupName \ - --server $serverName \ - --name $poolName \ - --maint-config-id $maintenanceConfig - ``` - -### Set the maintenance window on an existing managed instance - -The following example sets the maintenance window using [az sql mi update](/cli/azure/sql/mi#az-sql-mi-update). The maintenance window is set on the instance, so all databases in the instance have the instance's maintenance window schedule. For `-MaintenanceConfigurationId`, the *MaintenanceConfigName* must be a valid value for your instance's region. To get valid values for your region, see [Discover available maintenance windows](#discover-available-maintenance-windows). - - ```azurecli - az sql mi update -g mygroup -n myinstance -m /subscriptions/{SubID}/providers/Microsoft.Maintenance/publicMaintenanceConfigurations/SQL_{Region}_{MainteanceConfigName} - ``` - ------ - -## Cleanup resources - -Be sure to delete unneeded resources after you're finished with them to avoid unnecessary charges. - -# [Portal](#tab/azure-portal) - -1. Navigate to the SQL database or elastic pool you no longer need. 
-1. On the **Overview** menu, select the option to delete the resource. - -# [PowerShell](#tab/azure-powershell) - - ```powershell-interactive - # Delete database - Remove-AzSqlDatabase ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -DatabaseName $databaseName - - # Delete elastic pool - Remove-AzSqlElasticPool ` - -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -ElasticPoolName $poolName - ``` - -# [CLI](#tab/azure-cli) - - ```azurecli - az sql db delete \ - --resource-group $resourceGroupName \ - --server $serverName \ - --name $databaseName - - az sql elastic-pool delete \ - --resource-group $resourceGroupName \ - --server $serverName \ - --name $poolName - ``` - ------ - -## Next steps - -- To learn more about maintenance window, see [Maintenance window](maintenance-window.md). -- For more information, see [Maintenance window FAQ](maintenance-window-faq.yml). -- To learn about optimizing performance, see [Monitoring and performance tuning in Azure SQL Database and Azure SQL Managed Instance](monitor-tune-overview.md). diff --git a/articles/azure-sql/database/maintenance-window-faq.yml b/articles/azure-sql/database/maintenance-window-faq.yml deleted file mode 100644 index 1928128143ff8..0000000000000 --- a/articles/azure-sql/database/maintenance-window-faq.yml +++ /dev/null @@ -1,122 +0,0 @@ -### YamlMime:FAQ -metadata: - title: Maintenance Window FAQ - description: FAQ on how the Azure SQL Database and managed instance maintenance window can be configured. 
- services: sql-database - ms.service: sql-db-mi - ms.subservice: service-overview - ms.topic: faq - author: WilliamDAssafMSFT - ms.author: wiassaf - ms.reviewer: kendralittle, mathoma - ms.custom: - ms.date: 03/07/2022 - -title: Maintenance window FAQ -summary: This article answers frequently asked questions about the maintenance window for [Azure SQL database](sql-database-paas-overview.md) and [SQL managed instance](../managed-instance/sql-managed-instance-paas-overview.md). -sections: - - name: General - questions: - - question: What is the maintenance window feature? - answer: | - The maintenance window feature provides you with the ability to onboard Azure SQL resource to prescheduled time blocks outside of business hours. For more information, see [Maintenance window schedules](maintenance-window.md#gain-more-predictability-with-maintenance-window). - - - question: What is the default maintenance policy if I don't choose any specific window? - answer: | - Maintenance events will occur during the default window 5PM to 8AM local time, Monday - Sunday. - - - question: Users work in a different time zone than the Azure data center. Which time zone is local? - answer: | - Local time is determined by the location of Azure region that hosts the resource and may observe daylight saving time in accordance with local time zone definition. It isn't determined by the time zone configured on SQL database (always UTC) or managed instance. - - - question: Can I choose a specific time/day for the maintenance window? - answer: | - No, you can choose between pre-scheduled weekday or weekend windows. The maintenance can happen any time or day within the window. - - - question: What happens once I choose a maintenance window? - answer: | - Configuring maintenance window is a long running asynchronous operation, similar to changing the service tier of your Azure SQL resource. 
The resource is available during the process, except a short reconfiguration that happens at the end of the operation and typically lasts up to 8 seconds even in case of interrupted long-running transactions. To minimize the impact of the reconfiguration, you should perform the operation outside of the peak hours. In case of managed instance, IP address of the instance will change. - - - question: What types of updates are typically performed during a maintenance window? - answer: | - The maintenance event may contain updates for hardware, firmware, operating system, satellite software components, and SQL engine. They're typically combined into a single batch to minimize the incidence of maintenance events. - - - question: What can I expect during a maintenance event? - answer: | - During a maintenance update, databases are fully available and accessible but some of the maintenance updates require a reconfiguration as Azure takes SQL Databases offline for a short time to apply the maintenance updates (generally a few seconds in duration). Planned maintenance updates occur once every 35 days on average, which means customer can expect approximately one planned maintenance event per month per Azure SQL Database and managed instance, and only during the maintenance window slots selected by the customer. - - - question: Are there any prerequisites for configuring maintenance window? - answer: | - In case of managed instance [additional IP addresses](../managed-instance/vnet-subnet-determine-size.md#update-scenarios) are needed temporarily, as in scaling vCores scenario for corresponding service tier. - - - question: In which regions is choosing a maintenance window available? - answer: | - For a list of available regions, see [Maintenance window availability](maintenance-window.md#feature-availability). - - - question: I don’t see my region in the available regions list, what can I do? 
- answer: | - Contact [azsqlcmfeedback@microsoft.com](mailto:azsqlcmfeedback@microsoft.com) to share the feedback and check the plans for your region. - - - question: What is the pricing for maintenance window? - answer: | - Configuring and using maintenance window is free of charge to all eligible Azure subscription types, see [Maintenance window feature availability](maintenance-window.md#feature-availability). - - - question: Would I receive a notification of the maintenance window? - answer: | - You can opt in to receive notification 24 hours prior to the maintenance event, immediately before maintenance starts, and when the maintenance window is completed. The Resource health center can be checked for more information. To receive emails, advance notifications must be configured. For more information, see [Advance notifications](advance-notifications.md). - - - question: In which service level objectives (SLOs) can I choose a maintenance window? - answer: | - Choosing a maintenance window is available in most SLOs with some exceptions, see [maintenance window supported service level objectives](maintenance-window.md#supported-service-level-objectives). - - - question: If I've selected a maintenance window for a resource, and I downscale to a service level objective (SLO) where choosing a maintenance window isn't supported, what happens to my database? - answer: | - In this case, the maintenance window would revert to the default option, which is 5PM to 8AM local time. - - - question: Can I cancel or postpone a planned maintenance event based on the received notification? - answer: | - No. Cancelling or postponing an upcoming maintenance event isn't supported. Notifications help you to prepare for the event. - - - question: How long does it take to process a maintenance window schedule change? - answer: | - You can choose a different maintenance window at any time, but changes may take up to 24 hours to take effect. 
- - - question: What happens when a maintenance event fails? - answer: | - Although rare, failures or interruptions during a maintenance event can occur. In the event of a failure, changes are rolled back and the maintenance will be rescheduled to another time. - - - question: How do I validate that my databases are set up for maintenance window? - answer: | - Under settings of the Azure resource, the maintenance tab allows you to review and change the current maintenance plan schedule. You can also review and set the maintenance window schedules for multiple resources via PowerShell, CLI, or Azure API. - - - question: Can I configure a different maintenance window for each Azure SQL database in an elastic pool? - answer: | - If the database is part of an elastic pool, the maintenance window configuration of the elastic pool will be applied. Single databases outside of an elastic pool can have their own maintenance window configuration. - - - question: What are supported options to configure a maintenance window for an existing Azure SQL Database or SQL managed instance? - answer: | - Azure portal, PowerShell, Azure CLI, and REST API. - - - question: Can I configure a maintenance window during database restore, recovery, copy, import, or GeoDR scenarios? - answer: | - Not at this moment. The maintenance window can be configured once database is created. - - - question: I have the default maintenance window selected. Can I enable advance notifications for that maintenance? - answer: | - No, advance notifications can't be configured for the default maintenance window option. When choosing a maintenance window, choose an option other than **System default** to configure and enable advance notifications. - - - question: I'm not able to set up advance notifications for planned maintenance, will I still see planned maintenance event in Service Health dashboard? 
- answer: | - No, if advance notifications aren't configured, Service Health won't show the planned maintenance events. - - - question: Does advance notification cover all maintenance events? - answer: | - No, advance notifications focus only on planned maintenance events that can cause a service interruption. - - -additionalContent: | - ## See Also - - [Maintenance window](maintenance-window.md) - - [Configure maintenance window](maintenance-window-configure.md) - - [Configure maintenance window notifications](advance-notifications.md) - - [Plan for Azure maintenance events in Azure SQL Database and Azure SQL Managed Instance](planned-maintenance.md) diff --git a/articles/azure-sql/database/maintenance-window.md b/articles/azure-sql/database/maintenance-window.md deleted file mode 100644 index 410b9db962b55..0000000000000 --- a/articles/azure-sql/database/maintenance-window.md +++ /dev/null @@ -1,212 +0,0 @@ ---- -title: Maintenance Window -description: Understand how the Azure SQL Database and managed instance maintenance window can be configured. -services: sql-database -ms.service: sql-db-mi -ms.subservice: service-overview -ms.topic: conceptual -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, mathoma, urosmil -ms.custom: references_regions -ms.date: 04/19/2022 ---- - -# Maintenance window -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -The maintenance window feature allows you to configure maintenance schedule for [Azure SQL Database](sql-database-paas-overview.md) and [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md) resources making impactful maintenance events predictable and less disruptive for your workload. - -> [!Note] -> The maintenance window feature only protects from planned impact from upgrades or scheduled maintenance. 
It does not protect from all failover causes; exceptions that may cause short connection interruptions outside of a maintenance window include hardware failures, cluster load balancing, and database reconfigurations due to events like a change in database Service Level Objective. - -[Advance notifications (Preview)](advance-notifications.md) are available for databases configured to use a non-default maintenance window. Advance notifications enable customers to configure notifications to be sent up to 24 hours in advance of any planned event. - -## Overview - -Azure periodically performs [planned maintenance](planned-maintenance.md) of SQL Database and SQL managed instance resources. During Azure SQL maintenance event, databases are fully available but can be subject to short reconfigurations within respective availability SLAs for [SQL Database](https://azure.microsoft.com/support/legal/sla/azure-sql-database) and [SQL managed instance](https://azure.microsoft.com/support/legal/sla/azure-sql-sql-managed-instance). - -Maintenance window is intended for production workloads that are not resilient to database or instance reconfigurations and cannot absorb short connection interruptions caused by planned maintenance events. By choosing a maintenance window you prefer, you can minimize the impact of planned maintenance as it will be occurring outside of your peak business hours. Resilient workloads and non-production workloads may rely on Azure SQL's default maintenance policy. - -The maintenance window is free of charge and can be configured on creation or for existing Azure SQL resources. It can be configured using the Azure portal, PowerShell, CLI, or Azure API. - -> [!Important] -> Configuring maintenance window is a long running asynchronous operation, similar to changing the service tier of the Azure SQL resource. 
The resource is available during the operation, except a short reconfiguration that happens at the end of the operation and typically lasts up to 8 seconds even in case of interrupted long-running transactions. To minimize the impact of the reconfiguration you should perform the operation outside of the peak hours. - -### Gain more predictability with maintenance window - -By default, Azure SQL maintenance policy blocks most impactful updates during the period **8AM to 5PM local time every day** to avoid any disruptions during typical peak business hours. Local time is determined by the location of [Azure region](https://azure.microsoft.com/global-infrastructure/geographies/) that hosts the resource and may observe daylight saving time in accordance with local time zone definition. - -You can further adjust the maintenance updates to a time suitable to your Azure SQL resources by choosing from two additional maintenance window slots: - -* **Weekday** window: 10:00 PM to 6:00 AM local time, Monday - Thursday -* **Weekend** window: 10:00 PM to 6:00 AM local time, Friday - Sunday - -Maintenance window days listed indicate the starting day of each eight-hour maintenance window. For example, "10:00 PM to 6:00 AM local time, Monday – Thursday" means that the maintenance windows start at 10:00 PM local time on each day (Monday through Thursday) and complete at 6:00 AM local time the following day (Tuesday through Friday). - -Once the maintenance window selection is made and service configuration completed, planned maintenance will occur only during the window of your choice. While maintenance events typically complete within a single window, some of them may span two or more adjacent windows. - -> [!Important] -> In very rare circumstances where any postponement of action could cause serious impact, like applying critical security patch, configured maintenance window may be temporarily overriden. 
- -## Advance notifications - -Maintenance notifications can be configured to alert you of upcoming planned maintenance events for your Azure SQL Database and Azure SQL Managed Instance. The alerts arrive 24 hours in advance, at the time of maintenance, and when the maintenance is complete. For more information, see [Advance Notifications](advance-notifications.md). - -## Feature availability - -### Supported subscription types - -Configuring and using maintenance window is available for the following [offer types](https://azure.microsoft.com/support/legal/offer-details/): Pay-As-You-Go, Cloud Solution Provider (CSP), Microsoft Enterprise Agreement, or Microsoft Customer Agreement. - -Offers restricted to dev/test usage only are not eligible (like Pay-As-You-Go Dev/Test or Enterprise Dev/Test as examples). - -> [!Note] -> An Azure offer is the type of the Azure subscription you have. For example, a subscription with [pay-as-you-go rates](https://azure.microsoft.com/offers/ms-azr-0003p/), [Azure in Open](https://azure.microsoft.com/offers/ms-azr-0111p/), and [Visual Studio Enterprise](https://azure.microsoft.com/offers/ms-azr-0063p/) are all Azure offers. Each offer or plan has different terms and benefits. Your offer or plan is shown on the subscription's Overview. For more information on switching your subscription to a different offer, see [Change your Azure subscription to a different offer](../../cost-management-billing/manage/switch-azure-offer.md). 
- -### Supported service level objectives - -Choosing a maintenance window other than the default is available on all SLOs **except for**: -* Instance pools -* Legacy Gen4 vCore -* Basic, S0 and S1 -* DC, Fsv2, M-series - -### Azure region support - -Choosing a maintenance window other than the default is currently available in the following regions: - -| Azure Region | SQL Managed Instance | SQL Database | SQL Database in an [Azure Availability Zone](high-availability-sla.md) | -|:---|:---|:---|:---| -| Australia Central 1 | Yes | | | -| Australia Central 2 | Yes | | | -| Australia East | Yes | Yes | Yes | -| Australia Southeast | Yes | Yes | | -| Brazil South | Yes | Yes | | -| Brazil Southeast | Yes | Yes | | -| Canada Central | Yes | Yes | Yes | -| Canada East | Yes | Yes | | -| Central India | Yes | Yes | | -| Central US | Yes | Yes | Yes | -| China East 2 |Yes | Yes || -| China North 2 |Yes|Yes || -| East US | Yes | Yes | Yes | -| East US 2 | Yes | Yes | Yes | -| East Asia | Yes | Yes | | -| France Central | Yes | Yes | | -| France South | Yes | Yes | | -| Germany West Central | Yes | Yes | | -| Germany North | Yes | | | -| Japan East | Yes | Yes | Yes | -| Japan West | Yes | Yes | | -| Korea Central | Yes | | | -| Korea South | Yes | | | -| North Central US | Yes | Yes | | -| North Europe | Yes | Yes | Yes | -| Norway East | Yes | | | -| Norway West | Yes | | | -| South Africa North | Yes | | | -| South Africa West | Yes | | | -| South Central US | Yes | Yes | Yes | -| South India | Yes | Yes | | -| Southeast Asia | Yes | Yes | Yes | -| Switzerland North | Yes | Yes | | -| Switzerland West | Yes | | | -| UAE Central | Yes | | | -| UAE North | Yes | Yes | | -| UK South | Yes | Yes | Yes | -| UK West | Yes | Yes | | -| US Gov Arizona | Yes | | | -| US Gov Texas| Yes | Yes | | -| US Gov Virginia | Yes | Yes | | -| West Central US | Yes | Yes | | -| West Europe | Yes | Yes | Yes | -| West India | Yes | | | -| West US | Yes | Yes | | -| West US 2 | Yes | Yes | 
Yes | -| West US 3 | Yes | | | - - -## Gateway maintenance - -To get the maximum benefit from maintenance windows, make sure your client applications are using the redirect connection policy. Redirect is the recommended connection policy, where clients establish connections directly to the node hosting the database, leading to reduced latency and improved throughput. - -* In Azure SQL Database, any connections using the proxy connection policy could be affected by both the chosen maintenance window and a gateway node maintenance window. However, client connections using the recommended redirect connection policy are unaffected by a gateway node maintenance reconfiguration. - -* In Azure SQL Managed Instance, the gateway nodes are hosted [within the virtual cluster](/azure/azure-sql/managed-instance/connectivity-architecture-overview#virtual-cluster-connectivity-architecture) and have the same maintenance window as the managed instance, but using the redirect connection policy is still recommended to minimize number of disruptions during the maintenance event. - -For more on the client connection policy in Azure SQL Database, see [Azure SQL Database Connection policy](../database/connectivity-architecture.md#connection-policy). - -For more on the client connection policy in Azure SQL Managed Instance, see [Azure SQL Managed Instance connection types](/azure/azure-sql/managed-instance/connection-types-overview). - -## Considerations for Azure SQL Managed Instance - -Azure SQL Managed Instance consists of service components hosted on a dedicated set of isolated virtual machines that run inside the customer's virtual network subnet. These virtual machines form [virtual cluster(s)](../managed-instance/connectivity-architecture-overview.md#high-level-connectivity-architecture) that can host multiple managed instances. 
Maintenance window configured on instances of one subnet can influence the number of virtual clusters within the subnet, distribution of instances among virtual clusters, and virtual cluster management operations. This may require consideration of a few effects. - -### Maintenance window configuration is a long running operation -All instances hosted in a virtual cluster share the maintenance window. By default, all managed instances are hosted in the virtual cluster with the default maintenance window. Specifying another maintenance window for a managed instance during its creation or afterwards means that it must be placed in a virtual cluster with the corresponding maintenance window. If there is no such virtual cluster in the subnet, a new one must be created first to accommodate the instance. Accommodating an additional instance in the existing virtual cluster may require a cluster resize. Both operations contribute to the duration of configuring maintenance window for a managed instance. -Expected duration of configuring maintenance window on managed instance can be calculated using [estimated duration of instance management operations](../managed-instance/management-operations-overview.md#duration). - -> [!Important] -> A short reconfiguration happens at the end of the operation of configuring maintenance window and typically lasts up to 8 seconds even in case of interrupted long-running transactions. To minimize the impact of the reconfiguration, initiate the operation outside of the peak hours. - -### IP address space requirements -Each new virtual cluster in a subnet requires additional IP addresses according to the [virtual cluster IP address allocation](../managed-instance/vnet-subnet-determine-size.md#determine-subnet-size). Changing the maintenance window for an existing managed instance also requires [temporary additional IP capacity](../managed-instance/vnet-subnet-determine-size.md#update-scenarios) as in scaling vCores scenario for corresponding service tier. 
- -### IP address change -Configuring and changing maintenance window causes change of the IP address of the instance, within the IP address range of the subnet. - -> [!Important] -> Make sure that NSG and firewall rules won't block data traffic after IP address change. - -### Serialization of virtual cluster management operations - -Operations affecting the virtual cluster, like service upgrades and virtual cluster resize (adding new or removing unneeded compute nodes) are serialized. In other words, a new virtual cluster management operation cannot start until the previous one is completed. In case that maintenance window closes before the ongoing service upgrade or maintenance operation is completed, any other virtual cluster management operations submitted in the meantime will be put on hold until the next maintenance window opens and the service upgrade or maintenance operation completes. It is not common for a maintenance operation to take longer than a single window per virtual cluster, but it can happen in case of very complex maintenance operations. - -The serialization of virtual cluster management operations is general behavior that applies to the default maintenance policy as well. With a maintenance window schedule configured, the period between two adjacent windows can be a few days long. Submitted operations can also be on hold for a few days if the maintenance operation spans two windows. That is a very rare case, but creation of new instances or resize of the existing instances (if additional compute nodes are needed) may be blocked during this period. - -## Retrieving list of maintenance events - -[Azure Resource Graph](../../governance/resource-graph/overview.md) is an Azure service designed to extend Azure Resource Management. The Azure Resource Graph Explorer provides efficient and performant resource exploration with the ability to query at scale across a given set of subscriptions so that you can effectively govern your environment. 
- -You can use the Azure Resource Graph Explorer to query for maintenance events. For an introduction on how to run these queries, see [Quickstart: Run your first Resource Graph query using Azure Resource Graph Explorer](../../governance/resource-graph/first-query-portal.md). - -To check for the maintenance events for all SQL databases in your subscription, use the following sample query in Azure Resource Graph Explorer: - -```kusto -servicehealthresources -| where type =~ 'Microsoft.ResourceHealth/events' -| extend impact = properties.Impact -| extend impactedService = parse_json(impact[0]).ImpactedService -| where impactedService =~ 'SQL Database' -| extend eventType = properties.EventType, status = properties.Status, description = properties.Title, trackingId = properties.TrackingId, summary = properties.Summary, priority = properties.Priority, impactStartTime = todatetime(tolong(properties.ImpactStartTime)), impactMitigationTime = todatetime(tolong(properties.ImpactMitigationTime)) -| where eventType == 'PlannedMaintenance' -| order by impactStartTime desc -``` - -To check for the maintenance events for all managed instances in your subscription, use the following sample query in Azure Resource Graph Explorer: - -```kusto -servicehealthresources -| where type =~ 'Microsoft.ResourceHealth/events' -| extend impact = properties.Impact -| extend impactedService = parse_json(impact[0]).ImpactedService -| where impactedService =~ 'SQL Managed Instance' -| extend eventType = properties.EventType, status = properties.Status, description = properties.Title, trackingId = properties.TrackingId, summary = properties.Summary, priority = properties.Priority, impactStartTime = todatetime(tolong(properties.ImpactStartTime)), impactMitigationTime = todatetime(tolong(properties.ImpactMitigationTime)) -| where eventType == 'PlannedMaintenance' -| order by impactStartTime desc -``` - -For the full reference of the sample queries and how to use them across tools like PowerShell or 
Azure CLI, visit [Azure Resource Graph sample queries for Azure Service Health](../../service-health/resource-graph-samples.md). - -## Next steps - -* [Configure maintenance window](maintenance-window-configure.md) -* [Advance notifications](advance-notifications.md) - -## Learn more - -* [Maintenance window FAQ](maintenance-window-faq.yml) -* [Azure SQL Database](sql-database-paas-overview.md) -* [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md) -* [Plan for Azure maintenance events in Azure SQL Database and Azure SQL Managed Instance](planned-maintenance.md) diff --git a/articles/azure-sql/database/manage-application-rolling-upgrade.md b/articles/azure-sql/database/manage-application-rolling-upgrade.md deleted file mode 100644 index ec716425241a0..0000000000000 --- a/articles/azure-sql/database/manage-application-rolling-upgrade.md +++ /dev/null @@ -1,153 +0,0 @@ ---- -title: Rolling application upgrades -description: Learn how to use Azure SQL Database geo-replication to support rolling upgrades of your cloud application -services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: -ms.topic: how-to -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 02/13/2019 ---- - -# Manage rolling upgrades of cloud applications by using SQL Database active geo-replication -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Learn how to use [active geo-replication](auto-failover-group-overview.md) in Azure SQL Database to enable rolling upgrades of your cloud application. Because upgrades are disruptive operations, they should be part of your business-continuity planning and design. In this article, we look at two different methods of orchestrating the upgrade process and discuss the benefits and tradeoffs of each option. 
For the purposes of this article, we refer to an application that consists of a website that's connected to a single database as its data tier. Our goal is to upgrade version 1 (V1) of the application to version 2 (V2) without any significant impact on the user experience. - -When evaluating upgrade options, consider these factors: - -* Impact on application availability during upgrades, such as how long application functions might be limited or degraded. -* Ability to roll back if the upgrade fails. -* Vulnerability of the application if an unrelated, catastrophic failure occurs during the upgrade. -* Total dollar cost. This factor includes additional database redundancy and incremental costs of the temporary components used by the upgrade process. - -## Upgrade applications that rely on database backups for disaster recovery - -If your application relies on automatic database backups and uses geo-restore for disaster recovery, it's deployed to a single Azure region. To minimize user disruption, create a staging environment in that region with all the application components involved in the upgrade. The first diagram illustrates the operational environment before the upgrade process. The endpoint `contoso.azurewebsites.net` represents a production environment of the web app. To be able to roll back the upgrade, you must create a staging environment with a fully synchronized copy of the database. Follow these steps to create a staging environment for the upgrade: - -1. Create a secondary database in the same Azure region. Monitor the secondary to see if the seeding process is complete (1). -2. Create a new environment for your web app and call it 'Staging'. It will be registered in Azure DNS with the URL `contoso-staging.azurewebsites.net` (2). - -> [!NOTE] -> These preparation steps won't impact the production environment, which can function in full-access mode. 
- -![Diagram shows the SQL Database geo-replication configuration for cloud disaster recovery.](./media/manage-application-rolling-upgrade/option1-1.png) - -When the preparation steps are complete, the application is ready for the actual upgrade. The next diagram illustrates the steps involved in the upgrade process: - -1. Set the primary database to read-only mode (3). This mode guarantees that the production environment of the web app (V1) remains read-only during the upgrade, thus preventing data divergence between the V1 and V2 database instances. -2. Disconnect the secondary database by using the planned termination mode (4). This action creates a fully synchronized, independent copy of the primary database. This database will be upgraded. -3. Turn the secondary database to read-write mode and run the upgrade script (5). - -![Diagram shows SQL Database geo-replication configuration for cloud disaster recovery that runs the upgrade script.](./media/manage-application-rolling-upgrade/option1-2.png) - -If the upgrade finishes successfully, you're now ready to switch users to the upgraded copy of the application, which becomes a production environment. Switching involves a few more steps, as illustrated in the next diagram: - -1. Activate a swap operation between production and staging environments of the web app (6). This operation switches the URLs of the two environments. Now `contoso.azurewebsites.net` points to the V2 version of the web site and the database (production environment). -2. If you no longer need the V1 version, which became a staging copy after the swap, you can decommission the staging environment (7). - -![SQL Database geo-replication configuration for cloud disaster recovery.](./media/manage-application-rolling-upgrade/option1-3.png) - -If the upgrade process is unsuccessful (for example, due to an error in the upgrade script), consider the staging environment to be compromised. 
To roll back the application to the pre-upgrade state, revert the application in the production environment to full access. The next diagram shows the reversion steps: - -1. Set the database copy to read-write mode (8). This action restores the full V1 functionality of the production copy. -2. Perform the root-cause analysis and decommission the staging environment (9). - -At this point, the application is fully functional, and you can repeat the upgrade steps. - -> [!NOTE] -> The rollback doesn't require DNS changes because you did not yet perform a swap operation. - -![Diagram shows SQL Database geo-replication configuration for cloud disaster recovery with the staging environment decommissioned.](./media/manage-application-rolling-upgrade/option1-4.png) - -The key advantage of this option is that you can upgrade an application in a single region by following a set of simple steps. The dollar cost of the upgrade is relatively low. - -The main tradeoff is that, if a catastrophic failure occurs during the upgrade, the recovery to the pre-upgrade state involves redeploying the application in a different region and restoring the database from backup by using geo-restore. This process results in significant downtime. - -## Upgrade applications that rely on database geo-replication for disaster recovery - -If your application uses active geo-replication or auto-failover groups for business continuity, it's deployed to at least two different regions. There's an active, primary database in a primary region and a read-only, secondary database in a backup region. Along with the factors mentioned at the beginning of this article, the upgrade process must also guarantee that: - -* The application remains protected from catastrophic failures at all times during the upgrade process. -* The geo-redundant components of the application are upgraded in parallel with the active components. 
- -To achieve these goals, in addition to using the Web Apps environments, you'll take advantage of Azure Traffic Manager by using a failover profile with one active endpoint and one backup endpoint. The next diagram illustrates the operational environment prior to the upgrade process. The web sites `contoso-1.azurewebsites.net` and `contoso-dr.azurewebsites.net` represent a production environment of the application with full geographic redundancy. The production environment includes the following components: - -* The production environment of the web app `contoso-1.azurewebsites.net` in the primary region (1) -* The primary database in the primary region (2) -* A standby instance of the web app in the backup region (3) -* The geo-replicated secondary database in the backup region (4) -* A Traffic Manager performance profile with an online endpoint called `contoso-1.azurewebsites.net` and an offline endpoint called `contoso-dr.azurewebsites.net` - -To make it possible to roll back the upgrade, you must create a staging environment with a fully synchronized copy of the application. Because you need to ensure that the application can quickly recover in case a catastrophic failure occurs during the upgrade process, the staging environment must be geo-redundant also. The following steps are required to create a staging environment for the upgrade: - -1. Deploy a staging environment of the web app in the primary region (6). -2. Create a secondary database in the primary Azure region (7). Configure the staging environment of the web app to connect to it. -3. Create another geo-redundant, secondary database in the backup region by replicating the secondary database in the primary region. (This method is called *chained geo-replication*.) (8). -4. Deploy a staging environment of the web app instance in the backup region (9) and configure it to connect to the geo-redundant secondary database created at (8). 
- -> [!NOTE] -> These preparation steps won't impact the application in the production environment. It will remain fully functional in read-write mode. - -![Diagram shows SQL Database geo-replication configuration for cloud disaster recovery with a fully synchronized copy of the application.](./media/manage-application-rolling-upgrade/option2-1.png) - -When the preparation steps are complete, the staging environment is ready for the upgrade. The next diagram illustrates these upgrade steps: - -1. Set the primary database in the production environment to read-only mode (10). This mode guarantees that the production database (V1) won't change during the upgrade, thus preventing the data divergence between the V1 and V2 database instances. - -```sql --- Set the production database to read-only mode -ALTER DATABASE [<database name>] -SET READ_ONLY -``` - -2. Terminate geo-replication by disconnecting the secondary (11). This action creates an independent but fully synchronized copy of the production database. This database will be upgraded. The following example uses Transact-SQL but [PowerShell](/powershell/module/az.sql/remove-azsqldatabasesecondary) is also available. - -```sql --- Disconnect the secondary, terminating geo-replication -ALTER DATABASE [<database name>] -REMOVE SECONDARY ON SERVER [<partner server>] -``` - -3. Run the upgrade script against `contoso-1-staging.azurewebsites.net`, `contoso-dr-staging.azurewebsites.net`, and the staging primary database (12). The database changes will be replicated automatically to the staging secondary. - -![Diagram shows SQL Database geo-replication configuration for cloud disaster recovery with database changes replicated to staging.](./media/manage-application-rolling-upgrade/option2-2.png) - -If the upgrade finishes successfully, you're now ready to switch users to the V2 version of the application. The next diagram illustrates the steps involved: - -1. 
Activate a swap operation between production and staging environments of the web app in the primary region (13) and in the backup region (14). V2 of the application now becomes a production environment, with a redundant copy in the backup region. -2. If you no longer need the V1 application (15 and 16), you can decommission the staging environment. - -![Diagram shows SQL Database geo-replication configuration for cloud disaster recovery with optional decommissioning of the staging environment.](./media/manage-application-rolling-upgrade/option2-3.png) - -If the upgrade process is unsuccessful (for example, due to an error in the upgrade script), consider the staging environment to be in an inconsistent state. To roll back the application to the pre-upgrade state, revert to using V1 of the application in the production environment. The required steps are shown on the next diagram: - -1. Set the primary database copy in the production environment to read-write mode (17). This action restores full V1 functionality in the production environment. -2. Perform the root-cause analysis and repair or remove the staging environment (18 and 19). - -At this point, the application is fully functional, and you can repeat the upgrade steps. - -> [!NOTE] -> The rollback doesn't require DNS changes because you didn't perform a swap operation. - -![Diagram shows SQL Database geo-replication configuration for cloud disaster recovery with the upgrade process rolled back.](./media/manage-application-rolling-upgrade/option2-4.png) - -The key advantage of this option is that you can upgrade both the application and its geo-redundant copy in parallel without compromising your business continuity during the upgrade. - -The main tradeoff is that it requires double redundancy of each application component and therefore incurs higher dollar cost. It also involves a more complicated workflow. 
- -## Summary - -The two upgrade methods described in the article differ in complexity and dollar cost, but they both focus on minimizing how long the user is limited to read-only operations. That time is directly defined by the duration of the upgrade script. It doesn't depend on the database size, the service tier you chose, the website configuration, or other factors that you can't easily control. All preparation steps are decoupled from the upgrade steps and don't impact the production application. The efficiency of the upgrade script is a key factor that determines the user experience during upgrades. So, the best way to improve that experience is to focus your efforts on making the upgrade script as efficient as possible. - -## Next steps - -* For a business continuity overview and scenarios, see [Business continuity overview](business-continuity-high-availability-disaster-recover-hadr-overview.md). -* To learn about Azure SQL Database active geo-replication, see [Create readable secondary databases using active geo-replication](active-geo-replication-overview.md). -* To learn about Azure SQL Database auto-failover groups, see [Use auto-failover groups to enable transparent and coordinated failover of multiple databases](auto-failover-group-overview.md). -* To learn about staging environments in Azure App Service, see [Set up staging environments in Azure App Service](../../app-service/deploy-staging-slots.md). -* To learn about Azure Traffic Manager profiles, see [Manage an Azure Traffic Manager profile](../../traffic-manager/traffic-manager-manage-profiles.md). 
diff --git a/articles/azure-sql/database/manage-data-after-migrating-to-database.md b/articles/azure-sql/database/manage-data-after-migrating-to-database.md deleted file mode 100644 index 6ff222992fac8..0000000000000 --- a/articles/azure-sql/database/manage-data-after-migrating-to-database.md +++ /dev/null @@ -1,333 +0,0 @@ ---- -title: Manage after migration -titleSuffix: Azure SQL Database -description: Learn how to manage your single and pooled databases after migration to Azure SQL Database. -services: sql-database -ms.service: sql-database -ms.subservice: migration -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: mokabiru -ms.author: mokabiru -ms.reviewer: kendralittle, mathoma -ms.date: 02/13/2019 ---- -# New DBA in the cloud – Managing Azure SQL Database after migration -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Moving from the traditional self-managed, self-controlled environment to a PaaS environment can seem a bit overwhelming at first. As an app developer or a DBA, you would want to know the core capabilities of the platform that would help you keep your application available, performant, secure and resilient - always. This article aims to do exactly that. The article succinctly organizes resources and gives you some guidance on how to best use the key capabilities of Azure SQL Database with single and pooled databases to manage and keep your application running efficiently and achieve optimal results in the cloud. Typical audience for this article would be those who: - -- Are evaluating migration of their application(s) to Azure SQL Database – Modernizing your application(s). -- Are In the process of migrating their application(s) – On-going migration scenario. -- Have recently completed the migration to Azure SQL Database – New DBA in the cloud. 
- -This article discusses some of the core characteristics of Azure SQL Database as a platform that you can readily leverage when working with single databases and pooled databases in elastic pools. They are the following: - -- Monitor databases using the Azure portal -- Business continuity and disaster recovery (BCDR) -- Security and compliance -- Intelligent database monitoring and maintenance -- Data movement - -## Monitor databases using the Azure portal - -In the [Azure portal](https://portal.azure.com/), you can monitor an individual database's utilization by selecting your database and clicking the **Monitoring** chart. This brings up a **Metric** window that you can change by clicking the **Edit chart** button. Add the following metrics: - -- CPU percentage -- DTU percentage -- Data IO percentage -- Database size percentage - -Once you've added these metrics, you can continue to view them in the **Monitoring** chart with more information on the **Metric** window. All four metrics show the average utilization percentage relative to the **DTU** of your database. See the [DTU-based purchasing model](service-tiers-dtu.md) and [vCore-based purchasing model](service-tiers-vcore.md) articles for more information about service tiers. - -![Service tier monitoring of database performance.](./media/manage-data-after-migrating-to-database/sqldb_service_tier_monitoring.png) - -You can also configure alerts on the performance metrics. Click the **Add alert** button in the **Metric** window. Follow the wizard to configure your alert. You have the option to alert if the metrics exceed a certain threshold or if the metric falls below a certain threshold. - -For example, if you expect the workload on your database to grow, you can choose to configure an email alert whenever your database reaches 80% on any of the performance metrics. You can use this as an early warning to figure out when you might have to switch to the next highest compute size. 
- -The performance metrics can also help you determine if you are able to downgrade to a lower compute size. Assume you are using a Standard S2 database and all performance metrics show that the database on average does not use more than 10% at any given time. It is likely that the database will work well in Standard S1. However, be aware of workloads that spike or fluctuate before making the decision to move to a lower compute size. - -## Business continuity and disaster recovery (BCDR) - -Business continuity and disaster recovery abilities enable you to continue your business, as usual, in case of a disaster. The disaster could be a database level event (for example, someone mistakenly drops a crucial table) or a data-center level event (regional catastrophe, for example a tsunami). - -### How do I create and manage backups on SQL Database - -You don’t create backups on Azure SQL Database and that is because you don’t have to. SQL Database automatically backs up databases for you, so you no longer must worry about scheduling, taking and managing backups. The platform takes a full backup every week, differential backup every few hours and a log backup every 5 minutes to ensure the disaster recovery is efficient, and the data loss minimal. The first full backup happens as soon as you create a database. These backups are available to you for a certain period called the “Retention Period” and varies according to the service tier you choose. SQL Database provides you the ability to restore to any point in time within this retention period using [Point in Time Recovery (PITR)](recovery-using-backups.md#point-in-time-restore). 
- -|Service tier|Retention period in days| -|---|:---:| -|Basic|7| -|Standard|35| -|Premium|35| - - -In addition, the [Long-Term Retention (LTR)](long-term-retention-overview.md) feature allows you to hold onto your backup files for a much longer period specifically, for up to 10 years, and restore data from these backups at any point within that period. Furthermore, the database backups are kept in geo-replicated storage to ensure resilience from regional catastrophe. You can also restore these backups in any Azure region at any point of time within the retention period. See [Business continuity overview](business-continuity-high-availability-disaster-recover-hadr-overview.md). - -### How do I ensure business continuity in the event of a datacenter-level disaster or regional catastrophe - -Because your database backups are stored in geo-replicated storage to ensure that in case of a regional disaster, you can restore the backup to another Azure region. This is called geo-restore. The RPO (Recovery Point Objective) for this is generally < 1 hour and the ERT (Estimated Recovery Time – few minutes to hours). - -For mission-critical databases, Azure SQL Database offers, active geo-replication. What this essentially does is that it creates a geo-replicated secondary copy of your original database in another region. For example, if your database is initially hosted in Azure West US region and you want regional disaster resilience. You’d create an active geo replica of the database in West US to say East US. When the calamity strikes on West US, you can fail over to the East US region. Configuring them in an auto-failover Group is even better because this ensures that the database automatically fails over to the secondary in East US in case of a disaster. The RPO for this is < 5 seconds and the ERT < 30 seconds. - -If an auto-failover group is not configured, then your application needs to actively monitor for a disaster and initiate a failover to the secondary. 
You can create up to 4 such active geo-replicas in different Azure regions. It gets even better. You can also access these secondary active geo-replicas for read-only access. This comes in very handy to reduce latency for a geo-distributed application scenario. - -### How does my disaster recovery plan change from on-premises to SQL Database - -In summary, SQL Server setup requires you to actively manage your Availability by using features such as Failover Clustering, Database Mirroring, Transaction Replication, or Log Shipping and maintain and manage backups to ensure Business Continuity. With SQL Database, the platform manages these for you, so you can focus on developing and optimizing your database application and not worry about disaster management as much. You can have backup and disaster recovery plans configured and working with just a few clicks on the Azure portal (or a few commands using the PowerShell APIs). - -To learn more about Disaster recovery, see: [Azure SQL Database Disaster Recovery 101](https://azure.microsoft.com/blog/azure-sql-databases-disaster-recovery-101/) - -## Security and compliance - -SQL Database takes Security and Privacy very seriously. Security within SQL Database is available at the database level and at the platform level and is best understood when categorized into several layers. At each layer you get to control and provide optimal security for your application. The layers are: - -- Identity & authentication ([SQL authentication and Azure Active Directory [Azure AD] authentication](logins-create-manage.md)). -- Monitoring activity ([Auditing](/azure/azure-sql/database/auditing-overview) and [threat detection](threat-detection-configure.md)). -- Protecting actual data ([Transparent Data Encryption [TDE]](/sql/relational-databases/security/encryption/transparent-data-encryption-azure-sql) and [Always Encrypted [AE]](/sql/relational-databases/security/encryption/always-encrypted-database-engine)). 
-- Controlling Access to sensitive and privileged data ([Row Level security](/sql/relational-databases/security/row-level-security) and [Dynamic Data Masking](/sql/relational-databases/security/dynamic-data-masking)). - -[Microsoft Defender for Cloud](https://azure.microsoft.com/services/security-center/) offers centralized security management across workloads running in Azure, on-premises, and in other clouds. You can view whether essential SQL Database protection such as [Auditing](/azure/azure-sql/database/auditing-overview) and [Transparent Data Encryption [TDE]](/sql/relational-databases/security/encryption/transparent-data-encryption-azure-sql) are configured on all resources, and create policies based on your own requirements. - -### What user authentication methods are offered in SQL Database - -There are two authentication methods offered in SQL Database: - -- [Azure Active Directory Authentication](authentication-aad-overview.md) -- [SQL authentication](/sql/relational-databases/security/choose-an-authentication-mode#connecting-through-sql-server-authentication) - -Traditional Windows authentication is not supported. Azure Active Directory (Azure AD) is a centralized identity and access management service. With this you can very conveniently provide single sign-on (SSO) access to the personnel in your organization. What this means is that the credentials are shared across Azure services for simpler authentication. - -Azure AD supports [Azure AD Multi-Factor Authentication](authentication-mfa-ssms-overview.md) and with a [few clicks](../../active-directory/hybrid/how-to-connect-install-express.md) Azure AD can be integrated with Windows Server Active Directory. SQL Authentication works exactly like you’ve been using it in the past. You provide a username/password and you can authenticate users to any database on a given server. 
This also allows SQL Database and Azure Synapse Analytics to offer Multi-Factor Authentication and guest user accounts within an Azure AD domain. If you already have an Active Directory on-premises, you can federate the directory with Azure Active Directory to extend your directory to Azure. - -|**If you...**|**SQL Database / Azure Synapse Analytics**| -|---|---| -|Prefer not to use Azure Active Directory (Azure AD) in Azure|Use [SQL authentication](security-overview.md)| -|Used AD on SQL Server on-premises|[Federate AD with Azure AD](../../active-directory/hybrid/whatis-hybrid-identity.md), and use Azure AD authentication. With this, you can use Single Sign-On.| -|Need to enforce Multi-Factor Authentication|Require Multi-Factor Authentication as a policy through [Microsoft Conditional Access](conditional-access-configure.md), and use [Azure AD Universal authentication with Multi-Factor Authentication support](authentication-mfa-ssms-overview.md).| -|Have guest accounts from Microsoft accounts (live.com, outlook.com) or other domains (gmail.com)|Use [Azure AD Universal authentication](authentication-mfa-ssms-overview.md) in SQL Database/Data Warehouse, which leverages [Azure AD B2B Collaboration](../../active-directory/external-identities/what-is-b2b.md).| -|Are logged in to Windows using your Azure AD credentials from a federated domain|Use [Azure AD integrated authentication](authentication-aad-configure.md).| -|Are logged in to Windows using credentials from a domain not federated with Azure|Use [Azure AD integrated authentication](authentication-aad-configure.md).| -|Have middle-tier services which need to connect to SQL Database or Azure Synapse Analytics|Use [Azure AD integrated authentication](authentication-aad-configure.md).| - - -### How do I limit or control connectivity access to my database - -There are multiple techniques at your disposal that you could use to attain optimal connectivity organization for your application. 
- -- Firewall Rules -- VNet Service Endpoints -- Reserved IPs - -#### Firewall - -A firewall prevents access to your server from an external entity by allowing only specific entities access to your server. By default, all connections to databases inside the server are disallowed, except (optionally) connections coming in from other Azure Services. With a firewall rule you can open access to your server only to entities (for example, a developer machine) that you approve of, by allowing that computer’s IP address through the firewall. It also allows you to specify a range of IPs that you would want to allow access to the server. For example, developer machine IP addresses in your organization can be added at once by specifying a range in the Firewall settings page. - -You can create firewall rules at the server level or at the database level. Server level IP firewall rules can either be created using the Azure portal or with SSMS. For learning more about how to set a server-level and database-level firewall rule, see: [Create IP firewall rules in SQL Database](secure-database-tutorial.md#create-firewall-rules). - -#### Service endpoints - -By default, your database is configured to “Allow Azure services to access server” – which means any Virtual Machine in Azure may attempt to connect to your database. These attempts still do have to get authenticated. However, if you would not like your database to be accessible by any Azure IPs, you can disable “Allow Azure services to access server”. Additionally, you can configure [VNet Service Endpoints](vnet-service-endpoint-rule-overview.md). - -Service endpoints (SE) allow you to expose your critical Azure resources only to your own private virtual network in Azure. By doing so, you essentially eliminate public access to your resources. The traffic between your virtual network to Azure stays on the Azure backbone network. Without SE you get forced-tunneling packet routing. 
Your virtual network forces the internet traffic to your organization and the Azure Service traffic to go over the same route. With Service Endpoints, you can optimize this since the packets flow straight from your virtual network to the service on Azure backbone network. - -![VNet service endpoints](./media/manage-data-after-migrating-to-database/vnet-service-endpoints.png) - -#### Reserved IPs - -Another option is to provision [reserved IPs](/previous-versions/azure/virtual-network/virtual-networks-reserved-public-ip) for your VMs, and add those specific VM IP addresses in the server firewall settings. By assigning reserved IPs, you save the trouble of having to update the firewall rules with changing IP addresses. - -### What port do I connect to SQL Database on - -Port 1433. SQL Database communicates over this port. To connect from within a corporate network, you have to add an outbound rule in the firewall settings of your organization. As a guideline, avoid exposing port 1433 outside the Azure boundary. - -### How can I monitor and regulate activity on my server and database in SQL Database - -#### SQL Database Auditing - -With SQL Database, you can turn ON Auditing to track database events. [SQL Database Auditing](/azure/azure-sql/database/auditing-overview) records database events and writes them into an audit log file in your Azure Storage Account. Auditing is especially useful if you intend to gain insight into potential security and policy violations, maintain regulatory compliance etc. It allows you to define and configure certain categories of events that you think need auditing and based on that you can get preconfigured reports and a dashboard to get an overview of events occurring on your database. You can apply these auditing policies either at the database level or at the server level. A guide on how to turn on auditing for your server/database, see: [Enable SQL Database Auditing](secure-database-tutorial.md#enable-security-features). 
- -#### Threat detection - -With [threat detection](threat-detection-configure.md), you get the ability to act upon security or policy violations discovered by Auditing very easily. You don’t need to be a security expert to address potential threats or violations in your system. Threat detection also has some built-in capabilities like SQL Injection detection. SQL Injection is an attempt to alter or compromise the data and a quite common way of attacking a database application in general. Threat detection runs multiple sets of algorithms which detect potential vulnerabilities and SQL injection attacks, as well as anomalous database access patterns (such as access from an unusual location or by an unfamiliar principal). Security officers or other designated administrators receive an email notification if a threat is detected on the database. Each notification provides details of the suspicious activity and recommendations on how to further investigate and mitigate the threat. To learn how to turn on Threat detection, see: [Enable threat detection](secure-database-tutorial.md#enable-security-features). - -### How do I protect my data in general on SQL Database - -Encryption provides a strong mechanism to protect and secure your sensitive data from intruders. Your encrypted data is of no use to the intruder without the decryption key. Thus, it adds an extra layer of protection on top of the existing layers of security built in SQL Database. There are two aspects to protecting your data in SQL Database: - -- Your data that is at-rest in the data and log files -- Your data that is in-flight - -In SQL Database, by default, your data at rest in the data and log files on the storage subsystem is completely and always encrypted via [Transparent Data Encryption [TDE]](/sql/relational-databases/security/encryption/transparent-data-encryption-azure-sql). Your backups are also encrypted. 
With TDE there are no changes required on your application side that is accessing this data. The encryption and decryption happen transparently; hence the name. -For protecting your sensitive data in-flight and at rest, SQL Database provides a feature called [Always Encrypted (AE)](/sql/relational-databases/security/encryption/always-encrypted-database-engine). AE is a form of client-side encryption which encrypts sensitive columns in your database (so they are in ciphertext to database administrators and unauthorized users). The server receives the encrypted data to begin with. The key for Always Encrypted is also stored on the client side, so only authorized clients can decrypt the sensitive columns. The server and data administrators cannot see the sensitive data since the encryption keys are stored on the client. AE encrypts sensitive columns in the table end to end, from unauthorized clients to the physical disk. AE supports equality comparisons today, so DBAs can continue to query encrypted columns as part of their SQL commands. Always Encrypted can be used with a variety of key store options, such as [Azure Key Vault](always-encrypted-azure-key-vault-configure.md), Windows certificate store, and local hardware security modules. - -|**Characteristics**|**Always Encrypted**|**Transparent Data Encryption**| -|---|---|---| -|**Encryption span**|End-to-end|At-rest data| -|**Server can access sensitive data**|No|Yes, since encryption is for the data at rest| -|**Allowed T-SQL operations**|Equality comparison|All T-SQL surface area is available| -|**App changes required to use the feature**|Minimal|Very Minimal| -|**Encryption granularity**|Column level|Database level| - -### How can I limit access to sensitive data in my database - -Every application has a certain bit of sensitive data in the database that needs to be protected from being visible to everyone. 
Certain personnel within the organization need to view this data, however others shouldn’t be able to view this data. One example is employee wages. A manager would need access to the wage information for their direct reports however, the individual team members shouldn’t have access to the wage information of their peers. Another scenario is data developers who might be interacting with sensitive data during development stages or testing, for example, SSNs of customers. This information again doesn’t need to be exposed to the developer. In such cases, your sensitive data either needs to be masked or not be exposed at all. SQL Database offers two such approaches to prevent unauthorized users from being able to view sensitive data: - -[Dynamic Data Masking](dynamic-data-masking-overview.md) is a data masking feature that enables you to limit sensitive data exposure by masking it to non-privileged users on the application layer. You define a masking rule that can create a masking pattern (for example, to only show last four digits of a national ID SSN: XXX-XX-0000 and mark most of it as Xs) and identify which users are to be excluded from the masking rule. The masking happens on-the-fly and there are various masking functions available for various data categories. Dynamic data masking allows you to automatically detect sensitive data in your database and apply masking to it. - -[Row Level security](/sql/relational-databases/security/row-level-security) enables you to control access at the row level. Meaning, certain rows in a database table based on the user executing the query (group membership or execution context) are hidden. The access restriction is done on the database tier instead of in an application tier, to simplify your app logic. You start by creating a filter predicate, filtering out rows that are not be exposed and the security policy next defining who has access to these rows. 
Finally, the end user runs their query and, depending on the user’s privilege, they either view those restricted rows or are unable to see them at all. - -### How do I manage encryption keys in the cloud - -There are key management options for both Always Encrypted (client-side encryption) and Transparent Data Encryption (encryption at rest). It’s recommended that you regularly rotate encryption keys. The rotation frequency should align with both your internal organization regulations and compliance requirements. - -#### Transparent Data Encryption (TDE) - -There is a two-key hierarchy in TDE – the data in each user database is encrypted by a symmetric AES-256 database-unique database encryption key (DEK), which in turn is encrypted by a server-unique asymmetric RSA 2048 master key. The master key can be managed either: - -- Automatically by the platform - SQL Database. -- Or by you using [Azure Key Vault](always-encrypted-azure-key-vault-configure.md) as the key store. - -By default, the master key for Transparent Data Encryption is managed by the SQL Database service for convenience. If your organization would like control over the master key, there is an option to use [Azure Key Vault](always-encrypted-azure-key-vault-configure.md) as the key store. By using Azure Key Vault, your organization assumes control over key provisioning, rotation, and permission controls. [Rotation or switching the type of a TDE master key](/sql/relational-databases/security/encryption/transparent-data-encryption-byok-azure-sql-key-rotation) is fast, as it only re-encrypts the DEK. For organizations with separation of roles between security and data management, a security admin could provision the key material for the TDE master key in Azure Key Vault and provide an Azure Key Vault key identifier to the database administrator to use for encryption at rest on a server. The Key Vault is designed such that Microsoft does not see or extract any encryption keys. 
You also get a centralized management of keys for your organization. - -#### Always Encrypted - -There is also a [two-key hierarchy](/sql/relational-databases/security/encryption/overview-of-key-management-for-always-encrypted) in Always Encrypted - a column of sensitive data is encrypted by an AES 256-column encryption key (CEK), which in turn is encrypted by a column master key (CMK). The client drivers provided for Always Encrypted have no limitations on the length of CMKs. The encrypted value of the CEK is stored on the database, and the CMK is stored in a trusted key store, such as Windows Certificate Store, Azure Key Vault, or a hardware security module. - -- Both the [CEK and CMK](/sql/relational-databases/security/encryption/rotate-always-encrypted-keys-using-powershell) can be rotated. -- CEK rotation is a size of data operation and can be time-intensive depending on the size of the tables containing the encrypted columns. Hence it is prudent to plan CEK rotations accordingly. -- CMK rotation, however, does not interfere with database performance, and can be done with separated roles. - -The following diagram shows the key store options for the column master keys in Always Encrypted - -![Always encrypted CMK store providers](./media/manage-data-after-migrating-to-database/always-encrypted.png) - -### How can I optimize and secure the traffic between my organization and SQL Database - -The network traffic between your organization and SQL Database would generally get routed over the public network. However, if you choose to optimize this path and make it more secure, you can look into Azure ExpressRoute. ExpressRoute essentially lets you extend your corporate network into the Azure platform over a private connection. By doing so, you do not go over the public Internet. 
You also get higher security, reliability, and routing optimization that translates to lower network latencies and much faster speeds than you would normally experience going over the public internet. If you are planning on transferring a significant chunk of data between your organization and Azure, using ExpressRoute can yield cost benefits. You can choose from three different connectivity models for the connection from your organization to Azure: - -- [Cloud Exchange Co-location](../../expressroute/expressroute-connectivity-models.md#CloudExchange) -- [Any-to-any](../../expressroute/expressroute-connectivity-models.md#IPVPN) -- [Point-to-Point](../../expressroute/expressroute-connectivity-models.md#Ethernet) - -ExpressRoute also allows you to burst up to 2x the bandwidth limit you purchase for no additional charge. It is also possible to configure cross region connectivity using ExpressRoute. To see a list of ExpressRoute connectivity providers, see: [ExpressRoute Partners and Peering Locations](../../expressroute/expressroute-locations.md). The following articles describe Express Route in more detail: - -- [Introduction on Express Route](../../expressroute/expressroute-introduction.md) -- [Prerequisites](../../expressroute/expressroute-prerequisites.md) -- [Workflows](../../expressroute/expressroute-workflows.md) - -### Is SQL Database compliant with any regulatory requirements, and how does that help with my own organization's compliance - -SQL Database is compliant with a range of regulatory compliancies. To view the latest set of compliancies that have been met by SQL Database, visit the [Microsoft Trust Center](https://gallery.technet.microsoft.com/Overview-of-Azure-c1be3942) and drill down on the compliancies that are important to your organization to see if SQL Database is included under the compliant Azure services. 
It is important to note that although SQL Database may be certified as a compliant service, it aids in the compliance of your organization’s service but does not automatically guarantee it. - -## Intelligent database monitoring and maintenance after migration - -Once you’ve migrated your database to SQL Database, you are going to want to monitor your database (for example, check how the resource utilization is like or DBCC checks) and perform regular maintenance (for example, rebuild or reorganize indexes, statistics etc.). Fortunately, SQL Database is Intelligent in the sense that it uses the historical trends and recorded metrics and statistics to proactively help you monitor and maintain your database, so that your application runs optimally always. In some cases, Azure SQL Database can automatically perform maintenance tasks depending on your configuration setup. There are three facets to monitoring your database in SQL Database: - -- Performance monitoring and optimization. -- Security optimization. -- Cost optimization. - -### Performance monitoring and optimization - -With Query Performance Insights, you can get tailored recommendations for your database workload so that your applications can keep running at an optimal level - always. You can also set it up so that these recommendations get applied automatically and you do not have to bother performing maintenance tasks. With SQL Database Advisor, you can automatically implement index recommendations based on your workload - this is called Auto-Tuning. The recommendations evolve as your application workload changes to provide you with the most relevant suggestions. You also get the option to manually review these recommendations and apply them at your discretion. 
- -### Security optimization - -SQL Database provides actionable security recommendations to help you secure your data and threat detection for identifying and investigating suspicious database activities that may pose a potential threat to the database. [Vulnerability assessment](sql-vulnerability-assessment.md) is a database scanning and reporting service that allows you to monitor the security state of your databases at scale and identify security risks and drift from a security baseline defined by you. After every scan, a customized list of actionable steps and remediation scripts is provided, as well as an assessment report that can be used to help meet compliance requirements. - -With Microsoft Defender for Cloud, you identify the security recommendations across the board and apply them with a single click. - -### Cost optimization - -Azure SQL platform analyzes the utilization history across the databases in a server to evaluate and recommend cost-optimization options for you. This analysis usually takes a fortnight to analyze and build up actionable recommendations. Elastic pool is one such option. The recommendation appears on the portal as a banner: - -![elastic pool recommendations](./media/manage-data-after-migrating-to-database/elastic-pool-recommendations.png) - -You can also view this analysis under the “Advisor” section: - -![elastic pool recommendations-advisor](./media/manage-data-after-migrating-to-database/advisor-section.png) - -### How do I monitor the performance and resource utilization in SQL Database - -In SQL Database you can leverage the intelligent insights of the platform to monitor the performance and tune accordingly. You can monitor performance and resource utilization in SQL Database using the following methods: - -#### Azure portal - -The Azure portal shows a database’s utilization by selecting the database and clicking the chart in the Overview pane. 
You can modify the chart to show multiple metrics, including CPU percentage, DTU percentage, Data IO percentage, Sessions percentage, and Database size percentage. - -![Monitoring chart](./media/manage-data-after-migrating-to-database/monitoring-chart.png) - -![Monitoring chart2](./media/manage-data-after-migrating-to-database/chart.png) - -From this chart, you can also configure alerts by resource. These alerts allow you to respond to resource conditions with an email, write to an HTTPS/HTTP endpoint or perform an action. For more information, see [Create alerts](alerts-insights-configure-portal.md). - -#### Dynamic management views - -You can query the [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database) dynamic management view to return resource consumption statistics history from the last hour and the [sys.resource_stats](/sql/relational-databases/system-catalog-views/sys-resource-stats-azure-sql-database) system catalog view to return history for the last 14 days. - -#### Query Performance Insight - -[Query Performance Insight](query-performance-insight-use.md) allows you to see a history of the top resource-consuming queries and long-running queries for a specific database. You can quickly identify TOP queries by resource utilization, duration, and frequency of execution. You can track queries and detect regression. This feature requires [Query Store](/sql/relational-databases/performance/monitoring-performance-by-using-the-query-store) to be enabled and active for the database. - -![Query Performance Insight](./media/manage-data-after-migrating-to-database/query-performance-insight.png) - -#### Azure SQL Analytics (Preview) in Azure Monitor logs - -[Azure Monitor logs](../../azure-monitor/insights/azure-sql.md) allows you to collect and visualize key Azure SQL Database performance metrics, supporting up to 150,000 databases and 5,000 SQL Elastic pools per workspace. 
You can use it to monitor and receive notifications. You can monitor SQL Database and elastic pool metrics across multiple Azure subscriptions and elastic pools and can be used to identify issues at each layer of an application stack. - -### I am noticing performance issues: How does my SQL Database troubleshooting methodology differ from SQL Server - -A major portion of the troubleshooting techniques you would use for diagnosing query and database performance issues remain the same. After all the same database engine powers the cloud. However, the platform - Azure SQL Database has built in ‘intelligence’. It can help you troubleshoot and diagnose performance issues even more easily. It can also perform some of these corrective actions on your behalf and in some cases, proactively fix them - automatically. - -Your approach towards troubleshooting performance issues can significantly benefit by using intelligent features such as [Query Performance Insight(QPI)](query-performance-insight-use.md) and [Database Advisor](database-advisor-implement-performance-recommendations.md) in conjunction and so the difference in methodology differs in that respect – you no longer need to do the manual work of grinding out the essential details that might help you troubleshoot the issue at hand. The platform does the hard work for you. One example of that is QPI. With QPI, you can drill all the way down to the query level and look at the historical trends and figure out when exactly the query regressed. The Database Advisor gives you recommendations on things that might help you improve your overall performance in general like - missing indexes, dropping indexes, parameterizing your queries etc. - -With performance troubleshooting, it is important to identify whether it is just the application or the database backing it, that’s impacting your application performance. Often the performance problem lies in the application layer. 
It could be the architecture or the data access pattern. For example, consider you have a chatty application that is sensitive to network latency. In this case, your application suffers because there would be many short requests going back and forth ("chatty") between the application and the server and on a congested network, these roundtrips add up fast. To improve the performance in this case, you can use [Batch Queries](performance-guidance.md#batch-queries). Using batches helps you tremendously because now your requests get processed in a batch; thus, helping you cut down on the roundtrip latency and improve your application performance. - -Additionally, if you notice a degradation in the overall performance of your database, you can monitor the [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database) and [sys.resource_stats](/sql/relational-databases/system-catalog-views/sys-resource-stats-azure-sql-database) dynamic management views in order to understand CPU, IO, and memory consumption. Your performance may be impacted because your database is starved of resources. It could be that you may need to change the compute size and/or service tier based on the growing and shrinking workload demands. - -For a comprehensive set of recommendations for tuning performance issues, see: [Tune your database](performance-guidance.md#tune-your-database). - -### How do I ensure I am using the appropriate service tier and compute size - -SQL Database offers various service tiers: Basic, Standard, and Premium. With each service tier, you get guaranteed, predictable performance tied to that service tier. Depending on your workload, you may have bursts of activity where your resource utilization might hit the ceiling of the current compute size that you are in. In such cases, it is useful to first start by evaluating whether any tuning can help (for example, adding or altering an index etc.). 
If you still encounter limit issues, consider moving to a higher service tier or compute size. - -|**Service tier**|**Common Use Case Scenarios**| -|---|---| -|**Basic**|Applications with a handful users and a database that doesn’t have high concurrency, scale, and performance requirements. | -|**Standard**|Applications with a considerable concurrency, scale, and performance requirements coupled with low to medium IO demands. | -|**Premium**|Applications with lots of concurrent users, high CPU/memory, and high IO demands. High concurrency, high throughput, and latency sensitive apps can leverage the Premium level. | - - -For making sure you’re on the right compute size, you can monitor your query and database resource consumption through one of the above-mentioned ways in “How do I monitor the performance and resource utilization in SQL Database”. Should you find that your queries/databases are consistently running hot on CPU/Memory etc. you can consider scaling up to a higher compute size. Similarly, if you note that even during your peak hours, you don’t seem to use the resources as much; consider scaling down from the current compute size. - -If you have a SaaS app pattern or a database consolidation scenario, consider using an Elastic pool for cost optimization. Elastic pool is a great way to achieve database consolidation and cost-optimization. To read more about managing multiple databases using elastic pool, see: [Manage pools and databases](elastic-pool-manage.md#azure-portal). - -### How often do I need to run database integrity checks for my database - -SQL Database uses some smart techniques that allow it to handle certain classes of data corruption automatically and without any data loss. These techniques are built in to the service and are leveraged by the service when need arises. On a regular basis, your database backups across the service are tested by restoring them and running DBCC CHECKDB on it. 
If there are issues, SQL Database proactively addresses them. [Automatic page repair](/sql/sql-server/failover-clusters/automatic-page-repair-availability-groups-database-mirroring) is leveraged for fixing pages that are corrupt or have data integrity issues. The database pages are always verified with the default CHECKSUM setting that verifies the integrity of the page. SQL Database proactively monitors and reviews the data integrity of your database and, if issues arise, addresses them with the highest priority. In addition to these, you may choose to optionally run your own integrity checks at your will. For more information, see [Data Integrity in SQL Database](https://azure.microsoft.com/blog/data-integrity-in-azure-sql-database/) - -## Data movement after migration - -### How do I export and import data as BACPAC files from SQL Database using the Azure portal - -- **Export**: You can export your database in Azure SQL Database as a BACPAC file from the Azure portal - - ![database export](./media/manage-data-after-migrating-to-database/database-export1.png) - -- **Import**: You can also import data as a BACPAC file into your database in Azure SQL Database using the Azure portal. - - ![database import](./media/manage-data-after-migrating-to-database/import1.png) - -### How do I synchronize data between SQL Database and SQL Server - -You have several ways to achieve this: - -- **[Data Sync](sql-data-sync-data-sql-server-sql-database.md)** – This feature helps you synchronize data bi-directionally between multiple SQL Server databases and SQL Database. To sync with SQL Server databases, you need to install and configure sync agent on a local computer or a virtual machine and open the outbound TCP port 1433. 
-- **[Transaction Replication](https://azure.microsoft.com/blog/transactional-replication-to-azure-sql-database-is-now-generally-available/)** – With transaction replication you can synchronize your data from a SQL Server database to Azure SQL Database with the SQL Server instance being the publisher and the Azure SQL Database being the subscriber. For now, only this setup is supported. For more information on how to migrate your data from a SQL Server database to Azure SQL with minimal downtime, see: [Use Transaction Replication](migrate-to-database-from-sql-server.md#method-2-use-transactional-replication) - -## Next steps - -Learn about [SQL Database](sql-database-paas-overview.md). diff --git a/articles/azure-sql/database/manage-hyperscale-database.md b/articles/azure-sql/database/manage-hyperscale-database.md deleted file mode 100644 index 2a860c0b007ed..0000000000000 --- a/articles/azure-sql/database/manage-hyperscale-database.md +++ /dev/null @@ -1,430 +0,0 @@ ---- -title: How to manage a Hyperscale database -description: How to manage a Hyperscale database, including migrating to Hyperscale, restoring to a different region, and reverse migration. -services: sql-database -ms.service: sql-database -ms.subservice: service-overview -ms.topic: conceptual -author: dimitri-furman -ms.author: dfurman -ms.reviewer: kendralittle, mathoma -ms.date: 2/17/2022 ---- - -# How to manage a Hyperscale database - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -The [Hyperscale service tier](service-tier-hyperscale.md) provides a highly scalable storage and compute performance tier that leverages the Azure architecture to scale out storage and compute resources for an Azure SQL Database substantially beyond the limits available for the General Purpose and Business Critical service tiers. 
This article describes how to carry out essential administration tasks for Hyperscale databases, including migrating an existing database to Hyperscale, restoring a Hyperscale database to a different region, reverse migrating from Hyperscale to another service tier, and monitoring the status of ongoing and recent operations against a Hyperscale database. - -Learn how to create a new Hyperscale database in [Quickstart: Create a Hyperscale database in Azure SQL Database](hyperscale-database-create-quickstart.md). - -## Migrate an existing database to Hyperscale - -You can migrate existing databases in Azure SQL Database to Hyperscale using the Azure portal, the Azure CLI, PowerShell, or Transact-SQL. - -The time required to move an existing database to Hyperscale consists of the time to copy data and the time to replay the changes made in the source database while copying data. The data copy time is proportional to data size. We recommend migrating to Hyperscale during a lower write activity period so that the time to replay accumulated changes will be shorter. - -You will only experience a short period of downtime, generally a few minutes, during the final cutover to the Hyperscale service tier. - -### Prerequisites - -To move a database that is a part of a [geo-replication](active-geo-replication-overview.md) relationship, either as the primary or as a secondary, to Hyperscale, you need to first terminate data replication between the primary and secondary replica. Databases in a [failover group](auto-failover-group-overview.md) must be removed from the group first. - -Once a database has been moved to Hyperscale, you can create a new Hyperscale geo-replica for that database. Geo-replication for Hyperscale is in preview with certain [limitations](active-geo-replication-overview.md). 
- -### How to migrate a database to the Hyperscale service tier - -To migrate an existing database in Azure SQL Database to the Hyperscale service tier, first identify your target service objective. Review [resource limits for single databases](resource-limits-vcore-single-databases.md#hyperscale---provisioned-compute---gen4) if you aren't sure which service objective is right for your database. In many cases, you can choose a service objective with the same number of vCores and the same hardware generation as the original database. If needed, you will be able to [adjust this later with minimal downtime](scale-resources.md). - -Select the tab for your preferred tool to migrate your database: - -# [Portal](#tab/azure-portal) - -The Azure portal enables you to migrate to the Hyperscale service tier by modifying the pricing tier for your database. - -:::image type="content" source="media/manage-hyperscale-database/service-tier-dropdown-azure-sql-database-azure-portal.png" alt-text="Screenshot of the compute & storage panel of a database in Azure SQL Database. The service tier dropdown is expanded, displaying the option for the Hyperscale service tier." lightbox="media/manage-hyperscale-database/service-tier-dropdown-azure-sql-database-azure-portal.png"::: - -1. Navigate to the database you wish to migrate in the Azure portal. -1. In the left navigation bar, select **Compute + storage**. -1. Select the **Service tier** drop-down to expand the options for service tiers. -1. Select **Hyperscale (On-demand scalable storage)** from the dropdown menu. -1. Review the **Hardware Configuration** listed. If desired, select **Change configuration** to select the appropriate hardware configuration for your workload. -1. Review the option to **Save money**. Select it if you qualify for Azure Hybrid Benefit and wish to use it for this database. -1. Select the **vCores** slider if you wish to change the number of vCores available for your database under the Hyperscale service tier. 
-1. Select the **High-AvailabilitySecondaryReplicas** slider if you wish to change the number of replicas under the Hyperscale service tier. -1. Select **Apply**. - -You can [monitor operations for a Hyperscale database](#monitor-operations-for-a-hyperscale-database) while the operation is ongoing. - -# [Azure CLI](#tab/azure-cli) - -This code sample calls [az sql db update](/cli/azure/sql/db#az_sql_db_update) to migrate an existing database in Azure SQL Database to the Hyperscale service tier. You must specify both the edition and service objective. - -Replace `resourceGroupName`, `serverName`, `databaseName`, and `serviceObjective` with the appropriate values before running the following code sample: - -```azurecli-interactive -resourceGroupName="myResourceGroup" -serverName="server01" -databaseName="mySampleDatabase" -serviceObjective="HS_Gen5_2" - -az sql db update -g $resourceGroupName -s $serverName -n $databaseName \ - --edition Hyperscale --service-objective $serviceObjective - -``` - -You can [monitor operations for a Hyperscale database](#monitor-operations-for-a-hyperscale-database) while the operation is ongoing. - -# [PowerShell](#tab/azure-powershell) - -The following example uses the [Set-AzSqlDatabase](/powershell/module/az.sql/set-azsqldatabase) cmdlet to migrate an existing database in Azure SQL Database to the Hyperscale service tier. You must specify both the edition and service objective. 
- -Replace `$resourceGroupName`, `$serverName`, `$databaseName`, and `$serviceObjective` with the appropriate values before running this code sample: - -```powershell-interactive -$resourceGroupName = "myResourceGroup" -$serverName = "server01" -$databaseName = "mySampleDatabase" -$serviceObjective = "HS_Gen5_2" - -Set-AzSqlDatabase -ResourceGroupName $resourceGroupName -ServerName $serverName ` - -DatabaseName $databaseName -Edition "Hyperscale" ` - -RequestedServiceObjectiveName $serviceObjective - -``` - -You can [monitor operations for a Hyperscale database](#monitor-operations-for-a-hyperscale-database) while the operation is ongoing. - -# [Transact-SQL](#tab/t-sql) - -To migrate an existing database in Azure SQL Database to the Hyperscale service tier with Transact-SQL, first connect to the master database on your [logical SQL server](logical-servers.md) using [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms) or [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio). - -You must specify both the edition and service objective in the [ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql?preserve-view=true&view=azuresqldb-current) statement. - -This example statement migrates a database named `mySampleDatabase` to the Hyperscale service tier with the `HS_Gen5_2` service objective. Replace the database name with the appropriate value before executing the statement. - -```sql -ALTER DATABASE [mySampleDatabase] - MODIFY (EDITION = 'Hyperscale', SERVICE_OBJECTIVE = 'HS_Gen5_2'); -GO -``` - -You can [monitor operations for a Hyperscale database](#monitor-operations-for-a-hyperscale-database) while the operation is ongoing. 
- ---- - -## Reverse migrate from Hyperscale (preview) - -Reverse migration to the General Purpose service tier allows customers who have recently migrated an existing database in Azure SQL Database to the Hyperscale service tier to move back in an emergency, should Hyperscale not meet their needs. While reverse migration is initiated by a service tier change, it's essentially a size-of-data move between different architectures. - -### Limitations for reverse migration - -Reverse migration is available under the following conditions: - -- Reverse migration is only available within 45 days of the original migration to Hyperscale. -- Databases originally created in the Hyperscale service tier are not eligible for reverse migration. -- You may reverse migrate to the [General Purpose](service-tier-general-purpose.md) service tier only. Your migration from Hyperscale to General Purpose can target either the serverless or provisioned compute tiers. If you wish to migrate the database to another service tier, such as [Business Critical](service-tier-business-critical.md) or a [DTU based service tier](service-tiers-dtu.md), first reverse migrate to the General Purpose service tier, then change the service tier. - -### Duration and downtime - -Unlike regular service level objective change operations in Hyperscale, migrating to Hyperscale and reverse migration to General Purpose are size-of-data operations. - -The duration of a reverse migration depends mainly on the size of the database and concurrent write activities happening during the migration. The number of vCores you assign to the target General Purpose database will also impact the duration of the reverse migration. We recommend that the target General Purpose database be provisioned with a number of vCores greater than or equal to the number of vCores assigned to the source Hyperscale database to sustain similar workloads. 
- -During reverse migration, the source Hyperscale database may experience performance degradation if under substantial load. Specifically, transaction log rate may be reduced (throttled) to ensure that reverse migration is making progress. - -You will only experience a short period of downtime, generally a few minutes, during the final cutover to the new target General Purpose database. - -### Prerequisites - -Before you initiate a reverse migration from Hyperscale to the General Purpose service tier, you must ensure that your database meets the [limitations for reverse migration](#limitations-for-reverse-migration) and: - -- Your database does not have Geo Replication enabled. -- Your database does not have named replicas. -- Your database (allocated size) is small enough to fit into the target service tier. -- If you specify max database size for the target General Purpose database, ensure the allocated size of the database is small enough to fit into that maximum size. - -Prerequisite checks will occur before a reverse migration starts. If prerequisites are not met, the reverse migration will fail immediately. - -### Backup policies - -You will be [billed using the regular pricing](automated-backups-overview.md?tabs=single-database#backup-storage-costs) for all existing database backups within the [configured retention period](automated-backups-overview.md#backup-retention). You will be billed for the Hyperscale backup storage snapshots and for size-of-data storage blobs that must be retained to be able to restore the backup. - -You can migrate a database to Hyperscale and reverse migrate back to General Purpose multiple times. Only backups from the current and once-previous tier of your database will be available for restore. If you have moved from the General Purpose service tier to Hyperscale and back to General Purpose, the only backups available are the ones from the current General Purpose database and the immediately previous Hyperscale database. 
These retained backups are billed as per Azure SQL Database billing. Any previous tiers tried won't have backups available and will not be billed. - -For example, you could migrate between Hyperscale and non-Hyperscale service tiers: - -1. General Purpose -1. Migrate to Hyperscale -1. Reverse migrate to General Purpose -1. Service tier change to Business Critical -1. Migrate to Hyperscale -1. Reverse migrate to General Purpose - -In this case, the only backups available would be from steps 5 and 6 of the timeline, if they are still within the [configured retention period](automated-backups-overview.md#backup-retention). Any backups from previous steps would be unavailable. This should be a careful consideration when attempting multiple reverse migrations from Hyperscale to the General Purpose tier. - -### How to reverse migrate a Hyperscale database to the General Purpose service tier - -To reverse migrate an existing Hyperscale database in Azure SQL Database to the General Purpose service tier, first identify your target service objective in the General Purpose service tier and whether you wish to migrate to the provisioned or serverless compute tiers. Review [resource limits for single databases](resource-limits-vcore-single-databases.md#gen5-compute-generation-part-1) if you aren't sure which service objective is right for your database. - -If you wish to perform an additional service tier change after reverse migrating to General Purpose, identify your eventual target service objective as well and ensure that your database's allocated size is small enough to fit in that service objective. - -Select the tab for your preferred method to reverse migrate your database: - -# [Portal](#tab/azure-portal) - -The Azure portal enables you to reverse migrate to the General Purpose service tier by modifying the pricing tier for your database. 
- -:::image type="content" source="media/manage-hyperscale-database/reverse-migrate-hyperscale-service-compute-tier-pane.png" alt-text="Screenshot of the compute & storage panel of a Hyperscale database in Azure SQL Database." lightbox="media/manage-hyperscale-database/reverse-migrate-hyperscale-service-compute-tier-pane.png"::: - -1. Navigate to the database you wish to migrate in the Azure portal. -1. In the left navigation bar, select **Compute + storage**. -1. Select the **Service tier** drop-down to expand the options for service tiers. -1. Select **General Purpose (Scalable compute and storage options)** from the dropdown menu. -1. Review the **Hardware Configuration** listed. If desired, select **Change configuration** to select the appropriate hardware configuration for your workload. -1. Review the option to **Save money**. Select it if you qualify for Azure Hybrid Benefit and wish to use it for this database. -1. Select the **vCores** slider if you wish to change the number of vCores available for your database under the General Purpose service tier. -1. Select **Apply**. - -# [Azure CLI](#tab/azure-cli) - -This code sample calls [az sql db update](/cli/azure/sql/db#az_sql_db_update) to reverse migrate an existing Hyperscale database to the General Purpose service tier. You must specify both the edition and service objective. You may select either `Provisioned` or `Serverless` for the target compute model. 
- -Replace `resourceGroupName`, `serverName`, `databaseName`, and `serviceObjective` with the appropriate values before running the following code sample: - -```azurecli-interactive -resourceGroupName="myResourceGroup" -serverName="server01" -databaseName="mySampleDatabase" -serviceObjective="GP_Gen5_2" -computeModel="Provisioned" - -az sql db update -g $resourceGroupName -s $serverName -n $databaseName \ - --edition GeneralPurpose --service-objective $serviceObjective \ - --compute-model $computeModel - -``` - -You can optionally include the `maxsize` argument. If the `maxsize` value exceeds the valid maximum size for the target service objective, an error will be returned. If the `maxsize` argument is not specified, the operation will default to the maximum size available for the given service objective. The following example specifies `maxsize`: - -```azurecli-interactive -resourceGroupName="myResourceGroup" -serverName="server01" -databaseName="mySampleDatabase" -serviceObjective="GP_Gen5_2" -computeModel="Provisioned" -maxsize="200GB" - -az sql db update -g $resourceGroupName -s $serverName -n $databaseName \ - --edition GeneralPurpose --service-objective $serviceObjective \ - --compute-model $computeModel --max-size $maxsize - -``` - -You can [monitor operations for a Hyperscale database](#monitor-operations-for-a-hyperscale-database) while the operation is ongoing. - -# [PowerShell](#tab/azure-powershell) - -This code sample uses the [Set-AzSqlDatabase](/powershell/module/az.sql/set-azsqldatabase) cmdlet to reverse migrate an existing database from the Hyperscale service tier to the General Purpose service tier. You must specify both the edition and service objective. You may select either `Provisioned` or `Serverless` for the target compute tier. 
- -Replace `$resourceGroupName`, `$serverName`, `$databaseName`, `$serviceObjective`, and `$computeModel` with the appropriate values before running this code sample: - -```powershell-interactive -$resourceGroupName = "myResourceGroup" -$serverName = "server01" -$databaseName = "mySampleDatabase" -$serviceObjective = "GP_Gen5_2" -$computeModel = "Provisioned" - -Set-AzSqlDatabase -ResourceGroupName $resourceGroupName -ServerName $serverName ` - -DatabaseName $databaseName -Edition "GeneralPurpose" -computemodel $computeModel ` - -RequestedServiceObjectiveName $serviceObjective - -``` - -You can optionally include the `maxsize` argument. If the `maxsize` value exceeds the valid maximum size for the target service objective, an error will be returned. If the `maxsize` argument is not specified, the operation will default to the maximum size available for the given service objective. The following example specifies `maxsize`: - -```powershell-interactive -$resourceGroupName = "myResourceGroup" -$serverName = "server01" -$databaseName = "mySampleDatabase" -$serviceObjective = "GP_Gen5_2" -$computeModel = "Provisioned" -$maxSizeBytes = "268435456000" - -Set-AzSqlDatabase -ResourceGroupName $resourceGroupName -ServerName $serverName ` - -DatabaseName $databaseName -Edition "GeneralPurpose" -computemodel $computeModel ` - -RequestedServiceObjectiveName $serviceObjective -MaxSizeBytes $maxSizeBytes - -``` - -You can [monitor operations for a Hyperscale database](#monitor-operations-for-a-hyperscale-database) while the operation is ongoing. - -# [Transact-SQL](#tab/t-sql) - -To reverse migrate a Hyperscale database to the General Purpose service tier with Transact-SQL, first connect to the master database on your [logical SQL server](logical-servers.md) using [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms), [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio) . 
- -You must specify both the edition and service objective in the [ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql?preserve-view=true&view=azuresqldb-current) statement. - -This example statement migrates a database named `mySampleDatabase` to the General Purpose service tier with the `GP_Gen5_2` service objective. Replace the database name and service objective with the appropriate values before executing the statement. - -```sql -ALTER DATABASE [mySampleDatabase] - MODIFY (EDITION = 'GeneralPurpose', SERVICE_OBJECTIVE = 'GP_Gen5_2'); -GO -``` - -You can optionally include the `maxsize` argument. If the `maxsize` value exceeds the valid maximum size for the target service objective, an error will be returned. If the `maxsize` argument is not specified, the operation will default to the maximum size available for the given service objective. The following example specifies `maxsize`: - -```sql -ALTER DATABASE [mySampleDatabase] - MODIFY (EDITION = 'GeneralPurpose', SERVICE_OBJECTIVE = 'GP_Gen5_2', MAXSIZE = 200 GB); -GO -``` - -You can [monitor operations for a Hyperscale database](#monitor-operations-for-a-hyperscale-database) while the operation is ongoing. - ---- - -## Monitor operations for a Hyperscale database - -You can monitor the status of ongoing or recently completed operations for an Azure SQL Database using the Azure portal, the Azure CLI, PowerShell, or Transact-SQL. - -Select the tab for your preferred method to monitor operations. - -# [Portal](#tab/azure-portal) - -The Azure portal shows a notification for a database in Azure SQL Database when an operation such as a migration, reverse migration, or restore is in progress. - -:::image type="content" source="media/manage-hyperscale-database/ongoing-operation-notification-azure-sql-database-azure-portal.png" alt-text="Screenshot of the overview panel of a database in Azure SQL Database. 
A notification of an ongoing operation appears in the notification area at the bottom of the panel." lightbox="media/manage-hyperscale-database/ongoing-operation-notification-azure-sql-database-azure-portal.png"::: - -1. Navigate to the database in the Azure portal. -1. In the left navigation bar, select **Overview**. -1. Review the **Notifications** section at the bottom of the right pane. If operations are ongoing, a notification box will appear. -1. Select the notification box to view details. -1. The **Ongoing operations** pane will open. Review the details of the ongoing operations. - - -# [Azure CLI](#tab/azure-cli) - -This code sample calls [az sql db op list](/cli/azure/sql/db/op#az-sql-db-op-list) to return recent or ongoing operations for a database in Azure SQL Database. - -Replace `resourceGroupName`, `serverName`, `databaseName`, and `serviceObjective` with the appropriate values before running the following code sample: - -```azurecli-interactive -resourceGroupName="myResourceGroup" -serverName="server01" -databaseName="mySampleDatabase" - -az sql db op list -g $resourceGroupName -s $serverName --database $databaseName - -``` - -# [PowerShell](#tab/azure-powershell) - -The [Get-AzSqlDatabaseActivity](/powershell/module/az.sql/get-azsqldatabaseactivity) cmdlet returns recent or ongoing operations for a database in Azure SQL Database. 
- -Set the `$resourceGroupName`, `$serverName`, and `$databaseName` parameters to the appropriate values for your database before running the sample code: - -```powershell-interactive -$resourceGroupName = "myResourceGroup" -$serverName = "server01" -$databaseName = "mySampleDatabase" - -Get-AzSqlDatabaseActivity -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName - -``` - -# [Transact-SQL](#tab/t-sql) - -To monitor operations for a Hyperscale database, first connect to the master database on your [logical server](logical-servers.md) using [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms), [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio), or the client of your choice to run Transact-SQL commands. - -Query the [sys.dm_operation_status](/sql/relational-databases/system-dynamic-management-views/sys-dm-operation-status-azure-sql-database) Dynamic Management View to review information about recent operations performed on databases on your [logical server](logical-servers.md). - -This code sample returns all entries in `sys.dm_operation_status` for the specified database, sorted by which operations began most recently. Replace the database name with the appropriate value before running the code sample. - -```sql -SELECT * -FROM sys.dm_operation_status -WHERE major_resource_id = 'mySampleDatabase' -ORDER BY start_time DESC; -GO -``` - ---- - -## View databases in the Hyperscale service tier - -After migrating a database to Hyperscale or reconfiguring a database within the Hyperscale service tier, you may wish to view and/or document the configuration of your Hyperscale database. - -# [Portal](#tab/azure-portal) - -The Azure portal shows a list of all databases on a [logical server](logical-servers.md). The **Pricing tier** column includes the service tier for each database. 
- -:::image type="content" source="media/manage-hyperscale-database/database-list-azure-portal.png" alt-text="Screenshot of the overview panel of a logical server in Azure SQL Database. A list of databases appears at the bottom of the panel." lightbox="media/manage-hyperscale-database/database-list-azure-portal.png"::: - -1. Navigate to your [logical server](logical-servers.md) in the Azure portal. -1. In the left navigation bar, select **Overview**. -1. Scroll to the list of resources at the bottom of the pane. The window will display SQL elastic pools and databases on the logical server. -1. Review the **Pricing tier** column to identify databases in the Hyperscale service tier. - -# [Azure CLI](#tab/azure-cli) - -This Azure CLI code sample calls [az sql db list](/cli/azure/sql/db/op#az-sql-db-list) to list Hyperscale databases on a [logical server](logical-servers.md) with their name, location, service level objective, maximum size, and number of high availability replicas. - -Replace `resourceGroupName` and `serverName` with the appropriate values before running the following code sample: - -```azurecli-interactive -resourceGroupName="myResourceGroup" -serverName="server01" - -az sql db list -g $resourceGroupName -s $serverName --query "[].{Name:name, Location:location, SLO:currentServiceObjectiveName, Tier:currentSku.tier, maxSizeBytes:maxSizeBytes,HAreplicas:highAvailabilityReplicaCount}[?Tier=='Hyperscale']" --output table - -``` - -# [PowerShell](#tab/azure-powershell) - -The Azure PowerShell [Get-AzSqlDatabase](/powershell/module/az.sql/get-azsqldatabase) cmdlet returns a list of Hyperscale databases on a [logical server](logical-servers.md) with their name, location, service level objective, maximum size, and number of high availability replicas. 
- -Set the `$resourceGroupName` and `$serverName` parameters to the appropriate values before running the sample code: - -```powershell-interactive -$resourceGroupName = "myResourceGroup" -$serverName = "server01" - -Get-AzSqlDatabase -ResourceGroupName $resourceGroupName -ServerName $serverName | ` - Where-Object { $_.Edition -eq 'Hyperscale' } | ` - Select-Object DatabaseName, Location, currentServiceObjectiveName, Edition, ` - MaxSizeBytes, HighAvailabilityReplicaCount | ` - Format-Table - -``` - -Review the **Edition** column to identify databases in the Hyperscale service tier. - -# [Transact-SQL](#tab/t-sql) - -To review the service tiers of all Hyperscale databases on a [logical server](logical-servers.md) with Transact-SQL, first connect to the master database using [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms) or [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio). - -Query the [sys.database_service_objectives](/sql/relational-databases/system-catalog-views/sys-database-service-objectives-azure-sql-database) system catalog view to review databases in the Hyperscale service tier: - -```sql -SELECT d.name, dso.edition, dso.service_objective -FROM sys.database_service_objectives AS dso -JOIN sys.databases as d on dso.database_id = d.database_id -WHERE dso.edition = 'Hyperscale'; -GO -``` - ---- - -## Next steps - -Learn more about Hyperscale databases in the following articles: - -- [Quickstart: Create a Hyperscale database in Azure SQL Database](hyperscale-database-create-quickstart.md) -- [Hyperscale service tier](service-tier-hyperscale.md) -- [Azure SQL Database Hyperscale FAQ](service-tier-hyperscale-frequently-asked-questions-faq.yml) -- [Hyperscale secondary replicas](service-tier-hyperscale-replicas.md) -- [Azure SQL Database Hyperscale named replicas FAQ](service-tier-hyperscale-named-replicas-faq.yml) diff --git 
a/articles/azure-sql/database/media/active-directory-interactive-connect-azure-sql-db/image1.png b/articles/azure-sql/database/media/active-directory-interactive-connect-azure-sql-db/image1.png deleted file mode 100644 index 0d42a8798793a..0000000000000 Binary files a/articles/azure-sql/database/media/active-directory-interactive-connect-azure-sql-db/image1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-directory-interactive-connect-azure-sql-db/image2.png b/articles/azure-sql/database/media/active-directory-interactive-connect-azure-sql-db/image2.png deleted file mode 100644 index da225cba82bd2..0000000000000 Binary files a/articles/azure-sql/database/media/active-directory-interactive-connect-azure-sql-db/image2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-directory-interactive-connect-azure-sql-db/sshot-add-api-access-azure-sql-db-delegated-permissions-checkbox-e14.png b/articles/azure-sql/database/media/active-directory-interactive-connect-azure-sql-db/sshot-add-api-access-azure-sql-db-delegated-permissions-checkbox-e14.png deleted file mode 100644 index a434946721750..0000000000000 Binary files a/articles/azure-sql/database/media/active-directory-interactive-connect-azure-sql-db/sshot-add-api-access-azure-sql-db-delegated-permissions-checkbox-e14.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-directory-interactive-connect-azure-sql-db/sshot-registered-app-settings-required-permissions-add-api-access-azure-sql-db-d11.png b/articles/azure-sql/database/media/active-directory-interactive-connect-azure-sql-db/sshot-registered-app-settings-required-permissions-add-api-access-azure-sql-db-d11.png deleted file mode 100644 index ab1af7570d470..0000000000000 Binary files a/articles/azure-sql/database/media/active-directory-interactive-connect-azure-sql-db/sshot-registered-app-settings-required-permissions-add-api-access-azure-sql-db-d11.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/active-directory-interactive-connect-azure-sql-db/sshot-registered-app-settings-required-permissions-add-api-access-c32.png b/articles/azure-sql/database/media/active-directory-interactive-connect-azure-sql-db/sshot-registered-app-settings-required-permissions-add-api-access-c32.png deleted file mode 100644 index 7c1bc9612e1ba..0000000000000 Binary files a/articles/azure-sql/database/media/active-directory-interactive-connect-azure-sql-db/sshot-registered-app-settings-required-permissions-add-api-access-c32.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-cli-create-geo-replica.png b/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-cli-create-geo-replica.png deleted file mode 100644 index 25b7fcec2d516..0000000000000 Binary files a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-cli-create-geo-replica.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-cli-sql-create-secondary-server.png b/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-cli-sql-create-secondary-server.png deleted file mode 100644 index db5a2465fc76f..0000000000000 Binary files a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-cli-sql-create-secondary-server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-configure-geo-replica-database.png b/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-configure-geo-replica-database.png deleted file mode 100644 index bb483487273bf..0000000000000 Binary files a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-configure-geo-replica-database.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-create-and-configure-replica.png b/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-create-and-configure-replica.png deleted file mode 100644 index 47694d5285c5b..0000000000000 Binary files a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-create-and-configure-replica.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-create-geo-replica.png b/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-create-geo-replica.png deleted file mode 100644 index 919645514b1f3..0000000000000 Binary files a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-create-geo-replica.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-geo-replica-deployment.png b/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-geo-replica-deployment.png deleted file mode 100644 index ec52e429e41cf..0000000000000 Binary files a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-geo-replica-deployment.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-select-forced-failover.png b/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-select-forced-failover.png deleted file mode 100644 index 5e07cbf3ac676..0000000000000 Binary files a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-select-forced-failover.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-select-stop-replication.png 
b/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-select-stop-replication.png deleted file mode 100644 index 9348b5c08516b..0000000000000 Binary files a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-select-stop-replication.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-sql-database-secondary-status.png b/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-sql-database-secondary-status.png deleted file mode 100644 index 21518565b8a7d..0000000000000 Binary files a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-portal-sql-database-secondary-status.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-sql-db-geo-replica-list.png b/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-sql-db-geo-replica-list.png deleted file mode 100644 index 78057b7a17f43..0000000000000 Binary files a/articles/azure-sql/database/media/active-geo-replication-configure-portal/azure-sql-db-geo-replica-list.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-geo-replication-configure-portal/configure-geo-replication.png b/articles/azure-sql/database/media/active-geo-replication-configure-portal/configure-geo-replication.png deleted file mode 100644 index c8dbfdacf5100..0000000000000 Binary files a/articles/azure-sql/database/media/active-geo-replication-configure-portal/configure-geo-replication.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-geo-replication-configure-portal/create-secondary.png b/articles/azure-sql/database/media/active-geo-replication-configure-portal/create-secondary.png deleted file mode 100644 index d1c733436ed7c..0000000000000 Binary files 
a/articles/azure-sql/database/media/active-geo-replication-configure-portal/create-secondary.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-geo-replication-configure-portal/remove-secondary.png b/articles/azure-sql/database/media/active-geo-replication-configure-portal/remove-secondary.png deleted file mode 100644 index 8af1d19bcc53e..0000000000000 Binary files a/articles/azure-sql/database/media/active-geo-replication-configure-portal/remove-secondary.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-geo-replication-configure-portal/secondaries.png b/articles/azure-sql/database/media/active-geo-replication-configure-portal/secondaries.png deleted file mode 100644 index c2944036bac2d..0000000000000 Binary files a/articles/azure-sql/database/media/active-geo-replication-configure-portal/secondaries.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-geo-replication-configure-portal/seeding-complete.png b/articles/azure-sql/database/media/active-geo-replication-configure-portal/seeding-complete.png deleted file mode 100644 index 3da260bf6b6e2..0000000000000 Binary files a/articles/azure-sql/database/media/active-geo-replication-configure-portal/seeding-complete.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-geo-replication-configure-portal/seeding0.png b/articles/azure-sql/database/media/active-geo-replication-configure-portal/seeding0.png deleted file mode 100644 index baba0d0eb7e66..0000000000000 Binary files a/articles/azure-sql/database/media/active-geo-replication-configure-portal/seeding0.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-geo-replication-overview/geo-replication-relationship.png b/articles/azure-sql/database/media/active-geo-replication-overview/geo-replication-relationship.png deleted file mode 100644 index 19593b0f0f7b8..0000000000000 Binary files 
a/articles/azure-sql/database/media/active-geo-replication-overview/geo-replication-relationship.png and /dev/null differ diff --git a/articles/azure-sql/database/media/active-geo-replication-overview/geo-replication.png b/articles/azure-sql/database/media/active-geo-replication-overview/geo-replication.png deleted file mode 100644 index ea474e6cb92cc..0000000000000 Binary files a/articles/azure-sql/database/media/active-geo-replication-overview/geo-replication.png and /dev/null differ diff --git a/articles/azure-sql/database/media/advance-notifications/add-action-group.png b/articles/azure-sql/database/media/advance-notifications/add-action-group.png deleted file mode 100644 index 49f40fb9e2f09..0000000000000 Binary files a/articles/azure-sql/database/media/advance-notifications/add-action-group.png and /dev/null differ diff --git a/articles/azure-sql/database/media/advance-notifications/create-action-group.png b/articles/azure-sql/database/media/advance-notifications/create-action-group.png deleted file mode 100644 index 79bf1a8bd153b..0000000000000 Binary files a/articles/azure-sql/database/media/advance-notifications/create-action-group.png and /dev/null differ diff --git a/articles/azure-sql/database/media/advance-notifications/health-alerts.png b/articles/azure-sql/database/media/advance-notifications/health-alerts.png deleted file mode 100644 index 186501b68daa4..0000000000000 Binary files a/articles/azure-sql/database/media/advance-notifications/health-alerts.png and /dev/null differ diff --git a/articles/azure-sql/database/media/advance-notifications/notifications.png b/articles/azure-sql/database/media/advance-notifications/notifications.png deleted file mode 100644 index 9d2141a50969e..0000000000000 Binary files a/articles/azure-sql/database/media/advance-notifications/notifications.png and /dev/null differ diff --git a/articles/azure-sql/database/media/advanced-data-security/database_settings.png 
b/articles/azure-sql/database/media/advanced-data-security/database_settings.png deleted file mode 100644 index 868ec4fc3b5fb..0000000000000 Binary files a/articles/azure-sql/database/media/advanced-data-security/database_settings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/advanced-data-security/database_threat_detection_settings.png b/articles/azure-sql/database/media/advanced-data-security/database_threat_detection_settings.png deleted file mode 100644 index 361d1fe2b4bf4..0000000000000 Binary files a/articles/azure-sql/database/media/advanced-data-security/database_threat_detection_settings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/advanced-data-security/enable_ads.png b/articles/azure-sql/database/media/advanced-data-security/enable_ads.png deleted file mode 100644 index aa56a3b2a9cec..0000000000000 Binary files a/articles/azure-sql/database/media/advanced-data-security/enable_ads.png and /dev/null differ diff --git a/articles/azure-sql/database/media/advanced-data-security/server_settings.png b/articles/azure-sql/database/media/advanced-data-security/server_settings.png deleted file mode 100644 index 13246c30235fa..0000000000000 Binary files a/articles/azure-sql/database/media/advanced-data-security/server_settings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/alerts-insights-configure-portal/action-group.png b/articles/azure-sql/database/media/alerts-insights-configure-portal/action-group.png deleted file mode 100644 index cd394e339801d..0000000000000 Binary files a/articles/azure-sql/database/media/alerts-insights-configure-portal/action-group.png and /dev/null differ diff --git a/articles/azure-sql/database/media/alerts-insights-configure-portal/alerts.png b/articles/azure-sql/database/media/alerts-insights-configure-portal/alerts.png deleted file mode 100644 index 1e053f3e216e5..0000000000000 Binary files 
a/articles/azure-sql/database/media/alerts-insights-configure-portal/alerts.png and /dev/null differ diff --git a/articles/azure-sql/database/media/alerts-insights-configure-portal/configure-signal-logic.png b/articles/azure-sql/database/media/alerts-insights-configure-portal/configure-signal-logic.png deleted file mode 100644 index 0a3ed6f8703a5..0000000000000 Binary files a/articles/azure-sql/database/media/alerts-insights-configure-portal/configure-signal-logic.png and /dev/null differ diff --git a/articles/azure-sql/database/media/alerts-insights-configure-portal/create-rule.png b/articles/azure-sql/database/media/alerts-insights-configure-portal/create-rule.png deleted file mode 100644 index dfc5a21439455..0000000000000 Binary files a/articles/azure-sql/database/media/alerts-insights-configure-portal/create-rule.png and /dev/null differ diff --git a/articles/azure-sql/database/media/alerts-insights-configure-portal/select-signal.png b/articles/azure-sql/database/media/alerts-insights-configure-portal/select-signal.png deleted file mode 100644 index c895ae75eabe4..0000000000000 Binary files a/articles/azure-sql/database/media/alerts-insights-configure-portal/select-signal.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/column-selection.png b/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/column-selection.png deleted file mode 100644 index 1a4aade343419..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/column-selection.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/connection-strings.png b/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/connection-strings.png deleted file mode 100644 index 6ad33c8de8ef3..0000000000000 Binary files 
a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/connection-strings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/create-database.png b/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/create-database.png deleted file mode 100644 index 2f524eb31c13f..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/create-database.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/encrypt-columns.png b/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/encrypt-columns.png deleted file mode 100644 index a80e6f31a0da3..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/encrypt-columns.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/master-key-configuration.png b/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/master-key-configuration.png deleted file mode 100644 index dff3790518a41..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/master-key-configuration.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/ssms-connect.png b/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/ssms-connect.png deleted file mode 100644 index 3303344b763a9..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/ssms-connect.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/ssms-connection-parameter.png b/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/ssms-connection-parameter.png deleted file mode 
100644 index f978f2fc3be99..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/ssms-connection-parameter.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/ssms-encrypted.png b/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/ssms-encrypted.png deleted file mode 100644 index 7daf27adf5f9a..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/ssms-encrypted.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/ssms-plaintext.png b/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/ssms-plaintext.png deleted file mode 100644 index 4e82cc027495b..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/ssms-plaintext.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/summary.png b/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/summary.png deleted file mode 100644 index d16cdf35e840d..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-azure-key-vault-configure/summary.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/column-selection.png b/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/column-selection.png deleted file mode 100644 index 1a4aade343419..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/column-selection.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/connection-strings.png b/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/connection-strings.png deleted 
file mode 100644 index 35b36d6a86fc3..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/connection-strings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/console-app.png b/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/console-app.png deleted file mode 100644 index fe04b95bfe319..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/console-app.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/create-database.png b/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/create-database.png deleted file mode 100644 index 3c64f292bfaed..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/create-database.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/encrypt-columns.png b/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/encrypt-columns.png deleted file mode 100644 index a80e6f31a0da3..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/encrypt-columns.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/master-key-configuration.png b/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/master-key-configuration.png deleted file mode 100644 index db98ca6f35946..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/master-key-configuration.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/ssms-connect.png 
b/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/ssms-connect.png deleted file mode 100644 index 3303344b763a9..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/ssms-connect.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/ssms-connection-parameter.png b/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/ssms-connection-parameter.png deleted file mode 100644 index f978f2fc3be99..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/ssms-connection-parameter.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/ssms-encrypted.png b/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/ssms-encrypted.png deleted file mode 100644 index 7daf27adf5f9a..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/ssms-encrypted.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/ssms-plaintext.png b/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/ssms-plaintext.png deleted file mode 100644 index 4e82cc027495b..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/ssms-plaintext.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/summary.png b/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/summary.png deleted file mode 100644 index f0cb711f4ed31..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-certificate-store-configure/summary.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/always-encrypted-enclaves/allow-enclave-computations.png b/articles/azure-sql/database/media/always-encrypted-enclaves/allow-enclave-computations.png deleted file mode 100644 index 71a79ea4b306e..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-enclaves/allow-enclave-computations.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-enclaves/attestation-provider-role-assigment.png b/articles/azure-sql/database/media/always-encrypted-enclaves/attestation-provider-role-assigment.png deleted file mode 100644 index 25e8041707497..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-enclaves/attestation-provider-role-assigment.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-enclaves/connect-to-server-configure-attestation.png b/articles/azure-sql/database/media/always-encrypted-enclaves/connect-to-server-configure-attestation.png deleted file mode 100644 index 62e431d40f12a..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-enclaves/connect-to-server-configure-attestation.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-enclaves/connect-without-always-encrypted-ssms.png b/articles/azure-sql/database/media/always-encrypted-enclaves/connect-without-always-encrypted-ssms.png deleted file mode 100644 index 5bdb821ee3cb6..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-enclaves/connect-without-always-encrypted-ssms.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-enclaves/portal-attest-uri.png b/articles/azure-sql/database/media/always-encrypted-enclaves/portal-attest-uri.png deleted file mode 100644 index 96da66482cf1a..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-enclaves/portal-attest-uri.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/always-encrypted-enclaves/portal-configure-attestation-policy.png b/articles/azure-sql/database/media/always-encrypted-enclaves/portal-configure-attestation-policy.png deleted file mode 100644 index d7b6653e5e322..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-enclaves/portal-configure-attestation-policy.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-enclaves/portal-configure-database-networking.png b/articles/azure-sql/database/media/always-encrypted-enclaves/portal-configure-database-networking.png deleted file mode 100644 index a29b4cebe2b34..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-enclaves/portal-configure-database-networking.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-enclaves/portal-configure-database.png b/articles/azure-sql/database/media/always-encrypted-enclaves/portal-configure-database.png deleted file mode 100644 index 34d8f1b6bfbc5..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-enclaves/portal-configure-database.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-enclaves/portal-configure-dc-series-database-basics.png b/articles/azure-sql/database/media/always-encrypted-enclaves/portal-configure-dc-series-database-basics.png deleted file mode 100644 index a705560a12f3c..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-enclaves/portal-configure-dc-series-database-basics.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-enclaves/portal-configure-dc-series-database.png b/articles/azure-sql/database/media/always-encrypted-enclaves/portal-configure-dc-series-database.png deleted file mode 100644 index 45621632edb3c..0000000000000 Binary files 
a/articles/azure-sql/database/media/always-encrypted-enclaves/portal-configure-dc-series-database.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-enclaves/portal-create-attestation-provider-basics.png b/articles/azure-sql/database/media/always-encrypted-enclaves/portal-create-attestation-provider-basics.png deleted file mode 100644 index d482a313aaa4d..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-enclaves/portal-create-attestation-provider-basics.png and /dev/null differ diff --git a/articles/azure-sql/database/media/always-encrypted-enclaves/portal-edit-attestation-policy.png b/articles/azure-sql/database/media/always-encrypted-enclaves/portal-edit-attestation-policy.png deleted file mode 100644 index c8bac4a668f41..0000000000000 Binary files a/articles/azure-sql/database/media/always-encrypted-enclaves/portal-edit-attestation-policy.png and /dev/null differ diff --git a/articles/azure-sql/database/media/analyze-prevent-deadlocks/deadlock-execution-plan-clustered-index-scan.png b/articles/azure-sql/database/media/analyze-prevent-deadlocks/deadlock-execution-plan-clustered-index-scan.png deleted file mode 100644 index 44ccb5d181d16..0000000000000 Binary files a/articles/azure-sql/database/media/analyze-prevent-deadlocks/deadlock-execution-plan-clustered-index-scan.png and /dev/null differ diff --git a/articles/azure-sql/database/media/analyze-prevent-deadlocks/deadlock-graph-deadlock-victim.png b/articles/azure-sql/database/media/analyze-prevent-deadlocks/deadlock-graph-deadlock-victim.png deleted file mode 100644 index 975f592f31c4f..0000000000000 Binary files a/articles/azure-sql/database/media/analyze-prevent-deadlocks/deadlock-graph-deadlock-victim.png and /dev/null differ diff --git a/articles/azure-sql/database/media/analyze-prevent-deadlocks/deadlock-graph-process-list.png b/articles/azure-sql/database/media/analyze-prevent-deadlocks/deadlock-graph-process-list.png deleted file mode 
100644 index e0c4270e924e9..0000000000000 Binary files a/articles/azure-sql/database/media/analyze-prevent-deadlocks/deadlock-graph-process-list.png and /dev/null differ diff --git a/articles/azure-sql/database/media/analyze-prevent-deadlocks/deadlock-graph-resource-list.png b/articles/azure-sql/database/media/analyze-prevent-deadlocks/deadlock-graph-resource-list.png deleted file mode 100644 index 2627212ae2ab3..0000000000000 Binary files a/articles/azure-sql/database/media/analyze-prevent-deadlocks/deadlock-graph-resource-list.png and /dev/null differ diff --git a/articles/azure-sql/database/media/analyze-prevent-deadlocks/deadlock-overview-with-deadlock-victim.png b/articles/azure-sql/database/media/analyze-prevent-deadlocks/deadlock-overview-with-deadlock-victim.png deleted file mode 100644 index bf4c84e8c4ed3..0000000000000 Binary files a/articles/azure-sql/database/media/analyze-prevent-deadlocks/deadlock-overview-with-deadlock-victim.png and /dev/null differ diff --git a/articles/azure-sql/database/media/analyze-prevent-deadlocks/deadlock-overview.png b/articles/azure-sql/database/media/analyze-prevent-deadlocks/deadlock-overview.png deleted file mode 100644 index ad1f5d5e8379b..0000000000000 Binary files a/articles/azure-sql/database/media/analyze-prevent-deadlocks/deadlock-overview.png and /dev/null differ diff --git a/articles/azure-sql/database/media/analyze-prevent-deadlocks/ssms-deadlock-graph-xdl-file-graphic-display.png b/articles/azure-sql/database/media/analyze-prevent-deadlocks/ssms-deadlock-graph-xdl-file-graphic-display.png deleted file mode 100644 index 4bc7e55aa0487..0000000000000 Binary files a/articles/azure-sql/database/media/analyze-prevent-deadlocks/ssms-deadlock-graph-xdl-file-graphic-display.png and /dev/null differ diff --git a/articles/azure-sql/database/media/analyze-prevent-deadlocks/ssms-save-deadlock-file-xdl.png b/articles/azure-sql/database/media/analyze-prevent-deadlocks/ssms-save-deadlock-file-xdl.png deleted file mode 100644 
index 92220995090f2..0000000000000 Binary files a/articles/azure-sql/database/media/analyze-prevent-deadlocks/ssms-save-deadlock-file-xdl.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auditing-overview/2_auditing_get_started_server_inherit.png b/articles/azure-sql/database/media/auditing-overview/2_auditing_get_started_server_inherit.png deleted file mode 100644 index f2418a39ab871..0000000000000 Binary files a/articles/azure-sql/database/media/auditing-overview/2_auditing_get_started_server_inherit.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auditing-overview/5_auditing_get_started_storage_key_regeneration.png b/articles/azure-sql/database/media/auditing-overview/5_auditing_get_started_storage_key_regeneration.png deleted file mode 100644 index f340494e4712d..0000000000000 Binary files a/articles/azure-sql/database/media/auditing-overview/5_auditing_get_started_storage_key_regeneration.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auditing-overview/6_auditing_get_started_regenerate_key.png b/articles/azure-sql/database/media/auditing-overview/6_auditing_get_started_regenerate_key.png deleted file mode 100644 index 0cbe38fb22971..0000000000000 Binary files a/articles/azure-sql/database/media/auditing-overview/6_auditing_get_started_regenerate_key.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auditing-overview/7_auditing_get_started_blob_view_audit_logs.png b/articles/azure-sql/database/media/auditing-overview/7_auditing_get_started_blob_view_audit_logs.png deleted file mode 100644 index d9945c251ef68..0000000000000 Binary files a/articles/azure-sql/database/media/auditing-overview/7_auditing_get_started_blob_view_audit_logs.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auditing-overview/8_auditing_get_started_blob_audit_records.png b/articles/azure-sql/database/media/auditing-overview/8_auditing_get_started_blob_audit_records.png 
deleted file mode 100644 index bcc84967ff4b3..0000000000000 Binary files a/articles/azure-sql/database/media/auditing-overview/8_auditing_get_started_blob_audit_records.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auditing-overview/9_auditing_get_started_ssms_1.png b/articles/azure-sql/database/media/auditing-overview/9_auditing_get_started_ssms_1.png deleted file mode 100644 index 4ca7253f51732..0000000000000 Binary files a/articles/azure-sql/database/media/auditing-overview/9_auditing_get_started_ssms_1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auditing-overview/auditing-log-analytics-dashboard-data.png b/articles/azure-sql/database/media/auditing-overview/auditing-log-analytics-dashboard-data.png deleted file mode 100644 index 275bcdef54d66..0000000000000 Binary files a/articles/azure-sql/database/media/auditing-overview/auditing-log-analytics-dashboard-data.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auditing-overview/auditing-log-analytics-dashboard.png b/articles/azure-sql/database/media/auditing-overview/auditing-log-analytics-dashboard.png deleted file mode 100644 index 2d8f8725e795e..0000000000000 Binary files a/articles/azure-sql/database/media/auditing-overview/auditing-log-analytics-dashboard.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auditing-overview/auditing-log-analytics.png b/articles/azure-sql/database/media/auditing-overview/auditing-log-analytics.png deleted file mode 100644 index 610bff24f4a4c..0000000000000 Binary files a/articles/azure-sql/database/media/auditing-overview/auditing-log-analytics.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auditing-overview/auditing-select-destination.png b/articles/azure-sql/database/media/auditing-overview/auditing-select-destination.png deleted file mode 100644 index c86d84b27b43e..0000000000000 Binary files 
a/articles/azure-sql/database/media/auditing-overview/auditing-select-destination.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auditing-overview/auditing-support-operation-log-destination.png b/articles/azure-sql/database/media/auditing-overview/auditing-support-operation-log-destination.png deleted file mode 100644 index ccdc6d2e76ded..0000000000000 Binary files a/articles/azure-sql/database/media/auditing-overview/auditing-support-operation-log-destination.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auditing-overview/auditing-view-audit-logs.png b/articles/azure-sql/database/media/auditing-overview/auditing-view-audit-logs.png deleted file mode 100644 index e1dbbd18d400c..0000000000000 Binary files a/articles/azure-sql/database/media/auditing-overview/auditing-view-audit-logs.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auditing-overview/auditing-view-dashboard.png b/articles/azure-sql/database/media/auditing-overview/auditing-view-dashboard.png deleted file mode 100644 index 5fc405f3baacc..0000000000000 Binary files a/articles/azure-sql/database/media/auditing-overview/auditing-view-dashboard.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auditing-overview/auditing_select_event_hub.png b/articles/azure-sql/database/media/auditing-overview/auditing_select_event_hub.png deleted file mode 100644 index a2a2b2265ec9c..0000000000000 Binary files a/articles/azure-sql/database/media/auditing-overview/auditing_select_event_hub.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auditing-overview/auditing_select_oms.png b/articles/azure-sql/database/media/auditing-overview/auditing_select_oms.png deleted file mode 100644 index 63e36dbeadd9d..0000000000000 Binary files a/articles/azure-sql/database/media/auditing-overview/auditing_select_oms.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/auditing-overview/auditing_select_storage.png b/articles/azure-sql/database/media/auditing-overview/auditing_select_storage.png deleted file mode 100644 index a5a9b1bb46203..0000000000000 Binary files a/articles/azure-sql/database/media/auditing-overview/auditing_select_storage.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auditing-overview/support-operations.png b/articles/azure-sql/database/media/auditing-overview/support-operations.png deleted file mode 100644 index f3a4f116dca2c..0000000000000 Binary files a/articles/azure-sql/database/media/auditing-overview/support-operations.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-configure/12connect-using-pw-auth2.png b/articles/azure-sql/database/media/authentication-aad-configure/12connect-using-pw-auth2.png deleted file mode 100644 index 17213f78e0cac..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-configure/12connect-using-pw-auth2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-configure/13connect-to-db2.png b/articles/azure-sql/database/media/authentication-aad-configure/13connect-to-db2.png deleted file mode 100644 index a9da775586db6..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-configure/13connect-to-db2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-configure/aad.png b/articles/azure-sql/database/media/authentication-aad-configure/aad.png deleted file mode 100644 index 4ab679902a0d9..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-configure/aad.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-configure/active-directory-integrated.png b/articles/azure-sql/database/media/authentication-aad-configure/active-directory-integrated.png deleted file mode 100644 index 
9a74ff95c3cd3..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-configure/active-directory-integrated.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-configure/active-directory-pane.png b/articles/azure-sql/database/media/authentication-aad-configure/active-directory-pane.png deleted file mode 100644 index 241910a994a7d..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-configure/active-directory-pane.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-configure/add-azure-active-directory-admin.png b/articles/azure-sql/database/media/authentication-aad-configure/add-azure-active-directory-admin.png deleted file mode 100644 index 702cd8795829f..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-configure/add-azure-active-directory-admin.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-configure/grant-permissions.png b/articles/azure-sql/database/media/authentication-aad-configure/grant-permissions.png deleted file mode 100644 index 4063d5d4ea6ff..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-configure/grant-permissions.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-configure/save-admin.png b/articles/azure-sql/database/media/authentication-aad-configure/save-admin.png deleted file mode 100644 index fdd53067fdfd9..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-configure/save-admin.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-configure/save.png b/articles/azure-sql/database/media/authentication-aad-configure/save.png deleted file mode 100644 index 360f14e73b7c0..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-configure/save.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/authentication-aad-configure/search-for-and-select-sql-servers.png b/articles/azure-sql/database/media/authentication-aad-configure/search-for-and-select-sql-servers.png deleted file mode 100644 index cc36a5233c326..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-configure/search-for-and-select-sql-servers.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-configure/select-azure-active-directory-admin.png b/articles/azure-sql/database/media/authentication-aad-configure/select-azure-active-directory-admin.png deleted file mode 100644 index db5dc0cde5b04..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-configure/select-azure-active-directory-admin.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-configure/set-admin.png b/articles/azure-sql/database/media/authentication-aad-configure/set-admin.png deleted file mode 100644 index a2d14ba4dc566..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-configure/set-admin.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-configure/sql-servers-set-active-directory-admin.png b/articles/azure-sql/database/media/authentication-aad-configure/sql-servers-set-active-directory-admin.png deleted file mode 100644 index 25e17ac1a7ecf..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-configure/sql-servers-set-active-directory-admin.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-configure/success.png b/articles/azure-sql/database/media/authentication-aad-configure/success.png deleted file mode 100644 index bdde94d371fcd..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-configure/success.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/authentication-aad-configure/switch-directory.png b/articles/azure-sql/database/media/authentication-aad-configure/switch-directory.png deleted file mode 100644 index 8492c850a0784..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-configure/switch-directory.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-directory-readers-role/azure-ad-add-managed-instance-service-principal.png b/articles/azure-sql/database/media/authentication-aad-directory-readers-role/azure-ad-add-managed-instance-service-principal.png deleted file mode 100644 index 15f08345e40b1..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-directory-readers-role/azure-ad-add-managed-instance-service-principal.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-directory-readers-role/azure-ad-group-created.png b/articles/azure-sql/database/media/authentication-aad-directory-readers-role/azure-ad-group-created.png deleted file mode 100644 index ba2c47828af71..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-directory-readers-role/azure-ad-group-created.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-directory-readers-role/azure-ad-managed-instance-service-principal.png b/articles/azure-sql/database/media/authentication-aad-directory-readers-role/azure-ad-managed-instance-service-principal.png deleted file mode 100644 index 891b9f0d0f05a..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-directory-readers-role/azure-ad-managed-instance-service-principal.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-directory-readers-role/azure-ad-managed-instance.png b/articles/azure-sql/database/media/authentication-aad-directory-readers-role/azure-ad-managed-instance.png deleted file mode 
100644 index 71f3f4ecf289e..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-directory-readers-role/azure-ad-managed-instance.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-directory-readers-role/new-group.png b/articles/azure-sql/database/media/authentication-aad-directory-readers-role/new-group.png deleted file mode 100644 index a50b8e456c1a0..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-directory-readers-role/new-group.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-overview/1aad-auth-diagram.png b/articles/azure-sql/database/media/authentication-aad-overview/1aad-auth-diagram.png deleted file mode 100644 index f07dc6dae686c..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-overview/1aad-auth-diagram.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-overview/2subscription-relationship.png b/articles/azure-sql/database/media/authentication-aad-overview/2subscription-relationship.png deleted file mode 100644 index 0ce36d5a78ad4..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-overview/2subscription-relationship.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-overview/3admin-structure.png b/articles/azure-sql/database/media/authentication-aad-overview/3admin-structure.png deleted file mode 100644 index 90e45b7e4a356..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-overview/3admin-structure.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-service-principals-tutorial/aad-app-registration-api-permissions.png b/articles/azure-sql/database/media/authentication-aad-service-principals-tutorial/aad-app-registration-api-permissions.png deleted file mode 100644 index 9c46d281e1966..0000000000000 
Binary files a/articles/azure-sql/database/media/authentication-aad-service-principals-tutorial/aad-app-registration-api-permissions.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-service-principals-tutorial/aad-apps.png b/articles/azure-sql/database/media/authentication-aad-service-principals-tutorial/aad-apps.png deleted file mode 100644 index 3b518e9842662..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-service-principals-tutorial/aad-apps.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-service-principals-tutorial/aad-directory-reader-all-permissions.png b/articles/azure-sql/database/media/authentication-aad-service-principals-tutorial/aad-directory-reader-all-permissions.png deleted file mode 100644 index 6c6c4688b31f6..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-service-principals-tutorial/aad-directory-reader-all-permissions.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-service-principals-tutorial/aad-register-app.png b/articles/azure-sql/database/media/authentication-aad-service-principals-tutorial/aad-register-app.png deleted file mode 100644 index 588345a5602cb..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-service-principals-tutorial/aad-register-app.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-aad-service-principals-tutorial/azure-ad-microsoft-graph-all-permissions.png b/articles/azure-sql/database/media/authentication-aad-service-principals-tutorial/azure-ad-microsoft-graph-all-permissions.png deleted file mode 100644 index 33e9d44520989..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-service-principals-tutorial/azure-ad-microsoft-graph-all-permissions.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/authentication-aad-service-principals-tutorial/enterprise-applications-object-id.png b/articles/azure-sql/database/media/authentication-aad-service-principals-tutorial/enterprise-applications-object-id.png deleted file mode 100644 index 5328337e248ca..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-aad-service-principals-tutorial/enterprise-applications-object-id.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/adding-scope-policy-azure-ad-only-authentication.png b/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/adding-scope-policy-azure-ad-only-authentication.png deleted file mode 100644 index 9ae669eaf03cb..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/adding-scope-policy-azure-ad-only-authentication.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/assign-policy-azure-ad-only-authentication.png b/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/assign-policy-azure-ad-only-authentication.png deleted file mode 100644 index 50cbe9b7199b2..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/assign-policy-azure-ad-only-authentication.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/check-compliance-policy-azure-ad-only-authentication.png b/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/check-compliance-policy-azure-ad-only-authentication.png deleted file mode 100644 index 4576e6be038f9..0000000000000 Binary files 
a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/check-compliance-policy-azure-ad-only-authentication.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/compliance-policy-azure-ad-only-authentication.png b/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/compliance-policy-azure-ad-only-authentication.png deleted file mode 100644 index 5b4e650a9c5a3..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/compliance-policy-azure-ad-only-authentication.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/deny-policy-azure-ad-only-authentication.png b/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/deny-policy-azure-ad-only-authentication.png deleted file mode 100644 index c15555c30906b..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/deny-policy-azure-ad-only-authentication.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/non-compliance-message-policy-azure-ad-only-authentication.png b/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/non-compliance-message-policy-azure-ad-only-authentication.png deleted file mode 100644 index de942938943d0..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/non-compliance-message-policy-azure-ad-only-authentication.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/policy-azure-ad-only-authentication.png 
b/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/policy-azure-ad-only-authentication.png deleted file mode 100644 index ad2443920e0d0..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/policy-azure-ad-only-authentication.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/selecting-scope-policy-azure-ad-only-authentication.png b/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/selecting-scope-policy-azure-ad-only-authentication.png deleted file mode 100644 index ff0fc9d4291a8..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication-policy/selecting-scope-policy-azure-ad-only-authentication.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-only-authentication-access-control-add-role.png b/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-only-authentication-access-control-add-role.png deleted file mode 100644 index 95dcdcdd216f1..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-only-authentication-access-control-add-role.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-only-authentication-access-control.png b/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-only-authentication-access-control.png deleted file mode 100644 index 1b2de481ca6ec..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-only-authentication-access-control.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-only-authentication-portal-sql-server-activity-log.png b/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-only-authentication-portal-sql-server-activity-log.png deleted file mode 100644 index 2b7db685df8d3..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-only-authentication-portal-sql-server-activity-log.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-only-authentication-portal.png b/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-only-authentication-portal.png deleted file mode 100644 index 6be3b413e4772..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-only-authentication-portal.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-only-managed-instance-create-basic-choose-authentication.png b/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-only-managed-instance-create-basic-choose-authentication.png deleted file mode 100644 index ac0852cd55eeb..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-only-managed-instance-create-basic-choose-authentication.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-only-managed-instance-create-basic.png b/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-only-managed-instance-create-basic.png deleted file mode 100644 index 9f83c1a90e54f..0000000000000 Binary files 
a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-only-managed-instance-create-basic.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-portal-create-server.png b/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-portal-create-server.png deleted file mode 100644 index 98e5d2d649cc5..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-portal-create-server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-portal-read-permissions.png b/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-portal-read-permissions.png deleted file mode 100644 index 5c73c181c95ae..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-only-authentication/azure-ad-portal-read-permissions.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/azure-ad-check-user-assigned-managed-identity-permissions.png b/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/azure-ad-check-user-assigned-managed-identity-permissions.png deleted file mode 100644 index f9e9da3762a49..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/azure-ad-check-user-assigned-managed-identity-permissions.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/azure-ad-search-enterprise-applications.png b/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/azure-ad-search-enterprise-applications.png deleted file mode 100644 index 5618f2ce1cb3c..0000000000000 Binary files 
a/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/azure-ad-search-enterprise-applications.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/create-server-configure-identities.png b/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/create-server-configure-identities.png deleted file mode 100644 index affff36ecd8b2..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/create-server-configure-identities.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/existing-server-select-managed-identity.png b/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/existing-server-select-managed-identity.png deleted file mode 100644 index 1dad862157ba0..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/existing-server-select-managed-identity.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/select-a-primary-identity.png b/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/select-a-primary-identity.png deleted file mode 100644 index 605aa52179ccb..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/select-a-primary-identity.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/select-a-user-assigned-managed-identity.png b/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/select-a-user-assigned-managed-identity.png deleted file mode 100644 index 9e24921fd0f0c..0000000000000 Binary files 
a/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/select-a-user-assigned-managed-identity.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/user-assigned-managed-identity-configuration.png b/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/user-assigned-managed-identity-configuration.png deleted file mode 100644 index 6b18021f215f8..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-azure-ad-user-assigned-managed-identity/user-assigned-managed-identity-configuration.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-mfa-ssms-configure/1mfa-universal-connect-user.png b/articles/azure-sql/database/media/authentication-mfa-ssms-configure/1mfa-universal-connect-user.png deleted file mode 100644 index 61263f991a192..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-mfa-ssms-configure/1mfa-universal-connect-user.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-mfa-ssms-configure/1mfa-universal-connect.png b/articles/azure-sql/database/media/authentication-mfa-ssms-configure/1mfa-universal-connect.png deleted file mode 100644 index 9f6aa8a0105c7..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-mfa-ssms-configure/1mfa-universal-connect.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-mfa-ssms-configure/2mfa-sign-in.png b/articles/azure-sql/database/media/authentication-mfa-ssms-configure/2mfa-sign-in.png deleted file mode 100644 index c3fe99fd87228..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-mfa-ssms-configure/2mfa-sign-in.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-mfa-ssms-configure/3mfa-setup.png 
b/articles/azure-sql/database/media/authentication-mfa-ssms-configure/3mfa-setup.png deleted file mode 100644 index 8b01a245af262..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-mfa-ssms-configure/3mfa-setup.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-mfa-ssms-configure/4mfa-verify-1.png b/articles/azure-sql/database/media/authentication-mfa-ssms-configure/4mfa-verify-1.png deleted file mode 100644 index 1408f8d170fce..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-mfa-ssms-configure/4mfa-verify-1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-mfa-ssms-configure/5mfa-verify-2.png b/articles/azure-sql/database/media/authentication-mfa-ssms-configure/5mfa-verify-2.png deleted file mode 100644 index 2b185c200850a..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-mfa-ssms-configure/5mfa-verify-2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-mfa-ssms-configure/mfa-no-tenant-ssms.png b/articles/azure-sql/database/media/authentication-mfa-ssms-configure/mfa-no-tenant-ssms.png deleted file mode 100644 index 401a3e24f488e..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-mfa-ssms-configure/mfa-no-tenant-ssms.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-mfa-ssms-configure/mfa-tenant-ssms.png b/articles/azure-sql/database/media/authentication-mfa-ssms-configure/mfa-tenant-ssms.png deleted file mode 100644 index 99805dd213f4e..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-mfa-ssms-configure/mfa-tenant-ssms.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-mfa-ssms-overview/1mfa-universal-connect.png b/articles/azure-sql/database/media/authentication-mfa-ssms-overview/1mfa-universal-connect.png deleted file mode 100644 index 
e7cc940b3d9e9..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-mfa-ssms-overview/1mfa-universal-connect.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-mfa-ssms-overview/mfa-no-tenant-ssms.png b/articles/azure-sql/database/media/authentication-mfa-ssms-overview/mfa-no-tenant-ssms.png deleted file mode 100644 index 401a3e24f488e..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-mfa-ssms-overview/mfa-no-tenant-ssms.png and /dev/null differ diff --git a/articles/azure-sql/database/media/authentication-mfa-ssms-overview/mfa-tenant-ssms.png b/articles/azure-sql/database/media/authentication-mfa-ssms-overview/mfa-tenant-ssms.png deleted file mode 100644 index 99805dd213f4e..0000000000000 Binary files a/articles/azure-sql/database/media/authentication-mfa-ssms-overview/mfa-tenant-ssms.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/add-elastic-pool-to-failover-group.png b/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/add-elastic-pool-to-failover-group.png deleted file mode 100644 index c0abdc8c51aca..0000000000000 Binary files a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/add-elastic-pool-to-failover-group.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/add-failover-group.png b/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/add-failover-group.png deleted file mode 100644 index 902d9aa71c443..0000000000000 Binary files a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/add-failover-group.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/add-sqldb-to-failover-group.png b/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/add-sqldb-to-failover-group.png deleted file mode 100644 index 
d4a3a0ce2cf4d..0000000000000 Binary files a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/add-sqldb-to-failover-group.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/create-failover-group.png b/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/create-failover-group.png deleted file mode 100644 index b4d9e90b1d09d..0000000000000 Binary files a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/create-failover-group.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/failover-sql-db.png b/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/failover-sql-db.png deleted file mode 100644 index df422a59632c4..0000000000000 Binary files a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/failover-sql-db.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/find-failover-group-connection-string.png b/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/find-failover-group-connection-string.png deleted file mode 100644 index 18e6d7941bce4..0000000000000 Binary files a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/find-failover-group-connection-string.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/open-sql-db-server.png b/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/open-sql-db-server.png deleted file mode 100644 index 26931371caa9c..0000000000000 Binary files a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/open-sql-db-server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/select-failover-group.png 
b/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/select-failover-group.png deleted file mode 100644 index d396265eecb02..0000000000000 Binary files a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/select-failover-group.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/server-for-elastic-pool.png b/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/server-for-elastic-pool.png deleted file mode 100644 index b0ad892c97baa..0000000000000 Binary files a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/server-for-elastic-pool.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/sqldb-add-new-failover-group.png b/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/sqldb-add-new-failover-group.png deleted file mode 100644 index 30fb915e7c94b..0000000000000 Binary files a/articles/azure-sql/database/media/auto-failover-group-configure-sql-db/sqldb-add-new-failover-group.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auto-failover-group-overview/auto-failover-group-local-ip-range-overlap.png b/articles/azure-sql/database/media/auto-failover-group-overview/auto-failover-group-local-ip-range-overlap.png deleted file mode 100644 index 30b31f661c953..0000000000000 Binary files a/articles/azure-sql/database/media/auto-failover-group-overview/auto-failover-group-local-ip-range-overlap.png and /dev/null differ diff --git a/articles/azure-sql/database/media/auto-failover-group-overview/auto-failover-group.png b/articles/azure-sql/database/media/auto-failover-group-overview/auto-failover-group.png deleted file mode 100644 index e1029a0e93187..0000000000000 Binary files a/articles/azure-sql/database/media/auto-failover-group-overview/auto-failover-group.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/automated-backups-overview/backup-metrics.png b/articles/azure-sql/database/media/automated-backups-overview/backup-metrics.png deleted file mode 100644 index 8f78509b45015..0000000000000 Binary files a/articles/azure-sql/database/media/automated-backups-overview/backup-metrics.png and /dev/null differ diff --git a/articles/azure-sql/database/media/automated-backups-overview/change-backup-storage-redundancy-managed-instance-notification.png b/articles/azure-sql/database/media/automated-backups-overview/change-backup-storage-redundancy-managed-instance-notification.png deleted file mode 100644 index 4358ca4f4a9d2..0000000000000 Binary files a/articles/azure-sql/database/media/automated-backups-overview/change-backup-storage-redundancy-managed-instance-notification.png and /dev/null differ diff --git a/articles/azure-sql/database/media/automated-backups-overview/check-backup-storage-cost-sql-mi.png b/articles/azure-sql/database/media/automated-backups-overview/check-backup-storage-cost-sql-mi.png deleted file mode 100644 index c31d2978e566c..0000000000000 Binary files a/articles/azure-sql/database/media/automated-backups-overview/check-backup-storage-cost-sql-mi.png and /dev/null differ diff --git a/articles/azure-sql/database/media/automated-backups-overview/configure-backup-retention-sqldb.png b/articles/azure-sql/database/media/automated-backups-overview/configure-backup-retention-sqldb.png deleted file mode 100644 index 53ae3d61c7ac1..0000000000000 Binary files a/articles/azure-sql/database/media/automated-backups-overview/configure-backup-retention-sqldb.png and /dev/null differ diff --git a/articles/azure-sql/database/media/automated-backups-overview/configure-backup-retention-sqlmi.png b/articles/azure-sql/database/media/automated-backups-overview/configure-backup-retention-sqlmi.png deleted file mode 100644 index fd47879ff7a1d..0000000000000 Binary files 
a/articles/azure-sql/database/media/automated-backups-overview/configure-backup-retention-sqlmi.png and /dev/null differ diff --git a/articles/azure-sql/database/media/automated-backups-overview/open-configuration-blade-managed-instance.png b/articles/azure-sql/database/media/automated-backups-overview/open-configuration-blade-managed-instance.png deleted file mode 100644 index 739a0b6f4f7fd..0000000000000 Binary files a/articles/azure-sql/database/media/automated-backups-overview/open-configuration-blade-managed-instance.png and /dev/null differ diff --git a/articles/azure-sql/database/media/automated-backups-overview/select-backup-storage-redundancy-managed-instance.png b/articles/azure-sql/database/media/automated-backups-overview/select-backup-storage-redundancy-managed-instance.png deleted file mode 100644 index eaed77da9205e..0000000000000 Binary files a/articles/azure-sql/database/media/automated-backups-overview/select-backup-storage-redundancy-managed-instance.png and /dev/null differ diff --git a/articles/azure-sql/database/media/automated-backups-overview/sql-database-backup-storage-redundancy.png b/articles/azure-sql/database/media/automated-backups-overview/sql-database-backup-storage-redundancy.png deleted file mode 100644 index 4bf34e379d664..0000000000000 Binary files a/articles/azure-sql/database/media/automated-backups-overview/sql-database-backup-storage-redundancy.png and /dev/null differ diff --git a/articles/azure-sql/database/media/automatic-tuning-email-notifications-configure/howto-email-01.png b/articles/azure-sql/database/media/automatic-tuning-email-notifications-configure/howto-email-01.png deleted file mode 100644 index 8e8ec5f1b6619..0000000000000 Binary files a/articles/azure-sql/database/media/automatic-tuning-email-notifications-configure/howto-email-01.png and /dev/null differ diff --git a/articles/azure-sql/database/media/automatic-tuning-email-notifications-configure/howto-email-03.png 
b/articles/azure-sql/database/media/automatic-tuning-email-notifications-configure/howto-email-03.png deleted file mode 100644 index 34e240087f821..0000000000000 Binary files a/articles/azure-sql/database/media/automatic-tuning-email-notifications-configure/howto-email-03.png and /dev/null differ diff --git a/articles/azure-sql/database/media/automatic-tuning-email-notifications-configure/howto-email-04.png b/articles/azure-sql/database/media/automatic-tuning-email-notifications-configure/howto-email-04.png deleted file mode 100644 index c234f7def78fa..0000000000000 Binary files a/articles/azure-sql/database/media/automatic-tuning-email-notifications-configure/howto-email-04.png and /dev/null differ diff --git a/articles/azure-sql/database/media/automatic-tuning-email-notifications-configure/howto-email-05.png b/articles/azure-sql/database/media/automatic-tuning-email-notifications-configure/howto-email-05.png deleted file mode 100644 index b8ce6d01ec981..0000000000000 Binary files a/articles/azure-sql/database/media/automatic-tuning-email-notifications-configure/howto-email-05.png and /dev/null differ diff --git a/articles/azure-sql/database/media/automatic-tuning-email-notifications-configure/howto-email-06.png b/articles/azure-sql/database/media/automatic-tuning-email-notifications-configure/howto-email-06.png deleted file mode 100644 index c98d723f985dc..0000000000000 Binary files a/articles/azure-sql/database/media/automatic-tuning-email-notifications-configure/howto-email-06.png and /dev/null differ diff --git a/articles/azure-sql/database/media/automatic-tuning-email-notifications-configure/howto-email-07.png b/articles/azure-sql/database/media/automatic-tuning-email-notifications-configure/howto-email-07.png deleted file mode 100644 index 08f548922cf1b..0000000000000 Binary files a/articles/azure-sql/database/media/automatic-tuning-email-notifications-configure/howto-email-07.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/automatic-tuning-enable/database.png b/articles/azure-sql/database/media/automatic-tuning-enable/database.png deleted file mode 100644 index d17011b3a02e4..0000000000000 Binary files a/articles/azure-sql/database/media/automatic-tuning-enable/database.png and /dev/null differ diff --git a/articles/azure-sql/database/media/automatic-tuning-enable/server.png b/articles/azure-sql/database/media/automatic-tuning-enable/server.png deleted file mode 100644 index 5beb426d2e2f7..0000000000000 Binary files a/articles/azure-sql/database/media/automatic-tuning-enable/server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/automatic-tuning-overview/how-does-automatic-tuning-work.png b/articles/azure-sql/database/media/automatic-tuning-overview/how-does-automatic-tuning-work.png deleted file mode 100644 index ea8bb74ed7ec9..0000000000000 Binary files a/articles/azure-sql/database/media/automatic-tuning-overview/how-does-automatic-tuning-work.png and /dev/null differ diff --git a/articles/azure-sql/database/media/azure-defender-for-sql/advanced-threat-protection-add-contact-details.png b/articles/azure-sql/database/media/azure-defender-for-sql/advanced-threat-protection-add-contact-details.png deleted file mode 100644 index 19f420a118d44..0000000000000 Binary files a/articles/azure-sql/database/media/azure-defender-for-sql/advanced-threat-protection-add-contact-details.png and /dev/null differ diff --git a/articles/azure-sql/database/media/azure-defender-for-sql/advanced-threat-protection-alerts.png b/articles/azure-sql/database/media/azure-defender-for-sql/advanced-threat-protection-alerts.png deleted file mode 100644 index 4eecd45d88492..0000000000000 Binary files a/articles/azure-sql/database/media/azure-defender-for-sql/advanced-threat-protection-alerts.png and /dev/null differ diff --git a/articles/azure-sql/database/media/azure-defender-for-sql/advanced-threat-protection-configure-emails.png 
b/articles/azure-sql/database/media/azure-defender-for-sql/advanced-threat-protection-configure-emails.png deleted file mode 100644 index ad8b38caf28e8..0000000000000 Binary files a/articles/azure-sql/database/media/azure-defender-for-sql/advanced-threat-protection-configure-emails.png and /dev/null differ diff --git a/articles/azure-sql/database/media/azure-defender-for-sql/advanced-threat-protection.png b/articles/azure-sql/database/media/azure-defender-for-sql/advanced-threat-protection.png deleted file mode 100644 index cbab94282a28c..0000000000000 Binary files a/articles/azure-sql/database/media/azure-defender-for-sql/advanced-threat-protection.png and /dev/null differ diff --git a/articles/azure-sql/database/media/azure-defender-for-sql/enable-azure-defender-sql-subscription-level.png b/articles/azure-sql/database/media/azure-defender-for-sql/enable-azure-defender-sql-subscription-level.png deleted file mode 100644 index 8c8322e2c668a..0000000000000 Binary files a/articles/azure-sql/database/media/azure-defender-for-sql/enable-azure-defender-sql-subscription-level.png and /dev/null differ diff --git a/articles/azure-sql/database/media/azure-defender-for-sql/enable-azure-defender.png b/articles/azure-sql/database/media/azure-defender-for-sql/enable-azure-defender.png deleted file mode 100644 index 213d71ce3c88b..0000000000000 Binary files a/articles/azure-sql/database/media/azure-defender-for-sql/enable-azure-defender.png and /dev/null differ diff --git a/articles/azure-sql/database/media/azure-defender-for-sql/enable-for-database-level.png b/articles/azure-sql/database/media/azure-defender-for-sql/enable-for-database-level.png deleted file mode 100644 index 5d9ee84078959..0000000000000 Binary files a/articles/azure-sql/database/media/azure-defender-for-sql/enable-for-database-level.png and /dev/null differ diff --git a/articles/azure-sql/database/media/azure-defender-for-sql/enable-microsoft-defender-sql.png 
b/articles/azure-sql/database/media/azure-defender-for-sql/enable-microsoft-defender-sql.png deleted file mode 100644 index 763d2571bdf78..0000000000000 Binary files a/articles/azure-sql/database/media/azure-defender-for-sql/enable-microsoft-defender-sql.png and /dev/null differ diff --git a/articles/azure-sql/database/media/azure-defender-for-sql/security-server-settings.png b/articles/azure-sql/database/media/azure-defender-for-sql/security-server-settings.png deleted file mode 100644 index 1149056a63099..0000000000000 Binary files a/articles/azure-sql/database/media/azure-defender-for-sql/security-server-settings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/azure-defender-for-sql/set-up-advanced-threat-protection-mi.png b/articles/azure-sql/database/media/azure-defender-for-sql/set-up-advanced-threat-protection-mi.png deleted file mode 100644 index 87a3f150176b0..0000000000000 Binary files a/articles/azure-sql/database/media/azure-defender-for-sql/set-up-advanced-threat-protection-mi.png and /dev/null differ diff --git a/articles/azure-sql/database/media/azure-defender-for-sql/set-up-advanced-threat-protection.png b/articles/azure-sql/database/media/azure-defender-for-sql/set-up-advanced-threat-protection.png deleted file mode 100644 index 60af95537b93b..0000000000000 Binary files a/articles/azure-sql/database/media/azure-defender-for-sql/set-up-advanced-threat-protection.png and /dev/null differ diff --git a/articles/azure-sql/database/media/azure-defender-for-sql/status-of-defender-for-sql.png b/articles/azure-sql/database/media/azure-defender-for-sql/status-of-defender-for-sql.png deleted file mode 100644 index 19d8a84aec82c..0000000000000 Binary files a/articles/azure-sql/database/media/azure-defender-for-sql/status-of-defender-for-sql.png and /dev/null differ diff --git a/articles/azure-sql/database/media/azure-defender-for-sql/va-storage.png b/articles/azure-sql/database/media/azure-defender-for-sql/va-storage.png deleted file 
mode 100644 index 39e06fa78ab4a..0000000000000 Binary files a/articles/azure-sql/database/media/azure-defender-for-sql/va-storage.png and /dev/null differ diff --git a/articles/azure-sql/database/media/azure-sql-iaas-vs-paas-what-is-overview/sqliaas_sql_server_cloud_continuum.png b/articles/azure-sql/database/media/azure-sql-iaas-vs-paas-what-is-overview/sqliaas_sql_server_cloud_continuum.png deleted file mode 100644 index 64d11159001d2..0000000000000 Binary files a/articles/azure-sql/database/media/azure-sql-iaas-vs-paas-what-is-overview/sqliaas_sql_server_cloud_continuum.png and /dev/null differ diff --git a/articles/azure-sql/database/media/block-tsql-crud/block-tsql-crud-re-register.png b/articles/azure-sql/database/media/block-tsql-crud/block-tsql-crud-re-register.png deleted file mode 100644 index 16fd5ca688707..0000000000000 Binary files a/articles/azure-sql/database/media/block-tsql-crud/block-tsql-crud-re-register.png and /dev/null differ diff --git a/articles/azure-sql/database/media/block-tsql-crud/block-tsql-crud-register.png b/articles/azure-sql/database/media/block-tsql-crud/block-tsql-crud-register.png deleted file mode 100644 index 731a19612b89c..0000000000000 Binary files a/articles/azure-sql/database/media/block-tsql-crud/block-tsql-crud-register.png and /dev/null differ diff --git a/articles/azure-sql/database/media/block-tsql-crud/block-tsql-crud.png b/articles/azure-sql/database/media/block-tsql-crud/block-tsql-crud.png deleted file mode 100644 index 1f246a49f04bf..0000000000000 Binary files a/articles/azure-sql/database/media/block-tsql-crud/block-tsql-crud.png and /dev/null differ diff --git a/articles/azure-sql/database/media/clustering-model-build-tutorial/elbow-graph.png b/articles/azure-sql/database/media/clustering-model-build-tutorial/elbow-graph.png deleted file mode 100644 index 72488c53fc961..0000000000000 Binary files a/articles/azure-sql/database/media/clustering-model-build-tutorial/elbow-graph.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/conditional-access-configure/conditional-access-blade.png b/articles/azure-sql/database/media/conditional-access-configure/conditional-access-blade.png deleted file mode 100644 index f6977880a1524..0000000000000 Binary files a/articles/azure-sql/database/media/conditional-access-configure/conditional-access-blade.png and /dev/null differ diff --git a/articles/azure-sql/database/media/conditional-access-configure/grant-access.png b/articles/azure-sql/database/media/conditional-access-configure/grant-access.png deleted file mode 100644 index d0a0893f49733..0000000000000 Binary files a/articles/azure-sql/database/media/conditional-access-configure/grant-access.png and /dev/null differ diff --git a/articles/azure-sql/database/media/conditional-access-configure/select-sql-database.png b/articles/azure-sql/database/media/conditional-access-configure/select-sql-database.png deleted file mode 100644 index e2c5a0c35f347..0000000000000 Binary files a/articles/azure-sql/database/media/conditional-access-configure/select-sql-database.png and /dev/null differ diff --git a/articles/azure-sql/database/media/conditional-access-configure/select-users-and-groups.png b/articles/azure-sql/database/media/conditional-access-configure/select-users-and-groups.png deleted file mode 100644 index 4447b8e55fc8c..0000000000000 Binary files a/articles/azure-sql/database/media/conditional-access-configure/select-users-and-groups.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-excel/connect-to-server.png b/articles/azure-sql/database/media/connect-excel/connect-to-server.png deleted file mode 100644 index 3df6b1becee38..0000000000000 Binary files a/articles/azure-sql/database/media/connect-excel/connect-to-server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-excel/data-connection-wizard.png b/articles/azure-sql/database/media/connect-excel/data-connection-wizard.png deleted file mode 100644 index 
0f4c5d760a3da..0000000000000 Binary files a/articles/azure-sql/database/media/connect-excel/data-connection-wizard.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-excel/excel_data_source.png b/articles/azure-sql/database/media/connect-excel/excel_data_source.png deleted file mode 100644 index cfe58fb959e15..0000000000000 Binary files a/articles/azure-sql/database/media/connect-excel/excel_data_source.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-excel/existing-connection.png b/articles/azure-sql/database/media/connect-excel/existing-connection.png deleted file mode 100644 index c511b9571bcfb..0000000000000 Binary files a/articles/azure-sql/database/media/connect-excel/existing-connection.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-excel/import-data.png b/articles/azure-sql/database/media/connect-excel/import-data.png deleted file mode 100644 index d1caebf01dbfa..0000000000000 Binary files a/articles/azure-sql/database/media/connect-excel/import-data.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-excel/import-data2.png b/articles/azure-sql/database/media/connect-excel/import-data2.png deleted file mode 100644 index ac239791a0cb7..0000000000000 Binary files a/articles/azure-sql/database/media/connect-excel/import-data2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-excel/new-connection.png b/articles/azure-sql/database/media/connect-excel/new-connection.png deleted file mode 100644 index 2e9981bbe5cf7..0000000000000 Binary files a/articles/azure-sql/database/media/connect-excel/new-connection.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-excel/power-pivot-results.png b/articles/azure-sql/database/media/connect-excel/power-pivot-results.png deleted file mode 100644 index d0a0c6835a149..0000000000000 Binary files 
a/articles/azure-sql/database/media/connect-excel/power-pivot-results.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-excel/recent-connections.png b/articles/azure-sql/database/media/connect-excel/recent-connections.png deleted file mode 100644 index 4353ddabcf7b7..0000000000000 Binary files a/articles/azure-sql/database/media/connect-excel/recent-connections.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-excel/save-data-connection.png b/articles/azure-sql/database/media/connect-excel/save-data-connection.png deleted file mode 100644 index 375d48265cf19..0000000000000 Binary files a/articles/azure-sql/database/media/connect-excel/save-data-connection.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-excel/select-database-and-table.png b/articles/azure-sql/database/media/connect-excel/select-database-and-table.png deleted file mode 100644 index c5dbd99be1419..0000000000000 Binary files a/articles/azure-sql/database/media/connect-excel/select-database-and-table.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-excel/server-name.png b/articles/azure-sql/database/media/connect-excel/server-name.png deleted file mode 100644 index 1f03ddb6c873f..0000000000000 Binary files a/articles/azure-sql/database/media/connect-excel/server-name.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-query-dotnet-core/adonet-connection-string2.png b/articles/azure-sql/database/media/connect-query-dotnet-core/adonet-connection-string2.png deleted file mode 100644 index a1370f0af0071..0000000000000 Binary files a/articles/azure-sql/database/media/connect-query-dotnet-core/adonet-connection-string2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-query-portal/find-query-editor.png b/articles/azure-sql/database/media/connect-query-portal/find-query-editor.png deleted file mode 100644 index 
ee98b1eafc2a8..0000000000000 Binary files a/articles/azure-sql/database/media/connect-query-portal/find-query-editor.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-query-portal/login-menu.png b/articles/azure-sql/database/media/connect-query-portal/login-menu.png deleted file mode 100644 index d5a279770ee7e..0000000000000 Binary files a/articles/azure-sql/database/media/connect-query-portal/login-menu.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-query-portal/query-editor-results.png b/articles/azure-sql/database/media/connect-query-portal/query-editor-results.png deleted file mode 100644 index 0c55602b366bd..0000000000000 Binary files a/articles/azure-sql/database/media/connect-query-portal/query-editor-results.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-query-portal/select-active-directory.png b/articles/azure-sql/database/media/connect-query-portal/select-active-directory.png deleted file mode 100644 index bc6e6a3e99a4d..0000000000000 Binary files a/articles/azure-sql/database/media/connect-query-portal/select-active-directory.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-query-ssms/connect.png b/articles/azure-sql/database/media/connect-query-ssms/connect.png deleted file mode 100644 index eb2ab84e55456..0000000000000 Binary files a/articles/azure-sql/database/media/connect-query-ssms/connect.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-query-ssms/connected.png b/articles/azure-sql/database/media/connect-query-ssms/connected.png deleted file mode 100644 index 1715bfd4f49be..0000000000000 Binary files a/articles/azure-sql/database/media/connect-query-ssms/connected.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-query-ssms/options-connect-to-db.png b/articles/azure-sql/database/media/connect-query-ssms/options-connect-to-db.png deleted file mode 100644 index 
59c15f044fccd..0000000000000 Binary files a/articles/azure-sql/database/media/connect-query-ssms/options-connect-to-db.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-query-ssms/query2.png b/articles/azure-sql/database/media/connect-query-ssms/query2.png deleted file mode 100644 index a7ea01dc316de..0000000000000 Binary files a/articles/azure-sql/database/media/connect-query-ssms/query2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-query-ssms/result.png b/articles/azure-sql/database/media/connect-query-ssms/result.png deleted file mode 100644 index 31b9ac466d006..0000000000000 Binary files a/articles/azure-sql/database/media/connect-query-ssms/result.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connect-query-vscode/query.png b/articles/azure-sql/database/media/connect-query-vscode/query.png deleted file mode 100644 index 45f8459c9ebfc..0000000000000 Binary files a/articles/azure-sql/database/media/connect-query-vscode/query.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connectivity-architecture/connectivity-azure.png b/articles/azure-sql/database/media/connectivity-architecture/connectivity-azure.png deleted file mode 100644 index 3b0ee74eeed99..0000000000000 Binary files a/articles/azure-sql/database/media/connectivity-architecture/connectivity-azure.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connectivity-architecture/connectivity-onprem.png b/articles/azure-sql/database/media/connectivity-architecture/connectivity-onprem.png deleted file mode 100644 index ce54cd280d515..0000000000000 Binary files a/articles/azure-sql/database/media/connectivity-architecture/connectivity-onprem.png and /dev/null differ diff --git a/articles/azure-sql/database/media/connectivity-architecture/connectivity-overview.png b/articles/azure-sql/database/media/connectivity-architecture/connectivity-overview.png deleted file mode 100644 index 
8bf9fd067a74b..0000000000000 Binary files a/articles/azure-sql/database/media/connectivity-architecture/connectivity-overview.png and /dev/null differ diff --git a/articles/azure-sql/database/media/cost-management/backup-storage.png b/articles/azure-sql/database/media/cost-management/backup-storage.png deleted file mode 100644 index 99c81896f99d0..0000000000000 Binary files a/articles/azure-sql/database/media/cost-management/backup-storage.png and /dev/null differ diff --git a/articles/azure-sql/database/media/cost-management/cost-analysis.png b/articles/azure-sql/database/media/cost-management/cost-analysis.png deleted file mode 100644 index f4fcbc10220d5..0000000000000 Binary files a/articles/azure-sql/database/media/cost-management/cost-analysis.png and /dev/null differ diff --git a/articles/azure-sql/database/media/cost-management/cost-estimate.png b/articles/azure-sql/database/media/cost-management/cost-estimate.png deleted file mode 100644 index 27d41f10b8f95..0000000000000 Binary files a/articles/azure-sql/database/media/cost-management/cost-estimate.png and /dev/null differ diff --git a/articles/azure-sql/database/media/cost-management/pricing-calc.png b/articles/azure-sql/database/media/cost-management/pricing-calc.png deleted file mode 100644 index 193f7520590b9..0000000000000 Binary files a/articles/azure-sql/database/media/cost-management/pricing-calc.png and /dev/null differ diff --git a/articles/azure-sql/database/media/data-discovery-and-classification-overview/11_data_classification_audit_log.png b/articles/azure-sql/database/media/data-discovery-and-classification-overview/11_data_classification_audit_log.png deleted file mode 100644 index dcb9bd83aedd7..0000000000000 Binary files a/articles/azure-sql/database/media/data-discovery-and-classification-overview/11_data_classification_audit_log.png and /dev/null differ diff --git a/articles/azure-sql/database/media/data-discovery-and-classification-overview/2_data_classification_overview_dashboard.png 
b/articles/azure-sql/database/media/data-discovery-and-classification-overview/2_data_classification_overview_dashboard.png deleted file mode 100644 index 7a4a7c1361749..0000000000000 Binary files a/articles/azure-sql/database/media/data-discovery-and-classification-overview/2_data_classification_overview_dashboard.png and /dev/null differ diff --git a/articles/azure-sql/database/media/data-discovery-and-classification-overview/6_data_classification_recommendations_list.png b/articles/azure-sql/database/media/data-discovery-and-classification-overview/6_data_classification_recommendations_list.png deleted file mode 100644 index f2793f22b3699..0000000000000 Binary files a/articles/azure-sql/database/media/data-discovery-and-classification-overview/6_data_classification_recommendations_list.png and /dev/null differ diff --git a/articles/azure-sql/database/media/data-discovery-and-classification-overview/9_data_classification_manual_classification.png b/articles/azure-sql/database/media/data-discovery-and-classification-overview/9_data_classification_manual_classification.png deleted file mode 100644 index 97bcbfa95f6ff..0000000000000 Binary files a/articles/azure-sql/database/media/data-discovery-and-classification-overview/9_data_classification_manual_classification.png and /dev/null differ diff --git a/articles/azure-sql/database/media/data-discovery-and-classification-overview/data-discovery-and-classification.png b/articles/azure-sql/database/media/data-discovery-and-classification-overview/data-discovery-and-classification.png deleted file mode 100644 index 787abeab1d758..0000000000000 Binary files a/articles/azure-sql/database/media/data-discovery-and-classification-overview/data-discovery-and-classification.png and /dev/null differ diff --git a/articles/azure-sql/database/media/data-discovery-and-classification-overview/data_classification.png b/articles/azure-sql/database/media/data-discovery-and-classification-overview/data_classification.png deleted file 
mode 100644 index 2394b5940e9cf..0000000000000 Binary files a/articles/azure-sql/database/media/data-discovery-and-classification-overview/data_classification.png and /dev/null differ diff --git a/articles/azure-sql/database/media/data-discovery-and-classification-overview/manually-add-classification.png b/articles/azure-sql/database/media/data-discovery-and-classification-overview/manually-add-classification.png deleted file mode 100644 index 8e9986fc5def0..0000000000000 Binary files a/articles/azure-sql/database/media/data-discovery-and-classification-overview/manually-add-classification.png and /dev/null differ diff --git a/articles/azure-sql/database/media/data-discovery-and-classification-overview/recommendation.png b/articles/azure-sql/database/media/data-discovery-and-classification-overview/recommendation.png deleted file mode 100644 index c049622a60e44..0000000000000 Binary files a/articles/azure-sql/database/media/data-discovery-and-classification-overview/recommendation.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/apply.png b/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/apply.png deleted file mode 100644 index 97c9b4983c2d8..0000000000000 Binary files a/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/apply.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/details.png b/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/details.png deleted file mode 100644 index 006721ac2c62c..0000000000000 Binary files a/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/details.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/operations.png b/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/operations.png deleted file 
mode 100644 index 5b6625fae940e..0000000000000 Binary files a/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/operations.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/query-insights.png b/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/query-insights.png deleted file mode 100644 index 22950c7c759e1..0000000000000 Binary files a/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/query-insights.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/recommendations.png b/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/recommendations.png deleted file mode 100644 index 12b22e03ef941..0000000000000 Binary files a/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/recommendations.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/server.png b/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/server.png deleted file mode 100644 index 5beb426d2e2f7..0000000000000 Binary files a/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/settings.png b/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/settings.png deleted file mode 100644 index 9d53c6c90798c..0000000000000 Binary files a/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/settings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/sql-database-performance-recommendation.png 
b/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/sql-database-performance-recommendation.png deleted file mode 100644 index 36f7ad2621045..0000000000000 Binary files a/articles/azure-sql/database/media/database-advisor-find-recommendations-portal/sql-database-performance-recommendation.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-advisor-implement-performance-recommendations/performance-overview-annotated.png b/articles/azure-sql/database/media/database-advisor-implement-performance-recommendations/performance-overview-annotated.png deleted file mode 100644 index 9af889894e07d..0000000000000 Binary files a/articles/azure-sql/database/media/database-advisor-implement-performance-recommendations/performance-overview-annotated.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-advisor-implement-performance-recommendations/performance-recommendations-annotated.png b/articles/azure-sql/database/media/database-advisor-implement-performance-recommendations/performance-recommendations-annotated.png deleted file mode 100644 index c5abdfa6ccb02..0000000000000 Binary files a/articles/azure-sql/database/media/database-advisor-implement-performance-recommendations/performance-recommendations-annotated.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-copy/database-copy.png b/articles/azure-sql/database/media/database-copy/database-copy.png deleted file mode 100644 index 261d43a36ca34..0000000000000 Binary files a/articles/azure-sql/database/media/database-copy/database-copy.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-export/database-export1.png b/articles/azure-sql/database/media/database-export/database-export1.png deleted file mode 100644 index e1867642d465e..0000000000000 Binary files a/articles/azure-sql/database/media/database-export/database-export1.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/database-export/database-export2.png b/articles/azure-sql/database/media/database-export/database-export2.png deleted file mode 100644 index 2097522d0ee41..0000000000000 Binary files a/articles/azure-sql/database/media/database-export/database-export2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-export/export-history.png b/articles/azure-sql/database/media/database-export/export-history.png deleted file mode 100644 index b5624f17e06c5..0000000000000 Binary files a/articles/azure-sql/database/media/database-export/export-history.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-import-export-azure-services-off/rdp.png b/articles/azure-sql/database/media/database-import-export-azure-services-off/rdp.png deleted file mode 100644 index ee5c68ce8500f..0000000000000 Binary files a/articles/azure-sql/database/media/database-import-export-azure-services-off/rdp.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-import-export-azure-services-off/server-firewall-rule.png b/articles/azure-sql/database/media/database-import-export-azure-services-off/server-firewall-rule.png deleted file mode 100644 index 54b6fca8199f9..0000000000000 Binary files a/articles/azure-sql/database/media/database-import-export-azure-services-off/server-firewall-rule.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-import-export-azure-services-off/server-name.png b/articles/azure-sql/database/media/database-import-export-azure-services-off/server-name.png deleted file mode 100644 index 98ce5b4399111..0000000000000 Binary files a/articles/azure-sql/database/media/database-import-export-azure-services-off/server-name.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-import-export-azure-services-off/vm.png b/articles/azure-sql/database/media/database-import-export-azure-services-off/vm.png deleted file mode 
100644 index 77ca8570c7d2d..0000000000000 Binary files a/articles/azure-sql/database/media/database-import-export-azure-services-off/vm.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-import-export-private-link/approve-private-link-storage.png b/articles/azure-sql/database/media/database-import-export-private-link/approve-private-link-storage.png deleted file mode 100644 index 595ad12a5534a..0000000000000 Binary files a/articles/azure-sql/database/media/database-import-export-private-link/approve-private-link-storage.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-import-export-private-link/approve-private-link.png b/articles/azure-sql/database/media/database-import-export-private-link/approve-private-link.png deleted file mode 100644 index 2b2958b61d7f1..0000000000000 Binary files a/articles/azure-sql/database/media/database-import-export-private-link/approve-private-link.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-import-export-private-link/export-database-private-link.png b/articles/azure-sql/database/media/database-import-export-private-link/export-database-private-link.png deleted file mode 100644 index a8303293d7f8b..0000000000000 Binary files a/articles/azure-sql/database/media/database-import-export-private-link/export-database-private-link.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-import-export-private-link/import-database-private-link.png b/articles/azure-sql/database/media/database-import-export-private-link/import-database-private-link.png deleted file mode 100644 index e63690919601c..0000000000000 Binary files a/articles/azure-sql/database/media/database-import-export-private-link/import-database-private-link.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-import-export-private-link/import-export-private-link.png 
b/articles/azure-sql/database/media/database-import-export-private-link/import-export-private-link.png deleted file mode 100644 index 32cd4f50ed3f1..0000000000000 Binary files a/articles/azure-sql/database/media/database-import-export-private-link/import-export-private-link.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-import-export-private-link/import-export-status.png b/articles/azure-sql/database/media/database-import-export-private-link/import-export-status.png deleted file mode 100644 index 3d2dcfcd32b73..0000000000000 Binary files a/articles/azure-sql/database/media/database-import-export-private-link/import-export-status.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-import/sql-server-import-database-history.png b/articles/azure-sql/database/media/database-import/sql-server-import-database-history.png deleted file mode 100644 index ca74742e12ab5..0000000000000 Binary files a/articles/azure-sql/database/media/database-import/sql-server-import-database-history.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-import/sql-server-import-database-settings.png b/articles/azure-sql/database/media/database-import/sql-server-import-database-settings.png deleted file mode 100644 index 6b2c725eea172..0000000000000 Binary files a/articles/azure-sql/database/media/database-import/sql-server-import-database-settings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/database-import/sql-server-import-database.png b/articles/azure-sql/database/media/database-import/sql-server-import-database.png deleted file mode 100644 index 7ca417684ecd9..0000000000000 Binary files a/articles/azure-sql/database/media/database-import/sql-server-import-database.png and /dev/null differ diff --git a/articles/azure-sql/database/media/design-first-database-csharp-tutorial/create-database-server.png 
b/articles/azure-sql/database/media/design-first-database-csharp-tutorial/create-database-server.png deleted file mode 100644 index 3e9f488fe465b..0000000000000 Binary files a/articles/azure-sql/database/media/design-first-database-csharp-tutorial/create-database-server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/design-first-database-csharp-tutorial/create-empty-database.png b/articles/azure-sql/database/media/design-first-database-csharp-tutorial/create-empty-database.png deleted file mode 100644 index f2d630ee0a31f..0000000000000 Binary files a/articles/azure-sql/database/media/design-first-database-csharp-tutorial/create-empty-database.png and /dev/null differ diff --git a/articles/azure-sql/database/media/design-first-database-csharp-tutorial/notification.png b/articles/azure-sql/database/media/design-first-database-csharp-tutorial/notification.png deleted file mode 100644 index 92738c212139b..0000000000000 Binary files a/articles/azure-sql/database/media/design-first-database-csharp-tutorial/notification.png and /dev/null differ diff --git a/articles/azure-sql/database/media/design-first-database-csharp-tutorial/server-firewall-rule.png b/articles/azure-sql/database/media/design-first-database-csharp-tutorial/server-firewall-rule.png deleted file mode 100644 index 66e960dbada39..0000000000000 Binary files a/articles/azure-sql/database/media/design-first-database-csharp-tutorial/server-firewall-rule.png and /dev/null differ diff --git a/articles/azure-sql/database/media/design-first-database-csharp-tutorial/server-name.png b/articles/azure-sql/database/media/design-first-database-csharp-tutorial/server-name.png deleted file mode 100644 index 6465a297149b6..0000000000000 Binary files a/articles/azure-sql/database/media/design-first-database-csharp-tutorial/server-name.png and /dev/null differ diff --git a/articles/azure-sql/database/media/design-first-database-tutorial/connect.png 
b/articles/azure-sql/database/media/design-first-database-tutorial/connect.png deleted file mode 100644 index a350d762596f3..0000000000000 Binary files a/articles/azure-sql/database/media/design-first-database-tutorial/connect.png and /dev/null differ diff --git a/articles/azure-sql/database/media/design-first-database-tutorial/connected.png b/articles/azure-sql/database/media/design-first-database-tutorial/connected.png deleted file mode 100644 index 7439b908f2ff2..0000000000000 Binary files a/articles/azure-sql/database/media/design-first-database-tutorial/connected.png and /dev/null differ diff --git a/articles/azure-sql/database/media/design-first-database-tutorial/create-database-server.png b/articles/azure-sql/database/media/design-first-database-tutorial/create-database-server.png deleted file mode 100644 index 3e9f488fe465b..0000000000000 Binary files a/articles/azure-sql/database/media/design-first-database-tutorial/create-database-server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/design-first-database-tutorial/create-empty-database.png b/articles/azure-sql/database/media/design-first-database-tutorial/create-empty-database.png deleted file mode 100644 index f2d630ee0a31f..0000000000000 Binary files a/articles/azure-sql/database/media/design-first-database-tutorial/create-empty-database.png and /dev/null differ diff --git a/articles/azure-sql/database/media/design-first-database-tutorial/create-tables.png b/articles/azure-sql/database/media/design-first-database-tutorial/create-tables.png deleted file mode 100644 index 0247f6f157d0c..0000000000000 Binary files a/articles/azure-sql/database/media/design-first-database-tutorial/create-tables.png and /dev/null differ diff --git a/articles/azure-sql/database/media/design-first-database-tutorial/notification.png b/articles/azure-sql/database/media/design-first-database-tutorial/notification.png deleted file mode 100644 index 92738c212139b..0000000000000 Binary files 
a/articles/azure-sql/database/media/design-first-database-tutorial/notification.png and /dev/null differ diff --git a/articles/azure-sql/database/media/design-first-database-tutorial/options-connect-to-db.png b/articles/azure-sql/database/media/design-first-database-tutorial/options-connect-to-db.png deleted file mode 100644 index 12ef236f0af5c..0000000000000 Binary files a/articles/azure-sql/database/media/design-first-database-tutorial/options-connect-to-db.png and /dev/null differ diff --git a/articles/azure-sql/database/media/design-first-database-tutorial/server-firewall-rule.png b/articles/azure-sql/database/media/design-first-database-tutorial/server-firewall-rule.png deleted file mode 100644 index 66e960dbada39..0000000000000 Binary files a/articles/azure-sql/database/media/design-first-database-tutorial/server-firewall-rule.png and /dev/null differ diff --git a/articles/azure-sql/database/media/design-first-database-tutorial/server-name.png b/articles/azure-sql/database/media/design-first-database-tutorial/server-name.png deleted file mode 100644 index 6465a297149b6..0000000000000 Binary files a/articles/azure-sql/database/media/design-first-database-tutorial/server-name.png and /dev/null differ diff --git a/articles/azure-sql/database/media/design-first-database-tutorial/ssms-tables-created.png b/articles/azure-sql/database/media/design-first-database-tutorial/ssms-tables-created.png deleted file mode 100644 index 3e027871d16dd..0000000000000 Binary files a/articles/azure-sql/database/media/design-first-database-tutorial/ssms-tables-created.png and /dev/null differ diff --git a/articles/azure-sql/database/media/design-first-database-tutorial/tutorial-database-tables.png b/articles/azure-sql/database/media/design-first-database-tutorial/tutorial-database-tables.png deleted file mode 100644 index b917f96165339..0000000000000 Binary files a/articles/azure-sql/database/media/design-first-database-tutorial/tutorial-database-tables.png and /dev/null differ diff 
--git a/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario1-a.png b/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario1-a.png deleted file mode 100644 index 054b7e28d0bc0..0000000000000 Binary files a/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario1-a.png and /dev/null differ diff --git a/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario1-b.png b/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario1-b.png deleted file mode 100644 index 07769e90a627b..0000000000000 Binary files a/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario1-b.png and /dev/null differ diff --git a/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario1-c.png b/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario1-c.png deleted file mode 100644 index b3dece1adce0a..0000000000000 Binary files a/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario1-c.png and /dev/null differ diff --git a/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario2-a.png b/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario2-a.png deleted file mode 100644 index fbede9ac03e4e..0000000000000 Binary files a/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario2-a.png and /dev/null differ diff --git a/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario2-b.png b/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario2-b.png deleted file mode 100644 index b8a90bd3f9797..0000000000000 Binary files a/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario2-b.png 
and /dev/null differ diff --git a/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario2-c.png b/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario2-c.png deleted file mode 100644 index 32e7bde41b9f2..0000000000000 Binary files a/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario2-c.png and /dev/null differ diff --git a/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario3-a.png b/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario3-a.png deleted file mode 100644 index 45c62ffe1fedc..0000000000000 Binary files a/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario3-a.png and /dev/null differ diff --git a/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario3-b.png b/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario3-b.png deleted file mode 100644 index ba62167709d45..0000000000000 Binary files a/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario3-b.png and /dev/null differ diff --git a/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario3-c.png b/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario3-c.png deleted file mode 100644 index ae6f1578e3914..0000000000000 Binary files a/articles/azure-sql/database/media/designing-cloud-solutions-for-disaster-recovery/scenario3-c.png and /dev/null differ diff --git a/articles/azure-sql/database/media/develop-cplusplus-simple/azureportal.png b/articles/azure-sql/database/media/develop-cplusplus-simple/azureportal.png deleted file mode 100644 index 042f1eed3c3b2..0000000000000 Binary files a/articles/azure-sql/database/media/develop-cplusplus-simple/azureportal.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/develop-cplusplus-simple/datasource.png b/articles/azure-sql/database/media/develop-cplusplus-simple/datasource.png deleted file mode 100644 index d5370d8e55b3d..0000000000000 Binary files a/articles/azure-sql/database/media/develop-cplusplus-simple/datasource.png and /dev/null differ diff --git a/articles/azure-sql/database/media/develop-cplusplus-simple/dbconnection.png b/articles/azure-sql/database/media/develop-cplusplus-simple/dbconnection.png deleted file mode 100644 index 9169de8d75ff8..0000000000000 Binary files a/articles/azure-sql/database/media/develop-cplusplus-simple/dbconnection.png and /dev/null differ diff --git a/articles/azure-sql/database/media/develop-cplusplus-simple/ip.png b/articles/azure-sql/database/media/develop-cplusplus-simple/ip.png deleted file mode 100644 index bea345f62859c..0000000000000 Binary files a/articles/azure-sql/database/media/develop-cplusplus-simple/ip.png and /dev/null differ diff --git a/articles/azure-sql/database/media/develop-cplusplus-simple/lib.png b/articles/azure-sql/database/media/develop-cplusplus-simple/lib.png deleted file mode 100644 index 96851428fef36..0000000000000 Binary files a/articles/azure-sql/database/media/develop-cplusplus-simple/lib.png and /dev/null differ diff --git a/articles/azure-sql/database/media/develop-cplusplus-simple/linuxconsole.png b/articles/azure-sql/database/media/develop-cplusplus-simple/linuxconsole.png deleted file mode 100644 index 22b19c6385f43..0000000000000 Binary files a/articles/azure-sql/database/media/develop-cplusplus-simple/linuxconsole.png and /dev/null differ diff --git a/articles/azure-sql/database/media/develop-cplusplus-simple/linuxconsolewindow.png b/articles/azure-sql/database/media/develop-cplusplus-simple/linuxconsolewindow.png deleted file mode 100644 index 0877244605127..0000000000000 Binary files a/articles/azure-sql/database/media/develop-cplusplus-simple/linuxconsolewindow.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/develop-cplusplus-simple/props.png b/articles/azure-sql/database/media/develop-cplusplus-simple/props.png deleted file mode 100644 index b48e4c48c13ea..0000000000000 Binary files a/articles/azure-sql/database/media/develop-cplusplus-simple/props.png and /dev/null differ diff --git a/articles/azure-sql/database/media/develop-cplusplus-simple/sqlcommands.png b/articles/azure-sql/database/media/develop-cplusplus-simple/sqlcommands.png deleted file mode 100644 index 7bf7e3d8c9e36..0000000000000 Binary files a/articles/azure-sql/database/media/develop-cplusplus-simple/sqlcommands.png and /dev/null differ diff --git a/articles/azure-sql/database/media/develop-cplusplus-simple/template.png b/articles/azure-sql/database/media/develop-cplusplus-simple/template.png deleted file mode 100644 index c2aa86e4344d0..0000000000000 Binary files a/articles/azure-sql/database/media/develop-cplusplus-simple/template.png and /dev/null differ diff --git a/articles/azure-sql/database/media/develop-cplusplus-simple/tools.png b/articles/azure-sql/database/media/develop-cplusplus-simple/tools.png deleted file mode 100644 index 7a1d9a1090b3d..0000000000000 Binary files a/articles/azure-sql/database/media/develop-cplusplus-simple/tools.png and /dev/null differ diff --git a/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-1.png b/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-1.png deleted file mode 100644 index 8ab9583e02fc7..0000000000000 Binary files a/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-2.png b/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-2.png deleted file mode 100644 
index c0f5494b4b4c8..0000000000000 Binary files a/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-3.png b/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-3.png deleted file mode 100644 index 830e7134475fe..0000000000000 Binary files a/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-3.png and /dev/null differ diff --git a/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-4.png b/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-4.png deleted file mode 100644 index 41e5422f0ce5e..0000000000000 Binary files a/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-4.png and /dev/null differ diff --git a/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-5.png b/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-5.png deleted file mode 100644 index 826ee8fd2fc82..0000000000000 Binary files a/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-5.png and /dev/null differ diff --git a/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-6.png b/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-6.png deleted file mode 100644 index 267ee7337d6df..0000000000000 Binary files a/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-6.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-7.png b/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-7.png deleted file mode 100644 index 2e57ceffb4f93..0000000000000 Binary files a/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-7.png and /dev/null differ diff --git a/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-8.png b/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-8.png deleted file mode 100644 index f57439fa2f60b..0000000000000 Binary files a/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-8.png and /dev/null differ diff --git a/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-9.png b/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-9.png deleted file mode 100644 index af7ef51799032..0000000000000 Binary files a/articles/azure-sql/database/media/disaster-recovery-strategies-for-applications-with-elastic-pool/diagram-9.png and /dev/null differ diff --git a/articles/azure-sql/database/media/dynamic-data-masking-configure-portal/5_ddm_recommendations.png b/articles/azure-sql/database/media/dynamic-data-masking-configure-portal/5_ddm_recommendations.png deleted file mode 100644 index b163161525815..0000000000000 Binary files a/articles/azure-sql/database/media/dynamic-data-masking-configure-portal/5_ddm_recommendations.png and /dev/null differ diff --git a/articles/azure-sql/database/media/dynamic-data-masking-configure-portal/6_ddm_add_mask.png b/articles/azure-sql/database/media/dynamic-data-masking-configure-portal/6_ddm_add_mask.png deleted file mode 100644 index 882baca679b11..0000000000000 Binary 
files a/articles/azure-sql/database/media/dynamic-data-masking-configure-portal/6_ddm_add_mask.png and /dev/null differ diff --git a/articles/azure-sql/database/media/dynamic-data-masking-configure-portal/7_ddm_mask_field_format.png b/articles/azure-sql/database/media/dynamic-data-masking-configure-portal/7_ddm_mask_field_format.png deleted file mode 100644 index 476f16ade033a..0000000000000 Binary files a/articles/azure-sql/database/media/dynamic-data-masking-configure-portal/7_ddm_mask_field_format.png and /dev/null differ diff --git a/articles/azure-sql/database/media/dynamic-data-masking-configure-portal/8_ddm_excluded_users.png b/articles/azure-sql/database/media/dynamic-data-masking-configure-portal/8_ddm_excluded_users.png deleted file mode 100644 index e4824cbc03ea8..0000000000000 Binary files a/articles/azure-sql/database/media/dynamic-data-masking-configure-portal/8_ddm_excluded_users.png and /dev/null differ diff --git a/articles/azure-sql/database/media/dynamic-data-masking-configure-portal/dynamic-data-masking-in-portal.png b/articles/azure-sql/database/media/dynamic-data-masking-configure-portal/dynamic-data-masking-in-portal.png deleted file mode 100644 index c92803a596127..0000000000000 Binary files a/articles/azure-sql/database/media/dynamic-data-masking-configure-portal/dynamic-data-masking-in-portal.png and /dev/null differ diff --git a/articles/azure-sql/database/media/dynamic-data-masking-overview/1_ddm_random_number.png b/articles/azure-sql/database/media/dynamic-data-masking-overview/1_ddm_random_number.png deleted file mode 100644 index 9b82c33b5dda8..0000000000000 Binary files a/articles/azure-sql/database/media/dynamic-data-masking-overview/1_ddm_random_number.png and /dev/null differ diff --git a/articles/azure-sql/database/media/dynamic-data-masking-overview/2_ddm_custom_text.png b/articles/azure-sql/database/media/dynamic-data-masking-overview/2_ddm_custom_text.png deleted file mode 100644 index f5c65e6fbf837..0000000000000 Binary files 
a/articles/azure-sql/database/media/dynamic-data-masking-overview/2_ddm_custom_text.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-convert-to-use-elastic-tools/listmapping.png b/articles/azure-sql/database/media/elastic-convert-to-use-elastic-tools/listmapping.png deleted file mode 100644 index 8a0a9b6b49d07..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-convert-to-use-elastic-tools/listmapping.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-convert-to-use-elastic-tools/multipleonsingledb.png b/articles/azure-sql/database/media/elastic-convert-to-use-elastic-tools/multipleonsingledb.png deleted file mode 100644 index 8fd9bb6121977..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-convert-to-use-elastic-tools/multipleonsingledb.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-convert-to-use-elastic-tools/rangemapping.png b/articles/azure-sql/database/media/elastic-convert-to-use-elastic-tools/rangemapping.png deleted file mode 100644 index 7b556e2766b09..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-convert-to-use-elastic-tools/rangemapping.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-database-recovery-manager/recovery-manager.png b/articles/azure-sql/database/media/elastic-database-recovery-manager/recovery-manager.png deleted file mode 100644 index 58a0a8d035f11..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-database-recovery-manager/recovery-manager.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-jobs-overview/create-elastic-job-agent.png b/articles/azure-sql/database/media/elastic-jobs-overview/create-elastic-job-agent.png deleted file mode 100644 index b218fa37ee53e..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-jobs-overview/create-elastic-job-agent.png and /dev/null differ diff 
--git a/articles/azure-sql/database/media/elastic-jobs-overview/elastic-job-executions-overview.png b/articles/azure-sql/database/media/elastic-jobs-overview/elastic-job-executions-overview.png deleted file mode 100644 index 5923884388b32..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-jobs-overview/elastic-job-executions-overview.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-jobs-overview/job-credentials.png b/articles/azure-sql/database/media/elastic-jobs-overview/job-credentials.png deleted file mode 100644 index 351e4ee5f9538..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-jobs-overview/job-credentials.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-jobs-powershell-create/job-credentials.png b/articles/azure-sql/database/media/elastic-jobs-powershell-create/job-credentials.png deleted file mode 100644 index 351e4ee5f9538..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-jobs-powershell-create/job-credentials.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-jobs-powershell-create/job-execution-verification.png b/articles/azure-sql/database/media/elastic-jobs-powershell-create/job-execution-verification.png deleted file mode 100644 index 3059ba9652f29..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-jobs-powershell-create/job-execution-verification.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-pool-manage/configure-pool.png b/articles/azure-sql/database/media/elastic-pool-manage/configure-pool.png deleted file mode 100644 index ae08ad7b40af3..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-pool-manage/configure-pool.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-pool-overview/basic.png b/articles/azure-sql/database/media/elastic-pool-overview/basic.png deleted file mode 100644 index 
ee354f2441e78..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-pool-overview/basic.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-pool-overview/create-elastic-pool.png b/articles/azure-sql/database/media/elastic-pool-overview/create-elastic-pool.png deleted file mode 100644 index e64884b72e9ac..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-pool-overview/create-elastic-pool.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-pool-overview/db-utilization.png b/articles/azure-sql/database/media/elastic-pool-overview/db-utilization.png deleted file mode 100644 index 34758d76af331..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-pool-overview/db-utilization.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-pool-overview/four-databases.png b/articles/azure-sql/database/media/elastic-pool-overview/four-databases.png deleted file mode 100644 index d3798b7429114..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-pool-overview/four-databases.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-pool-overview/one-database.png b/articles/azure-sql/database/media/elastic-pool-overview/one-database.png deleted file mode 100644 index fbd19663998f3..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-pool-overview/one-database.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-pool-overview/twenty-databases.png b/articles/azure-sql/database/media/elastic-pool-overview/twenty-databases.png deleted file mode 100644 index e8f01f3d18b90..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-pool-overview/twenty-databases.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-query-getting-started/cmd-prompt.png b/articles/azure-sql/database/media/elastic-query-getting-started/cmd-prompt.png 
deleted file mode 100644 index bda9754633345..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-query-getting-started/cmd-prompt.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-query-getting-started/details.png b/articles/azure-sql/database/media/elastic-query-getting-started/details.png deleted file mode 100644 index d0c07405147db..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-query-getting-started/details.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-query-getting-started/exel-sources.png b/articles/azure-sql/database/media/elastic-query-getting-started/exel-sources.png deleted file mode 100644 index 25a86c0c93f5c..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-query-getting-started/exel-sources.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-query-getting-started/portal.png b/articles/azure-sql/database/media/elastic-query-getting-started/portal.png deleted file mode 100644 index 45a584820fe15..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-query-getting-started/portal.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-query-getting-started/tiers.png b/articles/azure-sql/database/media/elastic-query-getting-started/tiers.png deleted file mode 100644 index acb1049d4c351..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-query-getting-started/tiers.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-query-horizontal-partitioning/horizontalpartitioning.png b/articles/azure-sql/database/media/elastic-query-horizontal-partitioning/horizontalpartitioning.png deleted file mode 100644 index 2047dda18fc5c..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-query-horizontal-partitioning/horizontalpartitioning.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/elastic-query-overview/horizontalpartitioning.png b/articles/azure-sql/database/media/elastic-query-overview/horizontalpartitioning.png deleted file mode 100644 index f09c6fc2d3aab..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-query-overview/horizontalpartitioning.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-query-overview/overview.png b/articles/azure-sql/database/media/elastic-query-overview/overview.png deleted file mode 100644 index 4a719647d8b40..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-query-overview/overview.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-query-overview/topology1.png b/articles/azure-sql/database/media/elastic-query-overview/topology1.png deleted file mode 100644 index 34cbcdf6b67fd..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-query-overview/topology1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-query-overview/verticalpartitioning.png b/articles/azure-sql/database/media/elastic-query-overview/verticalpartitioning.png deleted file mode 100644 index 29d4a1cd3693d..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-query-overview/verticalpartitioning.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-query-overview/vertpartrrefdata.png b/articles/azure-sql/database/media/elastic-query-overview/vertpartrrefdata.png deleted file mode 100644 index d7b6459bf7001..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-query-overview/vertpartrrefdata.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-query-vertical-partitioning/verticalpartitioning.png b/articles/azure-sql/database/media/elastic-query-vertical-partitioning/verticalpartitioning.png deleted file mode 100644 index 9280fce9f32ae..0000000000000 Binary files 
a/articles/azure-sql/database/media/elastic-query-vertical-partitioning/verticalpartitioning.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-get-started/click-CSharp.png b/articles/azure-sql/database/media/elastic-scale-get-started/click-CSharp.png deleted file mode 100644 index c4faf69c0f194..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-get-started/click-CSharp.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-get-started/click-online.png b/articles/azure-sql/database/media/elastic-scale-get-started/click-online.png deleted file mode 100644 index 8f36418753e45..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-get-started/click-online.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-get-started/java-client-library.PNG b/articles/azure-sql/database/media/elastic-scale-get-started/java-client-library.PNG deleted file mode 100644 index ac7ceb45af60a..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-get-started/java-client-library.PNG and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-get-started/newProject.png b/articles/azure-sql/database/media/elastic-scale-get-started/newProject.png deleted file mode 100644 index 49158618e2c81..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-get-started/newProject.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-get-started/output2.png b/articles/azure-sql/database/media/elastic-scale-get-started/output2.png deleted file mode 100644 index a0a52652e7430..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-get-started/output2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-glossary/glossary.png b/articles/azure-sql/database/media/elastic-scale-glossary/glossary.png deleted file 
mode 100644 index 770c4ed23625c..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-glossary/glossary.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-glossary/h_versus_vert.png b/articles/azure-sql/database/media/elastic-scale-glossary/h_versus_vert.png deleted file mode 100644 index b216871f117d1..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-glossary/h_versus_vert.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-glossary/mappings.png b/articles/azure-sql/database/media/elastic-scale-glossary/mappings.png deleted file mode 100644 index 6b0fe9daab51d..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-glossary/mappings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-glossary/multi-single-simple.png b/articles/azure-sql/database/media/elastic-scale-glossary/multi-single-simple.png deleted file mode 100644 index 98d5b72deab6a..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-glossary/multi-single-simple.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-glossary/shards-single-multi.png b/articles/azure-sql/database/media/elastic-scale-glossary/shards-single-multi.png deleted file mode 100644 index 3616eeb204a14..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-glossary/shards-single-multi.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-introduction/h_versus_vert.png b/articles/azure-sql/database/media/elastic-scale-introduction/h_versus_vert.png deleted file mode 100644 index 968521a52b47b..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-introduction/h_versus_vert.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-introduction/single_v_multi_tenant.png 
b/articles/azure-sql/database/media/elastic-scale-introduction/single_v_multi_tenant.png deleted file mode 100644 index f932c0de11fdc..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-introduction/single_v_multi_tenant.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-introduction/tools.png b/articles/azure-sql/database/media/elastic-scale-introduction/tools.png deleted file mode 100644 index efe03fbca0b20..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-introduction/tools.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-overview-split-and-merge/diagnostics-config.png b/articles/azure-sql/database/media/elastic-scale-overview-split-and-merge/diagnostics-config.png deleted file mode 100644 index d8e3427520e00..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-overview-split-and-merge/diagnostics-config.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-overview-split-and-merge/diagnostics.png b/articles/azure-sql/database/media/elastic-scale-overview-split-and-merge/diagnostics.png deleted file mode 100644 index ec8b4696da32d..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-overview-split-and-merge/diagnostics.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-overview-split-and-merge/split-merge-overview.png b/articles/azure-sql/database/media/elastic-scale-overview-split-and-merge/split-merge-overview.png deleted file mode 100644 index 1b3b3f7748313..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-overview-split-and-merge/split-merge-overview.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-shard-map-management/glossary.png b/articles/azure-sql/database/media/elastic-scale-shard-map-management/glossary.png deleted file mode 100644 index 
cf77783fedecb..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-shard-map-management/glossary.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-shard-map-management/listmapping.png b/articles/azure-sql/database/media/elastic-scale-shard-map-management/listmapping.png deleted file mode 100644 index 8a0a9b6b49d07..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-shard-map-management/listmapping.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-shard-map-management/multipleonsingledb.png b/articles/azure-sql/database/media/elastic-scale-shard-map-management/multipleonsingledb.png deleted file mode 100644 index 8fd9bb6121977..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-shard-map-management/multipleonsingledb.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-shard-map-management/rangemapping.png b/articles/azure-sql/database/media/elastic-scale-shard-map-management/rangemapping.png deleted file mode 100644 index 7b556e2766b09..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-shard-map-management/rangemapping.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-scale-working-with-dapper/dapperimage1.png b/articles/azure-sql/database/media/elastic-scale-working-with-dapper/dapperimage1.png deleted file mode 100644 index b7c77955ef694..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-scale-working-with-dapper/dapperimage1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-transactions-overview/distributed-transactions.png b/articles/azure-sql/database/media/elastic-transactions-overview/distributed-transactions.png deleted file mode 100644 index 5384b1115f595..0000000000000 Binary files 
a/articles/azure-sql/database/media/elastic-transactions-overview/distributed-transactions.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-transactions-overview/managed-instance-distributed-transactions-private-endpoint-limitations.png b/articles/azure-sql/database/media/elastic-transactions-overview/managed-instance-distributed-transactions-private-endpoint-limitations.png deleted file mode 100644 index 156f588e9c12a..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-transactions-overview/managed-instance-distributed-transactions-private-endpoint-limitations.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-transactions-overview/server-trust-groups-azure-portal.png b/articles/azure-sql/database/media/elastic-transactions-overview/server-trust-groups-azure-portal.png deleted file mode 100644 index ed71731c1c79f..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-transactions-overview/server-trust-groups-azure-portal.png and /dev/null differ diff --git a/articles/azure-sql/database/media/elastic-transactions-overview/sql-mi-distributed-transactions.png b/articles/azure-sql/database/media/elastic-transactions-overview/sql-mi-distributed-transactions.png deleted file mode 100644 index 30ce73b72b2b0..0000000000000 Binary files a/articles/azure-sql/database/media/elastic-transactions-overview/sql-mi-distributed-transactions.png and /dev/null differ diff --git a/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/add-database-to-elastic-pool.png b/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/add-database-to-elastic-pool.png deleted file mode 100644 index b1542d0ac0d7a..0000000000000 Binary files a/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/add-database-to-elastic-pool.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/add-elastic-pool-to-failover-group.png b/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/add-elastic-pool-to-failover-group.png deleted file mode 100644 index c0abdc8c51aca..0000000000000 Binary files a/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/add-elastic-pool-to-failover-group.png and /dev/null differ diff --git a/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/create-secondary-failover-server.png b/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/create-secondary-failover-server.png deleted file mode 100644 index f9768122c0e2a..0000000000000 Binary files a/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/create-secondary-failover-server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/elastic-pool-failover-group.png b/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/elastic-pool-failover-group.png deleted file mode 100644 index cf37bf9cfd32c..0000000000000 Binary files a/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/elastic-pool-failover-group.png and /dev/null differ diff --git a/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/failover-sql-db.png b/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/failover-sql-db.png deleted file mode 100644 index 774b0acf8c052..0000000000000 Binary files a/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/failover-sql-db.png and /dev/null differ diff --git a/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/select-azure-sql-elastic-pool.png b/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/select-azure-sql-elastic-pool.png deleted file mode 100644 index 
c59bbf496308d..0000000000000 Binary files a/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/select-azure-sql-elastic-pool.png and /dev/null differ diff --git a/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/select-failover-group.png b/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/select-failover-group.png deleted file mode 100644 index 11d9f423fa7df..0000000000000 Binary files a/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/select-failover-group.png and /dev/null differ diff --git a/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/server-for-elastic-pool.png b/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/server-for-elastic-pool.png deleted file mode 100644 index 9638e0a37a5b4..0000000000000 Binary files a/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/server-for-elastic-pool.png and /dev/null differ diff --git a/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/use-existing-server-for-elastic-pool.png b/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/use-existing-server-for-elastic-pool.png deleted file mode 100644 index d5c6910c7b110..0000000000000 Binary files a/articles/azure-sql/database/media/failover-group-add-elastic-pool-tutorial/use-existing-server-for-elastic-pool.png and /dev/null differ diff --git a/articles/azure-sql/database/media/failover-group-add-single-database-tutorial/add-sqldb-to-failover-group.png b/articles/azure-sql/database/media/failover-group-add-single-database-tutorial/add-sqldb-to-failover-group.png deleted file mode 100644 index d4a3a0ce2cf4d..0000000000000 Binary files a/articles/azure-sql/database/media/failover-group-add-single-database-tutorial/add-sqldb-to-failover-group.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/failover-group-add-single-database-tutorial/create-secondary-failover-server.png b/articles/azure-sql/database/media/failover-group-add-single-database-tutorial/create-secondary-failover-server.png deleted file mode 100644 index aef3222e949a6..0000000000000 Binary files a/articles/azure-sql/database/media/failover-group-add-single-database-tutorial/create-secondary-failover-server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/failover-group-add-single-database-tutorial/failover-sql-db.png b/articles/azure-sql/database/media/failover-group-add-single-database-tutorial/failover-sql-db.png deleted file mode 100644 index df422a59632c4..0000000000000 Binary files a/articles/azure-sql/database/media/failover-group-add-single-database-tutorial/failover-sql-db.png and /dev/null differ diff --git a/articles/azure-sql/database/media/failover-group-add-single-database-tutorial/open-sql-db-server.png b/articles/azure-sql/database/media/failover-group-add-single-database-tutorial/open-sql-db-server.png deleted file mode 100644 index 26931371caa9c..0000000000000 Binary files a/articles/azure-sql/database/media/failover-group-add-single-database-tutorial/open-sql-db-server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/failover-group-add-single-database-tutorial/select-failover-group.png b/articles/azure-sql/database/media/failover-group-add-single-database-tutorial/select-failover-group.png deleted file mode 100644 index d396265eecb02..0000000000000 Binary files a/articles/azure-sql/database/media/failover-group-add-single-database-tutorial/select-failover-group.png and /dev/null differ diff --git a/articles/azure-sql/database/media/failover-group-add-single-database-tutorial/sqldb-add-new-failover-group.png b/articles/azure-sql/database/media/failover-group-add-single-database-tutorial/sqldb-add-new-failover-group.png deleted file mode 100644 index 30fb915e7c94b..0000000000000 Binary files 
a/articles/azure-sql/database/media/failover-group-add-single-database-tutorial/sqldb-add-new-failover-group.png and /dev/null differ diff --git a/articles/azure-sql/database/media/file-space-manage/elastic-pool-allocated-unused.png b/articles/azure-sql/database/media/file-space-manage/elastic-pool-allocated-unused.png deleted file mode 100644 index 7659cf827ddde..0000000000000 Binary files a/articles/azure-sql/database/media/file-space-manage/elastic-pool-allocated-unused.png and /dev/null differ diff --git a/articles/azure-sql/database/media/file-space-manage/storage-types.png b/articles/azure-sql/database/media/file-space-manage/storage-types.png deleted file mode 100644 index 4f653f6b51625..0000000000000 Binary files a/articles/azure-sql/database/media/file-space-manage/storage-types.png and /dev/null differ diff --git a/articles/azure-sql/database/media/firewall-configure/sql-database-server-firewall-settings.png b/articles/azure-sql/database/media/firewall-configure/sql-database-server-firewall-settings.png deleted file mode 100644 index 9ea24e3530101..0000000000000 Binary files a/articles/azure-sql/database/media/firewall-configure/sql-database-server-firewall-settings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/firewall-configure/sql-database-server-set-firewall-rule.png b/articles/azure-sql/database/media/firewall-configure/sql-database-server-set-firewall-rule.png deleted file mode 100644 index 9bdaf7cff9a39..0000000000000 Binary files a/articles/azure-sql/database/media/firewall-configure/sql-database-server-set-firewall-rule.png and /dev/null differ diff --git a/articles/azure-sql/database/media/firewall-configure/sqldb-firewall-1.png b/articles/azure-sql/database/media/firewall-configure/sqldb-firewall-1.png deleted file mode 100644 index 0f23c01261f6b..0000000000000 Binary files a/articles/azure-sql/database/media/firewall-configure/sqldb-firewall-1.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/firewall-create-server-level-portal-quickstart/server-firewall-rule.png b/articles/azure-sql/database/media/firewall-create-server-level-portal-quickstart/server-firewall-rule.png deleted file mode 100644 index 54b6fca8199f9..0000000000000 Binary files a/articles/azure-sql/database/media/firewall-create-server-level-portal-quickstart/server-firewall-rule.png and /dev/null differ diff --git a/articles/azure-sql/database/media/firewall-create-server-level-portal-quickstart/server-name.png b/articles/azure-sql/database/media/firewall-create-server-level-portal-quickstart/server-name.png deleted file mode 100644 index 98ce5b4399111..0000000000000 Binary files a/articles/azure-sql/database/media/firewall-create-server-level-portal-quickstart/server-name.png and /dev/null differ diff --git a/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/configure-database.png b/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/configure-database.png deleted file mode 100644 index 86a66a554bbd9..0000000000000 Binary files a/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/configure-database.png and /dev/null differ diff --git a/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/create-database-additional-settings-tab.png b/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/create-database-additional-settings-tab.png deleted file mode 100644 index 8c6a37fcc87f5..0000000000000 Binary files a/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/create-database-additional-settings-tab.png and /dev/null differ diff --git a/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/create-database-basics-tab.png b/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/create-database-basics-tab.png deleted file mode 100644 index fb9cc28ff6596..0000000000000 Binary files 
a/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/create-database-basics-tab.png and /dev/null differ diff --git a/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/create-database-networking-tab.png b/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/create-database-networking-tab.png deleted file mode 100644 index 19dbc4f493d77..0000000000000 Binary files a/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/create-database-networking-tab.png and /dev/null differ diff --git a/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/create-database-security-tab.png b/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/create-database-security-tab.png deleted file mode 100644 index 12b1577861284..0000000000000 Binary files a/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/create-database-security-tab.png and /dev/null differ diff --git a/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/free-services-sql-database.png b/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/free-services-sql-database.png deleted file mode 100644 index d78a602adf587..0000000000000 Binary files a/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/free-services-sql-database.png and /dev/null differ diff --git a/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/free-services-tracking.png b/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/free-services-tracking.png deleted file mode 100644 index 3b84dd75d621f..0000000000000 Binary files a/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/free-services-tracking.png and /dev/null differ diff --git a/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/free-services-usage-overview.png 
b/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/free-services-usage-overview.png deleted file mode 100644 index 1566eaf2466f6..0000000000000 Binary files a/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/free-services-usage-overview.png and /dev/null differ diff --git a/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/search-sql-database.png b/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/search-sql-database.png deleted file mode 100644 index e60dc41af9b23..0000000000000 Binary files a/articles/azure-sql/database/media/free-sql-db-free-account-how-to-deploy/search-sql-database.png and /dev/null differ diff --git a/articles/azure-sql/database/media/geo-distributed-application-configure-tutorial/geo-replication.png b/articles/azure-sql/database/media/geo-distributed-application-configure-tutorial/geo-replication.png deleted file mode 100644 index 9a3ee22d42dc6..0000000000000 Binary files a/articles/azure-sql/database/media/geo-distributed-application-configure-tutorial/geo-replication.png and /dev/null differ diff --git a/articles/azure-sql/database/media/high-availability-sla/business-critical-service-tier.png b/articles/azure-sql/database/media/high-availability-sla/business-critical-service-tier.png deleted file mode 100644 index ffdb2765174bf..0000000000000 Binary files a/articles/azure-sql/database/media/high-availability-sla/business-critical-service-tier.png and /dev/null differ diff --git a/articles/azure-sql/database/media/high-availability-sla/general-purpose-service-tier.png b/articles/azure-sql/database/media/high-availability-sla/general-purpose-service-tier.png deleted file mode 100644 index 8275855c3ebe7..0000000000000 Binary files a/articles/azure-sql/database/media/high-availability-sla/general-purpose-service-tier.png and /dev/null differ diff --git a/articles/azure-sql/database/media/high-availability-sla/hyperscale-architecture.png 
b/articles/azure-sql/database/media/high-availability-sla/hyperscale-architecture.png deleted file mode 100644 index f682cf7ad7672..0000000000000 Binary files a/articles/azure-sql/database/media/high-availability-sla/hyperscale-architecture.png and /dev/null differ diff --git a/articles/azure-sql/database/media/high-availability-sla/zone-redundant-business-critical-service-tier.png b/articles/azure-sql/database/media/high-availability-sla/zone-redundant-business-critical-service-tier.png deleted file mode 100644 index 11f8b2acaf0bf..0000000000000 Binary files a/articles/azure-sql/database/media/high-availability-sla/zone-redundant-business-critical-service-tier.png and /dev/null differ diff --git a/articles/azure-sql/database/media/high-availability-sla/zone-redundant-for-general-purpose.png b/articles/azure-sql/database/media/high-availability-sla/zone-redundant-for-general-purpose.png deleted file mode 100644 index b0f0697d50c34..0000000000000 Binary files a/articles/azure-sql/database/media/high-availability-sla/zone-redundant-for-general-purpose.png and /dev/null differ diff --git a/articles/azure-sql/database/media/high-cpu-troubleshoot/azure-portal-query-performance-insight-cpu-queries.png b/articles/azure-sql/database/media/high-cpu-troubleshoot/azure-portal-query-performance-insight-cpu-queries.png deleted file mode 100644 index b0b53c6624e85..0000000000000 Binary files a/articles/azure-sql/database/media/high-cpu-troubleshoot/azure-portal-query-performance-insight-cpu-queries.png and /dev/null differ diff --git a/articles/azure-sql/database/media/high-cpu-troubleshoot/azure-portal-query-performance-insight.png b/articles/azure-sql/database/media/high-cpu-troubleshoot/azure-portal-query-performance-insight.png deleted file mode 100644 index 4eafc6cbabdab..0000000000000 Binary files a/articles/azure-sql/database/media/high-cpu-troubleshoot/azure-portal-query-performance-insight.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/high-cpu-troubleshoot/ssms-query-store-resources-consumption.png b/articles/azure-sql/database/media/high-cpu-troubleshoot/ssms-query-store-resources-consumption.png deleted file mode 100644 index eb4c4cb4966a1..0000000000000 Binary files a/articles/azure-sql/database/media/high-cpu-troubleshoot/ssms-query-store-resources-consumption.png and /dev/null differ diff --git a/articles/azure-sql/database/media/high-cpu-troubleshoot/ssms-query-store-top-resource-consuming-queries.png b/articles/azure-sql/database/media/high-cpu-troubleshoot/ssms-query-store-top-resource-consuming-queries.png deleted file mode 100644 index ad19ef9d7e968..0000000000000 Binary files a/articles/azure-sql/database/media/high-cpu-troubleshoot/ssms-query-store-top-resource-consuming-queries.png and /dev/null differ diff --git a/articles/azure-sql/database/media/hyperscale-database-create-quickstart/azure-sql-create-database-basics-tab.png b/articles/azure-sql/database/media/hyperscale-database-create-quickstart/azure-sql-create-database-basics-tab.png deleted file mode 100644 index e2b4dd0e7fcae..0000000000000 Binary files a/articles/azure-sql/database/media/hyperscale-database-create-quickstart/azure-sql-create-database-basics-tab.png and /dev/null differ diff --git a/articles/azure-sql/database/media/hyperscale-database-create-quickstart/azure-sql-create-database-sample-data.png b/articles/azure-sql/database/media/hyperscale-database-create-quickstart/azure-sql-create-database-sample-data.png deleted file mode 100644 index 797ed3ad20f95..0000000000000 Binary files a/articles/azure-sql/database/media/hyperscale-database-create-quickstart/azure-sql-create-database-sample-data.png and /dev/null differ diff --git a/articles/azure-sql/database/media/hyperscale-database-create-quickstart/azure-sql-create-resource.png b/articles/azure-sql/database/media/hyperscale-database-create-quickstart/azure-sql-create-resource.png deleted file mode 100644 index 
08556f6de7c6b..0000000000000 Binary files a/articles/azure-sql/database/media/hyperscale-database-create-quickstart/azure-sql-create-resource.png and /dev/null differ diff --git a/articles/azure-sql/database/media/hyperscale-database-create-quickstart/azure-sql-database-configure-network.png b/articles/azure-sql/database/media/hyperscale-database-create-quickstart/azure-sql-database-configure-network.png deleted file mode 100644 index 6bda60b0b17c5..0000000000000 Binary files a/articles/azure-sql/database/media/hyperscale-database-create-quickstart/azure-sql-database-configure-network.png and /dev/null differ diff --git a/articles/azure-sql/database/media/hyperscale-database-create-quickstart/create-database-select-hyperscale-service-tier.png b/articles/azure-sql/database/media/hyperscale-database-create-quickstart/create-database-select-hyperscale-service-tier.png deleted file mode 100644 index ad7ea6b3b80fa..0000000000000 Binary files a/articles/azure-sql/database/media/hyperscale-database-create-quickstart/create-database-select-hyperscale-service-tier.png and /dev/null differ diff --git a/articles/azure-sql/database/media/hyperscale-database-create-quickstart/query-editor-azure-portal-authenticate.png b/articles/azure-sql/database/media/hyperscale-database-create-quickstart/query-editor-azure-portal-authenticate.png deleted file mode 100644 index 0371487162c11..0000000000000 Binary files a/articles/azure-sql/database/media/hyperscale-database-create-quickstart/query-editor-azure-portal-authenticate.png and /dev/null differ diff --git a/articles/azure-sql/database/media/hyperscale-database-create-quickstart/query-editor-azure-portal-run-query.png b/articles/azure-sql/database/media/hyperscale-database-create-quickstart/query-editor-azure-portal-run-query.png deleted file mode 100644 index 55c94e5080816..0000000000000 Binary files a/articles/azure-sql/database/media/hyperscale-database-create-quickstart/query-editor-azure-portal-run-query.png and /dev/null differ 
diff --git a/articles/azure-sql/database/media/intelligent-insights-overview/intelligent-insights-azure-sql-analytics.png b/articles/azure-sql/database/media/intelligent-insights-overview/intelligent-insights-azure-sql-analytics.png deleted file mode 100644 index a838124fab7c1..0000000000000 Binary files a/articles/azure-sql/database/media/intelligent-insights-overview/intelligent-insights-azure-sql-analytics.png and /dev/null differ diff --git a/articles/azure-sql/database/media/intelligent-insights-overview/intelligent-insights-concept.png b/articles/azure-sql/database/media/intelligent-insights-overview/intelligent-insights-concept.png deleted file mode 100644 index 610d4ff92bfb3..0000000000000 Binary files a/articles/azure-sql/database/media/intelligent-insights-overview/intelligent-insights-concept.png and /dev/null differ diff --git a/articles/azure-sql/database/media/intelligent-insights-troubleshoot-performance/intelligent-insights-troubleshooting-flowchart.png b/articles/azure-sql/database/media/intelligent-insights-troubleshoot-performance/intelligent-insights-troubleshooting-flowchart.png deleted file mode 100644 index 921ea9d66fe07..0000000000000 Binary files a/articles/azure-sql/database/media/intelligent-insights-troubleshoot-performance/intelligent-insights-troubleshooting-flowchart.png and /dev/null differ diff --git a/articles/azure-sql/database/media/job-automation-overview/conceptual-diagram.png b/articles/azure-sql/database/media/job-automation-overview/conceptual-diagram.png deleted file mode 100644 index a9ba4e6dec318..0000000000000 Binary files a/articles/azure-sql/database/media/job-automation-overview/conceptual-diagram.png and /dev/null differ diff --git a/articles/azure-sql/database/media/job-automation-overview/targetgroup-examples1.png b/articles/azure-sql/database/media/job-automation-overview/targetgroup-examples1.png deleted file mode 100644 index 9a90bc266d37b..0000000000000 Binary files 
a/articles/azure-sql/database/media/job-automation-overview/targetgroup-examples1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/job-automation-overview/targetgroup-examples2.png b/articles/azure-sql/database/media/job-automation-overview/targetgroup-examples2.png deleted file mode 100644 index a0cfdae75a4ed..0000000000000 Binary files a/articles/azure-sql/database/media/job-automation-overview/targetgroup-examples2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/json-features/image_2.png b/articles/azure-sql/database/media/json-features/image_2.png deleted file mode 100644 index 668232aa2da9f..0000000000000 Binary files a/articles/azure-sql/database/media/json-features/image_2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/append-only-how-to-1.png b/articles/azure-sql/database/media/ledger/append-only-how-to-1.png deleted file mode 100644 index 607341028f8ff..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/append-only-how-to-1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/append-only-how-to-keycardevent-table.png b/articles/azure-sql/database/media/ledger/append-only-how-to-keycardevent-table.png deleted file mode 100644 index 407cc4af44144..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/append-only-how-to-keycardevent-table.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/automatic-digest-management.png b/articles/azure-sql/database/media/ledger/automatic-digest-management.png deleted file mode 100644 index 623ed3a8d0270..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/automatic-digest-management.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/database-ledger-1.png b/articles/azure-sql/database/media/ledger/database-ledger-1.png deleted file mode 100644 index 5b6e0556112d1..0000000000000 Binary files 
a/articles/azure-sql/database/media/ledger/database-ledger-1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/ledger-configure-ledger-pane.png b/articles/azure-sql/database/media/ledger/ledger-configure-ledger-pane.png deleted file mode 100644 index 592020cf51e83..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/ledger-configure-ledger-pane.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/ledger-configure-ledger-security-tab.png b/articles/azure-sql/database/media/ledger/ledger-configure-ledger-security-tab.png deleted file mode 100644 index 43600effc1428..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/ledger-configure-ledger-security-tab.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/ledger-create-database-networking-tab.png b/articles/azure-sql/database/media/ledger/ledger-create-database-networking-tab.png deleted file mode 100644 index 19a524759277c..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/ledger-create-database-networking-tab.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/ledger-portal-all-resources.png b/articles/azure-sql/database/media/ledger/ledger-portal-all-resources.png deleted file mode 100644 index 12befb74caaf5..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/ledger-portal-all-resources.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/ledger-portal-manage-ledger.png b/articles/azure-sql/database/media/ledger/ledger-portal-manage-ledger.png deleted file mode 100644 index d98c2ba96d4e5..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/ledger-portal-manage-ledger.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/ledger-portal-open-query-editor.png b/articles/azure-sql/database/media/ledger/ledger-portal-open-query-editor.png deleted file mode 100644 index 
e42b445b7a9e5..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/ledger-portal-open-query-editor.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/ledger-portal-run-query-editor.png b/articles/azure-sql/database/media/ledger/ledger-portal-run-query-editor.png deleted file mode 100644 index 7333327604ce6..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/ledger-portal-run-query-editor.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/ledger-portal-verify.png b/articles/azure-sql/database/media/ledger/ledger-portal-verify.png deleted file mode 100644 index 99d4ceee3a075..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/ledger-portal-verify.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/ledger-retrieve-digest.png b/articles/azure-sql/database/media/ledger/ledger-retrieve-digest.png deleted file mode 100644 index 11053d5b15caf..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/ledger-retrieve-digest.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/ledger-review-security-tab.png b/articles/azure-sql/database/media/ledger/ledger-review-security-tab.png deleted file mode 100644 index cacdf427c2893..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/ledger-review-security-tab.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/ledger-table-architecture-append-only.png b/articles/azure-sql/database/media/ledger/ledger-table-architecture-append-only.png deleted file mode 100644 index d3170681c4fb4..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/ledger-table-architecture-append-only.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/ledger-table-architecture.png b/articles/azure-sql/database/media/ledger/ledger-table-architecture.png deleted file mode 100644 index 
6993bf007e110..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/ledger-table-architecture.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/ledger-updatable-how-to-new-tables.png b/articles/azure-sql/database/media/ledger/ledger-updatable-how-to-new-tables.png deleted file mode 100644 index 8b8e240f495ce..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/ledger-updatable-how-to-new-tables.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/ledger-verify-message.png b/articles/azure-sql/database/media/ledger/ledger-verify-message.png deleted file mode 100644 index ae3c304791699..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/ledger-verify-message.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/merkle-tree.png b/articles/azure-sql/database/media/ledger/merkle-tree.png deleted file mode 100644 index a7e9c7d4b3f66..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/merkle-tree.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/sql-updatable-how-to-1.png b/articles/azure-sql/database/media/ledger/sql-updatable-how-to-1.png deleted file mode 100644 index dd82056f1cff4..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/sql-updatable-how-to-1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/sql-updatable-how-to-2.png b/articles/azure-sql/database/media/ledger/sql-updatable-how-to-2.png deleted file mode 100644 index dabbd23c81631..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/sql-updatable-how-to-2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/sql-updatable-how-to-3.png b/articles/azure-sql/database/media/ledger/sql-updatable-how-to-3.png deleted file mode 100644 index fd59a52a5c36a..0000000000000 Binary files 
a/articles/azure-sql/database/media/ledger/sql-updatable-how-to-3.png and /dev/null differ diff --git a/articles/azure-sql/database/media/ledger/verification_script_exectution.png b/articles/azure-sql/database/media/ledger/verification_script_exectution.png deleted file mode 100644 index f6fd8ea7ead76..0000000000000 Binary files a/articles/azure-sql/database/media/ledger/verification_script_exectution.png and /dev/null differ diff --git a/articles/azure-sql/database/media/logical-servers/create-database-1.png b/articles/azure-sql/database/media/logical-servers/create-database-1.png deleted file mode 100644 index 29531be331183..0000000000000 Binary files a/articles/azure-sql/database/media/logical-servers/create-database-1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/logins-create-manage/sql-admins.png b/articles/azure-sql/database/media/logins-create-manage/sql-admins.png deleted file mode 100644 index e477b2e1a1ac6..0000000000000 Binary files a/articles/azure-sql/database/media/logins-create-manage/sql-admins.png and /dev/null differ diff --git a/articles/azure-sql/database/media/logins-create-manage/sql-admins2.png b/articles/azure-sql/database/media/logins-create-manage/sql-admins2.png deleted file mode 100644 index a55610f8b3f56..0000000000000 Binary files a/articles/azure-sql/database/media/logins-create-manage/sql-admins2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/long-term-backup-retention-configure/ltr-available-backups-manage.png b/articles/azure-sql/database/media/long-term-backup-retention-configure/ltr-available-backups-manage.png deleted file mode 100644 index 8c0a3a96def06..0000000000000 Binary files a/articles/azure-sql/database/media/long-term-backup-retention-configure/ltr-available-backups-manage.png and /dev/null differ diff --git a/articles/azure-sql/database/media/long-term-backup-retention-configure/ltr-available-backups-restore.png 
b/articles/azure-sql/database/media/long-term-backup-retention-configure/ltr-available-backups-restore.png deleted file mode 100644 index eeea7cc6f11de..0000000000000 Binary files a/articles/azure-sql/database/media/long-term-backup-retention-configure/ltr-available-backups-restore.png and /dev/null differ diff --git a/articles/azure-sql/database/media/long-term-backup-retention-configure/ltr-available-backups-tab.png b/articles/azure-sql/database/media/long-term-backup-retention-configure/ltr-available-backups-tab.png deleted file mode 100644 index 73b55aaa90b62..0000000000000 Binary files a/articles/azure-sql/database/media/long-term-backup-retention-configure/ltr-available-backups-tab.png and /dev/null differ diff --git a/articles/azure-sql/database/media/long-term-backup-retention-configure/ltr-configure-policies.png b/articles/azure-sql/database/media/long-term-backup-retention-configure/ltr-configure-policies.png deleted file mode 100644 index de44735d4959b..0000000000000 Binary files a/articles/azure-sql/database/media/long-term-backup-retention-configure/ltr-configure-policies.png and /dev/null differ diff --git a/articles/azure-sql/database/media/long-term-backup-retention-configure/ltr-policies-tab-configure.png b/articles/azure-sql/database/media/long-term-backup-retention-configure/ltr-policies-tab-configure.png deleted file mode 100644 index aa7eceda656c7..0000000000000 Binary files a/articles/azure-sql/database/media/long-term-backup-retention-configure/ltr-policies-tab-configure.png and /dev/null differ diff --git a/articles/azure-sql/database/media/long-term-backup-retention-configure/ltr-policies-tab.png b/articles/azure-sql/database/media/long-term-backup-retention-configure/ltr-policies-tab.png deleted file mode 100644 index 6c71bdb2e903f..0000000000000 Binary files a/articles/azure-sql/database/media/long-term-backup-retention-configure/ltr-policies-tab.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/long-term-backup-retention-configure/restore-job-progress-long-term.png b/articles/azure-sql/database/media/long-term-backup-retention-configure/restore-job-progress-long-term.png deleted file mode 100644 index 7b606ff469f7a..0000000000000 Binary files a/articles/azure-sql/database/media/long-term-backup-retention-configure/restore-job-progress-long-term.png and /dev/null differ diff --git a/articles/azure-sql/database/media/long-term-backup-retention-configure/restore-ltr.png b/articles/azure-sql/database/media/long-term-backup-retention-configure/restore-ltr.png deleted file mode 100644 index 2814e7b077257..0000000000000 Binary files a/articles/azure-sql/database/media/long-term-backup-retention-configure/restore-ltr.png and /dev/null differ diff --git a/articles/azure-sql/database/media/long-term-retention-overview/ltr-example.png b/articles/azure-sql/database/media/long-term-retention-overview/ltr-example.png deleted file mode 100644 index 96f439d5b5f9d..0000000000000 Binary files a/articles/azure-sql/database/media/long-term-retention-overview/ltr-example.png and /dev/null differ diff --git a/articles/azure-sql/database/media/machine-learning-services-add-r-packages/r-installed-packages.png b/articles/azure-sql/database/media/machine-learning-services-add-r-packages/r-installed-packages.png deleted file mode 100644 index 9a39a7a6c14e2..0000000000000 Binary files a/articles/azure-sql/database/media/machine-learning-services-add-r-packages/r-installed-packages.png and /dev/null differ diff --git a/articles/azure-sql/database/media/machine-learning-services-add-r-packages/r-verify-package-install.png b/articles/azure-sql/database/media/machine-learning-services-add-r-packages/r-verify-package-install.png deleted file mode 100644 index dc7cfc15937a4..0000000000000 Binary files a/articles/azure-sql/database/media/machine-learning-services-add-r-packages/r-verify-package-install.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/maintenance-window-configure/additional-settings-mi.png b/articles/azure-sql/database/media/maintenance-window-configure/additional-settings-mi.png deleted file mode 100644 index 8c3e06b4c1af5..0000000000000 Binary files a/articles/azure-sql/database/media/maintenance-window-configure/additional-settings-mi.png and /dev/null differ diff --git a/articles/azure-sql/database/media/maintenance-window-configure/additional-settings.png b/articles/azure-sql/database/media/maintenance-window-configure/additional-settings.png deleted file mode 100644 index 998299979df99..0000000000000 Binary files a/articles/azure-sql/database/media/maintenance-window-configure/additional-settings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/maintenance-window-configure/maintenance-mi.png b/articles/azure-sql/database/media/maintenance-window-configure/maintenance-mi.png deleted file mode 100644 index 291b63c1c8f42..0000000000000 Binary files a/articles/azure-sql/database/media/maintenance-window-configure/maintenance-mi.png and /dev/null differ diff --git a/articles/azure-sql/database/media/maintenance-window-configure/maintenance.png b/articles/azure-sql/database/media/maintenance-window-configure/maintenance.png deleted file mode 100644 index e9ac833fc77c8..0000000000000 Binary files a/articles/azure-sql/database/media/maintenance-window-configure/maintenance.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-application-rolling-upgrade/option1-1.png b/articles/azure-sql/database/media/manage-application-rolling-upgrade/option1-1.png deleted file mode 100644 index 4ab28e89881c1..0000000000000 Binary files a/articles/azure-sql/database/media/manage-application-rolling-upgrade/option1-1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-application-rolling-upgrade/option1-2.png b/articles/azure-sql/database/media/manage-application-rolling-upgrade/option1-2.png deleted 
file mode 100644 index e1f4dfd4b6ad1..0000000000000 Binary files a/articles/azure-sql/database/media/manage-application-rolling-upgrade/option1-2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-application-rolling-upgrade/option1-3.png b/articles/azure-sql/database/media/manage-application-rolling-upgrade/option1-3.png deleted file mode 100644 index bbbd643d3a479..0000000000000 Binary files a/articles/azure-sql/database/media/manage-application-rolling-upgrade/option1-3.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-application-rolling-upgrade/option1-4.png b/articles/azure-sql/database/media/manage-application-rolling-upgrade/option1-4.png deleted file mode 100644 index a3eb659560a81..0000000000000 Binary files a/articles/azure-sql/database/media/manage-application-rolling-upgrade/option1-4.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-application-rolling-upgrade/option2-1.png b/articles/azure-sql/database/media/manage-application-rolling-upgrade/option2-1.png deleted file mode 100644 index a0cfb6ea58c5a..0000000000000 Binary files a/articles/azure-sql/database/media/manage-application-rolling-upgrade/option2-1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-application-rolling-upgrade/option2-2.png b/articles/azure-sql/database/media/manage-application-rolling-upgrade/option2-2.png deleted file mode 100644 index aae01e80ae11c..0000000000000 Binary files a/articles/azure-sql/database/media/manage-application-rolling-upgrade/option2-2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-application-rolling-upgrade/option2-3.png b/articles/azure-sql/database/media/manage-application-rolling-upgrade/option2-3.png deleted file mode 100644 index 1da734b00c595..0000000000000 Binary files a/articles/azure-sql/database/media/manage-application-rolling-upgrade/option2-3.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/manage-application-rolling-upgrade/option2-4.png b/articles/azure-sql/database/media/manage-application-rolling-upgrade/option2-4.png deleted file mode 100644 index 1cce3bffb22c8..0000000000000 Binary files a/articles/azure-sql/database/media/manage-application-rolling-upgrade/option2-4.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/advisor-section.png b/articles/azure-sql/database/media/manage-data-after-migrating-to-database/advisor-section.png deleted file mode 100644 index 1115461b2c606..0000000000000 Binary files a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/advisor-section.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/always-encrypted.png b/articles/azure-sql/database/media/manage-data-after-migrating-to-database/always-encrypted.png deleted file mode 100644 index f46b3c6327c42..0000000000000 Binary files a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/always-encrypted.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/chart.png b/articles/azure-sql/database/media/manage-data-after-migrating-to-database/chart.png deleted file mode 100644 index 18f4f3398286e..0000000000000 Binary files a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/chart.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/database-export1.png b/articles/azure-sql/database/media/manage-data-after-migrating-to-database/database-export1.png deleted file mode 100644 index e1867642d465e..0000000000000 Binary files a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/database-export1.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/elastic-pool-recommendations.png b/articles/azure-sql/database/media/manage-data-after-migrating-to-database/elastic-pool-recommendations.png deleted file mode 100644 index f4dbc98aa84be..0000000000000 Binary files a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/elastic-pool-recommendations.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/import1.png b/articles/azure-sql/database/media/manage-data-after-migrating-to-database/import1.png deleted file mode 100644 index 6bfa3e7999ae7..0000000000000 Binary files a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/import1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/monitoring-chart.png b/articles/azure-sql/database/media/manage-data-after-migrating-to-database/monitoring-chart.png deleted file mode 100644 index 1c6034b819521..0000000000000 Binary files a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/monitoring-chart.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/query-performance-insight.png b/articles/azure-sql/database/media/manage-data-after-migrating-to-database/query-performance-insight.png deleted file mode 100644 index 4c00b0a26353f..0000000000000 Binary files a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/query-performance-insight.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/sqldb_service_tier_monitoring.png b/articles/azure-sql/database/media/manage-data-after-migrating-to-database/sqldb_service_tier_monitoring.png deleted file mode 100644 index 8aa5fd91ead99..0000000000000 Binary files 
a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/sqldb_service_tier_monitoring.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/vnet-service-endpoints.png b/articles/azure-sql/database/media/manage-data-after-migrating-to-database/vnet-service-endpoints.png deleted file mode 100644 index 66fe9531842bb..0000000000000 Binary files a/articles/azure-sql/database/media/manage-data-after-migrating-to-database/vnet-service-endpoints.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-hyperscale-database/database-list-azure-portal.png b/articles/azure-sql/database/media/manage-hyperscale-database/database-list-azure-portal.png deleted file mode 100644 index 8d4748e947c4f..0000000000000 Binary files a/articles/azure-sql/database/media/manage-hyperscale-database/database-list-azure-portal.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-hyperscale-database/ongoing-operation-notification-azure-sql-database-azure-portal.png b/articles/azure-sql/database/media/manage-hyperscale-database/ongoing-operation-notification-azure-sql-database-azure-portal.png deleted file mode 100644 index b85c8573f1234..0000000000000 Binary files a/articles/azure-sql/database/media/manage-hyperscale-database/ongoing-operation-notification-azure-sql-database-azure-portal.png and /dev/null differ diff --git a/articles/azure-sql/database/media/manage-hyperscale-database/reverse-migrate-hyperscale-service-compute-tier-pane.png b/articles/azure-sql/database/media/manage-hyperscale-database/reverse-migrate-hyperscale-service-compute-tier-pane.png deleted file mode 100644 index b27728d84bf5f..0000000000000 Binary files a/articles/azure-sql/database/media/manage-hyperscale-database/reverse-migrate-hyperscale-service-compute-tier-pane.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/manage-hyperscale-database/service-tier-dropdown-azure-sql-database-azure-portal.png b/articles/azure-sql/database/media/manage-hyperscale-database/service-tier-dropdown-azure-sql-database-azure-portal.png deleted file mode 100644 index 038d87e06ba7c..0000000000000 Binary files a/articles/azure-sql/database/media/manage-hyperscale-database/service-tier-dropdown-azure-sql-database-azure-portal.png and /dev/null differ diff --git a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/architecture.png b/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/architecture.png deleted file mode 100644 index db0153bbee484..0000000000000 Binary files a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/architecture.png and /dev/null differ diff --git a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/azure-sql-sol-overview.png b/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/azure-sql-sol-overview.png deleted file mode 100644 index b8029a1a25b14..0000000000000 Binary files a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/azure-sql-sol-overview.png and /dev/null differ diff --git a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-container-elasticpool-enable.png b/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-container-elasticpool-enable.png deleted file mode 100644 index 53587cb7bb94e..0000000000000 Binary files a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-container-elasticpool-enable.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-container-elasticpool-selection.png b/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-container-elasticpool-selection.png deleted file mode 100644 index 051617e3d7959..0000000000000 Binary files a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-container-elasticpool-selection.png and /dev/null differ diff --git a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-container-mi-enable.png b/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-container-mi-enable.png deleted file mode 100644 index 7a775630f1400..0000000000000 Binary files a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-container-mi-enable.png and /dev/null differ diff --git a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-container-mi-selection.png b/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-container-mi-selection.png deleted file mode 100644 index 9e03848c76a20..0000000000000 Binary files a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-container-mi-selection.png and /dev/null differ diff --git a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-database-mi-enable.png b/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-database-mi-enable.png deleted file mode 100644 index 
794ae90c460eb..0000000000000 Binary files a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-database-mi-enable.png and /dev/null differ diff --git a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-database-mi-selection.png b/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-database-mi-selection.png deleted file mode 100644 index db0fa26aa4fc2..0000000000000 Binary files a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-database-mi-selection.png and /dev/null differ diff --git a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-database-sql-enable.png b/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-database-sql-enable.png deleted file mode 100644 index 8b614fc4cb36f..0000000000000 Binary files a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-database-sql-enable.png and /dev/null differ diff --git a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-database-sql-selection.png b/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-database-sql-selection.png deleted file mode 100644 index 491c391c0e18a..0000000000000 Binary files a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-database-sql-selection.png and /dev/null differ diff --git a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/sql-analytics-configuration-blade.png 
b/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/sql-analytics-configuration-blade.png deleted file mode 100644 index 5c3d9bb03beb2..0000000000000 Binary files a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/sql-analytics-configuration-blade.png and /dev/null differ diff --git a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/sql-analytics-in-marketplace.png b/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/sql-analytics-in-marketplace.png deleted file mode 100644 index fc69d24d2b5da..0000000000000 Binary files a/articles/azure-sql/database/media/metrics-diagnostic-telemetry-logging-streaming-export-configure/sql-analytics-in-marketplace.png and /dev/null differ diff --git a/articles/azure-sql/database/media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/copy-data.png b/articles/azure-sql/database/media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/copy-data.png deleted file mode 100644 index fcf3f30d00945..0000000000000 Binary files a/articles/azure-sql/database/media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/copy-data.png and /dev/null differ diff --git a/articles/azure-sql/database/media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/linked-services-create-target.png b/articles/azure-sql/database/media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/linked-services-create-target.png deleted file mode 100644 index 06e0f522bac95..0000000000000 Binary files a/articles/azure-sql/database/media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/linked-services-create-target.png and /dev/null differ diff --git a/articles/azure-sql/database/media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/linked-services-create.png 
b/articles/azure-sql/database/media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/linked-services-create.png deleted file mode 100644 index 92ee78706686d..0000000000000 Binary files a/articles/azure-sql/database/media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/linked-services-create.png and /dev/null differ diff --git a/articles/azure-sql/database/media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/linked-services-odbc.png b/articles/azure-sql/database/media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/linked-services-odbc.png deleted file mode 100644 index dd5c995dc7314..0000000000000 Binary files a/articles/azure-sql/database/media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/linked-services-odbc.png and /dev/null differ diff --git a/articles/azure-sql/database/media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/linked-services-test-successful.png b/articles/azure-sql/database/media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/linked-services-test-successful.png deleted file mode 100644 index 22f5f3690f419..0000000000000 Binary files a/articles/azure-sql/database/media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/linked-services-test-successful.png and /dev/null differ diff --git a/articles/azure-sql/database/media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/provision-serverless.png b/articles/azure-sql/database/media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/provision-serverless.png deleted file mode 100644 index 79e33b55c7d36..0000000000000 Binary files a/articles/azure-sql/database/media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/provision-serverless.png and /dev/null differ diff --git a/articles/azure-sql/database/media/migrate-to-database-from-sql-server/azure-sql-migration-sql-db.png b/articles/azure-sql/database/media/migrate-to-database-from-sql-server/azure-sql-migration-sql-db.png deleted file 
mode 100644 index 6fdd62cdea06c..0000000000000 Binary files a/articles/azure-sql/database/media/migrate-to-database-from-sql-server/azure-sql-migration-sql-db.png and /dev/null differ diff --git a/articles/azure-sql/database/media/migrate-to-database-from-sql-server/seedcloudtr.png b/articles/azure-sql/database/media/migrate-to-database-from-sql-server/seedcloudtr.png deleted file mode 100644 index 6532b50f415c0..0000000000000 Binary files a/articles/azure-sql/database/media/migrate-to-database-from-sql-server/seedcloudtr.png and /dev/null differ diff --git a/articles/azure-sql/database/media/monitor-tune-overview/azure-sql-insights-horizontal-analytics-full-diagram.svg b/articles/azure-sql/database/media/monitor-tune-overview/azure-sql-insights-horizontal-analytics-full-diagram.svg deleted file mode 100644 index 1b1339fed1e9c..0000000000000 --- a/articles/azure-sql/database/media/monitor-tune-overview/azure-sql-insights-horizontal-analytics-full-diagram.svg +++ /dev/null @@ -1 +0,0 @@ -Azure SubscriptionActivity LogSQL Database, SQL Managed Instance, or SQL Server on Azure VMsResource LogsNon-Azure DestinationsPlatform MetricsAutomaticAutomaticPartnerDatabase engineDiagnostic SettingIntelligent Insights(SQLInsights log category)VMCollectionAgentGatheredtelemetrySQL InsightsAzure nativepartnersEvent HubAzure StorageMetricsLogsAzureDiagnosticsTableAzure SQL AnalyticssolutionActivityLogLogsInsightsMetricsTableLog alertsLog AnalyticsWorkbooksLog alertsLog AnalyticsMetric alertsMetric explorerActivity LogalertsAzure portalWorkbooks \ No newline at end of file diff --git a/articles/azure-sql/database/media/monitor-tune-overview/resource-metrics.png b/articles/azure-sql/database/media/monitor-tune-overview/resource-metrics.png deleted file mode 100644 index c4ba9af733bf8..0000000000000 Binary files a/articles/azure-sql/database/media/monitor-tune-overview/resource-metrics.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/monitoring-with-dmvs/sql_db_resource_utilization.png b/articles/azure-sql/database/media/monitoring-with-dmvs/sql_db_resource_utilization.png deleted file mode 100644 index 7935435d60ec2..0000000000000 Binary files a/articles/azure-sql/database/media/monitoring-with-dmvs/sql_db_resource_utilization.png and /dev/null differ diff --git a/articles/azure-sql/database/media/monitoring-with-dmvs/sys_resource_stats.png b/articles/azure-sql/database/media/monitoring-with-dmvs/sys_resource_stats.png deleted file mode 100644 index 40bc2bd228e9e..0000000000000 Binary files a/articles/azure-sql/database/media/monitoring-with-dmvs/sys_resource_stats.png and /dev/null differ diff --git a/articles/azure-sql/database/media/outbound-firewall-rules/Step1.jpg b/articles/azure-sql/database/media/outbound-firewall-rules/Step1.jpg deleted file mode 100644 index c4ecec5eb6818..0000000000000 Binary files a/articles/azure-sql/database/media/outbound-firewall-rules/Step1.jpg and /dev/null differ diff --git a/articles/azure-sql/database/media/outbound-firewall-rules/Step2.jpg b/articles/azure-sql/database/media/outbound-firewall-rules/Step2.jpg deleted file mode 100644 index 39aadabea1638..0000000000000 Binary files a/articles/azure-sql/database/media/outbound-firewall-rules/Step2.jpg and /dev/null differ diff --git a/articles/azure-sql/database/media/outbound-firewall-rules/Step3.jpg b/articles/azure-sql/database/media/outbound-firewall-rules/Step3.jpg deleted file mode 100644 index e1a7a59f00b0b..0000000000000 Binary files a/articles/azure-sql/database/media/outbound-firewall-rules/Step3.jpg and /dev/null differ diff --git a/articles/azure-sql/database/media/outbound-firewall-rules/Step4.jpg b/articles/azure-sql/database/media/outbound-firewall-rules/Step4.jpg deleted file mode 100644 index cb458c4701867..0000000000000 Binary files a/articles/azure-sql/database/media/outbound-firewall-rules/Step4.jpg and /dev/null differ diff --git 
a/articles/azure-sql/database/media/performance-guidance/query_plan_corrected_indexes.png b/articles/azure-sql/database/media/performance-guidance/query_plan_corrected_indexes.png deleted file mode 100644 index 8d1cf7017eb7a..0000000000000 Binary files a/articles/azure-sql/database/media/performance-guidance/query_plan_corrected_indexes.png and /dev/null differ diff --git a/articles/azure-sql/database/media/performance-guidance/query_plan_missing_indexes.png b/articles/azure-sql/database/media/performance-guidance/query_plan_missing_indexes.png deleted file mode 100644 index 674c15bb822d6..0000000000000 Binary files a/articles/azure-sql/database/media/performance-guidance/query_plan_missing_indexes.png and /dev/null differ diff --git a/articles/azure-sql/database/media/performance-guidance/query_tuning_1.png b/articles/azure-sql/database/media/performance-guidance/query_tuning_1.png deleted file mode 100644 index 91d2aa198ba47..0000000000000 Binary files a/articles/azure-sql/database/media/performance-guidance/query_tuning_1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/performance-guidance/query_tuning_2.png b/articles/azure-sql/database/media/performance-guidance/query_tuning_2.png deleted file mode 100644 index cac9045c339a8..0000000000000 Binary files a/articles/azure-sql/database/media/performance-guidance/query_tuning_2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/performance-guidance/query_tuning_3.png b/articles/azure-sql/database/media/performance-guidance/query_tuning_3.png deleted file mode 100644 index e21752dc8ce25..0000000000000 Binary files a/articles/azure-sql/database/media/performance-guidance/query_tuning_3.png and /dev/null differ diff --git a/articles/azure-sql/database/media/performance-guidance/query_tuning_4.png b/articles/azure-sql/database/media/performance-guidance/query_tuning_4.png deleted file mode 100644 index 46eafd8d80bdb..0000000000000 Binary files 
a/articles/azure-sql/database/media/performance-guidance/query_tuning_4.png and /dev/null differ diff --git a/articles/azure-sql/database/media/predictive-model-build-compare-tutorial/compare-models.png b/articles/azure-sql/database/media/predictive-model-build-compare-tutorial/compare-models.png deleted file mode 100644 index 43d781f58e617..0000000000000 Binary files a/articles/azure-sql/database/media/predictive-model-build-compare-tutorial/compare-models.png and /dev/null differ diff --git a/articles/azure-sql/database/media/private-endpoint/pe-connect-overview.png b/articles/azure-sql/database/media/private-endpoint/pe-connect-overview.png deleted file mode 100644 index a8c8b6d62f58d..0000000000000 Binary files a/articles/azure-sql/database/media/private-endpoint/pe-connect-overview.png and /dev/null differ diff --git a/articles/azure-sql/database/media/private-endpoint/pec-approve.png b/articles/azure-sql/database/media/private-endpoint/pec-approve.png deleted file mode 100644 index 40bb588b8999a..0000000000000 Binary files a/articles/azure-sql/database/media/private-endpoint/pec-approve.png and /dev/null differ diff --git a/articles/azure-sql/database/media/private-endpoint/pec-click.png b/articles/azure-sql/database/media/private-endpoint/pec-click.png deleted file mode 100644 index 7e5f9beab4b14..0000000000000 Binary files a/articles/azure-sql/database/media/private-endpoint/pec-click.png and /dev/null differ diff --git a/articles/azure-sql/database/media/private-endpoint/pec-deny-public-access.png b/articles/azure-sql/database/media/private-endpoint/pec-deny-public-access.png deleted file mode 100644 index ac5117b1ce2ef..0000000000000 Binary files a/articles/azure-sql/database/media/private-endpoint/pec-deny-public-access.png and /dev/null differ diff --git a/articles/azure-sql/database/media/private-endpoint/pec-ip-display.png b/articles/azure-sql/database/media/private-endpoint/pec-ip-display.png deleted file mode 100644 index 
bc86b78d48ae3..0000000000000 Binary files a/articles/azure-sql/database/media/private-endpoint/pec-ip-display.png and /dev/null differ diff --git a/articles/azure-sql/database/media/private-endpoint/pec-list-after.png b/articles/azure-sql/database/media/private-endpoint/pec-list-after.png deleted file mode 100644 index f6d1c58fd8c95..0000000000000 Binary files a/articles/azure-sql/database/media/private-endpoint/pec-list-after.png and /dev/null differ diff --git a/articles/azure-sql/database/media/private-endpoint/pec-list-before.png b/articles/azure-sql/database/media/private-endpoint/pec-list-before.png deleted file mode 100644 index 1cfe7e0685c7c..0000000000000 Binary files a/articles/azure-sql/database/media/private-endpoint/pec-list-before.png and /dev/null differ diff --git a/articles/azure-sql/database/media/private-endpoint/pec-nic-click.png b/articles/azure-sql/database/media/private-endpoint/pec-nic-click.png deleted file mode 100644 index 4c4eae2af3007..0000000000000 Binary files a/articles/azure-sql/database/media/private-endpoint/pec-nic-click.png and /dev/null differ diff --git a/articles/azure-sql/database/media/private-endpoint/pec-select.png b/articles/azure-sql/database/media/private-endpoint/pec-select.png deleted file mode 100644 index 467bfe56268cc..0000000000000 Binary files a/articles/azure-sql/database/media/private-endpoint/pec-select.png and /dev/null differ diff --git a/articles/azure-sql/database/media/private-endpoint/telnet-result.png b/articles/azure-sql/database/media/private-endpoint/telnet-result.png deleted file mode 100644 index 4cb318767c16d..0000000000000 Binary files a/articles/azure-sql/database/media/private-endpoint/telnet-result.png and /dev/null differ diff --git a/articles/azure-sql/database/media/purchasing-models/bounding-box.png b/articles/azure-sql/database/media/purchasing-models/bounding-box.png deleted file mode 100644 index 3565868dcc060..0000000000000 Binary files 
a/articles/azure-sql/database/media/purchasing-models/bounding-box.png and /dev/null differ diff --git a/articles/azure-sql/database/media/purchasing-models/pricing-model.png b/articles/azure-sql/database/media/purchasing-models/pricing-model.png deleted file mode 100644 index af35b50864a26..0000000000000 Binary files a/articles/azure-sql/database/media/purchasing-models/pricing-model.png and /dev/null differ diff --git a/articles/azure-sql/database/media/query-performance-insight-use/annotation-details.png b/articles/azure-sql/database/media/query-performance-insight-use/annotation-details.png deleted file mode 100644 index 1f8a43b839e4a..0000000000000 Binary files a/articles/azure-sql/database/media/query-performance-insight-use/annotation-details.png and /dev/null differ diff --git a/articles/azure-sql/database/media/query-performance-insight-use/annotation.png b/articles/azure-sql/database/media/query-performance-insight-use/annotation.png deleted file mode 100644 index c4cdadb71baeb..0000000000000 Binary files a/articles/azure-sql/database/media/query-performance-insight-use/annotation.png and /dev/null differ diff --git a/articles/azure-sql/database/media/query-performance-insight-use/custom-tab.png b/articles/azure-sql/database/media/query-performance-insight-use/custom-tab.png deleted file mode 100644 index 0e4e31bc333b6..0000000000000 Binary files a/articles/azure-sql/database/media/query-performance-insight-use/custom-tab.png and /dev/null differ diff --git a/articles/azure-sql/database/media/query-performance-insight-use/details.png b/articles/azure-sql/database/media/query-performance-insight-use/details.png deleted file mode 100644 index 2c5b72b8bc888..0000000000000 Binary files a/articles/azure-sql/database/media/query-performance-insight-use/details.png and /dev/null differ diff --git a/articles/azure-sql/database/media/query-performance-insight-use/ia.png b/articles/azure-sql/database/media/query-performance-insight-use/ia.png deleted file mode 
100644 index c1e37ac332c4c..0000000000000 Binary files a/articles/azure-sql/database/media/query-performance-insight-use/ia.png and /dev/null differ diff --git a/articles/azure-sql/database/media/query-performance-insight-use/opening-title.png b/articles/azure-sql/database/media/query-performance-insight-use/opening-title.png deleted file mode 100644 index 84444f838fd02..0000000000000 Binary files a/articles/azure-sql/database/media/query-performance-insight-use/opening-title.png and /dev/null differ diff --git a/articles/azure-sql/database/media/query-performance-insight-use/performance.png b/articles/azure-sql/database/media/query-performance-insight-use/performance.png deleted file mode 100644 index 50e436772da5c..0000000000000 Binary files a/articles/azure-sql/database/media/query-performance-insight-use/performance.png and /dev/null differ diff --git a/articles/azure-sql/database/media/query-performance-insight-use/qds-off.png b/articles/azure-sql/database/media/query-performance-insight-use/qds-off.png deleted file mode 100644 index 1ef1698290c2e..0000000000000 Binary files a/articles/azure-sql/database/media/query-performance-insight-use/qds-off.png and /dev/null differ diff --git a/articles/azure-sql/database/media/query-performance-insight-use/query-details.png b/articles/azure-sql/database/media/query-performance-insight-use/query-details.png deleted file mode 100644 index 751513381da12..0000000000000 Binary files a/articles/azure-sql/database/media/query-performance-insight-use/query-details.png and /dev/null differ diff --git a/articles/azure-sql/database/media/query-performance-insight-use/tile.png b/articles/azure-sql/database/media/query-performance-insight-use/tile.png deleted file mode 100644 index 52e01f88c42c9..0000000000000 Binary files a/articles/azure-sql/database/media/query-performance-insight-use/tile.png and /dev/null differ diff --git a/articles/azure-sql/database/media/query-performance-insight-use/top-duration.png 
b/articles/azure-sql/database/media/query-performance-insight-use/top-duration.png deleted file mode 100644 index 65cb205020995..0000000000000 Binary files a/articles/azure-sql/database/media/query-performance-insight-use/top-duration.png and /dev/null differ diff --git a/articles/azure-sql/database/media/query-performance-insight-use/top-execution.png b/articles/azure-sql/database/media/query-performance-insight-use/top-execution.png deleted file mode 100644 index 699a54ec24aca..0000000000000 Binary files a/articles/azure-sql/database/media/query-performance-insight-use/top-execution.png and /dev/null differ diff --git a/articles/azure-sql/database/media/query-performance-insight-use/top-queries.png b/articles/azure-sql/database/media/query-performance-insight-use/top-queries.png deleted file mode 100644 index 2b05c8a7d706a..0000000000000 Binary files a/articles/azure-sql/database/media/query-performance-insight-use/top-queries.png and /dev/null differ diff --git a/articles/azure-sql/database/media/query-performance-insight-use/zoom.png b/articles/azure-sql/database/media/query-performance-insight-use/zoom.png deleted file mode 100644 index 8569a151cdb70..0000000000000 Binary files a/articles/azure-sql/database/media/query-performance-insight-use/zoom.png and /dev/null differ diff --git a/articles/azure-sql/database/media/quickstart-create-single-database/manage-connectivity-settings.png b/articles/azure-sql/database/media/quickstart-create-single-database/manage-connectivity-settings.png deleted file mode 100644 index 067b9056c8c02..0000000000000 Binary files a/articles/azure-sql/database/media/quickstart-create-single-database/manage-connectivity-settings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/quickstart-create-single-database/manage-server-firewall.png b/articles/azure-sql/database/media/quickstart-create-single-database/manage-server-firewall.png deleted file mode 100644 index 70321347b2bb7..0000000000000 Binary files 
a/articles/azure-sql/database/media/quickstart-create-single-database/manage-server-firewall.png and /dev/null differ diff --git a/articles/azure-sql/database/media/quickstart-create-single-database/new-server2.png b/articles/azure-sql/database/media/quickstart-create-single-database/new-server2.png deleted file mode 100644 index a1ab6007d77e1..0000000000000 Binary files a/articles/azure-sql/database/media/quickstart-create-single-database/new-server2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/quickstart-sql-github-actions/github-actions-run-sql.png b/articles/azure-sql/database/media/quickstart-sql-github-actions/github-actions-run-sql.png deleted file mode 100644 index 67eb4cb27c237..0000000000000 Binary files a/articles/azure-sql/database/media/quickstart-sql-github-actions/github-actions-run-sql.png and /dev/null differ diff --git a/articles/azure-sql/database/media/quota-increase-request/hardware-in-new-region.png b/articles/azure-sql/database/media/quota-increase-request/hardware-in-new-region.png deleted file mode 100644 index ae2f767cb7085..0000000000000 Binary files a/articles/azure-sql/database/media/quota-increase-request/hardware-in-new-region.png and /dev/null differ diff --git a/articles/azure-sql/database/media/quota-increase-request/help-plus-support.png b/articles/azure-sql/database/media/quota-increase-request/help-plus-support.png deleted file mode 100644 index 862fefac47afb..0000000000000 Binary files a/articles/azure-sql/database/media/quota-increase-request/help-plus-support.png and /dev/null differ diff --git a/articles/azure-sql/database/media/quota-increase-request/new-support-request.png b/articles/azure-sql/database/media/quota-increase-request/new-support-request.png deleted file mode 100644 index 5bcc68eefe1b2..0000000000000 Binary files a/articles/azure-sql/database/media/quota-increase-request/new-support-request.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/quota-increase-request/provide-details-link.png b/articles/azure-sql/database/media/quota-increase-request/provide-details-link.png deleted file mode 100644 index c49576adb5516..0000000000000 Binary files a/articles/azure-sql/database/media/quota-increase-request/provide-details-link.png and /dev/null differ diff --git a/articles/azure-sql/database/media/quota-increase-request/quota-details-dtus.png b/articles/azure-sql/database/media/quota-increase-request/quota-details-dtus.png deleted file mode 100644 index 02bd21d3ff990..0000000000000 Binary files a/articles/azure-sql/database/media/quota-increase-request/quota-details-dtus.png and /dev/null differ diff --git a/articles/azure-sql/database/media/quota-increase-request/quota-details-managed-instance.png b/articles/azure-sql/database/media/quota-increase-request/quota-details-managed-instance.png deleted file mode 100644 index 6d8355712313c..0000000000000 Binary files a/articles/azure-sql/database/media/quota-increase-request/quota-details-managed-instance.png and /dev/null differ diff --git a/articles/azure-sql/database/media/quota-increase-request/quota-details-servers.png b/articles/azure-sql/database/media/quota-increase-request/quota-details-servers.png deleted file mode 100644 index 9bf8d55a9fa8c..0000000000000 Binary files a/articles/azure-sql/database/media/quota-increase-request/quota-details-servers.png and /dev/null differ diff --git a/articles/azure-sql/database/media/quota-increase-request/quota-m-series.png b/articles/azure-sql/database/media/quota-increase-request/quota-m-series.png deleted file mode 100644 index 0d11207958ae6..0000000000000 Binary files a/articles/azure-sql/database/media/quota-increase-request/quota-m-series.png and /dev/null differ diff --git a/articles/azure-sql/database/media/quota-increase-request/quota-request.png b/articles/azure-sql/database/media/quota-increase-request/quota-request.png deleted file mode 100644 index 
f34415b4e004f..0000000000000 Binary files a/articles/azure-sql/database/media/quota-increase-request/quota-request.png and /dev/null differ diff --git a/articles/azure-sql/database/media/quota-increase-request/select-quota-issue-type.png b/articles/azure-sql/database/media/quota-increase-request/select-quota-issue-type.png deleted file mode 100644 index 9ba326ca13895..0000000000000 Binary files a/articles/azure-sql/database/media/quota-increase-request/select-quota-issue-type.png and /dev/null differ diff --git a/articles/azure-sql/database/media/quota-increase-request/select-quota-type.png b/articles/azure-sql/database/media/quota-increase-request/select-quota-type.png deleted file mode 100644 index bc4f42692ef4b..0000000000000 Binary files a/articles/azure-sql/database/media/quota-increase-request/select-quota-type.png and /dev/null differ diff --git a/articles/azure-sql/database/media/quota-increase-request/select-subscription-support-request.png b/articles/azure-sql/database/media/quota-increase-request/select-subscription-support-request.png deleted file mode 100644 index 3242f50a22496..0000000000000 Binary files a/articles/azure-sql/database/media/quota-increase-request/select-subscription-support-request.png and /dev/null differ diff --git a/articles/azure-sql/database/media/r-script-create-quickstart/r-data-generated-output.png b/articles/azure-sql/database/media/r-script-create-quickstart/r-data-generated-output.png deleted file mode 100644 index 89ddd71fee723..0000000000000 Binary files a/articles/azure-sql/database/media/r-script-create-quickstart/r-data-generated-output.png and /dev/null differ diff --git a/articles/azure-sql/database/media/r-script-create-quickstart/r-installed-packages.png b/articles/azure-sql/database/media/r-script-create-quickstart/r-installed-packages.png deleted file mode 100644 index cf1b71ba5ccab..0000000000000 Binary files a/articles/azure-sql/database/media/r-script-create-quickstart/r-installed-packages.png and /dev/null 
differ diff --git a/articles/azure-sql/database/media/r-script-create-quickstart/r-output-rtestdata.png b/articles/azure-sql/database/media/r-script-create-quickstart/r-output-rtestdata.png deleted file mode 100644 index 24bdc7838f72a..0000000000000 Binary files a/articles/azure-sql/database/media/r-script-create-quickstart/r-output-rtestdata.png and /dev/null differ diff --git a/articles/azure-sql/database/media/r-script-create-quickstart/select-rtestdata.png b/articles/azure-sql/database/media/r-script-create-quickstart/select-rtestdata.png deleted file mode 100644 index fb3cc61eb564f..0000000000000 Binary files a/articles/azure-sql/database/media/r-script-create-quickstart/select-rtestdata.png and /dev/null differ diff --git a/articles/azure-sql/database/media/r-train-score-model-create-quickstart/r-predict-stopping-distance-resultset.png b/articles/azure-sql/database/media/r-train-score-model-create-quickstart/r-predict-stopping-distance-resultset.png deleted file mode 100644 index 5ae9d4ea96de1..0000000000000 Binary files a/articles/azure-sql/database/media/r-train-score-model-create-quickstart/r-predict-stopping-distance-resultset.png and /dev/null differ diff --git a/articles/azure-sql/database/media/r-train-score-model-create-quickstart/r-train-model-with-additional-output.png b/articles/azure-sql/database/media/r-train-score-model-create-quickstart/r-train-model-with-additional-output.png deleted file mode 100644 index 0a70195faf036..0000000000000 Binary files a/articles/azure-sql/database/media/r-train-score-model-create-quickstart/r-train-model-with-additional-output.png and /dev/null differ diff --git a/articles/azure-sql/database/media/read-scale-out/business-critical-service-tier-read-scale-out.png b/articles/azure-sql/database/media/read-scale-out/business-critical-service-tier-read-scale-out.png deleted file mode 100644 index bd999bdf633d2..0000000000000 Binary files 
a/articles/azure-sql/database/media/read-scale-out/business-critical-service-tier-read-scale-out.png and /dev/null differ diff --git a/articles/azure-sql/database/media/recovery-using-backups/geo-restore-2.png b/articles/azure-sql/database/media/recovery-using-backups/geo-restore-2.png deleted file mode 100644 index b86324d7453f8..0000000000000 Binary files a/articles/azure-sql/database/media/recovery-using-backups/geo-restore-2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/recovery-using-backups/geo-restore-azure-sql-database-list-annotated.png b/articles/azure-sql/database/media/recovery-using-backups/geo-restore-azure-sql-database-list-annotated.png deleted file mode 100644 index 2c5a88be8df2b..0000000000000 Binary files a/articles/azure-sql/database/media/recovery-using-backups/geo-restore-azure-sql-database-list-annotated.png and /dev/null differ diff --git a/articles/azure-sql/database/media/recovery-using-backups/geo-restore-sql-managed-instance-list-annotated.png b/articles/azure-sql/database/media/recovery-using-backups/geo-restore-sql-managed-instance-list-annotated.png deleted file mode 100644 index 6adc33807ddb6..0000000000000 Binary files a/articles/azure-sql/database/media/recovery-using-backups/geo-restore-sql-managed-instance-list-annotated.png and /dev/null differ diff --git a/articles/azure-sql/database/media/recovery-using-backups/pitr-backup-managed-instance-annotated.png b/articles/azure-sql/database/media/recovery-using-backups/pitr-backup-managed-instance-annotated.png deleted file mode 100644 index 22317120b1ffe..0000000000000 Binary files a/articles/azure-sql/database/media/recovery-using-backups/pitr-backup-managed-instance-annotated.png and /dev/null differ diff --git a/articles/azure-sql/database/media/recovery-using-backups/pitr-backup-sql-database-annotated.png b/articles/azure-sql/database/media/recovery-using-backups/pitr-backup-sql-database-annotated.png deleted file mode 100644 index 
a38492154ad83..0000000000000 Binary files a/articles/azure-sql/database/media/recovery-using-backups/pitr-backup-sql-database-annotated.png and /dev/null differ diff --git a/articles/azure-sql/database/media/recovery-using-backups/restore-deleted-sql-database-annotated.png b/articles/azure-sql/database/media/recovery-using-backups/restore-deleted-sql-database-annotated.png deleted file mode 100644 index 623fd835d110f..0000000000000 Binary files a/articles/azure-sql/database/media/recovery-using-backups/restore-deleted-sql-database-annotated.png and /dev/null differ diff --git a/articles/azure-sql/database/media/recovery-using-backups/restore-deleted-sql-managed-instance-annotated.png b/articles/azure-sql/database/media/recovery-using-backups/restore-deleted-sql-managed-instance-annotated.png deleted file mode 100644 index 51be5ffc13dba..0000000000000 Binary files a/articles/azure-sql/database/media/recovery-using-backups/restore-deleted-sql-managed-instance-annotated.png and /dev/null differ diff --git a/articles/azure-sql/database/media/replication-to-sql-database/replication-to-sql-database.png b/articles/azure-sql/database/media/replication-to-sql-database/replication-to-sql-database.png deleted file mode 100644 index 1fbd4e70486bd..0000000000000 Binary files a/articles/azure-sql/database/media/replication-to-sql-database/replication-to-sql-database.png and /dev/null differ diff --git a/articles/azure-sql/database/media/reserved-capacity-overview/sql-reserved-vcores-purchase.png b/articles/azure-sql/database/media/reserved-capacity-overview/sql-reserved-vcores-purchase.png deleted file mode 100644 index 78abb7ffbd92e..0000000000000 Binary files a/articles/azure-sql/database/media/reserved-capacity-overview/sql-reserved-vcores-purchase.png and /dev/null differ diff --git a/articles/azure-sql/database/media/resource-health-to-troubleshoot-connectivity/sql-resource-health-available.jpg 
b/articles/azure-sql/database/media/resource-health-to-troubleshoot-connectivity/sql-resource-health-available.jpg deleted file mode 100644 index 86c7d82aa61d4..0000000000000 Binary files a/articles/azure-sql/database/media/resource-health-to-troubleshoot-connectivity/sql-resource-health-available.jpg and /dev/null differ diff --git a/articles/azure-sql/database/media/resource-health-to-troubleshoot-connectivity/sql-resource-health-degraded.jpg b/articles/azure-sql/database/media/resource-health-to-troubleshoot-connectivity/sql-resource-health-degraded.jpg deleted file mode 100644 index 7e2e38a319453..0000000000000 Binary files a/articles/azure-sql/database/media/resource-health-to-troubleshoot-connectivity/sql-resource-health-degraded.jpg and /dev/null differ diff --git a/articles/azure-sql/database/media/resource-health-to-troubleshoot-connectivity/sql-resource-health-overview.jpg b/articles/azure-sql/database/media/resource-health-to-troubleshoot-connectivity/sql-resource-health-overview.jpg deleted file mode 100644 index 68806c3927a87..0000000000000 Binary files a/articles/azure-sql/database/media/resource-health-to-troubleshoot-connectivity/sql-resource-health-overview.jpg and /dev/null differ diff --git a/articles/azure-sql/database/media/resource-health-to-troubleshoot-connectivity/sql-resource-health-unavailable.jpg b/articles/azure-sql/database/media/resource-health-to-troubleshoot-connectivity/sql-resource-health-unavailable.jpg deleted file mode 100644 index d8f40751039e6..0000000000000 Binary files a/articles/azure-sql/database/media/resource-health-to-troubleshoot-connectivity/sql-resource-health-unavailable.jpg and /dev/null differ diff --git a/articles/azure-sql/database/media/resource-health-to-troubleshoot-connectivity/sql-resource-health-unknown.jpg b/articles/azure-sql/database/media/resource-health-to-troubleshoot-connectivity/sql-resource-health-unknown.jpg deleted file mode 100644 index ccfaf9a6f5e86..0000000000000 Binary files 
a/articles/azure-sql/database/media/resource-health-to-troubleshoot-connectivity/sql-resource-health-unknown.jpg and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/contoso-geo-replication-after-failover.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/contoso-geo-replication-after-failover.png deleted file mode 100644 index 48d57e1f4f22e..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/contoso-geo-replication-after-failover.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/contoso-geo-replication.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/contoso-geo-replication.png deleted file mode 100644 index eba8497b30308..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/contoso-geo-replication.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/dr-in-progress-offline-contosoconcerthall.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/dr-in-progress-offline-contosoconcerthall.png deleted file mode 100644 index 063d44fee9458..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/dr-in-progress-offline-contosoconcerthall.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/events-hub-offlinemode.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/events-hub-offlinemode.png deleted file mode 100644 index cab02c9990e36..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/events-hub-offlinemode.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/events-hub-original-region.png 
b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/events-hub-original-region.png deleted file mode 100644 index efee592e05e05..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/events-hub-original-region.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/events-hub-repatriated.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/events-hub-repatriated.png deleted file mode 100644 index db1e591d16cb1..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/events-hub-repatriated.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/events-hub-with-hawthorn-hall.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/events-hub-with-hawthorn-hall.png deleted file mode 100644 index ad11c2d45d787..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/events-hub-with-hawthorn-hall.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/failover-process.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/failover-process.png deleted file mode 100644 index fe5d71b27ccfe..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/failover-process.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/hawthornhallevents.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/hawthornhallevents.png deleted file mode 100644 index 6bfcc0a6f1150..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/hawthornhallevents.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/recovery-architecture.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/recovery-architecture.png deleted file mode 100644 index 260776fdc55c7..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/recovery-architecture.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/repatriation-architecture.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/repatriation-architecture.png deleted file mode 100644 index 51ae66e97094d..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/repatriation-architecture.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/repatriation-process.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/repatriation-process.png deleted file mode 100644 index ba143ad400d6c..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/repatriation-process.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/replication-process.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/replication-process.png deleted file mode 100644 index d2dedaa766cf3..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/replication-process.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/resources-in-recovery-region.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/resources-in-recovery-region.png deleted file mode 100644 index dc6e6a67ca691..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/resources-in-recovery-region.png and 
/dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/sync-process.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/sync-process.png deleted file mode 100644 index 105d44c37c9aa..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-replication/sync-process.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/contoso-original-location.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/contoso-original-location.png deleted file mode 100644 index e51a5dee01362..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/contoso-original-location.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/contoso-recovery-location.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/contoso-recovery-location.png deleted file mode 100644 index 779afa37d6a78..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/contoso-recovery-location.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/dr-in-progress-offline-contosoconcerthall.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/dr-in-progress-offline-contosoconcerthall.png deleted file mode 100644 index 063d44fee9458..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/dr-in-progress-offline-contosoconcerthall.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/dr-in-progress.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/dr-in-progress.png deleted file mode 100644 index 9a813eef14df3..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/dr-in-progress.png and /dev/null differ 
diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/events-hub-original-region.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/events-hub-original-region.png deleted file mode 100644 index b42a2bd0f41d9..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/events-hub-original-region.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/events-hub-tenants-offline-in-recovery-region.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/events-hub-tenants-offline-in-recovery-region.png deleted file mode 100644 index 27a8f980add69..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/events-hub-tenants-offline-in-recovery-region.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/events-hub-with-hawthorn-hall.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/events-hub-with-hawthorn-hall.png deleted file mode 100644 index e65476d04cbb5..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/events-hub-with-hawthorn-hall.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/geo-restore-architecture.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/geo-restore-architecture.png deleted file mode 100644 index fa1f053f289f5..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/geo-restore-architecture.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/geo-restore-repatriation.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/geo-restore-repatriation.png deleted file mode 100644 index 52e7a406accb4..0000000000000 Binary files 
a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/geo-restore-repatriation.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/hawthorn-hall-provisioned-in-recovery-region.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/hawthorn-hall-provisioned-in-recovery-region.png deleted file mode 100644 index c356d68c4600a..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/hawthorn-hall-provisioned-in-recovery-region.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/resources-in-recovery-region.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/resources-in-recovery-region.png deleted file mode 100644 index 9389b3668f87c..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/resources-in-recovery-region.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/sync-process.png b/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/sync-process.png deleted file mode 100644 index 790cfac7fca9b..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-dr-geo-restore/sync-process.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/databases.png b/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/databases.png deleted file mode 100644 index 39f1e2c8fbf11..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/databases.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/events-hub.png b/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/events-hub.png deleted file mode 100644 index 529d259ebccf4..0000000000000 Binary files 
a/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/events-hub.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/fabrikam.png b/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/fabrikam.png deleted file mode 100644 index d103358137a9b..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/fabrikam.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/monitor-pool.png b/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/monitor-pool.png deleted file mode 100644 index a7165a4a777ab..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/monitor-pool.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/red-maple-racing.png b/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/red-maple-racing.png deleted file mode 100644 index cf69fd9957bbd..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/red-maple-racing.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/server.png b/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/server.png deleted file mode 100644 index 5328d63f55e6e..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/succeeded.png b/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/succeeded.png deleted file mode 100644 index b069a7f4df407..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-get-started-deploy/succeeded.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/click-overview.png b/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/click-overview.png deleted file mode 100644 index 538131aeb94f7..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/click-overview.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-database-metrics.png b/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-database-metrics.png deleted file mode 100644 index 44c3558bda218..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-database-metrics.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-database.png b/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-database.png deleted file mode 100644 index 55e7b0de33a75..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-database.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-open.png b/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-open.png deleted file mode 100644 index dd93153d409c2..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-open.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-overview.png b/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-overview.png deleted file mode 100644 index 2744ebf496375..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-overview.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-pool-metrics.png b/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-pool-metrics.png deleted file mode 100644 index 9a509799e8cfc..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-pool-metrics.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-resource-info.png b/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-resource-info.png deleted file mode 100644 index 497e1b88530eb..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-resource-info.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-server.png b/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-server.png deleted file mode 100644 index 9a3a23529656d..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-time-filter.png b/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-time-filter.png deleted file mode 100644 index a9ec669e5b74b..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-time-filter.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-workspace-oms-portal.png b/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-workspace-oms-portal.png deleted file mode 100644 index 4249afe059a5e..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/log-analytics-workspace-oms-portal.png and /dev/null differ diff 
--git a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/overview.png b/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/overview.png deleted file mode 100644 index 56ce606cbbcf8..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-log-analytics/overview.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-performance-monitoring/add-alert.png b/articles/azure-sql/database/media/saas-dbpertenant-performance-monitoring/add-alert.png deleted file mode 100644 index 044e43f7cd2ac..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-performance-monitoring/add-alert.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-performance-monitoring/alert-rule.png b/articles/azure-sql/database/media/saas-dbpertenant-performance-monitoring/alert-rule.png deleted file mode 100644 index b12baa63399e2..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-performance-monitoring/alert-rule.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-performance-monitoring/app-diagram.png b/articles/azure-sql/database/media/saas-dbpertenant-performance-monitoring/app-diagram.png deleted file mode 100644 index 53c159747456c..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-performance-monitoring/app-diagram.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-performance-monitoring/configure-pool.png b/articles/azure-sql/database/media/saas-dbpertenant-performance-monitoring/configure-pool.png deleted file mode 100644 index 347525ec6b051..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-performance-monitoring/configure-pool.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-performance-monitoring/database-utilization.png 
b/articles/azure-sql/database/media/saas-dbpertenant-performance-monitoring/database-utilization.png deleted file mode 100644 index 4e81f7f3d6954..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-performance-monitoring/database-utilization.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-performance-monitoring/pool1.png b/articles/azure-sql/database/media/saas-dbpertenant-performance-monitoring/pool1.png deleted file mode 100644 index 41985c20e30e2..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-performance-monitoring/pool1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-provision-and-catalog/breakpoint.png b/articles/azure-sql/database/media/saas-dbpertenant-provision-and-catalog/breakpoint.png deleted file mode 100644 index 76f618b37c8ce..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-provision-and-catalog/breakpoint.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-provision-and-catalog/database-list.png b/articles/azure-sql/database/media/saas-dbpertenant-provision-and-catalog/database-list.png deleted file mode 100644 index f7d435886d26b..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-provision-and-catalog/database-list.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-provision-and-catalog/debug.png b/articles/azure-sql/database/media/saas-dbpertenant-provision-and-catalog/debug.png deleted file mode 100644 index 838952f29984a..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-provision-and-catalog/debug.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-provision-and-catalog/new-tenant.png b/articles/azure-sql/database/media/saas-dbpertenant-provision-and-catalog/new-tenant.png deleted file mode 100644 index 
ff97f789fd115..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-provision-and-catalog/new-tenant.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-restore-single-tenant/events-hub.png b/articles/azure-sql/database/media/saas-dbpertenant-restore-single-tenant/events-hub.png deleted file mode 100644 index 3cb8fca878987..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-restore-single-tenant/events-hub.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-restore-single-tenant/last-event-deleted.png b/articles/azure-sql/database/media/saas-dbpertenant-restore-single-tenant/last-event-deleted.png deleted file mode 100644 index bf63f4ad26e20..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-restore-single-tenant/last-event-deleted.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-restore-single-tenant/last-event.png b/articles/azure-sql/database/media/saas-dbpertenant-restore-single-tenant/last-event.png deleted file mode 100644 index df16d81312abb..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-restore-single-tenant/last-event.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-dbpertenant-wingtip-app-overview/app-architecture.png b/articles/azure-sql/database/media/saas-dbpertenant-wingtip-app-overview/app-architecture.png deleted file mode 100644 index 8ceb66fa299a4..0000000000000 Binary files a/articles/azure-sql/database/media/saas-dbpertenant-wingtip-app-overview/app-architecture.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/adhocreportingpattern_shardedmultitenantdb.png b/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/adhocreportingpattern_shardedmultitenantdb.png deleted file mode 100644 index 51062dfc08a70..0000000000000 
Binary files a/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/adhocreportingpattern_shardedmultitenantdb.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/create-credential.png b/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/create-credential.png deleted file mode 100644 index 8b65c2a5bfe42..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/create-credential.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/create-external-data-source.png b/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/create-external-data-source.png deleted file mode 100644 index 701eeca8339fb..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/create-external-data-source.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/create-table.png b/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/create-table.png deleted file mode 100644 index 512c6f2f1a728..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/create-table.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/external-tables.png b/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/external-tables.png deleted file mode 100644 index 66ad34d2dc2ef..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/external-tables.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/query1-plan.png b/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/query1-plan.png deleted file mode 100644 index ae6a9182709a0..0000000000000 Binary files 
a/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/query1-plan.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/query2-plan.png b/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/query2-plan.png deleted file mode 100644 index 1fcf5907842bf..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/query2-plan.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/query3-plan.png b/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/query3-plan.png deleted file mode 100644 index 6dda753363b5d..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-adhoc-reporting/query3-plan.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/catalog-server.png b/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/catalog-server.png deleted file mode 100644 index a1a5e3afb5fc1..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/catalog-server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/deploy.png b/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/deploy.png deleted file mode 100644 index 86ee345fc5ddc..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/deploy.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/events-hub.png b/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/events-hub.png deleted file mode 100644 index 529d259ebccf4..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/events-hub.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/fabrikam.png b/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/fabrikam.png deleted file mode 100644 index d103358137a9b..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/fabrikam.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/monitor-salix.png b/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/monitor-salix.png deleted file mode 100644 index 6ad248a9a9ab9..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/monitor-salix.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/monitor-tenants1.png b/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/monitor-tenants1.png deleted file mode 100644 index b9dcdfe2bc7ae..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/monitor-tenants1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/red-maple-racing.png b/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/red-maple-racing.png deleted file mode 100644 index 06d2cd619adcf..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/red-maple-racing.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/resource-group.png b/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/resource-group.png deleted file mode 100644 index 363c7469337e2..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/resource-group.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/salix-salsa.png b/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/salix-salsa.png deleted file mode 100644 index bc909c7939dfc..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/salix-salsa.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/succeeded.png b/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/succeeded.png deleted file mode 100644 index b069a7f4df407..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/succeeded.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/tenants-server.png b/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/tenants-server.png deleted file mode 100644 index bba71c39bc47d..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-get-started-deploy/tenants-server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-performance-monitoring/add-alert.png b/articles/azure-sql/database/media/saas-multitenantdb-performance-monitoring/add-alert.png deleted file mode 100644 index dbf7faa2b1344..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-performance-monitoring/add-alert.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-performance-monitoring/multitenantdb.png b/articles/azure-sql/database/media/saas-multitenantdb-performance-monitoring/multitenantdb.png deleted file mode 100644 index 0e43bdd6c2927..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-performance-monitoring/multitenantdb.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-performance-monitoring/set-alert.png 
b/articles/azure-sql/database/media/saas-multitenantdb-performance-monitoring/set-alert.png deleted file mode 100644 index c055e6b831a0c..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-performance-monitoring/set-alert.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/breakpoint.png b/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/breakpoint.png deleted file mode 100644 index 0d17bfef816f3..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/breakpoint.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/breakpoint2.png b/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/breakpoint2.png deleted file mode 100644 index 9ea9f94a855da..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/breakpoint2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/bushwillow.png b/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/bushwillow.png deleted file mode 100644 index 2ed058a4ff5fa..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/bushwillow.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/databases.png b/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/databases.png deleted file mode 100644 index f4e4562fbc286..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/databases.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/debug.png b/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/debug.png deleted file 
mode 100644 index 37dd22d4a6431..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/debug.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/extendedtenantsview.png b/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/extendedtenantsview.png deleted file mode 100644 index 50b195def1bf5..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/extendedtenantsview.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/multitenantcatalog.png b/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/multitenantcatalog.png deleted file mode 100644 index 626d28a61e594..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/multitenantcatalog.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/sequoiasoccer.png b/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/sequoiasoccer.png deleted file mode 100644 index b39319cf3a05c..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/sequoiasoccer.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/ssmsconnection.png b/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/ssmsconnection.png deleted file mode 100644 index 6be8b0eabf7aa..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-provision-and-catalog/ssmsconnection.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-schema-management/schema-management.png b/articles/azure-sql/database/media/saas-multitenantdb-schema-management/schema-management.png deleted file mode 100644 index 
0b7685cc2fd83..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-schema-management/schema-management.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/architectureoverview.png b/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/architectureoverview.png deleted file mode 100644 index c757aa5b7ddc4..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/architectureoverview.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/avgticketsbyvenues.png b/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/avgticketsbyvenues.png deleted file mode 100644 index 275de5888f6b6..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/avgticketsbyvenues.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/databasesignin.png b/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/databasesignin.png deleted file mode 100644 index 2d0280dcdee93..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/databasesignin.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/eventsaletrends.png b/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/eventsaletrends.png deleted file mode 100644 index 9f940778e8caf..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/eventsaletrends.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/powerbisignin.png b/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/powerbisignin.png deleted file mode 100644 index 2f7a4b754f632..0000000000000 Binary files 
a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/powerbisignin.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/saledistributionperday.png b/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/saledistributionperday.png deleted file mode 100644 index 39b8079b88e39..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/saledistributionperday.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/saleversusdate.png b/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/saleversusdate.png deleted file mode 100644 index 0a737976309e7..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/saleversusdate.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/shreddingjob.png b/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/shreddingjob.png deleted file mode 100644 index ed696fa8e4481..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/shreddingjob.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/ssmssignin.png b/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/ssmssignin.png deleted file mode 100644 index 10eb7c3aed5a8..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/ssmssignin.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/starschema.png b/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/starschema.png deleted file mode 100644 index e6dc38744c043..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/starschema.png and /dev/null differ diff 
--git a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/tenantanalytics.png b/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/tenantanalytics.png deleted file mode 100644 index 7bae438ee4213..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/tenantanalytics.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/ticketextracts.png b/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/ticketextracts.png deleted file mode 100644 index a85c055a581c7..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/ticketextracts.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/totalticketsbyvenues.png b/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/totalticketsbyvenues.png deleted file mode 100644 index ba77e5554ebc7..0000000000000 Binary files a/articles/azure-sql/database/media/saas-multitenantdb-tenant-analytics/totalticketsbyvenues.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-standaloneapp-get-started-deploy/deploy.png b/articles/azure-sql/database/media/saas-standaloneapp-get-started-deploy/deploy.png deleted file mode 100644 index 86ee345fc5ddc..0000000000000 Binary files a/articles/azure-sql/database/media/saas-standaloneapp-get-started-deploy/deploy.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-standaloneapp-get-started-deploy/fabrikam.png b/articles/azure-sql/database/media/saas-standaloneapp-get-started-deploy/fabrikam.png deleted file mode 100644 index d103358137a9b..0000000000000 Binary files a/articles/azure-sql/database/media/saas-standaloneapp-get-started-deploy/fabrikam.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-standaloneapp-provision-and-catalog/breakpoint.png 
b/articles/azure-sql/database/media/saas-standaloneapp-provision-and-catalog/breakpoint.png deleted file mode 100644 index 3cb16cc188066..0000000000000 Binary files a/articles/azure-sql/database/media/saas-standaloneapp-provision-and-catalog/breakpoint.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-standaloneapp-provision-and-catalog/data-explorer-tenantsextended.png b/articles/azure-sql/database/media/saas-standaloneapp-provision-and-catalog/data-explorer-tenantsextended.png deleted file mode 100644 index 3e1addae5d998..0000000000000 Binary files a/articles/azure-sql/database/media/saas-standaloneapp-provision-and-catalog/data-explorer-tenantsextended.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-standaloneapp-provision-and-catalog/redmapleracing-resources.png b/articles/azure-sql/database/media/saas-standaloneapp-provision-and-catalog/redmapleracing-resources.png deleted file mode 100644 index 0e920dafa31d1..0000000000000 Binary files a/articles/azure-sql/database/media/saas-standaloneapp-provision-and-catalog/redmapleracing-resources.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-standaloneapp-provision-and-catalog/redmapleracing.png b/articles/azure-sql/database/media/saas-standaloneapp-provision-and-catalog/redmapleracing.png deleted file mode 100644 index 8b689fc758786..0000000000000 Binary files a/articles/azure-sql/database/media/saas-standaloneapp-provision-and-catalog/redmapleracing.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-standaloneapp-provision-and-catalog/standalone-app-pattern-with-catalog.png b/articles/azure-sql/database/media/saas-standaloneapp-provision-and-catalog/standalone-app-pattern-with-catalog.png deleted file mode 100644 index 69ba0f6a74d23..0000000000000 Binary files a/articles/azure-sql/database/media/saas-standaloneapp-provision-and-catalog/standalone-app-pattern-with-catalog.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/saas-standaloneapp-provision-and-catalog/standalone-app-pattern.png b/articles/azure-sql/database/media/saas-standaloneapp-provision-and-catalog/standalone-app-pattern.png deleted file mode 100644 index 8d691bcc14a7d..0000000000000 Binary files a/articles/azure-sql/database/media/saas-standaloneapp-provision-and-catalog/standalone-app-pattern.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-app-design-patterns/saas-multi-tenant-app-database-per-tenant-13.png b/articles/azure-sql/database/media/saas-tenancy-app-design-patterns/saas-multi-tenant-app-database-per-tenant-13.png deleted file mode 100644 index c15c43eabc20e..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-app-design-patterns/saas-multi-tenant-app-database-per-tenant-13.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-app-design-patterns/saas-multi-tenant-app-database-per-tenant-pool-15.png b/articles/azure-sql/database/media/saas-tenancy-app-design-patterns/saas-multi-tenant-app-database-per-tenant-pool-15.png deleted file mode 100644 index 8d1f3fea1beb6..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-app-design-patterns/saas-multi-tenant-app-database-per-tenant-pool-15.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-app-design-patterns/saas-multi-tenant-app-sharded-multi-tenant-databases-17.png b/articles/azure-sql/database/media/saas-tenancy-app-design-patterns/saas-multi-tenant-app-sharded-multi-tenant-databases-17.png deleted file mode 100644 index 48662fea56be4..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-app-design-patterns/saas-multi-tenant-app-sharded-multi-tenant-databases-17.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-app-design-patterns/saas-standalone-app-single-tenant-database-11.png 
b/articles/azure-sql/database/media/saas-tenancy-app-design-patterns/saas-standalone-app-single-tenant-database-11.png deleted file mode 100644 index af78554d7f269..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-app-design-patterns/saas-standalone-app-single-tenant-database-11.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/create-credential.png b/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/create-credential.png deleted file mode 100644 index 8b65c2a5bfe42..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/create-credential.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/create-external-data-source.png b/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/create-external-data-source.png deleted file mode 100644 index 701eeca8339fb..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/create-external-data-source.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/create-table.png b/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/create-table.png deleted file mode 100644 index 512c6f2f1a728..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/create-table.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/cross-tenant-distributed-query.png b/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/cross-tenant-distributed-query.png deleted file mode 100644 index 3a9cdc4927ca6..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/cross-tenant-distributed-query.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/external-tables.png b/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/external-tables.png deleted file mode 100644 index 66ad34d2dc2ef..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/external-tables.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/query1-plan.png b/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/query1-plan.png deleted file mode 100644 index ae6a9182709a0..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/query1-plan.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/query2-plan.png b/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/query2-plan.png deleted file mode 100644 index 1fcf5907842bf..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/query2-plan.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/query3-plan.png b/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/query3-plan.png deleted file mode 100644 index 6dda753363b5d..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/query3-plan.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/views.png b/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/views.png deleted file mode 100644 index 1b6f3b773b7d3..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-cross-tenant-reporting/views.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-elastic-tools-multi-tenant-row-level-security/blogging-app.png 
b/articles/azure-sql/database/media/saas-tenancy-elastic-tools-multi-tenant-row-level-security/blogging-app.png deleted file mode 100644 index c1024ae8ad42c..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-elastic-tools-multi-tenant-row-level-security/blogging-app.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-schema-management/schema-management-dpt.png b/articles/azure-sql/database/media/saas-tenancy-schema-management/schema-management-dpt.png deleted file mode 100644 index 2e98d4296a132..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-schema-management/schema-management-dpt.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf-data-factory-portal.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf-data-factory-portal.png deleted file mode 100644 index 04e5b981c0560..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf-data-factory-portal.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf-data-factory.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf-data-factory.png deleted file mode 100644 index cc3d9c89f0859..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf-data-factory.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf-staging-storage.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf-staging-storage.png deleted file mode 100644 index b320ee6168d86..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf-staging-storage.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf_author_tab.jpg 
b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf_author_tab.jpg deleted file mode 100644 index 9400e5cc0a867..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf_author_tab.jpg and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf_monitoring.jpg b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf_monitoring.jpg deleted file mode 100644 index de1b0f85b3176..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf_monitoring.jpg and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf_overview.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf_overview.png deleted file mode 100644 index a270dc8ea6a55..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf_overview.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf_trigger.jpg b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf_trigger.jpg deleted file mode 100644 index d482455ea4638..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/adf_trigger.jpg and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/avgticketsbyvenues-dw.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/avgticketsbyvenues-dw.png deleted file mode 100644 index d2cae54e7b9d6..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/avgticketsbyvenues-dw.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/databasesignin.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/databasesignin.png deleted file mode 100644 index 
01c7dfcd6ec32..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/databasesignin.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/dwtables.jpg b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/dwtables.jpg deleted file mode 100644 index 47645e23b2718..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/dwtables.jpg and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/eventsaletrends.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/eventsaletrends.png deleted file mode 100644 index 9f940778e8caf..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/eventsaletrends.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/linkedservices.jpg b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/linkedservices.jpg deleted file mode 100644 index dd9501b1a55e8..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/linkedservices.jpg and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/loadingpattern.jpg b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/loadingpattern.jpg deleted file mode 100644 index d1f0fab27e3bf..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/loadingpattern.jpg and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/powerbisignin.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/powerbisignin.png deleted file mode 100644 index 2f7a4b754f632..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/powerbisignin.png and /dev/null differ diff 
--git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/saledistributionperday-dw.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/saledistributionperday-dw.png deleted file mode 100644 index b1c71a1fb7e4c..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/saledistributionperday-dw.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/saleversusdate-dw.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/saleversusdate-dw.png deleted file mode 100644 index 55021f9e4ae1e..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/saleversusdate-dw.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/ssmssignin.jpg b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/ssmssignin.jpg deleted file mode 100644 index 3aa1935691a11..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/ssmssignin.jpg and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/starschematables.jpg b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/starschematables.jpg deleted file mode 100644 index e24767f615589..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/starschematables.jpg and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/totalticketsbyvenues-dw.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/totalticketsbyvenues-dw.png deleted file mode 100644 index 49b02dfc492df..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics-adf/totalticketsbyvenues-dw.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/architectureoverview.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/architectureoverview.png deleted file mode 100644 index bb4e2287b2131..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/architectureoverview.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/avgticketsbyvenues.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/avgticketsbyvenues.png deleted file mode 100644 index a8f1de52c4d98..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/avgticketsbyvenues.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/databasesignin.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/databasesignin.png deleted file mode 100644 index 01c7dfcd6ec32..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/databasesignin.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/eventsaletrends.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/eventsaletrends.png deleted file mode 100644 index 9f940778e8caf..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/eventsaletrends.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/powerbisignin.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/powerbisignin.png deleted file mode 100644 index 2f7a4b754f632..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/powerbisignin.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/saledistributionperday.png 
b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/saledistributionperday.png deleted file mode 100644 index 9907a1e0abb12..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/saledistributionperday.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/saleversusdate.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/saleversusdate.png deleted file mode 100644 index c65d2cd276e6b..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/saleversusdate.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/shreddingjob.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/shreddingjob.png deleted file mode 100644 index c2f8a545d35a4..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/shreddingjob.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/ssmssignin.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/ssmssignin.png deleted file mode 100644 index 99f052bc342e7..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/ssmssignin.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/starschema.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/starschema.png deleted file mode 100644 index e6dc38744c043..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/starschema.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/tenantanalytics.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/tenantanalytics.png deleted file mode 100644 index 7bae438ee4213..0000000000000 Binary files 
a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/tenantanalytics.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/ticketextracts.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/ticketextracts.png deleted file mode 100644 index 2e5ac24dcdf19..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/ticketextracts.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/totalticketsbyvenues.png b/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/totalticketsbyvenues.png deleted file mode 100644 index a18a45ba38ec5..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-tenant-analytics/totalticketsbyvenues.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00003-brk3120-welcome-myob-design-saas-app-sql-db.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00003-brk3120-welcome-myob-design-saas-app-sql-db.png deleted file mode 100644 index 3a9176adc9a9a..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00003-brk3120-welcome-myob-design-saas-app-sql-db.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00311-session-objectives-takeaway.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00311-session-objectives-takeaway.png deleted file mode 100644 index 12fdd53dd19aa..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00311-session-objectives-takeaway.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00417-agenda-app-management-models-patterns.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00417-agenda-app-management-models-patterns.png deleted file mode 100644 index 286a0c9769e8f..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00417-agenda-app-management-models-patterns.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00505-wingtip-saas-app-mt-web.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00505-wingtip-saas-app-mt-web.png deleted file mode 100644 index 5ac8b75ed7cf8..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00505-wingtip-saas-app-mt-web.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00555-app-form-contoso-concert-hall-night-opera.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00555-app-form-contoso-concert-hall-night-opera.png deleted file mode 100644 index f7d414e210b33..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00555-app-form-contoso-concert-hall-night-opera.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00931-saas-data-management-concerns.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00931-saas-data-management-concerns.png deleted file mode 100644 index 
9361244b3ebbb..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00931-saas-data-management-concerns.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min01159-db-models-multi-tenant-saas-apps.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min01159-db-models-multi-tenant-saas-apps.png deleted file mode 100644 index d8ace81affbcf..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min01159-db-models-multi-tenant-saas-apps.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min01301-hybrib-model-blends-benefits-mt-st.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min01301-hybrib-model-blends-benefits-mt-st.png deleted file mode 100644 index c3e07b823fb11..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min01301-hybrib-model-blends-benefits-mt-st.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min01644-st-mt-pros-cons.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min01644-st-mt-pros-cons.png deleted file mode 100644 index 26a4f2034eb25..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min01644-st-mt-pros-cons.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min01936-pools-cost-effective-unpredictable-workloads.png 
b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min01936-pools-cost-effective-unpredictable-workloads.png deleted file mode 100644 index 7a3f2c65079b8..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min01936-pools-cost-effective-unpredictable-workloads.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min02008-demo-st-hybrid.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min02008-demo-st-hybrid.png deleted file mode 100644 index f59cfc1c72ef2..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min02008-demo-st-hybrid.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min02029-app-form-dogwwod-dojo.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min02029-app-form-dogwwod-dojo.png deleted file mode 100644 index f46ecb736d5ec..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min02029-app-form-dogwwod-dojo.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min02854-myob-no-dba.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min02854-myob-no-dba.png deleted file mode 100644 index f93ced2085db4..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min02854-myob-no-dba.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min02940-myob-elastic-pool-usage-example.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min02940-myob-elastic-pool-usage-example.png deleted file mode 100644 index 06d27cfa23f1e..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min02940-myob-elastic-pool-usage-example.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min03136-myob-isv-saas-patterns-design-scale.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min03136-myob-isv-saas-patterns-design-scale.png deleted file mode 100644 index c125e1dd49815..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min03136-myob-isv-saas-patterns-design-scale.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min04315-patterns-compose-into-e2e-saas-scenario-st-mt.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min04315-patterns-compose-into-e2e-saas-scenario-st-mt.png deleted file mode 100644 index a71054a448e85..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min04315-patterns-compose-into-e2e-saas-scenario-st-mt.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min04733-canonical-hybrid-mt-saas-app.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min04733-canonical-hybrid-mt-saas-app.png deleted 
file mode 100644 index f9b354bc3db64..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min04733-canonical-hybrid-mt-saas-app.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min04810-saas-sample-app-descr-of-modules-links.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min04810-saas-sample-app-descr-of-modules-links.png deleted file mode 100644 index dfd2c70296ae3..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min04810-saas-sample-app-descr-of-modules-links.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min04910-scenarios-patterns-explored-tutorials.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min04910-scenarios-patterns-explored-tutorials.png deleted file mode 100644 index 3156dcd7fb645..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min04910-scenarios-patterns-explored-tutorials.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05018-demo-saas-tutorials-github-repo.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05018-demo-saas-tutorials-github-repo.png deleted file mode 100644 index 899ed3fe3b513..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05018-demo-saas-tutorials-github-repo.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05038-github-repo-wingtipsaas.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05038-github-repo-wingtipsaas.png deleted file mode 100644 index 101ab0a3942e0..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05038-github-repo-wingtipsaas.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05620-exploring-patterns-tutorials.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05620-exploring-patterns-tutorials.png deleted file mode 100644 index 7fde6f312745f..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05620-exploring-patterns-tutorials.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05744-provisioning-tenants-connecting-run-time-1.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05744-provisioning-tenants-connecting-run-time-1.png deleted file mode 100644 index a1f36d7f215c8..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05744-provisioning-tenants-connecting-run-time-1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05858-provisioning-tenants-connecting-run-time-2.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05858-provisioning-tenants-connecting-run-time-2.png deleted file mode 100644 index 
fa9fd4555549e..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05858-provisioning-tenants-connecting-run-time-2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05943-demo-management-scripts-provisioning-st.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05943-demo-management-scripts-provisioning-st.png deleted file mode 100644 index 3f76a1f2cc5f6..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05943-demo-management-scripts-provisioning-st.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10002-powershell-code.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10002-powershell-code.png deleted file mode 100644 index 6af7b0aaf9b9e..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10002-powershell-code.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10330-ssms-tenantcatalog.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10330-ssms-tenantcatalog.png deleted file mode 100644 index ad4c8891463dc..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10330-ssms-tenantcatalog.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10436-managing-unpredictable-tenant-workloads.png 
b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10436-managing-unpredictable-tenant-workloads.png deleted file mode 100644 index 4d51957c39762..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10436-managing-unpredictable-tenant-workloads.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10639-elastic-pool-monitoring.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10639-elastic-pool-monitoring.png deleted file mode 100644 index ff985ea7c5146..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10639-elastic-pool-monitoring.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10942-schema-management-scale.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10942-schema-management-scale.png deleted file mode 100644 index 4733d42603a0d..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10942-schema-management-scale.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11033-schema-manage-1000s-dbs-one.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11033-schema-manage-1000s-dbs-one.png deleted file mode 100644 index 5ed4931d4efaa..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11033-schema-manage-1000s-dbs-one.png and /dev/null differ diff 
--git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11221-distributed-query-all-tenants-asif-single-db.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11221-distributed-query-all-tenants-asif-single-db.png deleted file mode 100644 index 90e2a5b65fbf6..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11221-distributed-query-all-tenants-asif-single-db.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11232-demo-ticket-generation-distributed-query.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11232-demo-ticket-generation-distributed-query.png deleted file mode 100644 index a711fee7ead94..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11232-demo-ticket-generation-distributed-query.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11246-tsql-adhoc-analystics-db-elastic-query.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11246-tsql-adhoc-analystics-db-elastic-query.png deleted file mode 100644 index 3a798e1a3c538..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11246-tsql-adhoc-analystics-db-elastic-query.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11632-extract-tenant-data-analytics-db-dw.png 
b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11632-extract-tenant-data-analytics-db-dw.png deleted file mode 100644 index f205ae566621d..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11632-extract-tenant-data-analytics-db-dw.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11648-graph-daily-sale-contoso-concert-hall.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11648-graph-daily-sale-contoso-concert-hall.png deleted file mode 100644 index 0f87cfe214648..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11648-graph-daily-sale-contoso-concert-hall.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11952-wrap-call-action-saasfeedback.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11952-wrap-call-action-saasfeedback.png deleted file mode 100644 index 0f80d825bd88e..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11952-wrap-call-action-saasfeedback.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min12042-resources-blog-github-tutorials-get-started.png b/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min12042-resources-blog-github-tutorials-get-started.png deleted file mode 100644 index 657ddeb202d63..0000000000000 Binary files 
a/articles/azure-sql/database/media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min12042-resources-blog-github-tutorials-get-started.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-welcome-wingtip-tickets-app/three-tenancy-patterns.png b/articles/azure-sql/database/media/saas-tenancy-welcome-wingtip-tickets-app/three-tenancy-patterns.png deleted file mode 100644 index 869640377b50b..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-welcome-wingtip-tickets-app/three-tenancy-patterns.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-wingtip-app-guidance-tips/connect.png b/articles/azure-sql/database/media/saas-tenancy-wingtip-app-guidance-tips/connect.png deleted file mode 100644 index b71e8ea821a1b..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-wingtip-app-guidance-tips/connect.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-wingtip-app-guidance-tips/object-explorer.png b/articles/azure-sql/database/media/saas-tenancy-wingtip-app-guidance-tips/object-explorer.png deleted file mode 100644 index d6bfe7e7f2068..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-wingtip-app-guidance-tips/object-explorer.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-wingtip-app-guidance-tips/tenants1-connect.png b/articles/azure-sql/database/media/saas-tenancy-wingtip-app-guidance-tips/tenants1-connect.png deleted file mode 100644 index 210e45baaf1a4..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-wingtip-app-guidance-tips/tenants1-connect.png and /dev/null differ diff --git a/articles/azure-sql/database/media/saas-tenancy-wingtip-app-guidance-tips/three-tenancy-patterns.png b/articles/azure-sql/database/media/saas-tenancy-wingtip-app-guidance-tips/three-tenancy-patterns.png deleted file mode 100644 index 
869640377b50b..0000000000000 Binary files a/articles/azure-sql/database/media/saas-tenancy-wingtip-app-guidance-tips/three-tenancy-patterns.png and /dev/null differ diff --git a/articles/azure-sql/database/media/scale-resources/scale-performance.svg b/articles/azure-sql/database/media/scale-resources/scale-performance.svg deleted file mode 100644 index 9353c383f5ddc..0000000000000 --- a/articles/azure-sql/database/media/scale-resources/scale-performance.svg +++ /dev/null @@ -1,49 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/articles/azure-sql/database/media/scale-resources/single_db_dtus.png b/articles/azure-sql/database/media/scale-resources/single_db_dtus.png deleted file mode 100644 index ce7468aaa39d7..0000000000000 Binary files a/articles/azure-sql/database/media/scale-resources/single_db_dtus.png and /dev/null differ diff --git a/articles/azure-sql/database/media/secure-database-tutorial/8choose-ad.png b/articles/azure-sql/database/media/secure-database-tutorial/8choose-ad.png deleted file mode 100644 index 1ca5341832f5d..0000000000000 Binary files a/articles/azure-sql/database/media/secure-database-tutorial/8choose-ad.png and /dev/null differ diff --git a/articles/azure-sql/database/media/secure-database-tutorial/admin-select.png b/articles/azure-sql/database/media/secure-database-tutorial/admin-select.png deleted file mode 100644 index 39609740ead73..0000000000000 Binary files a/articles/azure-sql/database/media/secure-database-tutorial/admin-select.png and /dev/null differ diff --git a/articles/azure-sql/database/media/secure-database-tutorial/admin-settings.png b/articles/azure-sql/database/media/secure-database-tutorial/admin-settings.png deleted file mode 100644 index 871c25560b19e..0000000000000 Binary files a/articles/azure-sql/database/media/secure-database-tutorial/admin-settings.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/secure-database-tutorial/audit-records.png b/articles/azure-sql/database/media/secure-database-tutorial/audit-records.png deleted file mode 100644 index 74608127e3994..0000000000000 Binary files a/articles/azure-sql/database/media/secure-database-tutorial/audit-records.png and /dev/null differ diff --git a/articles/azure-sql/database/media/secure-database-tutorial/audit-settings.png b/articles/azure-sql/database/media/secure-database-tutorial/audit-settings.png deleted file mode 100644 index 69480709c9640..0000000000000 Binary files a/articles/azure-sql/database/media/secure-database-tutorial/audit-settings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/secure-database-tutorial/connection.png b/articles/azure-sql/database/media/secure-database-tutorial/connection.png deleted file mode 100644 index c2146e0dd4fbc..0000000000000 Binary files a/articles/azure-sql/database/media/secure-database-tutorial/connection.png and /dev/null differ diff --git a/articles/azure-sql/database/media/secure-database-tutorial/encryption-settings.png b/articles/azure-sql/database/media/secure-database-tutorial/encryption-settings.png deleted file mode 100644 index 2211aba01bb14..0000000000000 Binary files a/articles/azure-sql/database/media/secure-database-tutorial/encryption-settings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/secure-database-tutorial/mask-query.png b/articles/azure-sql/database/media/secure-database-tutorial/mask-query.png deleted file mode 100644 index ec8b9adff1576..0000000000000 Binary files a/articles/azure-sql/database/media/secure-database-tutorial/mask-query.png and /dev/null differ diff --git a/articles/azure-sql/database/media/secure-database-tutorial/mask-settings.png b/articles/azure-sql/database/media/secure-database-tutorial/mask-settings.png deleted file mode 100644 index 144c6d5693d5d..0000000000000 Binary files 
a/articles/azure-sql/database/media/secure-database-tutorial/mask-settings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/secure-database-tutorial/server-firewall-rule2.png b/articles/azure-sql/database/media/secure-database-tutorial/server-firewall-rule2.png deleted file mode 100644 index bc968b506b8b8..0000000000000 Binary files a/articles/azure-sql/database/media/secure-database-tutorial/server-firewall-rule2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/secure-database-tutorial/server-name.png b/articles/azure-sql/database/media/secure-database-tutorial/server-name.png deleted file mode 100644 index 96ffa51b6ccbe..0000000000000 Binary files a/articles/azure-sql/database/media/secure-database-tutorial/server-name.png and /dev/null differ diff --git a/articles/azure-sql/database/media/secure-database-tutorial/threat-email.png b/articles/azure-sql/database/media/secure-database-tutorial/threat-email.png deleted file mode 100644 index 08fe1cbfbe508..0000000000000 Binary files a/articles/azure-sql/database/media/secure-database-tutorial/threat-email.png and /dev/null differ diff --git a/articles/azure-sql/database/media/secure-database-tutorial/threat-settings.png b/articles/azure-sql/database/media/secure-database-tutorial/threat-settings.png deleted file mode 100644 index cfe129496f1d5..0000000000000 Binary files a/articles/azure-sql/database/media/secure-database-tutorial/threat-settings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/secure-database-tutorial/threat-status.png b/articles/azure-sql/database/media/secure-database-tutorial/threat-status.png deleted file mode 100644 index a02d91156ef62..0000000000000 Binary files a/articles/azure-sql/database/media/secure-database-tutorial/threat-status.png and /dev/null differ diff --git a/articles/azure-sql/database/media/security-overview/azure-database-ae.png b/articles/azure-sql/database/media/security-overview/azure-database-ae.png 
deleted file mode 100644 index 0d5f918a7fb02..0000000000000 Binary files a/articles/azure-sql/database/media/security-overview/azure-database-ae.png and /dev/null differ diff --git a/articles/azure-sql/database/media/security-overview/azure-database-ddm.png b/articles/azure-sql/database/media/security-overview/azure-database-ddm.png deleted file mode 100644 index 0a0d3e9bb3e3b..0000000000000 Binary files a/articles/azure-sql/database/media/security-overview/azure-database-ddm.png and /dev/null differ diff --git a/articles/azure-sql/database/media/security-overview/azure-database-rls.png b/articles/azure-sql/database/media/security-overview/azure-database-rls.png deleted file mode 100644 index 318f3404e6fc7..0000000000000 Binary files a/articles/azure-sql/database/media/security-overview/azure-database-rls.png and /dev/null differ diff --git a/articles/azure-sql/database/media/security-overview/azure-database-td.jpg b/articles/azure-sql/database/media/security-overview/azure-database-td.jpg deleted file mode 100644 index ad57e1531b4ac..0000000000000 Binary files a/articles/azure-sql/database/media/security-overview/azure-database-td.jpg and /dev/null differ diff --git a/articles/azure-sql/database/media/security-overview/sql-security-layer.png b/articles/azure-sql/database/media/security-overview/sql-security-layer.png deleted file mode 100644 index 61507d21d649c..0000000000000 Binary files a/articles/azure-sql/database/media/security-overview/sql-security-layer.png and /dev/null differ diff --git a/articles/azure-sql/database/media/serverless-tier-overview/serverless-billing.png b/articles/azure-sql/database/media/serverless-tier-overview/serverless-billing.png deleted file mode 100644 index 0a147639630c8..0000000000000 Binary files a/articles/azure-sql/database/media/serverless-tier-overview/serverless-billing.png and /dev/null differ diff --git a/articles/azure-sql/database/media/service-tier-business-critical/business-critical-service-tier.png 
b/articles/azure-sql/database/media/service-tier-business-critical/business-critical-service-tier.png deleted file mode 100644 index a70e2b6ffba1f..0000000000000 Binary files a/articles/azure-sql/database/media/service-tier-business-critical/business-critical-service-tier.png and /dev/null differ diff --git a/articles/azure-sql/database/media/service-tier-general-purpose/general-purpose-service-tier.png b/articles/azure-sql/database/media/service-tier-general-purpose/general-purpose-service-tier.png deleted file mode 100644 index b876ac6806663..0000000000000 Binary files a/articles/azure-sql/database/media/service-tier-general-purpose/general-purpose-service-tier.png and /dev/null differ diff --git a/articles/azure-sql/database/media/service-tier-hyperscale/hyperscale-architecture.png b/articles/azure-sql/database/media/service-tier-hyperscale/hyperscale-architecture.png deleted file mode 100644 index 2e82205f41747..0000000000000 Binary files a/articles/azure-sql/database/media/service-tier-hyperscale/hyperscale-architecture.png and /dev/null differ diff --git a/articles/azure-sql/database/media/service-tiers-vcore/change-hardware.png b/articles/azure-sql/database/media/service-tiers-vcore/change-hardware.png deleted file mode 100644 index fc22a77d8f53f..0000000000000 Binary files a/articles/azure-sql/database/media/service-tiers-vcore/change-hardware.png and /dev/null differ diff --git a/articles/azure-sql/database/media/service-tiers-vcore/change-managed-instance-hardware.png b/articles/azure-sql/database/media/service-tiers-vcore/change-managed-instance-hardware.png deleted file mode 100644 index 0c33bc533ee6d..0000000000000 Binary files a/articles/azure-sql/database/media/service-tiers-vcore/change-managed-instance-hardware.png and /dev/null differ diff --git a/articles/azure-sql/database/media/service-tiers-vcore/configure-managed-instance.png b/articles/azure-sql/database/media/service-tiers-vcore/configure-managed-instance.png deleted file mode 100644 index 
5522c1300bd44..0000000000000 Binary files a/articles/azure-sql/database/media/service-tiers-vcore/configure-managed-instance.png and /dev/null differ diff --git a/articles/azure-sql/database/media/service-tiers-vcore/configure-sql-database.png b/articles/azure-sql/database/media/service-tiers-vcore/configure-sql-database.png deleted file mode 100644 index 8855c01b59525..0000000000000 Binary files a/articles/azure-sql/database/media/service-tiers-vcore/configure-sql-database.png and /dev/null differ diff --git a/articles/azure-sql/database/media/service-tiers-vcore/request-dc-series.png b/articles/azure-sql/database/media/service-tiers-vcore/request-dc-series.png deleted file mode 100644 index 6b83cb1c2079b..0000000000000 Binary files a/articles/azure-sql/database/media/service-tiers-vcore/request-dc-series.png and /dev/null differ diff --git a/articles/azure-sql/database/media/service-tiers-vcore/select-hardware.png b/articles/azure-sql/database/media/service-tiers-vcore/select-hardware.png deleted file mode 100644 index c007e882c7b54..0000000000000 Binary files a/articles/azure-sql/database/media/service-tiers-vcore/select-hardware.png and /dev/null differ diff --git a/articles/azure-sql/database/media/single-database-create-quickstart/additional-settings.png b/articles/azure-sql/database/media/single-database-create-quickstart/additional-settings.png deleted file mode 100644 index d4ceaa315663b..0000000000000 Binary files a/articles/azure-sql/database/media/single-database-create-quickstart/additional-settings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/single-database-create-quickstart/cloudshell.png b/articles/azure-sql/database/media/single-database-create-quickstart/cloudshell.png deleted file mode 100644 index 3a3f079690715..0000000000000 Binary files a/articles/azure-sql/database/media/single-database-create-quickstart/cloudshell.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/single-database-create-quickstart/configure-database.png b/articles/azure-sql/database/media/single-database-create-quickstart/configure-database.png deleted file mode 100644 index 140d8c921cb93..0000000000000 Binary files a/articles/azure-sql/database/media/single-database-create-quickstart/configure-database.png and /dev/null differ diff --git a/articles/azure-sql/database/media/single-database-create-quickstart/manage-connectivity-flowchart.png b/articles/azure-sql/database/media/single-database-create-quickstart/manage-connectivity-flowchart.png deleted file mode 100644 index b3d9805c44071..0000000000000 Binary files a/articles/azure-sql/database/media/single-database-create-quickstart/manage-connectivity-flowchart.png and /dev/null differ diff --git a/articles/azure-sql/database/media/single-database-create-quickstart/manage-connectivity-settings.png b/articles/azure-sql/database/media/single-database-create-quickstart/manage-connectivity-settings.png deleted file mode 100644 index c9ba8ad0b7ed3..0000000000000 Binary files a/articles/azure-sql/database/media/single-database-create-quickstart/manage-connectivity-settings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/single-database-create-quickstart/networking.png b/articles/azure-sql/database/media/single-database-create-quickstart/networking.png deleted file mode 100644 index 6927f5663eb42..0000000000000 Binary files a/articles/azure-sql/database/media/single-database-create-quickstart/networking.png and /dev/null differ diff --git a/articles/azure-sql/database/media/single-database-create-quickstart/new-sql-database-basics.png b/articles/azure-sql/database/media/single-database-create-quickstart/new-sql-database-basics.png deleted file mode 100644 index ecb31df6691ef..0000000000000 Binary files a/articles/azure-sql/database/media/single-database-create-quickstart/new-sql-database-basics.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/single-database-create-quickstart/query-editor-login.png b/articles/azure-sql/database/media/single-database-create-quickstart/query-editor-login.png deleted file mode 100644 index 780d5cdb59a52..0000000000000 Binary files a/articles/azure-sql/database/media/single-database-create-quickstart/query-editor-login.png and /dev/null differ diff --git a/articles/azure-sql/database/media/single-database-create-quickstart/query-editor-results.png b/articles/azure-sql/database/media/single-database-create-quickstart/query-editor-results.png deleted file mode 100644 index 37b5de7f0cf2e..0000000000000 Binary files a/articles/azure-sql/database/media/single-database-create-quickstart/query-editor-results.png and /dev/null differ diff --git a/articles/azure-sql/database/media/single-database-create-quickstart/select-deployment.png b/articles/azure-sql/database/media/single-database-create-quickstart/select-deployment.png deleted file mode 100644 index 9e112ef6fba30..0000000000000 Binary files a/articles/azure-sql/database/media/single-database-create-quickstart/select-deployment.png and /dev/null differ diff --git a/articles/azure-sql/database/media/single-database-create-quickstart/server-firewall-rule.png b/articles/azure-sql/database/media/single-database-create-quickstart/server-firewall-rule.png deleted file mode 100644 index 54b6fca8199f9..0000000000000 Binary files a/articles/azure-sql/database/media/single-database-create-quickstart/server-firewall-rule.png and /dev/null differ diff --git a/articles/azure-sql/database/media/single-database-manage/create-database-1.png b/articles/azure-sql/database/media/single-database-manage/create-database-1.png deleted file mode 100644 index 29531be331183..0000000000000 Binary files a/articles/azure-sql/database/media/single-database-manage/create-database-1.png and /dev/null differ diff --git a/articles/azure-sql/database/media/single-database-manage/create-logical-sql-server.png 
b/articles/azure-sql/database/media/single-database-manage/create-logical-sql-server.png deleted file mode 100644 index e57279cf1ce0e..0000000000000 Binary files a/articles/azure-sql/database/media/single-database-manage/create-logical-sql-server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/single-database-manage/server-firewall-rule.png b/articles/azure-sql/database/media/single-database-manage/server-firewall-rule.png deleted file mode 100644 index 54b6fca8199f9..0000000000000 Binary files a/articles/azure-sql/database/media/single-database-manage/server-firewall-rule.png and /dev/null differ diff --git a/articles/azure-sql/database/media/single-database-scale/cancel-ongoing-operation.png b/articles/azure-sql/database/media/single-database-scale/cancel-ongoing-operation.png deleted file mode 100644 index 1d4d68a325b85..0000000000000 Binary files a/articles/azure-sql/database/media/single-database-scale/cancel-ongoing-operation.png and /dev/null differ diff --git a/articles/azure-sql/database/media/single-database-scale/ongoing-operations.png b/articles/azure-sql/database/media/single-database-scale/ongoing-operations.png deleted file mode 100644 index bb01a83f72892..0000000000000 Binary files a/articles/azure-sql/database/media/single-database-scale/ongoing-operations.png and /dev/null differ diff --git a/articles/azure-sql/database/media/spark-connector/architecture.png b/articles/azure-sql/database/media/spark-connector/architecture.png deleted file mode 100644 index c4854ba3b30ac..0000000000000 Binary files a/articles/azure-sql/database/media/spark-connector/architecture.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-agent-overview/sync-error-1069.png b/articles/azure-sql/database/media/sql-data-sync-agent-overview/sync-error-1069.png deleted file mode 100644 index 93e6adb299862..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-agent-overview/sync-error-1069.png and 
/dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-agent-overview/sync-error-cant-submit-agent-key.png b/articles/azure-sql/database/media/sql-data-sync-agent-overview/sync-error-cant-submit-agent-key.png deleted file mode 100644 index 27ea6f52e58fd..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-agent-overview/sync-error-cant-submit-agent-key.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-data-sql-server-sql-database/sync-data-overview.png b/articles/azure-sql/database/media/sql-data-sync-data-sql-server-sql-database/sync-data-overview.png deleted file mode 100644 index d08e71dcfbe44..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-data-sql-server-sql-database/sync-data-overview.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-data-sql-server-sql-database/sync-private-link-overview.png b/articles/azure-sql/database/media/sql-data-sync-data-sql-server-sql-database/sync-private-link-overview.png deleted file mode 100644 index 3cef5cb82e0b5..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-data-sql-server-sql-database/sync-private-link-overview.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-monitor-sync/sync-email-notifications.png b/articles/azure-sql/database/media/sql-data-sync-monitor-sync/sync-email-notifications.png deleted file mode 100644 index 7d31b9bc0c354..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-monitor-sync/sync-email-notifications.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-monitor-sync/sync-monitoring-dashboard.png b/articles/azure-sql/database/media/sql-data-sync-monitor-sync/sync-monitoring-dashboard.png deleted file mode 100644 index 5de4d0dcc1fd4..0000000000000 Binary files 
a/articles/azure-sql/database/media/sql-data-sync-monitor-sync/sync-monitoring-dashboard.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/add-sync-members.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/add-sync-members.png deleted file mode 100644 index 5d56d6b50ebad..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/add-sync-members.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/approve-private-link-update.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/approve-private-link-update.png deleted file mode 100644 index 01dce698df033..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/approve-private-link-update.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/approve-private-link.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/approve-private-link.png deleted file mode 100644 index 727bf2960700d..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/approve-private-link.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/approve-privatelink.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/approve-privatelink.png deleted file mode 100644 index 727bf2960700d..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/approve-privatelink.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/configure-sync-group.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/configure-sync-group.png deleted file mode 100644 index 2aebff83d4988..0000000000000 Binary files 
a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/configure-sync-group.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/create-sync-group.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/create-sync-group.png deleted file mode 100644 index 15faa713529bf..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/create-sync-group.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/datasync-preview-agent-adddb.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/datasync-preview-agent-adddb.png deleted file mode 100644 index 52306500f4cd8..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/datasync-preview-agent-adddb.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/datasync-preview-agent-dbadded.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/datasync-preview-agent-dbadded.png deleted file mode 100644 index 88e54450ebbc4..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/datasync-preview-agent-dbadded.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/datasync-preview-agent-enterkey.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/datasync-preview-agent-enterkey.png deleted file mode 100644 index 94f44e3ae0d3d..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/datasync-preview-agent-enterkey.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/datasync-preview-clientagent.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/datasync-preview-clientagent.png deleted file 
mode 100644 index 489e3a39aee19..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/datasync-preview-clientagent.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/datasync-preview-selectdb.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/datasync-preview-selectdb.png deleted file mode 100644 index f8efc7800a559..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/datasync-preview-selectdb.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/datasync-sync.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/datasync-sync.png deleted file mode 100644 index 17b8235304ec5..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/datasync-sync.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/new-sync-group-private-link.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/new-sync-group-private-link.png deleted file mode 100644 index 820e96091f373..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/new-sync-group-private-link.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/new-sync-group-privatelink.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/new-sync-group-privatelink.png deleted file mode 100644 index 820e96091f373..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/new-sync-group-privatelink.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/search-for-sql-databases.png 
b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/search-for-sql-databases.png deleted file mode 100644 index 52c3e6fa7046e..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/search-for-sql-databases.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/select-sql-database.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/select-sql-database.png deleted file mode 100644 index b953eb420cd70..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/select-sql-database.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/step-two-configure.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/step-two-configure.png deleted file mode 100644 index 7dc1138458bdd..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/step-two-configure.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/stepone.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/stepone.png deleted file mode 100644 index 3f6fcce0ac445..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/stepone.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/stepthree.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/stepthree.png deleted file mode 100644 index fdf2e09a23516..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/stepthree.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/steptwo-agent.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/steptwo-agent.png deleted file mode 100644 
index 2183bbae189ab..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/steptwo-agent.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/steptwo-configure.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/steptwo-configure.png deleted file mode 100644 index 7dc1138458bdd..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/steptwo-configure.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/steptwo.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/steptwo.png deleted file mode 100644 index aab35876b91dc..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/steptwo.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/sync-to-other-databases.png b/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/sync-to-other-databases.png deleted file mode 100644 index 187c471562737..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-sql-server-configure/sync-to-other-databases.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-data-sync-update-sync-schema/automate-schema-changes.png b/articles/azure-sql/database/media/sql-data-sync-update-sync-schema/automate-schema-changes.png deleted file mode 100644 index 7b165588c7690..0000000000000 Binary files a/articles/azure-sql/database/media/sql-data-sync-update-sync-schema/automate-schema-changes.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-database-get-started-portal/manage-connectivity-settings.png b/articles/azure-sql/database/media/sql-database-get-started-portal/manage-connectivity-settings.png deleted file mode 100644 index 28429a001a0ef..0000000000000 Binary files 
a/articles/azure-sql/database/media/sql-database-get-started-portal/manage-connectivity-settings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-database-get-started-portal/manage-server-firewall.png b/articles/azure-sql/database/media/sql-database-get-started-portal/manage-server-firewall.png deleted file mode 100644 index 70321347b2bb7..0000000000000 Binary files a/articles/azure-sql/database/media/sql-database-get-started-portal/manage-server-firewall.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-database-get-started-portal/new-server2.png b/articles/azure-sql/database/media/sql-database-get-started-portal/new-server2.png deleted file mode 100644 index a1ab6007d77e1..0000000000000 Binary files a/articles/azure-sql/database/media/sql-database-get-started-portal/new-server2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-database-get-started-portal/pe-connect-overview.png b/articles/azure-sql/database/media/sql-database-get-started-portal/pe-connect-overview.png deleted file mode 100644 index a8c8b6d62f58d..0000000000000 Binary files a/articles/azure-sql/database/media/sql-database-get-started-portal/pe-connect-overview.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-database-get-started-portal/pec-approve.png b/articles/azure-sql/database/media/sql-database-get-started-portal/pec-approve.png deleted file mode 100644 index 40bb588b8999a..0000000000000 Binary files a/articles/azure-sql/database/media/sql-database-get-started-portal/pec-approve.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-database-get-started-portal/pec-list-after.png b/articles/azure-sql/database/media/sql-database-get-started-portal/pec-list-after.png deleted file mode 100644 index eda926f604a7a..0000000000000 Binary files a/articles/azure-sql/database/media/sql-database-get-started-portal/pec-list-after.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/sql-database-get-started-portal/pec-list-before.png b/articles/azure-sql/database/media/sql-database-get-started-portal/pec-list-before.png deleted file mode 100644 index 1cfe7e0685c7c..0000000000000 Binary files a/articles/azure-sql/database/media/sql-database-get-started-portal/pec-list-before.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-database-get-started-portal/pec-select.png b/articles/azure-sql/database/media/sql-database-get-started-portal/pec-select.png deleted file mode 100644 index 467bfe56268cc..0000000000000 Binary files a/articles/azure-sql/database/media/sql-database-get-started-portal/pec-select.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-database-get-started-portal/server-firewall-rule.png b/articles/azure-sql/database/media/sql-database-get-started-portal/server-firewall-rule.png deleted file mode 100644 index 54b6fca8199f9..0000000000000 Binary files a/articles/azure-sql/database/media/sql-database-get-started-portal/server-firewall-rule.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-database-get-started-portal/server-name.png b/articles/azure-sql/database/media/sql-database-get-started-portal/server-name.png deleted file mode 100644 index 98ce5b4399111..0000000000000 Binary files a/articles/azure-sql/database/media/sql-database-get-started-portal/server-name.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-database-get-started-portal/telnet-result.png b/articles/azure-sql/database/media/sql-database-get-started-portal/telnet-result.png deleted file mode 100644 index 4cb318767c16d..0000000000000 Binary files a/articles/azure-sql/database/media/sql-database-get-started-portal/telnet-result.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-database-paas-overview/architecture.png b/articles/azure-sql/database/media/sql-database-paas-overview/architecture.png deleted file 
mode 100644 index db0153bbee484..0000000000000 Binary files a/articles/azure-sql/database/media/sql-database-paas-overview/architecture.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-database-paas-overview/deployment-options.png b/articles/azure-sql/database/media/sql-database-paas-overview/deployment-options.png deleted file mode 100644 index c92a804de65a5..0000000000000 Binary files a/articles/azure-sql/database/media/sql-database-paas-overview/deployment-options.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-database-paas-overview/sqldb_elastic_pools.png b/articles/azure-sql/database/media/sql-database-paas-overview/sqldb_elastic_pools.png deleted file mode 100644 index 6d8af6c721fda..0000000000000 Binary files a/articles/azure-sql/database/media/sql-database-paas-overview/sqldb_elastic_pools.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-database-vulnerability-assessment-storage/mi-service-endpoint.png b/articles/azure-sql/database/media/sql-database-vulnerability-assessment-storage/mi-service-endpoint.png deleted file mode 100644 index 516995da93d48..0000000000000 Binary files a/articles/azure-sql/database/media/sql-database-vulnerability-assessment-storage/mi-service-endpoint.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-database-vulnerability-assessment-storage/mi-subnets.png b/articles/azure-sql/database/media/sql-database-vulnerability-assessment-storage/mi-subnets.png deleted file mode 100644 index d2466fcbac82c..0000000000000 Binary files a/articles/azure-sql/database/media/sql-database-vulnerability-assessment-storage/mi-subnets.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-database-vulnerability-assessment-storage/storage-allow-microsoft-services.png b/articles/azure-sql/database/media/sql-database-vulnerability-assessment-storage/storage-allow-microsoft-services.png deleted file mode 100644 index 
8cabbeed38609..0000000000000 Binary files a/articles/azure-sql/database/media/sql-database-vulnerability-assessment-storage/storage-allow-microsoft-services.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-database-vulnerability-assessment-storage/storage-firewall.png b/articles/azure-sql/database/media/sql-database-vulnerability-assessment-storage/storage-firewall.png deleted file mode 100644 index 0e79f8d27a68c..0000000000000 Binary files a/articles/azure-sql/database/media/sql-database-vulnerability-assessment-storage/storage-firewall.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-database-vulnerability-assessment-storage/va-storage.png b/articles/azure-sql/database/media/sql-database-vulnerability-assessment-storage/va-storage.png deleted file mode 100644 index 19395c5ba67ab..0000000000000 Binary files a/articles/azure-sql/database/media/sql-database-vulnerability-assessment-storage/va-storage.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-vulnerability-assessment/baseline-approval.png b/articles/azure-sql/database/media/sql-vulnerability-assessment/baseline-approval.png deleted file mode 100644 index 2ec4483c2f685..0000000000000 Binary files a/articles/azure-sql/database/media/sql-vulnerability-assessment/baseline-approval.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-vulnerability-assessment/examining-vulnerability-findings.gif b/articles/azure-sql/database/media/sql-vulnerability-assessment/examining-vulnerability-findings.gif deleted file mode 100644 index 68cc632fb91dd..0000000000000 Binary files a/articles/azure-sql/database/media/sql-vulnerability-assessment/examining-vulnerability-findings.gif and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-vulnerability-assessment/on-demand-vulnerability-scan.png b/articles/azure-sql/database/media/sql-vulnerability-assessment/on-demand-vulnerability-scan.png deleted file mode 
100644 index 4a4b6f6489333..0000000000000 Binary files a/articles/azure-sql/database/media/sql-vulnerability-assessment/on-demand-vulnerability-scan.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-vulnerability-assessment/opening-sql-configuration.png b/articles/azure-sql/database/media/sql-vulnerability-assessment/opening-sql-configuration.png deleted file mode 100644 index 74525a2b04060..0000000000000 Binary files a/articles/azure-sql/database/media/sql-vulnerability-assessment/opening-sql-configuration.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-vulnerability-assessment/passed-per-custom-baseline.png b/articles/azure-sql/database/media/sql-vulnerability-assessment/passed-per-custom-baseline.png deleted file mode 100644 index d58c5ae74429f..0000000000000 Binary files a/articles/azure-sql/database/media/sql-vulnerability-assessment/passed-per-custom-baseline.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-vulnerability-assessment/pp_fail_rule_show_baseline.png b/articles/azure-sql/database/media/sql-vulnerability-assessment/pp_fail_rule_show_baseline.png deleted file mode 100644 index 5f437070a4ec8..0000000000000 Binary files a/articles/azure-sql/database/media/sql-vulnerability-assessment/pp_fail_rule_show_baseline.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-vulnerability-assessment/pp_fail_rule_show_remediation.png b/articles/azure-sql/database/media/sql-vulnerability-assessment/pp_fail_rule_show_remediation.png deleted file mode 100644 index 47b22331d3d37..0000000000000 Binary files a/articles/azure-sql/database/media/sql-vulnerability-assessment/pp_fail_rule_show_remediation.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-vulnerability-assessment/pp_main_getstarted.png b/articles/azure-sql/database/media/sql-vulnerability-assessment/pp_main_getstarted.png deleted file mode 100644 index bfa4d3cae8f43..0000000000000 
Binary files a/articles/azure-sql/database/media/sql-vulnerability-assessment/pp_main_getstarted.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-vulnerability-assessment/pp_pass_main_with_baselines.png b/articles/azure-sql/database/media/sql-vulnerability-assessment/pp_pass_main_with_baselines.png deleted file mode 100644 index 6e5c63980f9a5..0000000000000 Binary files a/articles/azure-sql/database/media/sql-vulnerability-assessment/pp_pass_main_with_baselines.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-vulnerability-assessment/pp_recurring_scans.png b/articles/azure-sql/database/media/sql-vulnerability-assessment/pp_recurring_scans.png deleted file mode 100644 index 87f05556c0ca6..0000000000000 Binary files a/articles/azure-sql/database/media/sql-vulnerability-assessment/pp_recurring_scans.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-vulnerability-assessment/pp_va_initialize.png b/articles/azure-sql/database/media/sql-vulnerability-assessment/pp_va_initialize.png deleted file mode 100644 index 0cc116215d403..0000000000000 Binary files a/articles/azure-sql/database/media/sql-vulnerability-assessment/pp_va_initialize.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-vulnerability-assessment/sample-sql-vulnerabilities-report.png b/articles/azure-sql/database/media/sql-vulnerability-assessment/sample-sql-vulnerabilities-report.png deleted file mode 100644 index 99542867cb80e..0000000000000 Binary files a/articles/azure-sql/database/media/sql-vulnerability-assessment/sample-sql-vulnerabilities-report.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-vulnerability-assessment/sql-vulnerability-scan-settings.png b/articles/azure-sql/database/media/sql-vulnerability-assessment/sql-vulnerability-scan-settings.png deleted file mode 100644 index 75e1a9f3d9fa4..0000000000000 Binary files 
a/articles/azure-sql/database/media/sql-vulnerability-assessment/sql-vulnerability-scan-settings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/sql-vulnerability-assessment/view-additional-findings-link.png b/articles/azure-sql/database/media/sql-vulnerability-assessment/view-additional-findings-link.png deleted file mode 100644 index 4239979d18037..0000000000000 Binary files a/articles/azure-sql/database/media/sql-vulnerability-assessment/view-additional-findings-link.png and /dev/null differ diff --git a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/create-job-output.png b/articles/azure-sql/database/media/stream-data-stream-analytics-integration/create-job-output.png deleted file mode 100644 index 43573178c08b9..0000000000000 Binary files a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/create-job-output.png and /dev/null differ diff --git a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/create-job.png b/articles/azure-sql/database/media/stream-data-stream-analytics-integration/create-job.png deleted file mode 100644 index 6e626db80b7f5..0000000000000 Binary files a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/create-job.png and /dev/null differ diff --git a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/create.png b/articles/azure-sql/database/media/stream-data-stream-analytics-integration/create.png deleted file mode 100644 index 90a554959250d..0000000000000 Binary files a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/create.png and /dev/null differ diff --git a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/jobs.png b/articles/azure-sql/database/media/stream-data-stream-analytics-integration/jobs.png deleted file mode 100644 index 320e8d3368578..0000000000000 Binary files 
a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/jobs.png and /dev/null differ diff --git a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/start-job.png b/articles/azure-sql/database/media/stream-data-stream-analytics-integration/start-job.png deleted file mode 100644 index de99f49c102ce..0000000000000 Binary files a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/start-job.png and /dev/null differ diff --git a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/stream-analytics-flow.png b/articles/azure-sql/database/media/stream-data-stream-analytics-integration/stream-analytics-flow.png deleted file mode 100644 index 9fa9351d09adb..0000000000000 Binary files a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/stream-analytics-flow.png and /dev/null differ diff --git a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/stream-analytics.png b/articles/azure-sql/database/media/stream-data-stream-analytics-integration/stream-analytics.png deleted file mode 100644 index 00bec8f533416..0000000000000 Binary files a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/stream-analytics.png and /dev/null differ diff --git a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/test-query.png b/articles/azure-sql/database/media/stream-data-stream-analytics-integration/test-query.png deleted file mode 100644 index ee737b95df9c8..0000000000000 Binary files a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/test-query.png and /dev/null differ diff --git a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/test-results-schema.png b/articles/azure-sql/database/media/stream-data-stream-analytics-integration/test-results-schema.png deleted file mode 100644 index aaa8c3d9e0e53..0000000000000 Binary files 
a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/test-results-schema.png and /dev/null differ diff --git a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/test-results.png b/articles/azure-sql/database/media/stream-data-stream-analytics-integration/test-results.png deleted file mode 100644 index f5ade7d791e64..0000000000000 Binary files a/articles/azure-sql/database/media/stream-data-stream-analytics-integration/test-results.png and /dev/null differ diff --git a/articles/azure-sql/database/media/temporal-tables-retention-policy/cciretention.png b/articles/azure-sql/database/media/temporal-tables-retention-policy/cciretention.png deleted file mode 100644 index 1a08406ae6f1c..0000000000000 Binary files a/articles/azure-sql/database/media/temporal-tables-retention-policy/cciretention.png and /dev/null differ diff --git a/articles/azure-sql/database/media/temporal-tables-retention-policy/queryexecplanhistorytable.png b/articles/azure-sql/database/media/temporal-tables-retention-policy/queryexecplanhistorytable.png deleted file mode 100644 index a592d2169c800..0000000000000 Binary files a/articles/azure-sql/database/media/temporal-tables-retention-policy/queryexecplanhistorytable.png and /dev/null differ diff --git a/articles/azure-sql/database/media/temporal-tables-retention-policy/queryexecplanwithretention.png b/articles/azure-sql/database/media/temporal-tables-retention-policy/queryexecplanwithretention.png deleted file mode 100644 index 25a53e1ee4745..0000000000000 Binary files a/articles/azure-sql/database/media/temporal-tables-retention-policy/queryexecplanwithretention.png and /dev/null differ diff --git a/articles/azure-sql/database/media/threat-detection-overview/active_threats.png b/articles/azure-sql/database/media/threat-detection-overview/active_threats.png deleted file mode 100644 index 98214d5ad413c..0000000000000 Binary files 
a/articles/azure-sql/database/media/threat-detection-overview/active_threats.png and /dev/null differ diff --git a/articles/azure-sql/database/media/threat-detection-overview/anomalous_activity_report.png b/articles/azure-sql/database/media/threat-detection-overview/anomalous_activity_report.png deleted file mode 100644 index ff81a76030969..0000000000000 Binary files a/articles/azure-sql/database/media/threat-detection-overview/anomalous_activity_report.png and /dev/null differ diff --git a/articles/azure-sql/database/media/threat-detection-overview/specific_alert.png b/articles/azure-sql/database/media/threat-detection-overview/specific_alert.png deleted file mode 100644 index c67dda240dda9..0000000000000 Binary files a/articles/azure-sql/database/media/threat-detection-overview/specific_alert.png and /dev/null differ diff --git a/articles/azure-sql/database/media/threat-detection-overview/threat_detection_alert.png b/articles/azure-sql/database/media/threat-detection-overview/threat_detection_alert.png deleted file mode 100644 index f775878adec86..0000000000000 Binary files a/articles/azure-sql/database/media/threat-detection-overview/threat_detection_alert.png and /dev/null differ diff --git a/articles/azure-sql/database/media/threat-detection-overview/threat_detection_alert_atp.png b/articles/azure-sql/database/media/threat-detection-overview/threat_detection_alert_atp.png deleted file mode 100644 index 838e904955f2f..0000000000000 Binary files a/articles/azure-sql/database/media/threat-detection-overview/threat_detection_alert_atp.png and /dev/null differ diff --git a/articles/azure-sql/database/media/threat-detection/set_up_threat_detection.png b/articles/azure-sql/database/media/threat-detection/set_up_threat_detection.png deleted file mode 100644 index dcb0b7b6ed250..0000000000000 Binary files a/articles/azure-sql/database/media/threat-detection/set_up_threat_detection.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/configure-identity.png b/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/configure-identity.png deleted file mode 100644 index affff36ecd8b2..0000000000000 Binary files a/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/configure-identity.png and /dev/null differ diff --git a/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/configure-tde-for-server.png b/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/configure-tde-for-server.png deleted file mode 100644 index 22a0306fb5e40..0000000000000 Binary files a/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/configure-tde-for-server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/create-server.png b/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/create-server.png deleted file mode 100644 index 761e750ccde4d..0000000000000 Binary files a/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/create-server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/identity-configuration-managed-identity.png b/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/identity-configuration-managed-identity.png deleted file mode 100644 index 6b18021f215f8..0000000000000 Binary files a/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/identity-configuration-managed-identity.png and /dev/null differ diff --git a/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/networking-settings.png b/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/networking-settings.png deleted file mode 100644 
index ce301740e1acf..0000000000000 Binary files a/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/networking-settings.png and /dev/null differ diff --git a/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/select-key-for-tde.png b/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/select-key-for-tde.png deleted file mode 100644 index 89c38707c4c1b..0000000000000 Binary files a/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/select-key-for-tde.png and /dev/null differ diff --git a/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/selecting-primary-identity-for-server.png b/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/selecting-primary-identity-for-server.png deleted file mode 100644 index f720bedd69aa1..0000000000000 Binary files a/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/selecting-primary-identity-for-server.png and /dev/null differ diff --git a/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/selecting-user-assigned-managed-identity.png b/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/selecting-user-assigned-managed-identity.png deleted file mode 100644 index ce2e51058e5d2..0000000000000 Binary files a/articles/azure-sql/database/media/transparent-data-encryption-byok-create-server/selecting-user-assigned-managed-identity.png and /dev/null differ diff --git a/articles/azure-sql/database/media/transparent-data-encryption-byok-overview/customer-managed-tde-inaccessible-database.jpg b/articles/azure-sql/database/media/transparent-data-encryption-byok-overview/customer-managed-tde-inaccessible-database.jpg deleted file mode 100644 index 3148984bcffa7..0000000000000 Binary files 
a/articles/azure-sql/database/media/transparent-data-encryption-byok-overview/customer-managed-tde-inaccessible-database.jpg and /dev/null differ diff --git a/articles/azure-sql/database/media/transparent-data-encryption-byok-overview/customer-managed-tde-with-bcdr.png b/articles/azure-sql/database/media/transparent-data-encryption-byok-overview/customer-managed-tde-with-bcdr.png deleted file mode 100644 index c88c4d189fe10..0000000000000 Binary files a/articles/azure-sql/database/media/transparent-data-encryption-byok-overview/customer-managed-tde-with-bcdr.png and /dev/null differ diff --git a/articles/azure-sql/database/media/transparent-data-encryption-byok-overview/customer-managed-tde-with-ha.png b/articles/azure-sql/database/media/transparent-data-encryption-byok-overview/customer-managed-tde-with-ha.png deleted file mode 100644 index 8419ec6ec840d..0000000000000 Binary files a/articles/azure-sql/database/media/transparent-data-encryption-byok-overview/customer-managed-tde-with-ha.png and /dev/null differ diff --git a/articles/azure-sql/database/media/transparent-data-encryption-byok-overview/customer-managed-tde-with-roles.png b/articles/azure-sql/database/media/transparent-data-encryption-byok-overview/customer-managed-tde-with-roles.png deleted file mode 100644 index 912f4b44e8bab..0000000000000 Binary files a/articles/azure-sql/database/media/transparent-data-encryption-byok-overview/customer-managed-tde-with-roles.png and /dev/null differ diff --git a/articles/azure-sql/database/media/transparent-data-encryption-tde-overview/service-managed-transparent-data-encryption.png b/articles/azure-sql/database/media/transparent-data-encryption-tde-overview/service-managed-transparent-data-encryption.png deleted file mode 100644 index d0a69d20cf4b2..0000000000000 Binary files a/articles/azure-sql/database/media/transparent-data-encryption-tde-overview/service-managed-transparent-data-encryption.png and /dev/null differ diff --git 
a/articles/azure-sql/database/media/transparent-data-encryption-tde-overview/tde-byok-support.png b/articles/azure-sql/database/media/transparent-data-encryption-tde-overview/tde-byok-support.png deleted file mode 100644 index da90ca496a2e0..0000000000000 Binary files a/articles/azure-sql/database/media/transparent-data-encryption-tde-overview/tde-byok-support.png and /dev/null differ diff --git a/articles/azure-sql/database/media/troubleshoot-common-errors-issues/cannot-open-database-master.png b/articles/azure-sql/database/media/troubleshoot-common-errors-issues/cannot-open-database-master.png deleted file mode 100644 index 4b1ef4e14e86b..0000000000000 Binary files a/articles/azure-sql/database/media/troubleshoot-common-errors-issues/cannot-open-database-master.png and /dev/null differ diff --git a/articles/azure-sql/database/media/tutorial-add-single-database-failover-group/create-secondary-failover-server (1).png b/articles/azure-sql/database/media/tutorial-add-single-database-failover-group/create-secondary-failover-server (1).png deleted file mode 100644 index aef3222e949a6..0000000000000 Binary files a/articles/azure-sql/database/media/tutorial-add-single-database-failover-group/create-secondary-failover-server (1).png and /dev/null differ diff --git a/articles/azure-sql/database/media/tutorial-design-first-database-csharp/erd-dept-empl-fky-2.png b/articles/azure-sql/database/media/tutorial-design-first-database-csharp/erd-dept-empl-fky-2.png deleted file mode 100644 index 3fcae268e70dd..0000000000000 Binary files a/articles/azure-sql/database/media/tutorial-design-first-database-csharp/erd-dept-empl-fky-2.png and /dev/null differ diff --git a/articles/azure-sql/database/media/xevent-code-event-file/event-file-powershell-ise-b30.png b/articles/azure-sql/database/media/xevent-code-event-file/event-file-powershell-ise-b30.png deleted file mode 100644 index 91dbbf90a5e20..0000000000000 Binary files 
a/articles/azure-sql/database/media/xevent-code-event-file/event-file-powershell-ise-b30.png and /dev/null differ diff --git a/articles/azure-sql/database/metrics-diagnostic-telemetry-logging-streaming-export-configure.md b/articles/azure-sql/database/metrics-diagnostic-telemetry-logging-streaming-export-configure.md deleted file mode 100644 index b824ed306eff7..0000000000000 --- a/articles/azure-sql/database/metrics-diagnostic-telemetry-logging-streaming-export-configure.md +++ /dev/null @@ -1,758 +0,0 @@ ---- -title: Configure streaming export of metrics and resource logs -description: Learn how to configure streaming export of metrics and resource logs, including intelligent diagnostic analysis from Azure SQL Database and Azure SQL Managed Instance to the destination of your choice to store information about resource utilization and query execution statistics. -services: sql-database -ms.service: sql-db-mi -ms.subservice: performance -ms.custom: seoapril2019, devx-track-azurepowershell -ms.topic: how-to -author: dimitri-furman -ms.author: dfurman -ms.reviewer: kendralittle, mathoma, wiassaf -ms.date: 3/10/2022 ---- - -# Configure streaming export of Azure SQL Database and SQL Managed Instance diagnostic telemetry -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -In this article, you will learn about the performance metrics and resource logs for Azure SQL Database that you can export to one of several destinations for analysis. You will learn how to configure the streaming export of this diagnostic telemetry through the Azure portal, PowerShell, Azure CLI, the REST API, and Azure Resource Manager templates. - -You will also learn about the destinations to which you can stream this diagnostic telemetry and how to choose among these choices. 
Your destination options include: - -- [Log Analytics and SQL Analytics](#stream-into-sql-analytics) -- [Event Hubs](#stream-into-event-hubs) -- [Azure Storage](#stream-into-azure-storage) - -## Diagnostic telemetry for export - -Most important among the diagnostic telemetry that you can export is the Intelligent Insights (SQLInsights) log (unrelated to [Azure Monitor SQL Insights (preview)](../../azure-sql/database/monitoring-sql-database-azure-monitor.md)). [Intelligent Insights](intelligent-insights-overview.md) uses built-in intelligence to continuously monitor database usage through artificial intelligence and detect disruptive events that cause poor performance. Once detected, a detailed analysis is performed that generates a Intelligent Insights log with an intelligent assessment of the issue. This assessment consists of a root cause analysis of the database performance issue and, where possible, recommendations for performance improvements. You need to configure the streaming export of this log to view its contents. - -In addition to streaming the export of the Intelligent Insights log, you can also export a variety of performance metrics and additional database logs. The following table describes the performance metrics and resources logs that you can configure for streaming export to one of several destinations. This diagnostic telemetry can be configured for single databases, elastic pools and pooled databases, and managed instances and instance databases. - -| Diagnostic telemetry for databases | Azure SQL Database support | Azure SQL Managed Instance support | -| :------------------- | ----- | ----- | -| [Basic metrics](#basic-metrics): Contains DTU/CPU percentage, DTU/CPU limit, physical data read percentage, log write percentage, Successful/Failed/Blocked by firewall connections, sessions percentage, workers percentage, storage, storage percentage, and XTP storage percentage. 
| Yes | No | -| [Instance and App Advanced](#advanced-metrics): Contains tempdb system database data and log file size and tempdb percent log file used. | Yes | No | -| [QueryStoreRuntimeStatistics](#query-store-runtime-statistics): Contains information about the query runtime statistics such as CPU usage and query duration statistics. | Yes | Yes | -| [QueryStoreWaitStatistics](#query-store-wait-statistics): Contains information about the query wait statistics (what your queries waited on) such are CPU, LOG, and LOCKING. | Yes | Yes | -| [Errors](#errors-dataset): Contains information about SQL errors on a database. | Yes | Yes | -| [DatabaseWaitStatistics](#database-wait-statistics-dataset): Contains information about how much time a database spent waiting on different wait types. | Yes | No | -| [Timeouts](#time-outs-dataset): Contains information about timeouts on a database. | Yes | No | -| [Blocks](#blockings-dataset): Contains information about blocking events on a database. | Yes | No | -| [Deadlocks](#deadlocks-dataset): Contains information about deadlock events on a database. | Yes | No | -| [AutomaticTuning](#automatic-tuning-dataset): Contains information about automatic tuning recommendations for a database. | Yes | No | -| [SQLInsights](#intelligent-insights-dataset): Contains Intelligent Insights into performance for a database. To learn more, see [Intelligent Insights](intelligent-insights-overview.md). | Yes | Yes | -| Workload Management: *Available for Azure Synapse only* For more information, see [Azure Synapse Analytics – Workload Management Portal Monitoring](../../synapse-analytics/sql-data-warehouse/sql-data-warehouse-workload-management-portal-monitor.md)| No | No | - -> [!NOTE] -> Diagnostic settings cannot be configured for the **system databases**, such as `master`, `msdb`, `model`, resource and `tempdb` databases. 
- -## Streaming export destinations - -This diagnostic telemetry can be streamed to one of the following Azure resources for analysis. - -- **[Log Analytics workspace](#stream-into-sql-analytics)**: - - Data streamed to a [Log Analytics workspace](../../azure-monitor/essentials/resource-logs.md#send-to-log-analytics-workspace) can be consumed by [SQL Analytics](../../azure-monitor/insights/azure-sql.md). SQL Analytics is a cloud only monitoring solution that provides intelligent monitoring of your databases that includes performance reports, alerts, and mitigation recommendations. Data streamed to a Log Analytics workspace can be analyzed with other monitoring data collected and also enables you to leverage other Azure Monitor features such as alerts and visualizations -- **[Azure Event Hubs](#stream-into-event-hubs)**: - - Data streamed to an [Azure Event Hub](../../azure-monitor/essentials/resource-logs.md#send-to-azure-event-hubs)provides the following functionality: - - - **Stream logs to 3rd party logging and telemetry systems**: Stream all of your metrics and resource logs to a single event hub to pipe log data to a third-party SIEM or log analytics tool. - - **Build a custom telemetry and logging platform**: The highly scalable publish-subscribe nature of Azure Event Hubs allows you to flexibly ingest metrics and resource logs into a custom telemetry platform. See [Designing and Sizing a Global Scale Telemetry Platform on Azure Event Hubs](https://azure.microsoft.com/documentation/videos/build-2015-designing-and-sizing-a-global-scale-telemetry-platform-on-azure-event-Hubs/) for details. - - **View service health by streaming data to Power BI**: Use Event Hubs, Stream Analytics, and Power BI to transform your diagnostics data into near real-time insights on your Azure services. See [Stream Analytics and Power BI: A real-time analytics dashboard for streaming data](../../stream-analytics/stream-analytics-power-bi-dashboard.md) for details on this solution. 
-- **[Azure Storage](#stream-into-azure-storage)**: - - Data streamed to [Azure Storage](../../azure-monitor/essentials/resource-logs.md#send-to-azure-storage) enables you to archive vast amounts of diagnostic telemetry for a fraction of the cost of the previous two streaming options. - -This diagnostic telemetry streamed to one of these destinations can be used to gauge resource utilization and query execution statistics for easier performance monitoring. - -![Diagram shows many SQL databases and databases in managed instances sending telemetry to Azure Diagnostics, which forwards information to Azure SQL Analytics, Event Hub, and storage.](./media/metrics-diagnostic-telemetry-logging-streaming-export-configure/architecture.png) - -## Enable and configure the streaming export of diagnostic telemetry - -You can enable and manage metrics and diagnostic telemetry logging by using one of the following methods: - -- Azure portal -- PowerShell -- Azure CLI -- Azure Monitor REST API -- Azure Resource Manager template - -> [!NOTE] -> To enable audit log streaming of security telemetry, see [Set up auditing for your database](./auditing-overview.md#setup-auditing) and [auditing logs in Azure Monitor logs and Azure Event Hubs](https://techcommunity.microsoft.com/t5/Azure-SQL-Database/SQL-Audit-logs-in-Azure-Log-Analytics-and-Azure-Event-Hubs/ba-p/386242). - -## Configure the streaming export of diagnostic telemetry - -You can use the **Diagnostics settings** menu in the Azure portal to enable and configure streaming of diagnostic telemetry. Additionally, you can use PowerShell, the Azure CLI, the [REST API](/rest/api/monitor/diagnosticsettings), and [Resource Manager templates](../../azure-monitor/essentials/resource-manager-diagnostic-settings.md) to configure streaming of diagnostic telemetry. You can set the following destinations to stream the diagnostic telemetry: Azure Storage, Azure Event Hubs, and Azure Monitor logs. 
- -> [!IMPORTANT] -> The streaming export of diagnostic telemetry is not enabled by default. - -Select one of the following tabs for step-by-step guidance for configuring the streaming export of diagnostic telemetry in the Azure portal and for scripts for accomplishing the same with PowerShell and the Azure CLI. - -# [Azure portal](#tab/azure-portal) - -### Elastic pools in Azure SQL Database - -You can set up an elastic pool resource to collect the following diagnostic telemetry: - -| Resource | Monitoring telemetry | -| :------------------- | ------------------- | -| **Elastic pool** | [Basic metrics](metrics-diagnostic-telemetry-logging-streaming-export-configure.md#basic-metrics) contains eDTU/CPU percentage, eDTU/CPU limit, physical data read percentage, log write percentage, sessions percentage, workers percentage, storage, storage percentage, storage limit, and XTP storage percentage. | - -To configure streaming of diagnostic telemetry for elastic pools and pooled databases, you need to separately configure each separately: - -- Enable streaming of diagnostic telemetry for an elastic pool -- Enable streaming of diagnostic telemetry for each database in elastic pool - -The elastic pool container has its own telemetry separate from each individual pooled database's telemetry. - -To enable streaming of diagnostic telemetry for an elastic pool resource, follow these steps: - -1. Go to the **elastic pool** resource in Azure portal. -2. Select **Diagnostics settings**. -3. Select **Turn on diagnostics** if no previous settings exist, or select **Edit setting** to edit a previous setting. - - ![Enable diagnostics for elastic pools](./media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-container-elasticpool-enable.png) - -4. Enter a setting name for your own reference. -5. Select a destination resource for the streaming diagnostics data: **Archive to storage account**, **Stream to an event hub**, or **Send to Log Analytics**. 
-6. For log analytics, select **Configure** and create a new workspace by selecting **+Create New Workspace**, or select an existing workspace. -7. Select the check box for elastic pool diagnostic telemetry: **Basic** metrics. - ![Configure diagnostics for elastic pools](./media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-container-elasticpool-selection.png) - -8. Select **Save**. -9. In addition, configure streaming of diagnostic telemetry for each database within the elastic pool you want to monitor by following steps described in the next section. - -> [!IMPORTANT] -> In addition to configuring diagnostic telemetry for an elastic pool, you also need to configure diagnostic telemetry for each database in the elastic pool. - -### Databases in Azure SQL Database - -You can set up a database resource to collect the following diagnostic telemetry: - -| Resource | Monitoring telemetry | -| :------------------- | ------------------- | -| **Single or pooled database** | [Basic metrics](metrics-diagnostic-telemetry-logging-streaming-export-configure.md#basic-metrics) contains DTU percentage, DTU used, DTU limit, CPU percentage, physical data read percentage, log write percentage, Successful/Failed/Blocked by firewall connections, sessions percentage, workers percentage, storage, storage percentage, XTP storage percentage, and deadlocks. | - -To enable streaming of diagnostic telemetry for a single or a pooled database, follow these steps: - -1. Go to Azure **SQL database** resource. -2. Select **Diagnostics settings**. -3. Select **Turn on diagnostics** if no previous settings exist, or select **Edit setting** to edit a previous setting. You can create up to three parallel connections to stream diagnostic telemetry. -4. Select **Add diagnostic setting** to configure parallel streaming of diagnostics data to multiple resources. 
- - ![Enable diagnostics for single and pooled databases](./media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-database-sql-enable.png) - -5. Enter a setting name for your own reference. -6. Select a destination resource for the streaming diagnostics data: **Archive to storage account**, **Stream to an event hub**, or **Send to Log Analytics**. -7. For the standard, event-based monitoring experience, select the following check boxes for database diagnostics log telemetry: **SQLInsights**, **AutomaticTuning**, **QueryStoreRuntimeStatistics**, **QueryStoreWaitStatistics**, **Errors**, **DatabaseWaitStatistics**, **Timeouts**, **Blocks**, and **Deadlocks**. -8. For an advanced, one-minute-based monitoring experience, select the check box for **Basic** metrics. - - ![Configure diagnostics for Azure SQL Database](./media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-database-sql-selection.png) -9. Select **Save**. -10. Repeat these steps for each database you want to monitor. - -> [!TIP] -> Repeat these steps for each single and pooled database you want to monitor. - -### Instances in Azure SQL Managed Instance - -You can set up a managed instance resource to collect the following diagnostic telemetry: - -| Resource | Monitoring telemetry | -| :------------------- | ------------------- | -| **Managed instance** | [ResourceUsageStats](#resource-usage-stats-for-managed-instances) contains vCores count, average CPU percentage, IO requests, bytes read/written, reserved storage space, and used storage space. | - -To configure streaming of diagnostic telemetry for managed instance and instance databases, you will need to separately configure each: - -- Enable streaming of diagnostic telemetry for managed instance -- Enable streaming of diagnostic telemetry for each instance database - -The managed instance container has its own telemetry separate from each instance database's telemetry. 
- -To enable streaming of diagnostic telemetry for a managed instance resource, follow these steps: - -1. Go to the **managed instance** resource in Azure portal. -2. Select **Diagnostics settings**. -3. Select **Turn on diagnostics** if no previous settings exist, or select **Edit setting** to edit a previous setting. - - ![Enable diagnostics for managed instance](./media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-container-mi-enable.png) - -4. Enter a setting name for your own reference. -5. Select a destination resource for the streaming diagnostics data: **Archive to storage account**, **Stream to an event hub**, or **Send to Log Analytics**. -6. For log analytics, select **Configure** and create a new workspace by selecting **+Create New Workspace**, or use an existing workspace. -7. Select the check box for instance diagnostic telemetry: **ResourceUsageStats**. - - ![Configure diagnostics for managed instance](./media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-container-mi-selection.png) - -8. Select **Save**. -9. In addition, configure streaming of diagnostic telemetry for each instance database within the managed instance you want to monitor by following the steps described in the next section. - -> [!IMPORTANT] -> In addition to configuring diagnostic telemetry for a managed instance, you also need to configure diagnostic telemetry for each instance database. - -### Databases in Azure SQL Managed Instance - -You can set up an instance database resource to collect the following diagnostic telemetry: - -| Resource | Monitoring telemetry | -| :------------------- | ------------------- | -| **Instance database** | [ResourceUsageStats](#resource-usage-stats-for-managed-instances) contains vCores count, average CPU percentage, IO requests, bytes read/written, reserved storage space, and used storage space. 
| - -To enable streaming of diagnostic telemetry for an instance database, follow these steps: - -1. Go to **instance database** resource within managed instance. -2. Select **Diagnostics settings**. -3. Select **Turn on diagnostics** if no previous settings exist, or select **Edit setting** to edit a previous setting. - - You can create up to three (3) parallel connections to stream diagnostic telemetry. - - Select **+Add diagnostic setting** to configure parallel streaming of diagnostics data to multiple resources. - - ![Enable diagnostics for instance databases](./media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-database-mi-enable.png) - -4. Enter a setting name for your own reference. -5. Select a destination resource for the streaming diagnostics data: **Archive to storage account**, **Stream to an event hub**, or **Send to Log Analytics**. -6. Select the check boxes for database diagnostic telemetry: **SQLInsights**, **QueryStoreRuntimeStatistics**, **QueryStoreWaitStatistics**, and **Errors**. - ![Configure diagnostics for instance databases](./media/metrics-diagnostic-telemetry-logging-streaming-export-configure/diagnostics-settings-database-mi-selection.png) -7. Select **Save**. -8. Repeat these steps for each instance database you want to monitor. - -> [!TIP] -> Repeat these steps for each instance database you want to monitor. - -# [PowerShell](#tab/azure-powershell) - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] - -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. - -You can enable metrics and diagnostics logging by using PowerShell. 
- -- To enable storage of metrics and resource logs in a storage account, use this command: - - ```powershell - Set-AzDiagnosticSetting -ResourceId [your resource id] -StorageAccountId [your storage account id] -Enabled $true - ``` - - The storage account ID is the resource ID for the destination storage account. - -- To enable streaming of metrics and resource logs to an event hub, use this command: - - ```powershell - Set-AzDiagnosticSetting -ResourceId [your resource id] -ServiceBusRuleId [your service bus rule id] -Enabled $true - ``` - - The Azure Service Bus rule ID is a string with this format: - - ```powershell - {service bus resource ID}/authorizationrules/{key name} - ``` - -- To enable sending metrics and resource logs to a Log Analytics workspace, use this command: - - ```powershell - Set-AzDiagnosticSetting -ResourceId [your resource id] -WorkspaceId [resource id of the log analytics workspace] -Enabled $true - ``` - -- You can obtain the resource ID of your Log Analytics workspace by using the following command: - - ```powershell - (Get-AzOperationalInsightsWorkspace).ResourceId - ``` - -You can combine these parameters to enable multiple output options. - -**To configure multiple Azure resources** - -To support multiple subscriptions, use the PowerShell script from [Enable Azure resource metrics logging using PowerShell](/archive/blogs/msoms/enable-azure-resource-metrics-logging-using-powershell). - -Provide the workspace resource ID \<$WSID\> as a parameter when executing the script `Enable-AzureRMDiagnostics.ps1` to send diagnostic data from multiple resources to the workspace. 
- -- To get the workspace ID \<$WSID\> of the destination for your diagnostic data, use the following script: - - ```powershell - $WSID = "/subscriptions//resourcegroups//providers/microsoft.operationalinsights/workspaces/" - .\Enable-AzureRMDiagnostics.ps1 -WSID $WSID - ``` - - Replace \ with the subscription ID, \ with the resource group name, and \ with the workspace name. - -# [Azure CLI](#tab/azure-cli) - -You can enable metrics and diagnostics logging by using the Azure CLI. - -> [!IMPORTANT] -> Scripts to enable diagnostics logging are supported for Azure CLI v1.0. Azure CLI v2.0 is unsupported at this time. - -- To enable the storage of metrics and resource logs in a storage account, use this command: - - ```azurecli-interactive - azure insights diagnostic set --resourceId --storageId --enabled true - ``` - - The storage account ID is the resource ID for the destination storage account. - -- To enable the streaming of metrics and resource logs to an event hub, use this command: - - ```azurecli-interactive - azure insights diagnostic set --resourceId --serviceBusRuleId --enabled true - ``` - - The Service Bus rule ID is a string with this format: - - ```azurecli-interactive - {service bus resource ID}/authorizationrules/{key name} - ``` - -- To enable the sending of metrics and resource logs to a Log Analytics workspace, use this command: - - ```azurecli-interactive - azure insights diagnostic set --resourceId --workspaceId --enabled true - ``` - -You can combine these parameters to enable multiple output options. - ---- - -## Stream into SQL Analytics - -Azure SQL Database and Azure SQL Managed Instance metrics and resource logs that are streamed into a Log Analytics workspace can be consumed by Azure SQL Analytics. Azure SQL Analytics is a cloud solution that monitors the performance of single databases, elastic pools and pooled databases, and managed instances and instance databases at scale and across multiple subscriptions. 
It can help you collect and visualize performance metrics, and it has built-in intelligence for performance troubleshooting. - -![Azure SQL Analytics Overview](./media/metrics-diagnostic-telemetry-logging-streaming-export-configure/azure-sql-sol-overview.png) - -### Installation overview - -You can monitor a collection of databases and database collections with Azure SQL Analytics by performing the following steps: - -1. Create an Azure SQL Analytics solution from the Azure Marketplace. -2. Create a Log Analytics workspace in the solution. -3. Configure databases to stream diagnostic telemetry into the workspace. - -You can configure the streaming export of this diagnostic telemetry by using the built-in **Send to Log Analytics** option in the diagnostics settings tab in the Azure portal. You can also enable streaming into a Log Analytics workspace by using diagnostics settings via [PowerShell cmdlets](metrics-diagnostic-telemetry-logging-streaming-export-configure.md?tabs=azure-powershell#configure-the-streaming-export-of-diagnostic-telemetry), the [Azure CLI](metrics-diagnostic-telemetry-logging-streaming-export-configure.md?tabs=azure-cli#configure-the-streaming-export-of-diagnostic-telemetry), the [Azure Monitor REST API](/rest/api/monitor/diagnosticsettings), or [Resource Manager templates](../../azure-monitor/essentials/resource-manager-diagnostic-settings.md). - -### Create an Azure SQL Analytics resource - -1. Search for Azure SQL Analytics in Azure Marketplace and select it. - - ![Search for Azure SQL Analytics in portal](./media/metrics-diagnostic-telemetry-logging-streaming-export-configure/sql-analytics-in-marketplace.png) - -2. Select **Create** on the solution's overview screen. - -3. Fill in the Azure SQL Analytics form with the additional information that is required: workspace name, subscription, resource group, location, and pricing tier. 
- - ![Configure Azure SQL Analytics in portal](./media/metrics-diagnostic-telemetry-logging-streaming-export-configure/sql-analytics-configuration-blade.png) - -4. Select **OK** to confirm, and then select **Create**. - -### Configure the resource to record metrics and resource logs - -You need to separately configure diagnostic telemetry streaming for single and pooled databases, elastic pools, managed instances, and instance databases. The easiest way to configure where a resource records its metrics is by using the Azure portal. For detailed steps, see [Configure the streaming export of diagnostic telemetry](metrics-diagnostic-telemetry-logging-streaming-export-configure.md?tabs=azure-portal#configure-the-streaming-export-of-diagnostic-telemetry). - -### Use Azure SQL Analytics for monitoring and alerting - -You can use SQL Analytics as a hierarchical dashboard to view your database resources. - -- To learn how to use Azure SQL Analytics, see [Monitor by using SQL Analytics](../../azure-monitor/insights/azure-sql.md). -- To learn how to set up alerts for in SQL Analytics, see [Creating alerts for database, elastic pools, and managed instances](../../azure-monitor/insights/azure-sql.md#analyze-data-and-create-alerts). - -## Stream into Event Hubs - -You can stream Azure SQL Database and Azure SQL Managed Instance metrics and resource logs into Event Hubs by using the built-in **Stream to an event hub** option in the Azure portal. You also can enable the Service Bus rule ID by using diagnostics settings via PowerShell cmdlets, the Azure CLI, or the Azure Monitor REST API. Be sure that the event hub is in the same region as your database and server. - -### What to do with metrics and resource logs in Event Hubs - -After the selected data is streamed into Event Hubs, you're one step closer to enabling advanced monitoring scenarios. Event Hubs acts as the front door for an event pipeline. 
After data is collected into an event hub, it can be transformed and stored by using a real-time analytics provider or a storage adapter. Event Hubs decouples the production of a stream of events from the consumption of those events. In this way, event consumers can access the events on their own schedule. For more information on Event Hubs, see: - -- [What are Azure Event Hubs?](../../event-hubs/event-hubs-about.md) -- [Get started with Event Hubs](../../event-hubs/event-hubs-dotnet-standard-getstarted-send.md) - -You can use streamed metrics in Event Hubs to: - -- **View service health by streaming hot-path data to Power BI** - - By using Event Hubs, Stream Analytics, and Power BI, you can easily transform your metrics and diagnostics data into near real-time insights on your Azure services. For an overview of how to set up an event hub, process data with Stream Analytics, and use Power BI as an output, see [Stream Analytics and Power BI](../../stream-analytics/stream-analytics-power-bi-dashboard.md). - -- **Stream logs to third-party logging and telemetry streams** - - By using Event Hubs streaming, you can get your metrics and resource logs into various third-party monitoring and log analytics solutions. - -- **Build a custom telemetry and logging platform** - - Do you already have a custom-built telemetry platform or are considering building one? The highly scalable publish-subscribe nature of Event Hubs allows you to flexibly ingest metrics and resource logs. See [Dan Rosanova's guide to using Event Hubs in a global-scale telemetry platform](https://azure.microsoft.com/documentation/videos/build-2015-designing-and-sizing-a-global-scale-telemetry-platform-on-azure-event-Hubs/). - -## Stream into Azure Storage - -You can store metrics and resource logs in Azure Storage by using the built-in **Archive to a storage account** option in the Azure portal. 
You can also enable Storage by using diagnostics settings via PowerShell cmdlets, the Azure CLI, or the Azure Monitor REST API. - -### Schema of metrics and resource logs in the storage account - -After you set up metrics and resource logs collection, a storage container is created in the storage account you selected when the first rows of data are available. The structure of the blobs is: - -```powershell -insights-{metrics|logs}-{category name}/resourceId=/SUBSCRIPTIONS/{subscription ID}/ RESOURCEGROUPS/{resource group name}/PROVIDERS/Microsoft.SQL/servers/{resource_server}/ databases/{database_name}/y={four-digit numeric year}/m={two-digit numeric month}/d={two-digit numeric day}/h={two-digit 24-hour clock hour}/m=00/PT1H.json -``` - -Or, more simply: - -```powershell -insights-{metrics|logs}-{category name}/resourceId=/{resource Id}/y={four-digit numeric year}/m={two-digit numeric month}/d={two-digit numeric day}/h={two-digit 24-hour clock hour}/m=00/PT1H.json -``` - -For example, a blob name for Basic metrics might be: - -```powershell -insights-metrics-minute/resourceId=/SUBSCRIPTIONS/s1id1234-5679-0123-4567-890123456789/RESOURCEGROUPS/TESTRESOURCEGROUP/PROVIDERS/MICROSOFT.SQL/ servers/Server1/databases/database1/y=2016/m=08/d=22/h=18/m=00/PT1H.json -``` - -A blob name for storing data from an elastic pool looks like: - -```powershell -insights-{metrics|logs}-{category name}/resourceId=/SUBSCRIPTIONS/{subscription ID}/ RESOURCEGROUPS/{resource group name}/PROVIDERS/Microsoft.SQL/servers/{resource_server}/ elasticPools/{elastic_pool_name}/y={four-digit numeric year}/m={two-digit numeric month}/d={two-digit numeric day}/h={two-digit 24-hour clock hour}/m=00/PT1H.json -``` - -## Data retention policy and pricing - -If you select Event Hubs or a Storage account, you can specify a retention policy. This policy deletes data that is older than a selected time period. If you specify Log Analytics, the retention policy depends on the selected pricing tier. 
In this case, the provided free units of data ingestion can enable free monitoring of several databases each month. Any consumption of diagnostic telemetry in excess of the free units might incur costs. - -> [!IMPORTANT] -> Active databases with heavier workloads ingest more data than idle databases. For more information, see [Log analytics pricing](https://azure.microsoft.com/pricing/details/monitor/). - -If you are using Azure SQL Analytics, you can monitor your data ingestion consumption by selecting **OMS Workspace** on the navigation menu of Azure SQL Analytics, and then selecting **Usage** and **Estimated Costs**. - -## Metrics and logs available - -The monitoring telemetry available for single databases, pooled databases, elastic pools, managed instances, and instance databases is documented in this section of the article. Collected monitoring telemetry inside SQL Analytics can be used for your own custom analysis and application development using the [Azure Monitor log queries](../../azure-monitor/logs/get-started-queries.md) language. - -### Basic metrics - -Refer to the following tables for details about Basic metrics by resource. - -> [!NOTE] -> The Basic metrics option was formerly known as All metrics. The change made was to the naming only and there was no change to the metrics monitored. This change was initiated to allow for the introduction of additional metric categories in the future. 
- -#### Basic metrics for elastic pools - -|**Resource**|**Metrics**| -|---|---| -|Elastic pool|eDTU percentage, eDTU used, eDTU limit, CPU percentage, physical data read percentage, log write percentage, sessions percentage, workers percentage, storage, storage percentage, storage limit, XTP storage percentage | - -#### Basic metrics for single and pooled databases - -|**Resource**|**Metrics**| -|---|---| -|Single and pooled database|DTU percentage, DTU used, DTU limit, CPU percentage, physical data read percentage, log write percentage, Successful/Failed/Blocked by firewall connections, sessions percentage, workers percentage, storage, storage percentage, XTP storage percentage, and deadlocks | - -### Advanced metrics - -Refer to the following table for details about advanced metrics. - -|**Metric**|**Metric Display Name**|**Description**| -|---|---|---| -|sqlserver_process_core_percent1|SQL process core percent|CPU usage percentage for the SQL process, as measured by the operating system.| -|sqlserver_process_memory_percent1 |SQL process memory percent|Memory usage percentage for the SQL process, as measured by the operating system.| -|tempdb_data_size2| Tempdb Data File Size Kilobytes |Tempdb Data File Size Kilobytes.| -|tempdb_log_size2| Tempdb Log File Size Kilobytes |Tempdb Log File Size Kilobytes.| -|tempdb_log_used_percent2| Tempdb Percent Log Used |Tempdb Percent Log Used.| - -1 This metric is available for databases using the vCore purchasing model with 2 vCores and higher, or 200 DTU and higher for DTU-based purchasing models. - -2 This metric is available for databases using the vCore purchasing model with 2 vCores and higher, or 200 DTU and higher for DTU-based purchasing models. This metric is not currently available for Synapse Analytics SQL pools. - -> [!NOTE] -> Both Basic and Advanced metrics may be unavailable for databases that have been inactive for 7 days or longer. 
- -### Basic logs - -Details of telemetry available for all logs are documented in the following tables. For more information, see [supported diagnostic telemetry](#diagnostic-telemetry-for-export). - -#### Resource usage stats for managed instances - -|Property|Description| -|---|---| -|TenantId|Your tenant ID | -|SourceSystem|Always: Azure| -|TimeGenerated [UTC]|Time stamp when the log was recorded | -|Type|Always: AzureDiagnostics | -|ResourceProvider|Name of the resource provider. Always: MICROSOFT.SQL | -|Category|Name of the category. Always: ResourceUsageStats | -|Resource|Name of the resource | -|ResourceType|Name of the resource type. Always: MANAGEDINSTANCES | -|SubscriptionId|Subscription GUID for the database | -|ResourceGroup|Name of the resource group for the database | -|LogicalServerName_s|Name of the managed instance | -|ResourceId|Resource URI | -|SKU_s|SQL Managed Instance product SKU | -|virtual_core_count_s|Number of vCores available | -|avg_cpu_percent_s|Average CPU percentage | -|reserved_storage_mb_s|Reserved storage capacity on the managed instance | -|storage_space_used_mb_s|Used storage on the managed instance | -|io_requests_s|IOPS count | -|io_bytes_read_s|IOPS bytes read | -|io_bytes_written_s|IOPS bytes written | - -#### Query Store runtime statistics - -|Property|Description| -|---|---| -|TenantId|Your tenant ID | -|SourceSystem|Always: Azure | -|TimeGenerated [UTC]|Time stamp when the log was recorded | -|Type|Always: AzureDiagnostics | -|ResourceProvider|Name of the resource provider. Always: MICROSOFT.SQL | -|Category|Name of the category. Always: QueryStoreRuntimeStatistics | -|OperationName|Name of the operation. Always: QueryStoreRuntimeStatisticsEvent | -|Resource|Name of the resource | -|ResourceType|Name of the resource type. 
Always: SERVERS/DATABASES | -|SubscriptionId|Subscription GUID for the database | -|ResourceGroup|Name of the resource group for the database | -|LogicalServerName_s|Name of the server for the database | -|ElasticPoolName_s|Name of the elastic pool for the database, if any | -|DatabaseName_s|Name of the database | -|ResourceId|Resource URI | -|query_hash_s|Query hash | -|query_plan_hash_s|Query plan hash | -|statement_sql_handle_s|Statement sql handle | -|interval_start_time_d|Start datetimeoffset of the interval in number of ticks from 1900-1-1 | -|interval_end_time_d|End datetimeoffset of the interval in number of ticks from 1900-1-1 | -|logical_io_writes_d|Total number of logical IO writes | -|max_logical_io_writes_d|Max number of logical IO writes per execution | -|physical_io_reads_d|Total number of physical IO reads | -|max_physical_io_reads_d|Max number of logical IO reads per execution | -|logical_io_reads_d|Total number of logical IO reads | -|max_logical_io_reads_d|Max number of logical IO reads per execution | -|execution_type_d|Execution type | -|count_executions_d|Number of executions of the query | -|cpu_time_d|Total CPU time consumed by the query in microseconds | -|max_cpu_time_d|Max CPU time consumer by a single execution in microseconds | -|dop_d|Sum of degrees of parallelism | -|max_dop_d|Max degree of parallelism used for single execution | -|rowcount_d|Total number of rows returned | -|max_rowcount_d|Max number of rows returned in single execution | -|query_max_used_memory_d|Total amount of memory used in KB | -|max_query_max_used_memory_d|Max amount of memory used by a single execution in KB | -|duration_d|Total execution time in microseconds | -|max_duration_d|Max execution time of a single execution | -|num_physical_io_reads_d|Total number of physical reads | -|max_num_physical_io_reads_d|Max number of physical reads per execution | -|log_bytes_used_d|Total amount of log bytes used | -|max_log_bytes_used_d|Max amount of log bytes used per 
execution | -|query_id_d|ID of the query in Query Store | -|plan_id_d|ID of the plan in Query Store | - -Learn more about [Query Store runtime statistics data](/sql/relational-databases/system-catalog-views/sys-query-store-runtime-stats-transact-sql). - -#### Query Store wait statistics - -|Property|Description| -|---|---| -|TenantId|Your tenant ID | -|SourceSystem|Always: Azure | -|TimeGenerated [UTC]|Time stamp when the log was recorded | -|Type|Always: AzureDiagnostics | -|ResourceProvider|Name of the resource provider. Always: MICROSOFT.SQL | -|Category|Name of the category. Always: QueryStoreWaitStatistics | -|OperationName|Name of the operation. Always: QueryStoreWaitStatisticsEvent | -|Resource|Name of the resource | -|ResourceType|Name of the resource type. Always: SERVERS/DATABASES | -|SubscriptionId|Subscription GUID for the database | -|ResourceGroup|Name of the resource group for the database | -|LogicalServerName_s|Name of the server for the database | -|ElasticPoolName_s|Name of the elastic pool for the database, if any | -|DatabaseName_s|Name of the database | -|ResourceId|Resource URI | -|wait_category_s|Category of the wait | -|is_parameterizable_s|Is the query parameterizable | -|statement_type_s|Type of the statement | -|statement_key_hash_s|Statement key hash | -|exec_type_d|Type of execution | -|total_query_wait_time_ms_d|Total wait time of the query on the specific wait category | -|max_query_wait_time_ms_d|Max wait time of the query in individual execution on the specific wait category | -|query_param_type_d|0 | -|query_hash_s|Query hash in Query Store | -|query_plan_hash_s|Query plan hash in Query Store | -|statement_sql_handle_s|Statement handle in Query Store | -|interval_start_time_d|Start datetimeoffset of the interval in number of ticks from 1900-1-1 | -|interval_end_time_d|End datetimeoffset of the interval in number of ticks from 1900-1-1 | -|count_executions_d|Count of executions of the query | -|query_id_d|ID of the query in Query 
Store | -|plan_id_d|ID of the plan in Query Store | - -Learn more about [Query Store wait statistics data](/sql/relational-databases/system-catalog-views/sys-query-store-wait-stats-transact-sql). - -#### Errors dataset - -|Property|Description| -|---|---| -|TenantId|Your tenant ID | -|SourceSystem|Always: Azure | -|TimeGenerated [UTC]|Time stamp when the log was recorded | -|Type|Always: AzureDiagnostics | -|ResourceProvider|Name of the resource provider. Always: MICROSOFT.SQL | -|Category|Name of the category. Always: Errors | -|OperationName|Name of the operation. Always: ErrorEvent | -|Resource|Name of the resource | -|ResourceType|Name of the resource type. Always: SERVERS/DATABASES | -|SubscriptionId|Subscription GUID for the database | -|ResourceGroup|Name of the resource group for the database | -|LogicalServerName_s|Name of the server for the database | -|ElasticPoolName_s|Name of the elastic pool for the database, if any | -|DatabaseName_s|Name of the database | -|ResourceId|Resource URI | -|Message|Error message in plain text | -|user_defined_b|Is the error user defined bit | -|error_number_d|Error code | -|Severity|Severity of the error | -|state_d|State of the error | -|query_hash_s|Query hash of the failed query, if available | -|query_plan_hash_s|Query plan hash of the failed query, if available | - -Learn more about [SQL error messages](/sql/relational-databases/errors-events/database-engine-events-and-errors). - -#### Database wait statistics dataset - -|Property|Description| -|---|---| -|TenantId|Your tenant ID | -|SourceSystem|Always: Azure | -|TimeGenerated [UTC]|Time stamp when the log was recorded | -|Type|Always: AzureDiagnostics | -|ResourceProvider|Name of the resource provider. Always: MICROSOFT.SQL | -|Category|Name of the category. Always: DatabaseWaitStatistics | -|OperationName|Name of the operation. Always: DatabaseWaitStatisticsEvent | -|Resource|Name of the resource | -|ResourceType|Name of the resource type. 
Always: SERVERS/DATABASES | -|SubscriptionId|Subscription GUID for the database | -|ResourceGroup|Name of the resource group for the database | -|LogicalServerName_s|Name of the server for the database | -|ElasticPoolName_s|Name of the elastic pool for the database, if any | -|DatabaseName_s|Name of the database | -|ResourceId|Resource URI | -|wait_type_s|Name of the wait type | -|start_utc_date_t [UTC]|Measured period start time | -|end_utc_date_t [UTC]|Measured period end time | -|delta_max_wait_time_ms_d|Max waited time per execution | -|delta_signal_wait_time_ms_d|Total signals wait time | -|delta_wait_time_ms_d|Total wait time in the period | -|delta_waiting_tasks_count_d|Number of waiting tasks | - -Learn more about [database wait statistics](/sql/relational-databases/system-dynamic-management-views/sys-dm-os-wait-stats-transact-sql). - -#### Time-outs dataset - -|Property|Description| -|---|---| -|TenantId|Your tenant ID | -|SourceSystem|Always: Azure | -|TimeGenerated [UTC]|Time stamp when the log was recorded | -|Type|Always: AzureDiagnostics | -|ResourceProvider|Name of the resource provider. Always: MICROSOFT.SQL | -|Category|Name of the category. Always: Timeouts | -|OperationName|Name of the operation. Always: TimeoutEvent | -|Resource|Name of the resource | -|ResourceType|Name of the resource type. 
Always: SERVERS/DATABASES | -|SubscriptionId|Subscription GUID for the database | -|ResourceGroup|Name of the resource group for the database | -|LogicalServerName_s|Name of the server for the database | -|ElasticPoolName_s|Name of the elastic pool for the database, if any | -|DatabaseName_s|Name of the database | -|ResourceId|Resource URI | -|error_state_d|A numeric state value associated with the query timeout (an [attention](/sql/relational-databases/errors-events/mssqlserver-3617-database-engine-error) event) | -|query_hash_s|Query hash, if available | -|query_plan_hash_s|Query plan hash, if available | - -#### Blockings dataset - -|Property|Description| -|---|---| -|TenantId|Your tenant ID | -|SourceSystem|Always: Azure | -|TimeGenerated [UTC]|Time stamp when the log was recorded | -|Type|Always: AzureDiagnostics | -|ResourceProvider|Name of the resource provider. Always: MICROSOFT.SQL | -|Category|Name of the category. Always: Blocks | -|OperationName|Name of the operation. Always: BlockEvent | -|Resource|Name of the resource | -|ResourceType|Name of the resource type. Always: SERVERS/DATABASES | -|SubscriptionId|Subscription GUID for the database | -|ResourceGroup|Name of the resource group for the database | -|LogicalServerName_s|Name of the server for the database | -|ElasticPoolName_s|Name of the elastic pool for the database, if any | -|DatabaseName_s|Name of the database | -|ResourceId|Resource URI | -|lock_mode_s|Lock mode used by the query | -|resource_owner_type_s|Owner of the lock | -|blocked_process_filtered_s|Blocked process report XML | -|duration_d|Duration of the lock in microseconds | - -#### Deadlocks dataset - -|Property|Description| -|---|---| -|TenantId|Your tenant ID | -|SourceSystem|Always: Azure | -|TimeGenerated [UTC] |Time stamp when the log was recorded | -|Type|Always: AzureDiagnostics | -|ResourceProvider|Name of the resource provider. Always: MICROSOFT.SQL | -|Category|Name of the category. 
Always: Deadlocks | -|OperationName|Name of the operation. Always: DeadlockEvent | -|Resource|Name of the resource | -|ResourceType|Name of the resource type. Always: SERVERS/DATABASES | -|SubscriptionId|Subscription GUID for the database | -|ResourceGroup|Name of the resource group for the database | -|LogicalServerName_s|Name of the server for the database | -|ElasticPoolName_s|Name of the elastic pool for the database, if any | -|DatabaseName_s|Name of the database | -|ResourceId|Resource URI | -|deadlock_xml_s|Deadlock report XML | - -#### Automatic tuning dataset - -|Property|Description| -|---|---| -|TenantId|Your tenant ID | -|SourceSystem|Always: Azure | -|TimeGenerated [UTC]|Time stamp when the log was recorded | -|Type|Always: AzureDiagnostics | -|ResourceProvider|Name of the resource provider. Always: MICROSOFT.SQL | -|Category|Name of the category. Always: AutomaticTuning | -|Resource|Name of the resource | -|ResourceType|Name of the resource type. Always: SERVERS/DATABASES | -|SubscriptionId|Subscription GUID for the database | -|ResourceGroup|Name of the resource group for the database | -|LogicalServerName_s|Name of the server for the database | -|LogicalDatabaseName_s|Name of the database | -|ElasticPoolName_s|Name of the elastic pool for the database, if any | -|DatabaseName_s|Name of the database | -|ResourceId|Resource URI | -|RecommendationHash_s|Unique hash of Automatic tuning recommendation | -|OptionName_s|Automatic tuning operation | -|Schema_s|Database schema | -|Table_s|Table affected | -|IndexName_s|Index name | -|IndexColumns_s|Column name | -|IncludedColumns_s|Columns included | -|EstimatedImpact_s|Estimated impact of Automatic tuning recommendation JSON | -|Event_s|Type of Automatic tuning event | -|Timestamp_t|Last updated timestamp | - -#### Intelligent Insights dataset - -Learn more about the [Intelligent Insights log format](intelligent-insights-use-diagnostics-log.md). 
- -## Next steps - -To learn how to enable logging and to understand the metrics and log categories supported by the various Azure services, see: - -- [Overview of metrics in Microsoft Azure](../../azure-monitor/data-platform.md) -- [Overview of Azure platform logs](../../azure-monitor/essentials/platform-logs-overview.md) - -To learn about Event Hubs, read: - -- [What is Azure Event Hubs?](../../event-hubs/event-hubs-about.md) -- [Get started with Event Hubs](../../event-hubs/event-hubs-dotnet-standard-getstarted-send.md) - -To learn how to set up alerts based on telemetry from log analytics see: - -- [Creating alerts for Azure SQL Database and Azure SQL Managed Instance](../../azure-monitor/insights/azure-sql.md#analyze-data-and-create-alerts) \ No newline at end of file diff --git a/articles/azure-sql/database/migrate-dtu-to-vcore.md b/articles/azure-sql/database/migrate-dtu-to-vcore.md deleted file mode 100644 index 0f8ed09670b1d..0000000000000 --- a/articles/azure-sql/database/migrate-dtu-to-vcore.md +++ /dev/null @@ -1,200 +0,0 @@ ---- -title: Migrate from DTU to vCore -description: Migrate a database in Azure SQL Database from the DTU model to the vCore model. Migrating to vCore is similar to upgrading or downgrading between the standard and premium tiers. -services: sql-database -ms.service: sql-database -ms.subservice: service-overview -ms.topic: conceptual -ms.custom: sqldbrb=1 -author: dimitri-furman -ms.author: dfurman -ms.reviewer: kendralittle, mathoma, moslake -ms.date: 04/06/2022 ---- -# Migrate Azure SQL Database from the DTU-based model to the vCore-based model -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article describes how to migrate your database in Azure SQL Database from the [DTU-based purchasing model](service-tiers-dtu.md) to the [vCore-based purchasing model](service-tiers-vcore.md). 
- -## Migrate a database - -Migrating a database from the DTU-based purchasing model to the vCore-based purchasing model is similar to scaling between service objectives in the Basic, Standard, and Premium service tiers, with similar [duration](single-database-scale.md#latency) and a [minimal downtime](scale-resources.md) at the end of the migration process. A database migrated to the vCore-based purchasing model can be migrated back to the DTU-based purchasing model at any time in the same fashion, with the exception of databases migrated to the [Hyperscale](service-tier-hyperscale.md) service tier. - -## Choose the vCore service tier and service objective - -For most DTU to vCore migration scenarios, databases and elastic pools in the Basic and Standard service tiers will map to the [General Purpose](service-tier-general-purpose.md) service tier. Databases and elastic pools in the Premium service tier will map to the [Business Critical](service-tier-business-critical.md) service tier. Depending on application scenario and requirements, the [Hyperscale](service-tier-hyperscale.md) service tier can often be used as the migration target for single databases in all DTU service tiers. - -To choose the service objective, or compute size, for the migrated database in the vCore model, you can use a simple but approximate rule of thumb: every 100 DTUs in the Basic or Standard tiers require *at least* 1 vCore, and every 125 DTUs in the Premium tier require *at least* 1 vCore. - -> [!TIP] -> This rule is approximate because it does not consider the specific type of hardware used for the DTU database or elastic pool. - -In the DTU model, the system may select any available [hardware configuration](service-tiers-dtu.md#hardware-configuration) for your database or elastic pool. Further, in the DTU model you have only indirect control over the number of vCores (logical CPUs) by choosing higher or lower DTU or eDTU values. 
- -In the vCore model, customers must make an explicit choice of both the hardware configuration and the number of vCores (logical CPUs). While the DTU model does not offer these choices, hardware type and the number of logical CPUs used for every database and elastic pool are exposed via dynamic management views. This makes it possible to determine the matching vCore service objective more precisely. - -The following approach uses this information to determine a vCore service objective with a similar allocation of resources, to obtain a similar level of performance after migration to the vCore model. - -### DTU to vCore mapping - -The T-SQL query below, when executed in the context of a DTU database to be migrated, returns a matching (possibly fractional) number of vCores in each hardware configuration in the vCore model. By rounding this number to the closest number of vCores available for [databases](resource-limits-vcore-single-databases.md) and [elastic pools](resource-limits-vcore-elastic-pools.md) in each hardware configuration in the vCore model, customers can choose the vCore service objective that is the closest match for their DTU database or elastic pool. - -Sample migration scenarios using this approach are described in the [Examples](#dtu-to-vcore-migration-examples) section. - -Execute this query in the context of the database to be migrated, rather than in the `master` database. When migrating an elastic pool, execute the query in the context of any database in the pool. - -```SQL -WITH dtu_vcore_map AS -( -SELECT rg.slo_name, - DATABASEPROPERTYEX(DB_NAME(), 'Edition') AS dtu_service_tier, - CASE WHEN rg.slo_name LIKE '%SQLG4%' THEN 'Gen4' - WHEN rg.slo_name LIKE '%SQLGZ%' THEN 'Gen4' - WHEN rg.slo_name LIKE '%SQLG5%' THEN 'Gen5' - WHEN rg.slo_name LIKE '%SQLG6%' THEN 'Gen5' - WHEN rg.slo_name LIKE '%SQLG7%' THEN 'Gen5' - END AS dtu_hardware_gen, - s.scheduler_count * CAST(rg.instance_cap_cpu/100. 
AS decimal(3,2)) AS dtu_logical_cpus, - CAST((jo.process_memory_limit_mb / s.scheduler_count) / 1024. AS decimal(4,2)) AS dtu_memory_per_core_gb -FROM sys.dm_user_db_resource_governance AS rg -CROSS JOIN (SELECT COUNT(1) AS scheduler_count FROM sys.dm_os_schedulers WHERE status = 'VISIBLE ONLINE') AS s -CROSS JOIN sys.dm_os_job_object AS jo -WHERE rg.dtu_limit > 0 - AND - DB_NAME() <> 'master' - AND - rg.database_id = DB_ID() -) -SELECT dtu_logical_cpus, - dtu_hardware_gen, - dtu_memory_per_core_gb, - dtu_service_tier, - CASE WHEN dtu_service_tier = 'Basic' THEN 'General Purpose' - WHEN dtu_service_tier = 'Standard' THEN 'General Purpose or Hyperscale' - WHEN dtu_service_tier = 'Premium' THEN 'Business Critical or Hyperscale' - END AS vcore_service_tier, - CASE WHEN dtu_hardware_gen = 'Gen4' THEN dtu_logical_cpus - WHEN dtu_hardware_gen = 'Gen5' THEN dtu_logical_cpus * 0.7 - END AS Gen4_vcores, - 7 AS Gen4_memory_per_core_gb, - CASE WHEN dtu_hardware_gen = 'Gen4' THEN dtu_logical_cpus * 1.7 - WHEN dtu_hardware_gen = 'Gen5' THEN dtu_logical_cpus - END AS Gen5_vcores, - 5.05 AS Gen5_memory_per_core_gb, - CASE WHEN dtu_hardware_gen = 'Gen4' THEN dtu_logical_cpus - WHEN dtu_hardware_gen = 'Gen5' THEN dtu_logical_cpus * 0.8 - END AS Fsv2_vcores, - 1.89 AS Fsv2_memory_per_core_gb, - CASE WHEN dtu_hardware_gen = 'Gen4' THEN dtu_logical_cpus * 1.4 - WHEN dtu_hardware_gen = 'Gen5' THEN dtu_logical_cpus * 0.9 - END AS M_vcores, - 29.4 AS M_memory_per_core_gb -FROM dtu_vcore_map; -``` - -### Additional factors - -Besides the number of vCores (logical CPUs) and the type of hardware, several other factors may influence the choice of vCore service objective: - -- The mapping Transact-SQL query matches DTU and vCore service objectives in terms of their CPU capacity, therefore the results will be more accurate for CPU-bound workloads. 
-- For the same hardware type and the same number of vCores, IOPS and transaction log throughput resource limits for vCore databases are often higher than for DTU databases. For IO-bound workloads, it may be possible to lower the number of vCores in the vCore model to achieve the same level of performance. Actual resource limits for DTU and vCore databases are exposed in the [sys.dm_user_db_resource_governance](/sql/relational-databases/system-dynamic-management-views/sys-dm-user-db-resource-governor-azure-sql-database) view. Comparing these values between the DTU database or pool to be migrated, and a vCore database or pool with an approximately matching service objective will help you select the vCore service objective more precisely. -- The mapping query also returns the amount of memory per core for the DTU database or elastic pool to be migrated, and for each hardware configuration in the vCore model. Ensuring similar or higher total memory after migration to vCore is important for workloads that require a large memory data cache to achieve sufficient performance, or workloads that require large memory grants for query processing. For such workloads, depending on actual performance, it may be necessary to increase the number of vCores to get sufficient total memory. -- The [historical resource utilization](/sql/relational-databases/system-catalog-views/sys-resource-stats-azure-sql-database) of the DTU database should be considered when choosing the vCore service objective. DTU databases with consistently under-utilized CPU resources may need fewer vCores than the number returned by the mapping query. Conversely, DTU databases where consistently high CPU utilization causes inadequate workload performance may require more vCores than returned by the query. -- If migrating databases with intermittent or unpredictable usage patterns, consider the use of [Serverless](serverless-tier-overview.md) compute tier. 
Note that the max number of concurrent [workers](resource-limits-logical-server.md#sessions-workers-and-requests) in serverless is 75% of the limit in provisioned compute for the same number of max vCores configured. Also, the max memory available in serverless is 3 GB times the maximum number of vCores configured, which is less than the per-core memory for provisioned compute. For example, on Gen5 max memory is 120 GB when 40 max vCores are configured in serverless, vs. 204 GB for a 40 vCore provisioned compute. -- In the vCore model, the supported maximum database size may differ depending on hardware. For large databases, check supported maximum sizes in the vCore model for [single databases](resource-limits-vcore-single-databases.md) and [elastic pools](resource-limits-vcore-elastic-pools.md). -- For elastic pools, the [DTU](resource-limits-dtu-elastic-pools.md) and [vCore](resource-limits-vcore-elastic-pools.md) models have differences in the maximum supported number of databases per pool. This should be considered when migrating elastic pools with many databases. -- Some hardware configurations may not be available in every region. Check availability under [Hardware configuration for SQL Database](./service-tiers-sql-database-vcore.md#hardware-configuration). - -> [!IMPORTANT] -> The DTU to vCore sizing guidelines above are provided to help in the initial estimation of the target database service objective. -> -> The optimal configuration of the target database is workload-dependent. Thus, to achieve the optimal price/performance ratio after migration, you may need to leverage the flexibility of the vCore model to adjust the number of vCores, hardware configuration, and service and compute tiers. 
You may also need to adjust database configuration parameters, such as [maximum degree of parallelism](configure-max-degree-of-parallelism.md), and/or change the database [compatibility level](/sql/t-sql/statements/alter-database-transact-sql-compatibility-level) to enable recent improvements in the database engine. -> - -### DTU to vCore migration examples - -> [!NOTE] -> The values in the examples below are for illustration purposes only. Actual values returned in described scenarios may be different. -> - -**Migrating a Standard S9 database** - -The mapping query returns the following result (some columns not shown for brevity): - -|dtu_logical_cpus|dtu_hardware_gen|dtu_memory_per_core_gb|Gen4_vcores|Gen4_memory_per_core_gb|Gen5_vcores|Gen5_memory_per_core_gb| -|----------------|----------------|----------------------|-----------|-----------------------|-----------|-----------------------| -|24.00|Gen5|5.40|16.800|7|24.000|5.05| - -We see that the DTU database has 24 logical CPUs (vCores), with 5.4 GB of memory per vCore, and is using Gen5 hardware. The direct match to that is a General Purpose 24 vCore database on Gen5 hardware, i.e. the **GP_Gen5_24** vCore service objective. - -**Migrating a Standard S0 database** - -The mapping query returns the following result (some columns not shown for brevity): - -|dtu_logical_cpus|dtu_hardware_gen|dtu_memory_per_core_gb|Gen4_vcores|Gen4_memory_per_core_gb|Gen5_vcores|Gen5_memory_per_core_gb| -|----------------|----------------|----------------------|-----------|-----------------------|-----------|-----------------------| -|0.25|Gen4|0.42|0.250|7|0.425|5.05| - -We see that the DTU database has the equivalent of 0.25 logical CPUs (vCores), with 0.42 GB of memory per vCore, and is using Gen4 hardware. The smallest vCore service objectives in the Gen4 and Gen5 hardware configurations, **GP_Gen4_1** and **GP_Gen5_2**, provide more compute resources than the Standard S0 database, so a direct match is not possible. 
Since Gen4 hardware is being [decommissioned](https://azure.microsoft.com/updates/gen-4-hardware-on-azure-sql-database-approaching-end-of-life-in-2020/), the **GP_Gen5_2** option is preferred. Additionally, if the workload is well-suited for the [Serverless](serverless-tier-overview.md) compute tier, then **GP_S_Gen5_1** would be a closer match. - -**Migrating a Premium P15 database** - -The mapping query returns the following result (some columns not shown for brevity): - -|dtu_logical_cpus|dtu_hardware_gen|dtu_memory_per_core_gb|Gen4_vcores|Gen4_memory_per_core_gb|Gen5_vcores|Gen5_memory_per_core_gb| -|----------------|----------------|----------------------|-----------|-----------------------|-----------|-----------------------| -|42.00|Gen5|4.86|29.400|7|42.000|5.05| - -We see that the DTU database has 42 logical CPUs (vCores), with 4.86 GB of memory per vCore, and is using Gen5 hardware. While there is not a vCore service objective with 42 cores, the **BC_Gen5_40** service objective is very close both in terms of CPU and memory capacity, and is a good match. - -**Migrating a Basic 200 eDTU elastic pool** - -The mapping query returns the following result (some columns not shown for brevity): - -|dtu_logical_cpus|dtu_hardware_gen|dtu_memory_per_core_gb|Gen4_vcores|Gen4_memory_per_core_gb|Gen5_vcores|Gen5_memory_per_core_gb| -|----------------|----------------|----------------------|-----------|-----------------------|-----------|-----------------------| -|4.00|Gen5|5.40|2.800|7|4.000|5.05| - -We see that the DTU elastic pool has 4 logical CPUs (vCores), with 5.4 GB of memory per vCore, and is using Gen5 hardware. The direct match in the vCore model is a **GP_Gen5_4** elastic pool. However, this service objective supports a maximum of 200 databases per pool, while the Basic 200 eDTU elastic pool supports up to 500 databases. 
If the elastic pool to be migrated has more than 200 databases, the matching vCore service objective would have to be **GP_Gen5_6**, which supports up to 500 databases. - -## Migrate geo-replicated databases - -Migrating from the DTU-based model to the vCore-based purchasing model is similar to upgrading or downgrading the geo-replication relationships between databases in the standard and premium service tiers. During migration, you don't have to stop geo-replication, but you must follow these sequencing rules: - -- When upgrading, you must upgrade the secondary database first, and then upgrade the primary. -- When downgrading, reverse the order: you must downgrade the primary database first, and then downgrade the secondary. - -When you're using geo-replication between two elastic pools, we recommend that you designate one pool as the primary and the other as the secondary. In that case, when you're migrating elastic pools you should use the same sequencing guidance. However, if you have elastic pools that contain both primary and secondary databases, treat the pool with the higher utilization as the primary and follow the sequencing rules accordingly. 
- -The following table provides guidance for specific migration scenarios: - -|Current service tier|Target service tier|Migration type|User actions| -|---|---|---|---| -|Standard|General purpose|Lateral|Can migrate in any order, but need to ensure appropriate vCore sizing as described above| -|Premium|Business critical|Lateral|Can migrate in any order, but need to ensure appropriate vCore sizing as described above| -|Standard|Business critical|Upgrade|Must migrate secondary first| -|Business critical|Standard|Downgrade|Must migrate primary first| -|Premium|General purpose|Downgrade|Must migrate primary first| -|General purpose|Premium|Upgrade|Must migrate secondary first| -|Business critical|General purpose|Downgrade|Must migrate primary first| -|General purpose|Business critical|Upgrade|Must migrate secondary first| - - -## Migrate failover groups - -Migration of failover groups with multiple databases requires individual migration of the primary and secondary databases. During that process, the same considerations and sequencing rules apply. After the databases are converted to the vCore-based purchasing model, the failover group will remain in effect with the same policy settings. - -### Create a geo-replication secondary database - -You can create a geo-replication secondary database (a geo-secondary) only by using the same service tier as you used for the primary database. For databases with a high log-generation rate, we recommend creating the geo-secondary with the same compute size as the primary. - -If you're creating a geo-secondary in the elastic pool for a single primary database, make sure the `maxVCore` setting for the pool matches the primary database's compute size. If you're creating a geo-secondary for a primary in another elastic pool, we recommend that the pools have the same `maxVCore` settings. 
- -## Use database copy to migrate from DTU to vCore - -You can copy any database with a DTU-based compute size to a database with a vCore-based compute size without restrictions or special sequencing as long as the target compute size supports the maximum database size of the source database. Database copy creates a transactionally consistent snapshot of the data as of a point in time after the copy operation starts. It doesn't synchronize data between the source and the target after that point in time. - -## Next steps - -- For the specific compute sizes and storage size choices available for single databases, see [SQL Database vCore-based resource limits for single databases](resource-limits-vcore-single-databases.md). -- For the specific compute sizes and storage size choices available for elastic pools, see [SQL Database vCore-based resource limits for elastic pools](resource-limits-vcore-elastic-pools.md). \ No newline at end of file diff --git a/articles/azure-sql/database/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial.md b/articles/azure-sql/database/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial.md deleted file mode 100644 index 4a83c2ce39de5..0000000000000 --- a/articles/azure-sql/database/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: "Tutorial: How to migrate your SQLite database to Azure SQL Database serverless" -description: Learn to perform an offline migration from SQLite to Azure SQL Database serverless by using Azure Data Factory. 
-services: sql-database -author: joplum -ms.author: joplum -ms.service: sql-database -ms.subservice: migration -ms.workload: data-services -ms.topic: tutorial -ms.date: 01/08/2020 -ms.custom: sqldbrb=1 -ms.reviewer: kendralittle, mathoma ---- - -# How to migrate your SQLite database to Azure SQL Database serverless -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -For many people, SQLite provides their first experience of databases and SQL programming. Its inclusion in many operating systems and popular applications makes SQLite one the most widely deployed and used database engines in the world. And because it is likely the first database engine many people use, it can often end up as a central part of projects or applications. In such cases where the project or application outgrows the initial SQLite implementation, developers may need to migrate their data to a reliable, centralized data store. - -Azure SQL Database Serverless is a compute tier for single databases that automatically scales compute based on workload demand, and bills for the amount of compute used per second. The serverless compute tier also automatically pauses databases during inactive periods when only storage is billed and automatically resumes databases when activity returns. - -Once you have followed the below steps, your database will be migrated into Azure SQL Database Serverless, enabling you to make your database available to other users or applications in the cloud and only pay for what you use, with minimal application code changes. - -## Prerequisites - -- An Azure Subscription -- SQLite2 or SQLite3 database that you wish to migrate -- A Windows environment - - If you do not have a local Windows environment, you can use a Windows VM in Azure for the migration. Move and make your SQLite database file available on the VM using Azure Files and Storage Explorer. - -## Steps - -1. Provision a new Azure SQL Database in the Serverless compute tier. 
- - ![Screenshot of the Azure portal showing provisioning example for Azure SQL Database Serverless](./media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/provision-serverless.png) - -2. Ensure you have your SQLite database file available in your Windows environment. Install a SQLite ODBC Driver if you do not already have one (there are many available in Open Source, for example, http://www.ch-werner.de/sqliteodbc/). - -3. Create a System DSN for the database. Ensure you use the Data Source Administrator application that matches your system architecture (32-bit vs 64-bit). You can find which version you are running in your system settings. - - - Open ODBC Data Source Administrator in your environment. - - Click the system DSN tab and click "Add" - - Select the SQLite ODBC connector you installed and give the connection a meaningful name, for example, sqlitemigrationsource - - Set the database name to the .db file - - Save and exit - -4. Download and install the self-hosted integration runtime. The easiest way to do this is the Express install option, as detailed in the documentation. If you opt for a manual install, you will need to provide the application with an authentication key, which can be located in your Data Factory instance by: - - - Starting up ADF (Author and Monitor from the service in the Azure portal) - - Click the "Author" tab (Blue pencil) on the left - - Click Connections (bottom left), then Integration runtimes - - Add new Self-Hosted Integration Runtime, give it a name, select *Option 2*. - -5. Create a new linked service for the source SQLite database in your Data Factory. - - ![Screenshot showing empty linked services blade in Azure Data Factory](./media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/linked-services-create.png) - -6. In **Connections**, under **Linked Service**, click **New**. - -7. 
Search for and select the "ODBC" connector - - ![Screenshot showing ODBC connector logo in the linked services blade in Azure Data Factory](./media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/linked-services-odbc.png) - -8. Give the linked service a meaningful name, for example, "sqlite_odbc". Select your integration runtime from the "Connect via integration runtime" dropdown. Enter the below into the connection string, replacing the Initial Catalog variable with the filepath for the .db file, and the DSN with the name of the system DSN connection: - - ``` - Connection string: Provider=MSDASQL.1;Persist Security Info=False;Mode=ReadWrite;Initial Catalog=C:\sqlitemigrationsource.db;DSN=sqlitemigrationsource - ``` - -9. Set the authentication type to Anonymous - -10. Test the connection - - ![Screenshot showing successful connection in Azure Data Factory](./media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/linked-services-test-successful.png) - -11. Create another linked service for your Serverless SQL target. Select the database using the linked service wizard, and provide the SQL authentication credentials. - - ![Screenshot showing Azure SQL Database selected in Azure Data Factory](./media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/linked-services-create-target.png) - -12. Extract the CREATE TABLE statements from your SQLite database. You can do this by executing the below Python script on your database file. - - ``` - #!/usr/bin/python - import sqlite3 - conn = sqlite3.connect("sqlitemigrationsource.db") - c = conn.cursor() - - print("Starting extract job..") - with open('CreateTables.sql', 'w') as f: - for tabledetails in c.execute("SELECT * FROM sqlite_master WHERE type='table'"): - print("Extracting CREATE statement for " + (str(tabledetails[1]))) - print(tabledetails) - f.write(str(tabledetails[4].replace('\n','') + ';\n')) - c.close() - ``` - -13. 
Create the landing tables in your Serverless SQL target environment by copying the CREATE table statements from the CreateTables.sql file and running the SQL statements in the Query Editor in the Azure portal. - -14. Return to the home screen of your Data Factory and click "Copy Data" to run through the job creation wizard. - - ![Screenshot showing the Copy Data wizard logo in Azure Data Factory](./media/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial/copy-data.png) - -15. Select all tables from the source SQLite database using the check boxes, and map them to the target tables in Azure SQL. Once the job has run, you have successfully migrated your data from SQLite to Azure SQL! - -## Next steps - -- To get started, see [Quickstart: Create a single database in Azure SQL Database using the Azure portal](single-database-create-quickstart.md). -- For resource limits, see [Serverless compute tier resource limits](./resource-limits-vcore-single-databases.md#general-purpose---serverless-compute---gen5). diff --git a/articles/azure-sql/database/migrate-to-database-from-sql-server.md b/articles/azure-sql/database/migrate-to-database-from-sql-server.md deleted file mode 100644 index 20210cdfd04f8..0000000000000 --- a/articles/azure-sql/database/migrate-to-database-from-sql-server.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: SQL Server database migration to Azure SQL Database -description: Learn about SQL Server database migration to Azure SQL Database. 
-keywords: database migration,sql server database migration,database migration tools,migrate database,migrate sql database -services: sql-database -ms.service: sql-database -ms.subservice: migration -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: mokabiru -ms.author: mokabiru -ms.reviewer: kendralittle, mathoma -ms.date: 02/11/2019 ---- -# SQL Server database migration to Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this article, you learn about the primary methods for migrating a SQL Server 2005 or later database to Azure SQL Database. For information on migrating to Azure SQL Managed Instance, see [Migrate a SQL Server instance to Azure SQL Managed Instance](../migration-guides/managed-instance/sql-server-to-managed-instance-overview.md). -For guidance on choosing migration options and tools to migrate to Azure SQL, see [Migrate to Azure SQL](../migration-guides/index.yml) - - -## Migrate to a single database or a pooled database - -There are two primary methods for migrating a SQL Server 2005 or later database to Azure SQL Database. The first method is simpler but requires some, possibly substantial, downtime during the migration. The second method is more complex, but substantially eliminates downtime during the migration. - -In both cases, you need to ensure that the source database is compatible with Azure SQL Database using the [Data Migration Assistant (DMA)](https://www.microsoft.com/download/details.aspx?id=53595). SQL Database is approaching [feature parity](features-comparison.md) with SQL Server, other than issues related to server-level and cross-database operations. Databases and applications that rely on [partially supported or unsupported functions](transact-sql-tsql-differences-sql-server.md) need some [re-engineering to fix these incompatibilities](migrate-to-database-from-sql-server.md#resolving-database-migration-compatibility-issues) before the SQL Server database can be migrated. 
- -> [!NOTE] -> To migrate a non-SQL Server database, including Microsoft Access, Sybase, MySQL Oracle, and DB2 to Azure SQL Database, see [SQL Server Migration Assistant](https://blogs.msdn.microsoft.com/datamigration/2017/09/29/release-sql-server-migration-assistant-ssma-v7-6/). - -## Method 1: Migration with downtime during the migration - - Use this method to migrate to a single or a pooled database if you can afford some downtime or you're performing a test migration of a production database for later migration. For a tutorial, see [Migrate a SQL Server database](../../dms/tutorial-sql-server-to-azure-sql.md). - -The following list contains the general workflow for a SQL Server database migration of a single or a pooled database using this method. For migration to SQL Managed Instance, see [SQL Server to Azure SQL Managed Instance Guide](../migration-guides/managed-instance/sql-server-to-managed-instance-guide.md). - - ![VSSSDT migration diagram](./media/migrate-to-database-from-sql-server/azure-sql-migration-sql-db.png) - -1. [Assess](/sql/dma/dma-assesssqlonprem) the database for compatibility by using the latest version of the [Data Migration Assistant (DMA)](https://www.microsoft.com/download/details.aspx?id=53595). -2. Prepare any necessary fixes as Transact-SQL scripts. -3. Make a transactionally consistent copy of the source database being migrated or halt new transactions from occurring in the source database while migration is occurring. Methods to accomplish this latter option include disabling client connectivity or creating a [database snapshot](/sql/relational-databases/databases/create-a-database-snapshot-transact-sql). After migration, you may be able to use transactional replication to update the migrated databases with changes that occur after the cutoff point for the migration. See [Migrate using Transactional Migration](migrate-to-database-from-sql-server.md#method-2-use-transactional-replication). -4. 
Deploy the Transact-SQL scripts to apply the fixes to the database copy. -5. [Migrate](/sql/dma/dma-migrateonpremsql) the database copy to a new database in Azure SQL Database by using the Data Migration Assistant. - -> [!NOTE] -> Rather than using DMA, you can also use a BACPAC file. See [Import a BACPAC file to a new database in Azure SQL Database](database-import.md). - -### Optimizing data transfer performance during migration - -The following list contains recommendations for best performance during the import process. - -- Choose the highest service tier and compute size that your budget allows to maximize the transfer performance. You can scale down after the migration completes to save money. -- Minimize the distance between your BACPAC file and the destination data center. -- Disable autostatistics during migration -- Partition tables and indexes -- Drop indexed views, and recreate them once finished -- Remove rarely queried historical data to another database and migrate this historical data to a separate database in Azure SQL Database. You can then query this historical data using [elastic queries](elastic-query-overview.md). - -### Optimize performance after the migration completes - -[Update statistics](/sql/t-sql/statements/update-statistics-transact-sql) with full scan after the migration is completed. - -## Method 2: Use Transactional Replication - -When you can't afford to remove your SQL Server database from production while the migration is occurring, you can use SQL Server transactional replication as your migration solution. To use this method, the source database must meet the [requirements for transactional replication](./replication-to-sql-database.md) and be compatible for Azure SQL Database. For information about SQL replication with Always On, see [Configure Replication for Always On Availability Groups (SQL Server)](/sql/database-engine/availability-groups/windows/configure-replication-for-always-on-availability-groups-sql-server). 
- -To use this solution, you configure your database in Azure SQL Database as a subscriber to the SQL Server instance that you wish to migrate. The transactional replication distributor synchronizes data from the database to be synchronized (the publisher) while new transactions continue occur. - -With transactional replication, all changes to your data or schema show up in your database in Azure SQL Database. Once the synchronization is complete and you're ready to migrate, change the connection string of your applications to point them to your database. Once transactional replication drains any changes left on your source database and all your applications point to Azure DB, you can uninstall transactional replication. Your database in Azure SQL Database is now your production system. - - ![SeedCloudTR diagram](./media/migrate-to-database-from-sql-server/SeedCloudTR.png) - -> [!TIP] -> You can also use transactional replication to migrate a subset of your source database. The publication that you replicate to Azure SQL Database can be limited to a subset of the tables in the database being replicated. For each table being replicated, you can limit the data to a subset of the rows and/or a subset of the columns. - -## Migration to SQL Database using Transaction Replication workflow - -> [!IMPORTANT] -> Use the latest version of SQL Server Management Studio to remain synchronized with updates to Azure and SQL Database. Older versions of SQL Server Management Studio cannot set up SQL Database as a subscriber. [Update SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms). - -1. Set up Distribution - - [Using SQL Server Management Studio (SSMS)](/sql/relational-databases/replication/configure-publishing-and-distribution/) - - [Using Transact-SQL](/sql/relational-databases/replication/configure-publishing-and-distribution/) - -2. 
Create Publication - - [Using SQL Server Management Studio (SSMS)](/sql/relational-databases/replication/configure-publishing-and-distribution/) - - [Using Transact-SQL](/sql/relational-databases/replication/configure-publishing-and-distribution/) -3. Create Subscription - - [Using SQL Server Management Studio (SSMS)](/sql/relational-databases/replication/create-a-push-subscription/) - - [Using Transact-SQL](/sql/relational-databases/replication/create-a-push-subscription/) - -Some tips and differences for migrating to SQL Database - -- Use a local distributor - - Doing so causes a performance impact on the server. - - If the performance impact is unacceptable, you can use another server but it adds complexity in management and administration. -- When selecting a snapshot folder, make sure the folder you select is large enough to hold a BCP of every table you want to replicate. -- Snapshot creation locks the associated tables until it's complete, so schedule your snapshot appropriately. -- Only push subscriptions are supported in Azure SQL Database. You can only add subscribers from the source database. - -## Resolving database migration compatibility issues - -There are a wide variety of compatibility issues that you might encounter, depending both on the version of SQL Server in the source database and the complexity of the database you're migrating. Older versions of SQL Server have more compatibility issues. 
Use the following resources, in addition to a targeted Internet search using your search engine of choices: - -- [SQL Server database features not supported in Azure SQL Database](transact-sql-tsql-differences-sql-server.md) -- [Discontinued Database Engine Functionality in SQL Server 2016](/sql/database-engine/discontinued-database-engine-functionality-in-sql-server) -- [Discontinued Database Engine Functionality in SQL Server 2014](/sql/database-engine/discontinued-database-engine-functionality-in-sql-server?viewFallbackFrom=sql-server-2014) -- [Discontinued Database Engine Functionality in SQL Server 2012](/previous-versions/sql/sql-server-2012/ms144262(v=sql.110)) -- [Discontinued Database Engine Functionality in SQL Server 2008 R2](/previous-versions/sql/sql-server-2008-r2/ms144262(v=sql.105)) -- [Discontinued Database Engine Functionality in SQL Server 2005](/previous-versions/sql/sql-server-2005/ms144262(v=sql.90)) - -In addition to searching the Internet and using these resources, use the [Microsoft Q&A question page for Azure SQL Database](/answers/topics/azure-sql-database.html) or [StackOverflow](https://stackoverflow.com/). - -> [!IMPORTANT] -> Azure SQL Managed Instance enables you to migrate an existing SQL Server instance and its databases with minimal to no compatibility issues. See [What is a managed instance](../managed-instance/sql-managed-instance-paas-overview.md). - -## Next steps - -- Use the script on the Azure SQL EMEA Engineers blog to [Monitor tempdb usage during migration](/archive/blogs/azuresqlemea/lesson-learned-10-monitoring-tempdb-usage). -- Use the script on the Azure SQL EMEA Engineers blog to [Monitor the transaction log space of your database while migration is occurring](/archive/blogs/azuresqlemea/lesson-learned-7-monitoring-the-transaction-log-space-of-my-database). 
-- For a SQL Server Customer Advisory Team blog about migrating using BACPAC files, see [Migrating from SQL Server to Azure SQL Database using BACPAC Files](/archive/blogs/sqlcat/migrating-from-sql-server-to-azure-sql-database-using-bacpac-files). -- For information about working with UTC time after migration, see [Modifying the default time zone for your local time zone](/archive/blogs/azuresqlemea/lesson-learned-4-modifying-the-default-time-zone-for-your-local-time-zone). -- For information about changing the default language of a database after migration, see [How to change the default language of Azure SQL Database](/archive/blogs/azuresqlemea/lesson-learned-16-how-to-change-the-default-language-of-azure-sql-database). diff --git a/articles/azure-sql/database/monitor-tune-overview.md b/articles/azure-sql/database/monitor-tune-overview.md deleted file mode 100644 index a6081916be173..0000000000000 --- a/articles/azure-sql/database/monitor-tune-overview.md +++ /dev/null @@ -1,163 +0,0 @@ ---- -title: Monitoring and performance tuning -description: An overview of monitoring and performance tuning capabilities and methodology in Azure SQL Database and Azure SQL Managed Instance. -services: sql-database -ms.service: sql-db-mi -ms.subservice: performance -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: conceptual -author: dimitri-furman -ms.author: dfurman -ms.reviewer: kendralittle, mathoma, urmilano, wiassaf -ms.date: 04/14/2022 ---- -# Monitoring and performance tuning in Azure SQL Database and Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -To monitor the performance of a database in Azure SQL Database and Azure SQL Managed Instance, start by monitoring the CPU and IO resources used by your workload relative to the level of database performance you chose in selecting a particular service tier and performance level. 
To accomplish this, Azure SQL Database and Azure SQL Managed Instance emit resource metrics that can be viewed in the Azure portal or by using one of these SQL Server management tools: - - [Azure Data Studio](/sql/azure-data-studio/what-is), based on [Visual Studio Code](https://code.visualstudio.com/). - - [SQL Server Management Studio](/sql/ssms/sql-server-management-studio-ssms) (SSMS), based on [Microsoft Visual Studio](https://visualstudio.microsoft.com/downloads/). - -| Monitoring solution | SQL Database | SQL Managed Instance | Requires agent on a customer-owned VM | -|:--|:--|:--| -| [Query Performance Insight](#generate-intelligent-assessments-of-performance-issues) | **Yes** | No | No | -| [Monitor using DMVs](monitoring-with-dmvs.md) | **Yes** | **Yes** | No | -| [Monitor using query store](/sql/relational-databases/performance/monitoring-performance-by-using-the-query-store) | **Yes** | **Yes** | No | -| [SQL Insights (preview)](../../azure-monitor/insights/sql-insights-overview.md) in [Azure Monitor](../../azure-monitor/essentials/monitor-azure-resource.md) | **Yes** | **Yes** | **Yes** | -| [Azure SQL Analytics (preview)](../../azure-monitor/insights/azure-sql.md) using [Azure Monitor Logs](../../azure-monitor/logs/data-platform-logs.md) \* | **Yes** | **Yes** | No | - -\* For solutions requiring low latency monitoring, Azure SQL Analytics (preview) is not recommended. - -## Database advisors in the Azure portal - -Azure SQL Database provides a number of Database Advisors to provide intelligent performance tuning recommendations and automatic tuning options to improve performance. - -Additionally, the [Query Performance Insight](query-performance-insight-use.md) page shows you details about the queries responsible for the most CPU and IO usage for single and pooled databases. - - - Query Performance Insight is available in the Azure portal in the Overview pane of your Azure SQL Database under "Intelligent Performance". 
Use the automatically collected information to identify queries and begin optimizing your workload performance. - - You can also configure [automatic tuning](automatic-tuning-overview.md) to implement these recommendations automatically, such as forcing a query execution plan to prevent regression, or creating and dropping nonclustered indexes based on workload patterns. Automatic tuning also is available in the Azure portal in the Overview pane of your Azure SQL Database under "Intelligent Performance". - -Azure SQL Database and Azure SQL Managed Instance provide advanced monitoring and tuning capabilities backed by artificial intelligence to assist you in troubleshooting and maximizing the performance of your databases and solutions. You can choose to configure the [streaming export](metrics-diagnostic-telemetry-logging-streaming-export-configure.md#diagnostic-telemetry-for-export) of these [Intelligent Insights](intelligent-insights-overview.md) and other database resource logs and metrics to one of several destinations for consumption and analysis. - -Outside of the Azure portal, the database engine has its own monitoring and diagnostic capabilities that Azure SQL Database and SQL Managed Instance leverage, such as [query store](/sql/relational-databases/performance/monitoring-performance-by-using-the-query-store) and [dynamic management views (DMVs)](/sql/relational-databases/system-dynamic-management-views/system-dynamic-management-views). See [Monitoring using DMVs](monitoring-with-dmvs.md) for scripts to monitor for a variety of performance issues in Azure SQL Database and Azure SQL Managed Instance. - -### Azure SQL Insights (preview) and Azure SQL Analytics (preview) - -Both offerings use different pipelines to present data to a variety of endpoints for coming Azure SQL Database metrics. 
- -- [Azure SQL Insights (preview)](../../azure-monitor/insights/sql-insights-overview.md) is project inside Azure Monitor that can provide advanced insights into Azure SQL database activity. It is deployed via a customer-managed VM using Telegraf as a collection agent that connects to SQL sources, collects data, and moves data into Log Analytics. - -- [Azure SQL Analytics (preview)](../../azure-monitor/insights/azure-sql.md) also requires Log Analytics to provide advanced insights into Azure SQL database activity. - -- Azure diagnostic telemetry is a separate, streaming source of data for Azure SQL Database and Azure SQL Managed Instance. Not to be confused with the Azure SQL Insights (preview) product, SQLInsights is a log inside Intelligent Insights, and is one of several packages of telemetry emitted by Azure diagnostic settings. Diagnostic settings are a feature that contains Resource Log categories (formerly known as Diagnostic Logs). For more information, see [Diagnostic telemetry for export](metrics-diagnostic-telemetry-logging-streaming-export-configure.md?tabs=azure-portal#diagnostic-telemetry-for-export). - - Azure SQL Analytics (preview) consumes the resource logs coming from the diagnostic telemetry (configurable under **Diagnostic Settings** in the Azure portal), while Azure SQL Insights (preview) uses a different pipeline to collect Azure SQL telemetry. - -### Monitoring and diagnostic telemetry - -The following diagram details all the database engine, platform metrics, resource logs, and Azure activity logs generated by Azure SQL products, how they are processed, and how they can be surfaced for analysis. 
- -:::image type="content" source="media/monitor-tune-overview/azure-sql-insights-horizontal-analytics-full-diagram.svg" alt-text="Diagram showing complete logging and diagnostic information paths for Azure SQL products."::: - -## Monitor and tune Azure SQL in the Azure portal - -In the Azure portal, Azure SQL Database and Azure SQL Managed Instance provide monitoring of resource metrics. Azure SQL Database provides database advisors, and Query Performance Insight provides query tuning recommendations and query performance analysis. In the Azure portal, you can enable automatic tuning for [logical SQL servers](logical-servers.md) and their single and pooled databases. - -> [!NOTE] -> Databases with extremely low usage may show in the portal with less than actual usage. Due to the way telemetry is emitted when converting a double value to the nearest integer certain usage amounts less than 0.5 will be rounded to 0 which causes a loss in granularity of the emitted telemetry. For details, see [Low database and elastic pool metrics rounding to zero](#low-database-and-elastic-pool-metrics-rounding-to-zero). - -### Azure SQL Database and Azure SQL Managed Instance resource monitoring - -You can quickly monitor a variety of resource metrics in the Azure portal in the **Metrics** view. These metrics enable you to see if a database is reaching 100% of processor, memory, or IO resources. High DTU or processor percentage, as well as high IO percentage, indicates that your workload might need more CPU or IO resources. It might also indicate queries that need to be optimized. - - ![Resource metrics](./media/monitor-tune-overview/resource-metrics.png) - -### Database advisors in Azure SQL Database - -Azure SQL Database includes [database advisors](database-advisor-implement-performance-recommendations.md) that provide performance tuning recommendations for single and pooled databases. 
These recommendations are available in the Azure portal as well as by using [PowerShell](/powershell/module/az.sql/get-azsqldatabaseadvisor). You can also enable [automatic tuning](automatic-tuning-overview.md) so that Azure SQL Database can automatically implement these tuning recommendations. - -### Query Performance Insight in Azure SQL Database - -[Query Performance Insight](query-performance-insight-use.md) shows the performance in the Azure portal of top consuming and longest running queries for single and pooled databases. - -### Low database and elastic pool metrics rounding to zero - -Starting in September 2020, databases with extremely low usage may show in the portal with less than actual usage. Due to the way telemetry is emitted when converting a double value to the nearest integer certain usage amounts less than 0.5 will be rounded to 0, which causes a loss in granularity of the emitted telemetry. - -For example: Consider a 1-minute window with the following four data points: 0.1, 0.1, 0.1, 0.1, these low values are rounded down to 0, 0, 0, 0 and present an average of 0. If any of the data points are greater than 0.5, for example: 0.1, 0.1, 0.9, 0.1, they are rounded to 0, 0, 1, 0 and show an avg of 0.25. 
- -Affected database metrics: -- cpu_percent -- log_write_percent -- workers_percent -- sessions_percent -- physical_data_read_percent -- dtu_consumption_percent2 -- xtp_storage_percent - -Affected elastic pool metrics: -- cpu_percent -- physical_data_read_percent -- log_write_percent -- memory_usage_percent -- data_storage_percent -- peak_worker_percent -- peak_session_percent -- xtp_storage_percent -- allocated_data_storage_percent - - -## Generate intelligent assessments of performance issues - -[Intelligent Insights](intelligent-insights-overview.md) for Azure SQL Database and Azure SQL Managed Instance uses built-in intelligence to continuously monitor database usage through artificial intelligence and detect disruptive events that cause poor performance. Intelligent Insights automatically detects performance issues with databases based on query execution wait times, errors, or time-outs. Once detected, a detailed analysis is performed by Intelligent Insights that generates a resource log called SQLInsights (unrelated to the [Azure Monitor SQL Insights (preview)](../../azure-sql/database/monitoring-sql-database-azure-monitor.md)). SQLInsights is an [intelligent assessment of the issues](intelligent-insights-troubleshoot-performance.md). This assessment consists of a root cause analysis of the database performance issue and, where possible, recommendations for performance improvements. 
- -Intelligent Insights is a unique capability of Azure built-in intelligence that provides the following value: - -- Proactive monitoring -- Tailored performance insights -- Early detection of database performance degradation -- Root cause analysis of issues detected -- Performance improvement recommendations -- Scale out capability on hundreds of thousands of databases -- Positive impact to DevOps resources and the total cost of ownership - -## Enable the streaming export of metrics and resource logs - -You can enable and configure the [streaming export of diagnostic telemetry](metrics-diagnostic-telemetry-logging-streaming-export-configure.md#diagnostic-telemetry-for-export) to one of several destinations, including the Intelligent Insights resource log. - -You configure diagnostic settings to stream categories of metrics and resource logs for single databases, pooled databases, elastic pools, managed instances, and instance databases to one of the following Azure resources. - -### Log Analytics workspace in Azure Monitor - -You can stream metrics and resource logs to a [Log Analytics workspace in Azure Monitor](../../azure-monitor/essentials/resource-logs.md#send-to-log-analytics-workspace). Data streamed here can be consumed by [SQL Analytics (preview)](../../azure-monitor/insights/azure-sql.md), which is a cloud only monitoring solution that provides intelligent monitoring of your databases that includes performance reports, alerts, and mitigation recommendations. Data streamed to a Log Analytics workspace can be analyzed with other monitoring data collected and also enables you to leverage other Azure Monitor features such as alerts and visualizations. - -> [!NOTE] -> Azure SQL Analytics (preview) is an integration with Azure Monitor, where many monitoring solutions are no longer in active development. [Monitor your SQL deployments with SQL Insights (preview)](../../azure-monitor/insights/sql-insights-overview.md). 
- -### Azure Event Hubs - -You can stream metrics and resource logs to [Azure Event Hubs](../../azure-monitor/essentials/resource-logs.md#send-to-azure-event-hubs). Streaming diagnostic telemetry to event hubs to provide the following functionality: - -- **Stream logs to third-party logging and telemetry systems** - - Stream all of your metrics and resource logs to a single event hub to pipe log data to a third-party SIEM or log analytics tool. -- **Build a custom telemetry and logging platform** - - The highly scalable publish-subscribe nature of event hubs allows you to flexibly ingest metrics and resource logs into a custom telemetry platform. See [Designing and Sizing a Global Scale Telemetry Platform on Azure Event Hubs](https://azure.microsoft.com/documentation/videos/build-2015-designing-and-sizing-a-global-scale-telemetry-platform-on-azure-event-Hubs/) for details. -- **View service health by streaming data to Power BI** - - Use Event Hubs, Stream Analytics, and Power BI to transform your diagnostics data into near real-time insights on your Azure services. See [Stream Analytics and Power BI: A real-time analytics dashboard for streaming data](../../stream-analytics/stream-analytics-power-bi-dashboard.md) for details on this solution. - -### Azure Storage - -Stream metrics and resource logs to [Azure Storage](../../azure-monitor/essentials/resource-logs.md#send-to-azure-storage). Use Azure storage to archive vast amounts of diagnostic telemetry for a fraction of the cost of the previous two streaming options. - -## Use Extended Events - -Additionally, you can use [Extended Events](/sql/relational-databases/extended-events/extended-events) for advanced monitoring and troubleshooting in SQL Server, Azure SQL Database, and Azure SQL Managed Instance. 
Extended Events is a "tracing" tool and event architecture, superior to SQL Trace, that enables users to collect as much or as little data as is necessary to troubleshoot or identify a performance problem, while mitigating impact to ongoing application performance. Extended Events replace deprecated SQL Trace and SQL Server Profiler features. For information about using extended events in Azure SQL Database, see [Extended events in Azure SQL Database](xevent-db-diff-from-svr.md). In Azure SQL Database and SQL Managed Instance, use an [Event File target hosted in Azure Blob Storage](xevent-code-event-file.md). - -## Next steps - -- For more information about intelligent performance recommendations for single and pooled databases, see [Database advisor performance recommendations](database-advisor-implement-performance-recommendations.md). -- For more information about automatically monitoring database performance with automated diagnostics and root cause analysis of performance issues, see [Azure SQL Intelligent Insights](intelligent-insights-overview.md). 
-- [Monitor your SQL deployments with SQL Insights (preview)](../../azure-monitor/insights/sql-insights-overview.md) \ No newline at end of file diff --git a/articles/azure-sql/database/monitoring-sql-database-azure-monitor-reference.md b/articles/azure-sql/database/monitoring-sql-database-azure-monitor-reference.md deleted file mode 100644 index fa1385d16ad94..0000000000000 --- a/articles/azure-sql/database/monitoring-sql-database-azure-monitor-reference.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: Monitoring Azure SQL Database with Azure Monitor reference -description: Important reference material needed when you monitor Azure SQL Database with Azure Monitor -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.topic: conceptual -ms.reviewer: mathoma, dfurman -ms.service: sql-db-mi -ms.subservice: performance -ms.custom: subject-monitoring -ms.date: 03/14/2022 ---- - -# Monitoring Azure SQL Database data reference - -This article contains reference for monitoring Azure SQL Database with Azure Monitor. See [Monitoring Azure SQL Database](monitoring-sql-database-azure-monitor.md) for details on collecting and analyzing monitoring data for Azure SQL Database with Azure Monitor SQL Insights (preview). - -## Metrics - -For more on using Azure Monitor SQL Insights (preview) for all products in the [Azure SQL family](../../azure-sql/index.yml), see [Monitor your SQL deployments with SQL Insights (preview)](../../azure-monitor/insights/sql-insights-overview.md). - -For data specific to Azure SQL Database, see [Data for Azure SQL Database](../../azure-monitor/insights/sql-insights-overview.md#data-for-azure-sql-database). 
- -For a complete list of metrics, see: -- [Microsoft.Sql/servers/databases](../../azure-monitor/essentials/metrics-supported.md#microsoftsqlserversdatabases) -- [Microsoft.Sql/managedInstances](../../azure-monitor/essentials/metrics-supported.md#microsoftsqlmanagedinstances) -- [Microsoft.Sql/servers/elasticPools](../../azure-monitor/essentials/metrics-supported.md#microsoftsqlserverselasticpools) - -## Resource logs - -This section lists the types of resource logs you can collect for Azure SQL Database. - -For reference, see a list of [all resource logs category types supported in Azure Monitor](../../azure-monitor/essentials/resource-logs-schema.md). - -For a reference of resource log types collected for Azure SQL Database, see [Streaming export of Azure SQL Database Diagnostic telemetry for export](metrics-diagnostic-telemetry-logging-streaming-export-configure.md#diagnostic-telemetry-for-export) - -## Azure Monitor Logs tables - -This section refers to all of the Azure Monitor Logs tables relevant to Azure SQL Database and available for query by Log Analytics, which can be queried with KQL. - -Tables for all resources types are referenced here, for example, [Azure Monitor tables for SQL Databases](/azure/azure-monitor/reference/tables/tables-resourcetype.md#sql-databases). - -|Resource Type | Notes | -|-------|-----| -| [AzureActivity](/azure/azure-monitor/reference/tables/azureactivity.md) | Entries from the Azure Activity log that provides insight into any subscription-level or management group level events that have occurred in Azure. | -| [AzureDiagnostics](/azure/azure-monitor/reference/tables/azurediagnostics.md) | Azure Diagnostics reveals diagnostic data of specific resources and features for numerous Azure products including SQL databases, SQL elastic pools, and SQL managed instances. 
For more information, see [Diagnostics metrics]( metrics-diagnostic-telemetry-logging-streaming-export-configure.md?tabs=azure-portal#basic-metrics).| -| [AzureMetrics](/azure/azure-monitor/reference/tables/azuremetrics.md) | Metric data emitted by Azure services that measure their health and performance. Activity from Azure products including SQL databases, SQL elastic pools, and SQL managed instances.| - -## Activity log - -The Activity log contains records of management operations performed on your Azure SQL Database resources. All maintenance operations related to Azure SQL Database that have been implemented here may appear in the Activity log. - -For more information on the schema of Activity Log entries, see [Activity Log schema](/azure/azure-monitor/essentials/activity-log-schema). - -## Next steps - -- See [Monitoring Azure SQL Database with Azure Monitor](monitoring-sql-database-azure-monitor.md) for a description of monitoring Azure SQL Database. -- See [Monitoring Azure resources with Azure Monitor](/azure/azure-monitor/insights/monitor-azure-resources) for details on monitoring Azure resources. 
\ No newline at end of file diff --git a/articles/azure-sql/database/monitoring-sql-database-azure-monitor.md b/articles/azure-sql/database/monitoring-sql-database-azure-monitor.md deleted file mode 100644 index cb40c5f4ad674..0000000000000 --- a/articles/azure-sql/database/monitoring-sql-database-azure-monitor.md +++ /dev/null @@ -1,146 +0,0 @@ ---- -title: Monitoring Azure SQL Database with Azure Monitor -description: Start here to learn how to monitor Azure SQL Database with Azure Monitor -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.topic: conceptual -ms.reviewer: mathoma -ms.service: sql-db-mi -ms.subservice: performance -ms.custom: subject-monitoring -ms.date: 12/07/2021 ---- - -# Monitor Azure SQL Database with Azure Monitor - -When you have critical applications and business processes relying on Azure resources, you want to monitor those resources for their availability, performance, and operation. - -This article describes the monitoring data generated by Azure SQL Database. Azure SQL Database can be monitored by [Azure Monitor](../../azure-monitor/overview.md). If you are unfamiliar with the features of Azure Monitor common to all Azure services that use it, read [Monitoring Azure resources with Azure Monitor](../../azure-monitor/essentials/monitor-azure-resource.md). - -## Monitoring overview page in Azure portal - -View your Azure Monitor metrics for all connected resources by going to the Azure Monitor page directly in the Azure Portal. Or, on the **Overview** page of an Azure SQL DB, click on **Metrics** under the Monitoring heading to reach Azure Monitor. - -## Azure Monitor SQL Insights (preview) - -Some services in Azure have a focused, pre-built monitoring dashboard in the Azure portal that can be enabled to provide a starting point for monitoring your service. These special dashboards are called "insights" and are not enabled by default. 
For more on using Azure Monitor SQL Insights for all products in the [Azure SQL family](../../azure-sql/index.yml), see [Monitor your SQL deployments with SQL Insights (preview)](../../azure-monitor/insights/sql-insights-overview.md). - -After creating a monitoring profile, you can configure your Azure Monitor SQL Insights for SQL-specific metrics for Azure SQL Database, SQL Managed Instance, and Azure VMs running SQL Server. - -> [!NOTE] -> Azure SQL Analytics (preview) is an integration with Azure Monitor, where many monitoring solutions are no longer in active development. For more monitoring options, see [Monitoring and performance tuning in Azure SQL Database and Azure SQL Managed Instance](../../azure-sql/database/monitor-tune-overview.md). - -## Monitoring data - -Azure SQL Database collects the same kinds of monitoring data as other Azure resources that are described in [Monitoring data from Azure resources](../../azure-monitor/essentials/monitor-azure-resource.md). - -See [Monitoring Azure SQL Database with Azure Monitor reference](monitoring-sql-database-azure-monitor-reference.md) for detailed information on the metrics and logs metrics created by Azure SQL Database. - -## Collection and routing - -Platform metrics and the Activity log are collected and stored automatically, but can be routed to other locations by using a diagnostic setting. - -Resource Logs are not collected and stored until you create a diagnostic setting and route them to one or more locations. Resource logs were previously referred to as diagnostic logs. 
- -Diagnostic settings available include: - -- **log**: SQLInsights, AutomaticTuning, QueryStoreRuntimeStatistics, QueryStoreWaitStatistics, Errors, DatabaseWaitStatistics, Timeouts, Blocks, Deadlocks -- **metric**: All Azure Monitor metrics in the **Basic** and **InstanceAndAppAdvanced** categories -- **destination details**: Send to Log Analytics workspace, Archive to a storage account, Stream to an event hub, Send to partner solution - - For more information on these options, see [Create diagnostic settings in Azure portal](../../azure-monitor/essentials/diagnostic-settings.md#create-in-azure-portal). - -For more information on the resource logs and diagnostics available, see [Diagnostic telemetry for export](metrics-diagnostic-telemetry-logging-streaming-export-configure.md?tabs=azure-portal#diagnostic-telemetry-for-export). - -See [Create diagnostic setting to collect platform logs and metrics in Azure](../../azure-monitor/essentials/diagnostic-settings.md) for the detailed process for creating a diagnostic setting using the Azure portal, CLI, or PowerShell. When you create a diagnostic setting, you specify which categories of logs to collect. The categories for Azure SQL Database are listed in [Azure SQL Database monitoring data reference](monitoring-sql-database-azure-monitor-reference.md#resource-logs). - -## Analyzing metrics - -You can analyze metrics for Azure SQL Database with metrics from other Azure services using metrics explorer by opening **Metrics** from the **Azure Monitor** menu. See [Getting started with Azure Metrics Explorer](../../azure-monitor/essentials/metrics-getting-started.md) for details on using this tool. 
- -For a list of the platform metrics collected for Azure SQL Database, see [Monitoring Azure SQL Database data reference metrics](monitoring-sql-database-azure-monitor-reference.md#metrics) - -For reference, you can see a list of [all resource metrics supported in Azure Monitor](../../azure-monitor/essentials/metrics-supported.md). - -## Analyzing logs - -Data in Azure Monitor Logs is stored in tables where each table has its own set of unique properties. This data is optionally collected via Diagnostic settings. - -All resource logs in Azure Monitor have the same fields followed by service-specific fields. The common schema is outlined in [Azure Monitor resource log schema](../../azure-monitor/essentials/resource-logs-schema.md). - -The [Activity log](../../azure-monitor/essentials/activity-log.md) is a type of platform log in Azure that provides insight into subscription-level events. You can view it independently or route it to Azure Monitor Logs, where you can do much more complex queries using Log Analytics. - -For a list of the types of resource logs collected for Azure SQL Database, see [Monitoring Azure SQL Database data reference](monitoring-sql-database-azure-monitor-reference.md#resource-logs). - -For a list of the tables used by Azure Monitor Logs and queryable by Log Analytics, see [Monitoring Azure SQL Database data reference](monitoring-sql-database-azure-monitor-reference.md#azure-monitor-logs-tables). - -### Sample Kusto queries - -> [!IMPORTANT] -> When you select **Logs** from the Monitoring menu of an Azure SQL database, Log Analytics is opened with the query scope set to the current database. This means that log queries will only include data from that resource. If you want to run a query that includes data from other databases or data from other Azure services, select **Logs** from the **Azure Monitor** menu. See [Log query scope and time range in Azure Monitor Log Analytics](../../azure-monitor/logs/scope.md) for details. 
- -Following are queries that you can use to help you monitor your database. You may see different options available depending on your purchase model. - -Example A: **Log_write_percent** from the past hour - -```Kusto -AzureMetrics -| where ResourceProvider == "MICROSOFT.SQL" -| where TimeGenerated >= ago(60min) -| where MetricName in ('log_write_percent') -| parse _ResourceId with * "/microsoft.sql/servers/" Resource -| summarize Log_Maximum_last60mins = max(Maximum), Log_Minimum_last60mins = min(Minimum), Log_Average_last60mins = avg(Average) by Resource, MetricName -``` - -Example B: **SQL Server wait types** from the past 15 minutes - -```Kusto -AzureDiagnostics -| where ResourceProvider == "MICROSOFT.SQL" -| where TimeGenerated >= ago(15min) -| parse _ResourceId with * "/microsoft.sql/servers/" LogicalServerName "/databases/" DatabaseName -| summarize Total_count_15mins = sum(delta_waiting_tasks_count_d) by LogicalServerName, DatabaseName, wait_type_s -``` - -Example C: **SQL Server deadlocks** from the past 60 minutes - -```Kusto -AzureMetrics -| where ResourceProvider == "MICROSOFT.SQL" -| where TimeGenerated >= ago(60min) -| where MetricName in ('deadlock') -| parse _ResourceId with * "/microsoft.sql/servers/" Resource -| summarize Deadlock_max_60Mins = max(Maximum) by Resource, MetricName -``` - -Example D: **Avg CPU usage** from the past hour - -```Kusto -AzureMetrics -| where ResourceProvider == "MICROSOFT.SQL" -| where TimeGenerated >= ago(60min) -| where MetricName in ('cpu_percent') -| parse _ResourceId with * "/microsoft.sql/servers/" Resource -| summarize CPU_Maximum_last60mins = max(Maximum), CPU_Minimum_last60mins = min(Minimum), CPU_Average_last60mins = avg(Average) by Resource, MetricName -``` - -## Alerts - -Azure Monitor alerts proactively notify you when important conditions are found in your monitoring data. These metrics in Azure Monitor are always collected. 
They allow you to identify and address issues in your system before your customers notice them. You can set alerts on [metrics](../..//azure-monitor/alerts/alerts-metric-overview.md), [logs](../../azure-monitor/alerts/alerts-unified-log.md), and the [activity log](../../azure-monitor/alerts/activity-log-alerts.md). - -If you are creating or running an application in Azure, [Azure Monitor Application Insights](../../azure-monitor/overview.md#application-insights) may offer additional types of alerts. - -The following table lists common and recommended alert rules for Azure SQL Database. You may see different options available depending on your purchase model. - -| Signal name | Operator | Aggregation type | Threshold value | Description | -|:---|:---|:---|:---|:---| -| DTU Percentage | Greater than | Average | 80 | Whenever the average DTU percentage is greater than 80% | -| Log IO percentage | Greater than | Average | 80 | Whenever the average log io percentage is greater than 80% | -| Deadlocks\* | Greater than | Count | 1 | Whenever the count of deadlocks is greater than 1. | -| CPU percentage | Greater than | Average | 80 | Whenever the average cpu percentage is greater than 80% | - -\* Alerting on deadlocks may be unnecessary and noisy in some applications where deadlocks are expected and properly handled. - -## Next steps - -- See [Monitoring Azure SQL Database data reference](monitoring-sql-database-azure-monitor-reference.md) for a reference of the metrics, logs, and other important values created by Azure SQL Database. -- See [Monitoring Azure resources with Azure Monitor](/azure/azure-monitor/insights/monitor-azure-resource) for details on monitoring Azure resources. 
\ No newline at end of file diff --git a/articles/azure-sql/database/monitoring-tuning-index.yml b/articles/azure-sql/database/monitoring-tuning-index.yml deleted file mode 100644 index 740009ae4a00c..0000000000000 --- a/articles/azure-sql/database/monitoring-tuning-index.yml +++ /dev/null @@ -1,128 +0,0 @@ -### YamlMime:Landing - -title: Azure SQL Database and Azure SQL Managed Instance monitoring and performance tuning -summary: Azure SQL Database and Azure SQL Managed Instance provide tools to help you collect metrics and diagnostic information to monitor your database and improve its performance. # < 160 chars - -metadata: - title: Azure SQL Database and Azure SQL Managed Instance monitoring and tuning documentation - description: Learn about Azure SQL Database monitoring and performance tuning tools that help you collect metrics and diagnostics and improve database performance. - ms.service: sql-database - ms.subservice: performance - ms.custom: sqldbrb=2 - ms.topic: landing-page - ms.collection: performance - author: WilliamDAssafMSFT - ms.author: wiassaf - ms.reviewer: kendralittle, mathoma - ms.date: 10/18/2021 - -# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new - -landingContent: -# Cards and links should be based on top customer tasks or top subjects -# Start card title with a verb - # Card (optional) - - title: Get started with monitoring and performance tuning - linkLists: - - linkListType: overview - links: - - text: Monitoring and performance tuning - url: monitor-tune-overview.md - - linkListType: learn - links: - - text: Configure databases for performance - url: /learn/modules/configure-databases-performance/ - - text: Optimize query performance - url: /learn/paths/optimize-query-performance-sql-server/ - - text: Monitor and optimize operational resources - url: /learn/paths/monitor-optimize-operational-resources-sql-server/ - - text: 
Protect, monitor, and tune - url: /learn/modules/protect-monitor-tuning-migrated-database/ - - # Card (optional) - - title: Azure SQL Database intelligent performance recommendations - linkLists: - - linkListType: concept - links: - - text: SQL Database Advisor tuning recommendations - url: database-advisor-implement-performance-recommendations.md - - text: Automatic tuning - url: automatic-tuning-enable.md - - text: Query Performance Insight - url: query-performance-insight-use.md - - # Card - - title: Azure SQL Database and Azure SQL Managed Instance resource limits - linkLists: - - linkListType: concept - links: - - text: Single database vCore resource limits - url: resource-limits-vcore-single-databases.md - - text: Single database DTU resource limits - url: resource-limits-dtu-single-databases.md - - text: Elastic pool vCore resource limits - url: resource-limits-vcore-elastic-pools.md - - text: Elastic pool DTU resource limits - url: resource-limits-dtu-elastic-pools.md - - text: SQL Managed Instance resource limits - url: ../managed-instance/resource-limits.md - - text: Instance pools resource limits - url: ../managed-instance/instance-pools-overview.md#resource-limitations - - # Card (optional) - - title: Intelligent performance analysis for Azure SQL Database and Azure SQL Managed Instance - linkLists: - - linkListType: overview - links: - - text: Intelligent Insights using AI - url: intelligent-insights-overview.md - - linkListType: concept - links: - - text: Metrics and diagnostics logging - url: metrics-diagnostic-telemetry-logging-streaming-export-configure.md - - text: Understand the performance diagnostics log - url: intelligent-insights-use-diagnostics-log.md - - text: Azure SQL Analytics - url: ../../azure-monitor/insights/azure-sql.md - - text: Detectable performance patterns - url: intelligent-insights-troubleshoot-performance.md - - text: Intelligent query processing - url: /sql/relational-databases/performance/intelligent-query-processing - - # 
Card - - title: Manual performance tuning for Azure SQL Database and Azure SQL Managed Instance - linkLists: - - linkListType: overview - links: - - text: Identify query performance issues - url: ../identify-query-performance-issues.md - - linkListType: concept - links: - - text: Monitoring using DMVs - url: monitoring-with-dmvs.md - - text: Application and database tuning - url: performance-guidance.md - - text: Using Query Store - url: /sql/relational-databases/performance/monitoring-performance-by-using-the-query-store - - text: Using extended events - url: xevent-db-diff-from-svr.md - - text: Hyperscale performance diagnostics - url: hyperscale-performance-diagnostics.md - - # Card - - title: Azure SQL Database and Azure SQL Managed Instance resource scaling - linkLists: - - linkListType: overview - links: - - text: Dynamically scaling resources - url: scale-resources.md - - linkListType: concept - links: - - text: Scaling single database resources - url: single-database-scale.md - - text: Scaling elastic pool resources - url: elastic-pool-scale.md - - text: Scaling managed instance resources - url: ../managed-instance/sql-managed-instance-paas-overview.md#management-operations - - text: Read scale-out - url: read-scale-out.md diff --git a/articles/azure-sql/database/monitoring-with-dmvs.md b/articles/azure-sql/database/monitoring-with-dmvs.md deleted file mode 100644 index 839b33ef94777..0000000000000 --- a/articles/azure-sql/database/monitoring-with-dmvs.md +++ /dev/null @@ -1,789 +0,0 @@ ---- -title: Monitor performance using DMVs -titleSuffix: Azure SQL Database & SQL Managed Instance -description: Learn how to detect and diagnose common performance problems by using dynamic management views to monitor Microsoft Azure SQL Database and Azure SQL Managed Instance. 
-services: sql-database -ms.service: sql-db-mi -ms.subservice: performance -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: how-to -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, mathoma -ms.date: 04/11/2022 ---- -# Monitoring Microsoft Azure SQL Database and Azure SQL Managed Instance performance using dynamic management views -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Microsoft Azure SQL Database and Azure SQL Managed Instance enable a subset of dynamic management views to diagnose performance problems, which might be caused by blocked or long-running queries, resource bottlenecks, poor query plans, and so on. This article provides information on how to detect common performance problems by using dynamic management views. - -Microsoft Azure SQL Database and Azure SQL Managed Instance partially support three categories of dynamic management views: - -- Database-related dynamic management views. -- Execution-related dynamic management views. -- Transaction-related dynamic management views. - -For detailed information on dynamic management views, see [Dynamic Management Views and Functions (Transact-SQL)](/sql/relational-databases/system-dynamic-management-views/system-dynamic-management-views). - -## Monitor with SQL Insights (preview) - -[Azure Monitor SQL Insights (preview)](../../azure-monitor/insights/sql-insights-overview.md) is a tool for monitoring Azure SQL managed instances, databases in Azure SQL Database, and SQL Server instances in Azure SQL VMs. This service uses a remote agent to capture data from dynamic management views (DMVs) and routes the data to Azure Log Analytics, where it can be monitored and analyzed. You can view this data from [Azure Monitor](../../azure-monitor/overview.md) in provided views, or access the Log data directly to run queries and analyze trends. 
To start using Azure Monitor SQL Insights (preview), see [Enable SQL Insights (preview)](../../azure-monitor/insights/sql-insights-enable.md). - - -## Permissions - -In Azure SQL Database, querying a dynamic management view requires **VIEW DATABASE STATE** permissions. The **VIEW DATABASE STATE** permission returns information about all objects within the current database. -To grant the **VIEW DATABASE STATE** permission to a specific database user, run the following query: - -```sql -GRANT VIEW DATABASE STATE TO database_user; -``` - -In Azure SQL Managed Instance, querying a dynamic management view requires **VIEW SERVER STATE** permissions. For more information, see [System Dynamic Management Views](/sql/relational-databases/system-dynamic-management-views/system-dynamic-management-views#required-permissions). - -In an instance of SQL Server and in Azure SQL Managed Instance, dynamic management views return server state information. In Azure SQL Database, they return information regarding your current logical database only. 
- -This article contains a collection of DMV queries that you can execute using SQL Server Management Studio or Azure Data Studio to detect the following types of query performance issues: - -- [Identifying queries related to excessive CPU consumption](#identify-cpu-performance-issues) -- [PAGELATCH_* and WRITE_LOG waits related to IO bottlenecks](#identify-io-performance-issues) -- [PAGELATCH_* waits caused by tempdb contention](#identify-tempdb-performance-issues) -- [RESOURCE_SEMAPHORE waits caused by memory grant waiting issues](#identify-memory-grant-wait-performance-issues) -- [Identifying database and object sizes](#calculating-database-and-objects-sizes) -- [Retrieving information about active sessions](#monitoring-connections) -- [Retrieve system-wide and database resource usage information](#monitor-resource-use) -- [Retrieving query performance information](#monitoring-query-performance) - -## Identify CPU performance issues - -If CPU consumption is above 80% for extended periods of time, consider the following troubleshooting steps: - -### The CPU issue is occurring now - -If the issue is occurring right now, there are two possible scenarios: - -#### Many individual queries that cumulatively consume high CPU - -Use the following query to identify top query hashes: - -```sql -PRINT '-- top 10 Active CPU Consuming Queries (aggregated)--'; -SELECT TOP 10 GETDATE() runtime, * -FROM (SELECT query_stats.query_hash, SUM(query_stats.cpu_time) 'Total_Request_Cpu_Time_Ms', SUM(logical_reads) 'Total_Request_Logical_Reads', MIN(start_time) 'Earliest_Request_start_Time', COUNT(*) 'Number_Of_Requests', SUBSTRING(REPLACE(REPLACE(MIN(query_stats.statement_text), CHAR(10), ' '), CHAR(13), ' '), 1, 256) AS "Statement_Text" - FROM (SELECT req.*, SUBSTRING(ST.text, (req.statement_start_offset / 2)+1, ((CASE statement_end_offset WHEN -1 THEN DATALENGTH(ST.text)ELSE req.statement_end_offset END-req.statement_start_offset)/ 2)+1) AS statement_text - FROM sys.dm_exec_requests AS 
req - CROSS APPLY sys.dm_exec_sql_text(req.sql_handle) AS ST ) AS query_stats - GROUP BY query_hash) AS t -ORDER BY Total_Request_Cpu_Time_Ms DESC; -``` - -#### Long running queries that consume CPU are still running - -Use the following query to identify these queries: - -```sql -PRINT '--top 10 Active CPU Consuming Queries by sessions--'; -SELECT TOP 10 req.session_id, req.start_time, cpu_time 'cpu_time_ms', OBJECT_NAME(ST.objectid, ST.dbid) 'ObjectName', SUBSTRING(REPLACE(REPLACE(SUBSTRING(ST.text, (req.statement_start_offset / 2)+1, ((CASE statement_end_offset WHEN -1 THEN DATALENGTH(ST.text)ELSE req.statement_end_offset END-req.statement_start_offset)/ 2)+1), CHAR(10), ' '), CHAR(13), ' '), 1, 512) AS statement_text -FROM sys.dm_exec_requests AS req - CROSS APPLY sys.dm_exec_sql_text(req.sql_handle) AS ST -ORDER BY cpu_time DESC; -GO -``` - -### The CPU issue occurred in the past - -If the issue occurred in the past and you want to do root cause analysis, use [Query Store](/sql/relational-databases/performance/monitoring-performance-by-using-the-query-store). Users with database access can use T-SQL to query Query Store data. Query Store default configurations use a granularity of 1 hour. Use the following query to look at activity for high CPU consuming queries. This query returns the top 15 CPU consuming queries. 
Remember to change `rsi.start_time >= DATEADD(hour, -2, GETUTCDATE()`: - -```sql --- Top 15 CPU consuming queries by query hash --- note that a query hash can have many query id if not parameterized or not parameterized properly --- it grabs a sample query text by min -WITH AggregatedCPU AS (SELECT q.query_hash, SUM(count_executions * avg_cpu_time / 1000.0) AS total_cpu_millisec, SUM(count_executions * avg_cpu_time / 1000.0)/ SUM(count_executions) AS avg_cpu_millisec, MAX(rs.max_cpu_time / 1000.00) AS max_cpu_millisec, MAX(max_logical_io_reads) max_logical_reads, COUNT(DISTINCT p.plan_id) AS number_of_distinct_plans, COUNT(DISTINCT p.query_id) AS number_of_distinct_query_ids, SUM(CASE WHEN rs.execution_type_desc='Aborted' THEN count_executions ELSE 0 END) AS Aborted_Execution_Count, SUM(CASE WHEN rs.execution_type_desc='Regular' THEN count_executions ELSE 0 END) AS Regular_Execution_Count, SUM(CASE WHEN rs.execution_type_desc='Exception' THEN count_executions ELSE 0 END) AS Exception_Execution_Count, SUM(count_executions) AS total_executions, MIN(qt.query_sql_text) AS sampled_query_text - FROM sys.query_store_query_text AS qt - JOIN sys.query_store_query AS q ON qt.query_text_id=q.query_text_id - JOIN sys.query_store_plan AS p ON q.query_id=p.query_id - JOIN sys.query_store_runtime_stats AS rs ON rs.plan_id=p.plan_id - JOIN sys.query_store_runtime_stats_interval AS rsi ON rsi.runtime_stats_interval_id=rs.runtime_stats_interval_id - WHERE rs.execution_type_desc IN ('Regular', 'Aborted', 'Exception')AND rsi.start_time>=DATEADD(HOUR, -2, GETUTCDATE()) - GROUP BY q.query_hash), OrderedCPU AS (SELECT query_hash, total_cpu_millisec, avg_cpu_millisec, max_cpu_millisec, max_logical_reads, number_of_distinct_plans, number_of_distinct_query_ids, total_executions, Aborted_Execution_Count, Regular_Execution_Count, Exception_Execution_Count, sampled_query_text, ROW_NUMBER() OVER (ORDER BY total_cpu_millisec DESC, query_hash ASC) AS RN - FROM AggregatedCPU) -SELECT 
OD.query_hash, OD.total_cpu_millisec, OD.avg_cpu_millisec, OD.max_cpu_millisec, OD.max_logical_reads, OD.number_of_distinct_plans, OD.number_of_distinct_query_ids, OD.total_executions, OD.Aborted_Execution_Count, OD.Regular_Execution_Count, OD.Exception_Execution_Count, OD.sampled_query_text, OD.RN -FROM OrderedCPU AS OD -WHERE OD.RN<=15 -ORDER BY total_cpu_millisec DESC; -``` - -Once you identify the problematic queries, it's time to tune those queries to reduce CPU utilization. If you don't have time to tune the queries, you may also choose to upgrade the SLO of the database to work around the issue. - -For Azure SQL Database users, learn more about handling CPU performance problems in [Diagnose and troubleshoot high CPU on Azure SQL Database](high-cpu-diagnose-troubleshoot.md) - -## Identify IO performance issues - -When identifying IO performance issues, the top wait types associated with IO issues are: - -- `PAGEIOLATCH_*` - - For data file IO issues (including `PAGEIOLATCH_SH`, `PAGEIOLATCH_EX`, `PAGEIOLATCH_UP`). If the wait type name has **IO** in it, it points to an IO issue. If there is no **IO** in the page latch wait name, it points to a different type of problem (for example, tempdb contention). - -- `WRITE_LOG` - - For transaction log IO issues. - -### If the IO issue is occurring right now - -Use the [sys.dm_exec_requests](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-requests-transact-sql) or [sys.dm_os_waiting_tasks](/sql/relational-databases/system-dynamic-management-views/sys-dm-os-waiting-tasks-transact-sql) to see the `wait_type` and `wait_time`. - -#### Identify data and log IO usage - -Use the following query to identify data and log IO usage. If the data or log IO is above 80%, it means users have used the available IO for the SQL Database service tier. 
- -```sql -SELECT end_time, avg_data_io_percent, avg_log_write_percent -FROM sys.dm_db_resource_stats -ORDER BY end_time DESC; -``` - -If the IO limit has been reached, you have two options: - -- Option 1: Upgrade the compute size or service tier -- Option 2: Identify and tune the queries consuming the most IO. - -#### View buffer-related IO using the Query Store - -For option 2, you can use the following query against Query Store for buffer-related IO to view the last two hours of tracked activity: - -```sql --- top queries that waited on buffer --- note these are finished queries -WITH Aggregated AS (SELECT q.query_hash, SUM(total_query_wait_time_ms) total_wait_time_ms, SUM(total_query_wait_time_ms / avg_query_wait_time_ms) AS total_executions, MIN(qt.query_sql_text) AS sampled_query_text, MIN(wait_category_desc) AS wait_category_desc - FROM sys.query_store_query_text AS qt - JOIN sys.query_store_query AS q ON qt.query_text_id=q.query_text_id - JOIN sys.query_store_plan AS p ON q.query_id=p.query_id - JOIN sys.query_store_wait_stats AS waits ON waits.plan_id=p.plan_id - JOIN sys.query_store_runtime_stats_interval AS rsi ON rsi.runtime_stats_interval_id=waits.runtime_stats_interval_id - WHERE wait_category_desc='Buffer IO' AND rsi.start_time>=DATEADD(HOUR, -2, GETUTCDATE()) - GROUP BY q.query_hash), Ordered AS (SELECT query_hash, total_executions, total_wait_time_ms, sampled_query_text, wait_category_desc, ROW_NUMBER() OVER (ORDER BY total_wait_time_ms DESC, query_hash ASC) AS RN - FROM Aggregated) -SELECT OD.query_hash, OD.total_executions, OD.total_wait_time_ms, OD.sampled_query_text, OD.wait_category_desc, OD.RN -FROM Ordered AS OD -WHERE OD.RN<=15 -ORDER BY total_wait_time_ms DESC; -GO -``` - -#### View total log IO for WRITELOG waits - -If the wait type is `WRITELOG`, use the following query to view total log IO by statement: - -```sql --- Top transaction log consumers --- Adjust the time window by changing --- rsi.start_time >= DATEADD(hour, -2, 
GETUTCDATE()) -WITH AggregatedLogUsed -AS (SELECT q.query_hash, - SUM(count_executions * avg_cpu_time / 1000.0) AS total_cpu_millisec, - SUM(count_executions * avg_cpu_time / 1000.0) / SUM(count_executions) AS avg_cpu_millisec, - SUM(count_executions * avg_log_bytes_used) AS total_log_bytes_used, - MAX(rs.max_cpu_time / 1000.00) AS max_cpu_millisec, - MAX(max_logical_io_reads) max_logical_reads, - COUNT(DISTINCT p.plan_id) AS number_of_distinct_plans, - COUNT(DISTINCT p.query_id) AS number_of_distinct_query_ids, - SUM( CASE - WHEN rs.execution_type_desc = 'Aborted' THEN - count_executions - ELSE - 0 - END - ) AS Aborted_Execution_Count, - SUM( CASE - WHEN rs.execution_type_desc = 'Regular' THEN - count_executions - ELSE - 0 - END - ) AS Regular_Execution_Count, - SUM( CASE - WHEN rs.execution_type_desc = 'Exception' THEN - count_executions - ELSE - 0 - END - ) AS Exception_Execution_Count, - SUM(count_executions) AS total_executions, - MIN(qt.query_sql_text) AS sampled_query_text - FROM sys.query_store_query_text AS qt - JOIN sys.query_store_query AS q - ON qt.query_text_id = q.query_text_id - JOIN sys.query_store_plan AS p - ON q.query_id = p.query_id - JOIN sys.query_store_runtime_stats AS rs - ON rs.plan_id = p.plan_id - JOIN sys.query_store_runtime_stats_interval AS rsi - ON rsi.runtime_stats_interval_id = rs.runtime_stats_interval_id - WHERE rs.execution_type_desc IN ( 'Regular', 'Aborted', 'Exception' ) - AND rsi.start_time >= DATEADD(HOUR, -2, GETUTCDATE()) - GROUP BY q.query_hash), - OrderedLogUsed -AS (SELECT query_hash, - total_log_bytes_used, - number_of_distinct_plans, - number_of_distinct_query_ids, - total_executions, - Aborted_Execution_Count, - Regular_Execution_Count, - Exception_Execution_Count, - sampled_query_text, - ROW_NUMBER() OVER (ORDER BY total_log_bytes_used DESC, query_hash ASC) AS RN - FROM AggregatedLogUsed) -SELECT OD.total_log_bytes_used, - OD.number_of_distinct_plans, - OD.number_of_distinct_query_ids, - OD.total_executions, - 
OD.Aborted_Execution_Count, - OD.Regular_Execution_Count, - OD.Exception_Execution_Count, - OD.sampled_query_text, - OD.RN -FROM OrderedLogUsed AS OD -WHERE OD.RN <= 15 -ORDER BY total_log_bytes_used DESC; -GO -``` - -## Identify `tempdb` performance issues - -When identifying IO performance issues, the top wait type associated with `tempdb` issues is `PAGELATCH_*` (not `PAGEIOLATCH_*`). However, `PAGELATCH_*` waits do not always mean you have `tempdb` contention. This wait may also mean that you have user-object data page contention due to concurrent requests targeting the same data page. To further confirm `tempdb` contention, use [sys.dm_exec_requests](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-requests-transact-sql) to confirm that the wait_resource value begins with `2:x:y`, where `2` is the database ID of `tempdb`, `x` is the file ID, and `y` is the page ID. - -For tempdb contention, a common method is to reduce or rewrite application code that relies on `tempdb`. 
Common `tempdb` usage areas include: - -- Temp tables -- Table variables -- Table-valued parameters -- Version store usage (associated with long running transactions) -- Queries that have query plans that use sorts, hash joins, and spools - -### Top queries that use table variables and temporary tables - -Use the following query to identify top queries that use table variables and temporary tables: - -```sql -SELECT plan_handle, execution_count, query_plan -INTO #tmpPlan -FROM sys.dm_exec_query_stats - CROSS APPLY sys.dm_exec_query_plan(plan_handle); -GO - -WITH XMLNAMESPACES('http://schemas.microsoft.com/sqlserver/2004/07/showplan' AS sp) -SELECT plan_handle, stmt.stmt_details.value('@Database', 'varchar(max)') 'Database', stmt.stmt_details.value('@Schema', 'varchar(max)') 'Schema', stmt.stmt_details.value('@Table', 'varchar(max)') 'table' -INTO #tmp2 -FROM(SELECT CAST(query_plan AS XML) sqlplan, plan_handle FROM #tmpPlan) AS p - CROSS APPLY sqlplan.nodes('//sp:Object') AS stmt(stmt_details); -GO - -SELECT t.plan_handle, [Database], [Schema], [table], execution_count -FROM(SELECT DISTINCT plan_handle, [Database], [Schema], [table] - FROM #tmp2 - WHERE [table] LIKE '%@%' OR [table] LIKE '%#%') AS t - JOIN #tmpPlan AS t2 ON t.plan_handle=t2.plan_handle; -``` - -### Identify long running transactions - -Use the following query to identify long running transactions. Long running transactions prevent version store cleanup. 
- -```sql -SELECT DB_NAME(dtr.database_id) 'database_name', - sess.session_id, - atr.name AS 'tran_name', - atr.transaction_id, - transaction_type, - transaction_begin_time, - database_transaction_begin_time transaction_state, - is_user_transaction, - sess.open_transaction_count, - LTRIM(RTRIM(REPLACE( - REPLACE( - SUBSTRING( - SUBSTRING( - txt.text, - (req.statement_start_offset / 2) + 1, - ((CASE req.statement_end_offset - WHEN -1 THEN - DATALENGTH(txt.text) - ELSE - req.statement_end_offset - END - req.statement_start_offset - ) / 2 - ) + 1 - ), - 1, - 1000 - ), - CHAR(10), - ' ' - ), - CHAR(13), - ' ' - ) - ) - ) Running_stmt_text, - recenttxt.text 'MostRecentSQLText' -FROM sys.dm_tran_active_transactions AS atr - INNER JOIN sys.dm_tran_database_transactions AS dtr - ON dtr.transaction_id = atr.transaction_id - LEFT JOIN sys.dm_tran_session_transactions AS sess - ON sess.transaction_id = atr.transaction_id - LEFT JOIN sys.dm_exec_requests AS req - ON req.session_id = sess.session_id - AND req.transaction_id = sess.transaction_id - LEFT JOIN sys.dm_exec_connections AS conn - ON sess.session_id = conn.session_id - OUTER APPLY sys.dm_exec_sql_text(req.sql_handle) AS txt - OUTER APPLY sys.dm_exec_sql_text(conn.most_recent_sql_handle) AS recenttxt -WHERE atr.transaction_type != 2 - AND sess.session_id != @@spid -ORDER BY start_time ASC; -``` - -## Identify memory grant wait performance issues - -If your top wait type is `RESOURCE_SEMAPHORE` and you don't have a high CPU usage issue, you may have a memory grant waiting issue. 
- -### Determine if a `RESOURCE_SEMAPHORE` wait is a top wait - -Use the following query to determine if a `RESOURCE_SEMAPHORE` wait is a top wait: - -```sql -SELECT wait_type, - SUM(wait_time) AS total_wait_time_ms -FROM sys.dm_exec_requests AS req - JOIN sys.dm_exec_sessions AS sess - ON req.session_id = sess.session_id -WHERE is_user_process = 1 -GROUP BY wait_type -ORDER BY SUM(wait_time) DESC; -``` - -### Identify high memory-consuming statements - -Use the following query to identify high memory-consuming statements: - -```sql -SELECT IDENTITY(INT, 1, 1) rowId, - CAST(query_plan AS XML) query_plan, - p.query_id -INTO #tmp -FROM sys.query_store_plan AS p - JOIN sys.query_store_runtime_stats AS r - ON p.plan_id = r.plan_id - JOIN sys.query_store_runtime_stats_interval AS i - ON r.runtime_stats_interval_id = i.runtime_stats_interval_id -WHERE start_time > '2018-10-11 14:00:00.0000000' - AND end_time < '2018-10-17 20:00:00.0000000'; -GO -;WITH cte -AS (SELECT query_id, - query_plan, - m.c.value('@SerialDesiredMemory', 'INT') AS SerialDesiredMemory - FROM #tmp AS t - CROSS APPLY t.query_plan.nodes('//*:MemoryGrantInfo[@SerialDesiredMemory[. > 0]]') AS m(c) ) -SELECT TOP 50 - cte.query_id, - t.query_sql_text, - cte.query_plan, - CAST(SerialDesiredMemory / 1024. AS DECIMAL(10, 2)) SerialDesiredMemory_MB -FROM cte - JOIN sys.query_store_query AS q - ON cte.query_id = q.query_id - JOIN sys.query_store_query_text AS t - ON q.query_text_id = t.query_text_id -ORDER BY SerialDesiredMemory DESC; -``` - -If you encounter out of memory errors in Azure SQL Database, review [sys.dm_os_out_of_memory_events](/sql/relational-databases/system-dynamic-management-views/sys-dm-os-out-of-memory-events). 
- -### Identify the top 10 active memory grants - -Use the following query to identify the top 10 active memory grants: - -```sql -SELECT TOP 10 - CONVERT(VARCHAR(30), GETDATE(), 121) AS runtime, - r.session_id, - r.blocking_session_id, - r.cpu_time, - r.total_elapsed_time, - r.reads, - r.writes, - r.logical_reads, - r.row_count, - wait_time, - wait_type, - r.command, - OBJECT_NAME(txt.objectid, txt.dbid) 'Object_Name', - LTRIM(RTRIM(REPLACE( - REPLACE( - SUBSTRING( - SUBSTRING( - text, - (r.statement_start_offset / 2) + 1, - ((CASE r.statement_end_offset - WHEN -1 THEN - DATALENGTH(text) - ELSE - r.statement_end_offset - END - r.statement_start_offset - ) / 2 - ) + 1 - ), - 1, - 1000 - ), - CHAR(10), - ' ' - ), - CHAR(13), - ' ' - ) - ) - ) stmt_text, - mg.dop, --Degree of parallelism - mg.request_time, --Date and time when this query requested the memory grant. - mg.grant_time, --NULL means memory has not been granted - mg.requested_memory_kb / 1024.0 requested_memory_mb, --Total requested amount of memory in megabytes - mg.granted_memory_kb / 1024.0 AS granted_memory_mb, --Total amount of memory actually granted in megabytes. NULL if not granted - mg.required_memory_kb / 1024.0 AS required_memory_mb, --Minimum memory required to run this query in megabytes. - max_used_memory_kb / 1024.0 AS max_used_memory_mb, - mg.query_cost, --Estimated query cost. - mg.timeout_sec, --Time-out in seconds before this query gives up the memory grant request. - mg.resource_semaphore_id, --Non-unique ID of the resource semaphore on which this query is waiting. - mg.wait_time_ms, --Wait time in milliseconds. NULL if the memory is already granted. 
- CASE mg.is_next_candidate --Is this process the next candidate for a memory grant - WHEN 1 THEN - 'Yes' - WHEN 0 THEN - 'No' - ELSE - 'Memory has been granted' - END AS 'Next Candidate for Memory Grant', - qp.query_plan -FROM sys.dm_exec_requests AS r - JOIN sys.dm_exec_query_memory_grants AS mg - ON r.session_id = mg.session_id - AND r.request_id = mg.request_id - CROSS APPLY sys.dm_exec_sql_text(mg.sql_handle) AS txt - CROSS APPLY sys.dm_exec_query_plan(r.plan_handle) AS qp -ORDER BY mg.granted_memory_kb DESC; -``` - -## Calculating database and objects sizes - -The following query returns the size of your database (in megabytes): - -```sql --- Calculates the size of the database. -SELECT SUM(CAST(FILEPROPERTY(name, 'SpaceUsed') AS bigint) * 8192.) / 1024 / 1024 AS DatabaseSizeInMB -FROM sys.database_files -WHERE type_desc = 'ROWS'; -GO -``` - -The following query returns the size of individual objects (in megabytes) in your database: - -```sql --- Calculates the size of individual database objects. -SELECT sys.objects.name, SUM(reserved_page_count) * 8.0 / 1024 -FROM sys.dm_db_partition_stats, sys.objects -WHERE sys.dm_db_partition_stats.object_id = sys.objects.object_id -GROUP BY sys.objects.name; -GO -``` - -## Monitoring connections - -You can use the [sys.dm_exec_connections](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-connections-transact-sql) view to retrieve information about the connections established to a specific server and managed instance and the details of each connection. In addition, the [sys.dm_exec_sessions](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-sessions-transact-sql) view is helpful when retrieving information about all active user connections and internal tasks. 
- -The following query retrieves information on the current connection: - -```sql -SELECT - c.session_id, c.net_transport, c.encrypt_option, - c.auth_scheme, s.host_name, s.program_name, - s.client_interface_name, s.login_name, s.nt_domain, - s.nt_user_name, s.original_login_name, c.connect_time, - s.login_time -FROM sys.dm_exec_connections AS c -JOIN sys.dm_exec_sessions AS s - ON c.session_id = s.session_id -WHERE c.session_id = @@SPID; -``` - -> [!NOTE] -> When executing the `sys.dm_exec_requests` and `sys.dm_exec_sessions views`, if you have **VIEW DATABASE STATE** permission on the database, you see all executing sessions on the database; otherwise, you see only the current session. - -## Monitor resource use - -You can monitor Azure SQL Database resource usage using [SQL Database Query Performance Insight](query-performance-insight-use.md). For Azure SQL Database and Azure SQL Managed Instance, you can monitor using [Query Store](/sql/relational-databases/performance/monitoring-performance-by-using-the-query-store). - -You can also monitor usage using these views: - -- Azure SQL Database: [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database) -- Azure SQL Managed Instance: [sys.server_resource_stats](/sql/relational-databases/system-catalog-views/sys-server-resource-stats-azure-sql-database) -- Both Azure SQL Database and Azure SQL Managed Instance: [sys.resource_stats](/sql/relational-databases/system-catalog-views/sys-resource-stats-azure-sql-database) - -### sys.dm_db_resource_stats - -You can use the [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database) view in every database. The `sys.dm_db_resource_stats` view shows recent resource use data relative to the service tier. Average percentages for CPU, data IO, log writes, and memory are recorded every 15 seconds and are maintained for 1 hour. 
- -Because this view provides a more granular look at resource use, use `sys.dm_db_resource_stats` first for any current-state analysis or troubleshooting. For example, this query shows the average and maximum resource use for the current database over the past hour: - -```sql -SELECT - AVG(avg_cpu_percent) AS 'Average CPU use in percent', - MAX(avg_cpu_percent) AS 'Maximum CPU use in percent', - AVG(avg_data_io_percent) AS 'Average data IO in percent', - MAX(avg_data_io_percent) AS 'Maximum data IO in percent', - AVG(avg_log_write_percent) AS 'Average log write use in percent', - MAX(avg_log_write_percent) AS 'Maximum log write use in percent', - AVG(avg_memory_usage_percent) AS 'Average memory use in percent', - MAX(avg_memory_usage_percent) AS 'Maximum memory use in percent' -FROM sys.dm_db_resource_stats; -``` - -For other queries, see the examples in [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database). - -### sys.server_resource_stats - -You can use [sys.server_resource_stats](/sql/relational-databases/system-catalog-views/sys-server-resource-stats-azure-sql-database) to return CPU usage, IO, and storage data for an Azure SQL Managed Instance. The data is collected and aggregated within five-minute intervals. There is one row for every 15 seconds reporting. The data returned includes CPU usage, storage size, IO utilization, and managed instance SKU. Historical data is retained for approximately 14 days. 
- -```sql -DECLARE @s datetime; -DECLARE @e datetime; -SET @s= DateAdd(d,-7,GetUTCDate()); -SET @e= GETUTCDATE(); -SELECT resource_name, AVG(avg_cpu_percent) AS Average_Compute_Utilization -FROM sys.server_resource_stats -WHERE start_time BETWEEN @s AND @e -GROUP BY resource_name -HAVING AVG(avg_cpu_percent) >= 80; -``` - -### sys.resource_stats - -The [sys.resource_stats](/sql/relational-databases/system-catalog-views/sys-resource-stats-azure-sql-database) view in the `master` database has additional information that can help you monitor the performance of your database at its specific service tier and compute size. The data is collected every 5 minutes and is maintained for approximately 14 days. This view is useful for a longer-term historical analysis of how your database uses resources. - -The following graph shows the CPU resource use for a Premium database with the P2 compute size for each hour in a week. This graph starts on a Monday, shows five work days, and then shows a weekend, when much less happens on the application. - -![Database resource use](./media/monitoring-with-dmvs/sql_db_resource_utilization.png) - -From the data, this database currently has a peak CPU load of just over 50 percent CPU use relative to the P2 compute size (midday on Tuesday). If CPU is the dominant factor in the application's resource profile, then you might decide that P2 is the right compute size to guarantee that the workload always fits. If you expect an application to grow over time, it's a good idea to have an extra resource buffer so that the application doesn't ever reach the performance-level limit. If you increase the compute size, you can help avoid customer-visible errors that might occur when a database doesn't have enough power to process requests effectively, especially in latency-sensitive environments. An example is a database that supports an application that paints webpages based on the results of database calls. 
- -Other application types might interpret the same graph differently. For example, if an application tries to process payroll data each day and has the same chart, this kind of "batch job" model might do fine at a P1 compute size. The P1 compute size has 100 DTUs compared to 200 DTUs at the P2 compute size. The P1 compute size provides half the performance of the P2 compute size. So, 50 percent of CPU use in P2 equals 100 percent CPU use in P1. If the application does not have timeouts, it might not matter if a job takes 2 hours or 2.5 hours to finish, if it gets done today. An application in this category probably can use a P1 compute size. You can take advantage of the fact that there are periods of time during the day when resource use is lower, so that any "big peak" might spill over into one of the troughs later in the day. The P1 compute size might be good for that kind of application (and save money), as long as the jobs can finish on time each day. - -The database engine exposes consumed resource information for each active database in the `sys.resource_stats` view of the `master` database in each server. The data in the table is aggregated for 5-minute intervals. With the Basic, Standard, and Premium service tiers, the data can take more than 5 minutes to appear in the table, so this data is more useful for historical analysis rather than near-real-time analysis. Query the `sys.resource_stats` view to see the recent history of a database and to validate whether the reservation you chose delivered the performance you want when needed. - -> [!NOTE] -> On Azure SQL Database, you must be connected to the `master` database to query `sys.resource_stats` in the following examples. 
- -This example shows you how the data in this view is exposed: - -```sql -SELECT TOP 10 * -FROM sys.resource_stats -WHERE database_name = 'resource1' -ORDER BY start_time DESC; -``` - -![The sys.resource_stats catalog view](./media/monitoring-with-dmvs/sys_resource_stats.png) - -The next example shows you different ways that you can use the `sys.resource_stats` catalog view to get information about how your database uses resources: - -1. To look at the past week's resource use for the database userdb1, you can run this query: - - ```sql - SELECT * - FROM sys.resource_stats - WHERE database_name = 'userdb1' AND - start_time > DATEADD(day, -7, GETDATE()) - ORDER BY start_time DESC; - ``` - -2. To evaluate how well your workload fits the compute size, you need to drill down into each aspect of the resource metrics: CPU, reads, writes, number of workers, and number of sessions. Here's a revised query using `sys.resource_stats` to report the average and maximum values of these resource metrics: - - ```sql - SELECT - avg(avg_cpu_percent) AS 'Average CPU use in percent', - max(avg_cpu_percent) AS 'Maximum CPU use in percent', - avg(avg_data_io_percent) AS 'Average physical data IO use in percent', - max(avg_data_io_percent) AS 'Maximum physical data IO use in percent', - avg(avg_log_write_percent) AS 'Average log write use in percent', - max(avg_log_write_percent) AS 'Maximum log write use in percent', - avg(max_session_percent) AS 'Average % of sessions', - max(max_session_percent) AS 'Maximum % of sessions', - avg(max_worker_percent) AS 'Average % of workers', - max(max_worker_percent) AS 'Maximum % of workers' - FROM sys.resource_stats - WHERE database_name = 'userdb1' AND start_time > DATEADD(day, -7, GETDATE()); - ``` - -3. With this information about the average and maximum values of each resource metric, you can assess how well your workload fits into the compute size you chose. 
Usually, average values from `sys.resource_stats` give you a good baseline to use against the target size. It should be your primary measurement stick. For an example, you might be using the Standard service tier with S2 compute size. The average use percentages for CPU and IO reads and writes are below 40 percent, the average number of workers is below 50, and the average number of sessions is below 200. Your workload might fit into the S1 compute size. It's easy to see whether your database fits in the worker and session limits. To see whether a database fits into a lower compute size with regard to CPU, reads, and writes, divide the DTU number of the lower compute size by the DTU number of your current compute size, and then multiply the result by 100: - - `S1 DTU / S2 DTU * 100 = 20 / 50 * 100 = 40` - - The result is the relative performance difference between the two compute sizes in percentage. If your resource use doesn't exceed this amount, your workload might fit into the lower compute size. However, you need to look at all ranges of resource use values, and determine, by percentage, how often your database workload would fit into the lower compute size. 
The following query outputs the fit percentage per resource dimension, based on the threshold of 40 percent that we calculated in this example: - - ```sql - SELECT - 100*((COUNT(database_name) - SUM(CASE WHEN avg_cpu_percent >= 40 THEN 1 ELSE 0 END) * 1.0) / COUNT(database_name)) AS 'CPU Fit Percent', - 100*((COUNT(database_name) - SUM(CASE WHEN avg_log_write_percent >= 40 THEN 1 ELSE 0 END) * 1.0) / COUNT(database_name)) AS 'Log Write Fit Percent', - 100*((COUNT(database_name) - SUM(CASE WHEN avg_data_io_percent >= 40 THEN 1 ELSE 0 END) * 1.0) / COUNT(database_name)) AS 'Physical Data IO Fit Percent' - FROM sys.resource_stats - WHERE database_name = 'sample' AND start_time > DATEADD(day, -7, GETDATE()); - ``` - - Based on your database service tier, you can decide whether your workload fits into the lower compute size. If your database workload objective is 99.9 percent and the preceding query returns values greater than 99.9 percent for all three resource dimensions, your workload likely fits into the lower compute size. - - Looking at the fit percentage also gives you insight into whether you should move to the next higher compute size to meet your objective. For example, userdb1 shows the following CPU use for the past week: - - | Average CPU percent | Maximum CPU percent | - | --- | --- | - | 24.5 |100.00 | - - The average CPU is about a quarter of the limit of the compute size, which would fit well into the compute size of the database. But, the maximum value shows that the database reaches the limit of the compute size. Do you need to move to the next higher compute size? Look at how many times your workload reaches 100 percent, and then compare it to your database workload objective. 
- - ```sql - SELECT - 100*((COUNT(database_name) - SUM(CASE WHEN avg_cpu_percent >= 100 THEN 1 ELSE 0 END) * 1.0) / COUNT(database_name)) AS 'CPU Fit Percent', - 100*((COUNT(database_name) - SUM(CASE WHEN avg_log_write_percent >= 100 THEN 1 ELSE 0 END) * 1.0) / COUNT(database_name)) AS 'Log Write Fit Percent', - 100*((COUNT(database_name) - SUM(CASE WHEN avg_data_io_percent >= 100 THEN 1 ELSE 0 END) * 1.0) / COUNT(database_name)) AS 'Physical Data IO Fit Percent' - FROM sys.resource_stats - WHERE database_name = 'sample' AND start_time > DATEADD(day, -7, GETDATE()); - ``` - - If this query returns a value less than 99.9 percent for any of the three resource dimensions, consider either moving to the next higher compute size or use application-tuning techniques to reduce the load on the database. - -4. This exercise also considers your projected workload increase in the future. - -For elastic pools, you can monitor individual databases in the pool with the techniques described in this section. But you can also monitor the pool as a whole. For information, see [Monitor and manage an elastic pool](elastic-pool-overview.md). - -### Maximum concurrent requests - -To see the number of concurrent requests, run this Transact-SQL query on your database: - -```sql -SELECT COUNT(*) AS [Concurrent_Requests] -FROM sys.dm_exec_requests R; -``` - -To analyze the workload of a SQL Server database, modify this query to filter on the specific database you want to analyze. For example, if you have an on-premises database named MyDatabase, this Transact-SQL query returns the count of concurrent requests in that database: - -```sql -SELECT COUNT(*) AS [Concurrent_Requests] -FROM sys.dm_exec_requests R -INNER JOIN sys.databases D ON D.database_id = R.database_id -AND D.name = 'MyDatabase'; -``` - -This is just a snapshot at a single point in time. To get a better understanding of your workload and concurrent request requirements, you'll need to collect many samples over time. 
- -### Maximum concurrent logins - -You can analyze your user and application patterns to get an idea of the frequency of logins. You also can run real-world loads in a test environment to make sure that you're not hitting this or other limits we discuss in this article. There isn't a single query or dynamic management view (DMV) that can show you concurrent login counts or history. - -If multiple clients use the same connection string, the service authenticates each login. If 10 users simultaneously connect to a database by using the same username and password, there would be 10 concurrent logins. This limit applies only to the duration of the login and authentication. If the same 10 users connect to the database sequentially, the number of concurrent logins would never be greater than 1. - -> [!NOTE] -> Currently, this limit does not apply to databases in elastic pools. - -### Maximum sessions - -To see the number of current active sessions, run this Transact-SQL query on your database: - -```sql -SELECT COUNT(*) AS [Sessions] -FROM sys.dm_exec_connections; -``` - -If you're analyzing a SQL Server workload, modify the query to focus on a specific database. This query helps you determine possible session needs for the database if you are considering moving it to Azure. - -```sql -SELECT COUNT(*) AS [Sessions] -FROM sys.dm_exec_connections C -INNER JOIN sys.dm_exec_sessions S ON (S.session_id = C.session_id) -INNER JOIN sys.databases D ON (D.database_id = S.database_id) -WHERE D.name = 'MyDatabase'; -``` - -Again, these queries return a point-in-time count. If you collect multiple samples over time, you'll have the best understanding of your session use. - -You can get historical statistics on sessions by querying the [sys.resource_stats](/sql/relational-databases/system-catalog-views/sys-resource-stats-azure-sql-database) view and reviewing the `active_session_count` column. 
- -## Monitoring query performance - -Slow or long running queries can consume significant system resources. This section demonstrates how to use dynamic management views to detect a few common query performance problems. - -### Finding top N queries - -The following example returns information about the top five queries ranked by average CPU time. This example aggregates the queries according to their query hash, so that logically equivalent queries are grouped by their cumulative resource consumption. - -```sql -SELECT TOP 5 query_stats.query_hash AS "Query Hash", - SUM(query_stats.total_worker_time) / SUM(query_stats.execution_count) AS "Avg CPU Time", - MIN(query_stats.statement_text) AS "Statement Text" -FROM - (SELECT QS.*, - SUBSTRING(ST.text, (QS.statement_start_offset/2) + 1, - ((CASE statement_end_offset - WHEN -1 THEN DATALENGTH(ST.text) - ELSE QS.statement_end_offset END - - QS.statement_start_offset)/2) + 1) AS statement_text -FROM sys.dm_exec_query_stats AS QS -CROSS APPLY sys.dm_exec_sql_text(QS.sql_handle) as ST) as query_stats -GROUP BY query_stats.query_hash -ORDER BY 2 DESC; -``` - -### Monitoring blocked queries - -Slow or long-running queries can contribute to excessive resource consumption and be the consequence of blocked queries. The cause of the blocking can be poor application design, bad query plans, the lack of useful indexes, and so on. You can use the sys.dm_tran_locks view to get information about the current locking activity in database. For example code, see [sys.dm_tran_locks (Transact-SQL)](/sql/relational-databases/system-dynamic-management-views/sys-dm-tran-locks-transact-sql). For more information on troubleshooting blocking, see [Understand and resolve Azure SQL blocking problems](understand-resolve-blocking.md). - -### Monitoring deadlocks - -In some cases, two or more queries may mutually block one another, resulting in a deadlock. 
- -You can create an Extended Events trace a database in Azure SQL Database to capture deadlock events, then find related queries and their execution plans in Query Store. Learn more in [Analyze and prevent deadlocks in Azure SQL Database](analyze-prevent-deadlocks.md). - -For Azure SQL Managed Instance, refer to the [Deadlocks](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#deadlock_tools) of the [Transaction locking and row versioning guide](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide). - -### Monitoring query plans - -An inefficient query plan also may increase CPU consumption. The following example uses the [sys.dm_exec_query_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-query-stats-transact-sql) view to determine which query uses the most cumulative CPU. - -```sql -SELECT - highest_cpu_queries.plan_handle, - highest_cpu_queries.total_worker_time, - q.dbid, - q.objectid, - q.number, - q.encrypted, - q.[text] -FROM - (SELECT TOP 50 - qs.plan_handle, - qs.total_worker_time - FROM - sys.dm_exec_query_stats qs -ORDER BY qs.total_worker_time desc) AS highest_cpu_queries -CROSS APPLY sys.dm_exec_sql_text(plan_handle) AS q -ORDER BY highest_cpu_queries.total_worker_time DESC; -``` - -## Next steps - -- [Introduction to Azure SQL Database and Azure SQL Managed Instance](sql-database-paas-overview.md) -- [Diagnose and troubleshoot high CPU on Azure SQL Database](high-cpu-diagnose-troubleshoot.md) -- [Tune applications and databases for performance in Azure SQL Database and Azure SQL Managed Instance](performance-guidance.md) -- [Understand and resolve Azure SQL Database blocking problems](understand-resolve-blocking.md) -- [Analyze and prevent deadlocks in Azure SQL Database](analyze-prevent-deadlocks.md) \ No newline at end of file diff --git a/articles/azure-sql/database/move-resources-across-regions.md b/articles/azure-sql/database/move-resources-across-regions.md 
deleted file mode 100644 index f12449c1bbea0..0000000000000 --- a/articles/azure-sql/database/move-resources-across-regions.md +++ /dev/null @@ -1,202 +0,0 @@ ---- -title: Move resources to new region -titleSuffix: Azure SQL Database & Azure SQL Managed Instance -description: Learn how to move your database or managed instance to another region. -services: sql-database -ms.service: sql-db-mi -ms.subservice: data-movement -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: how-to -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 06/25/2019 ---- - -# Move resources to new region - Azure SQL Database & Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -This article teaches you a generic workflow for how to move your database or managed instance to a new region. - -## Overview - -There are various scenarios in which you'd want to move your existing database or managed instance from one region to another. For example, you're expanding your business to a new region and want to optimize it for the new customer base. Or you need to move the operations to a different region for compliance reasons. Or Azure released a new region that provides a better proximity and improves the customer experience. - -This article provides a general workflow for moving resources to a different region. The workflow consists of the following steps: - -1. Verify the prerequisites for the move. -1. Prepare to move the resources in scope. -1. Monitor the preparation process. -1. Test the move process. -1. Initiate the actual move. -1. Remove the resources from the source region. - -> [!NOTE] -> This article applies to migrations within the Azure public cloud or within the same sovereign cloud. - -> [!NOTE] -> To move Azure SQL databases and elastic pools to a different Azure region, you can also use Azure Resource Mover (in preview). 
Refer [this tutorial](../../resource-mover/tutorial-move-region-sql.md) for detailed steps to do the same. - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] - -## Move a database - -### Verify prerequisites - -1. Create a target server for each source server. -1. Configure the firewall with the right exceptions by using [PowerShell](scripts/create-and-configure-database-powershell.md). -1. Configure the servers with the correct logins. If you're not the subscription administrator or SQL server administrator, work with the administrator to assign the permissions that you need. For more information, see [How to manage Azure SQL Database security after disaster recovery](active-geo-replication-security-configure.md). -1. If your databases are encrypted with transparent data encryption (TDE) and bring your own encryption key (BYOK or Customer-Managed Key) in Azure Key Vault, ensure that the correct encryption material is provisioned in the target regions. - - The simplest way to do this is to add the encryption key from the existing key vault (that is being used as TDE Protector on source server) to the target server and then set the key as the TDE Protector on the target server - > [!NOTE] - > A server or managed instance in one region can now be connected to a key vault in any other region. - - As a best practice to ensure the target server has access to older encryption keys (required for restoring database backups), run the [Get-AzSqlServerKeyVaultKey](/powershell/module/az.sql/get-azsqlserverkeyvaultkey) cmdlet on the source server or [Get-AzSqlInstanceKeyVaultKey](/powershell/module/az.sql/get-azsqlinstancekeyvaultkey) cmdlet on the source managed instance to return the list of available keys and add those keys to the target server. 
- - For more information and best practices on configuring customer-managed TDE on the target server, see [Azure SQL transparent data encryption with customer-managed keys in Azure Key Vault](transparent-data-encryption-byok-overview.md). - - To move the key vault to the new region, see [Move an Azure key vault across regions](../../key-vault/general/move-region.md) -1. If database-level audit is enabled, disable it and enable server-level auditing instead. After failover, database-level auditing will require the cross-region traffic, which isn't desired or possible after the move. -1. For server-level audits, ensure that: - - The storage container, Log Analytics, or event hub with the existing audit logs is moved to the target region. - - Auditing is configured on the target server. For more information, see [Get started with SQL Database auditing](/azure/azure-sql/database/auditing-overview). -1. If your instance has a long-term retention policy (LTR), the existing LTR backups will remain associated with the current server. Because the target server is different, you'll be able to access the older LTR backups in the source region by using the source server, even if the server is deleted. - - > [!NOTE] - > This will be insufficient for moving between the sovereign cloud and a public region. Such a migration will require moving the LTR backups to the target server, which is not currently supported. - -### Prepare resources - -1. Create a [failover group](failover-group-add-single-database-tutorial.md#2---create-the-failover-group) between the server of the source and the server of the target. -1. Add the databases you want to move to the failover group. - - Replication of all added databases will be initiated automatically. For more information, see [Using failover groups with SQL Database](auto-failover-group-sql-db.md). 
- -### Monitor the preparation process - -You can periodically call [Get-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/get-azsqldatabasefailovergroup) to monitor replication of your databases from the source to the target. The output object of `Get-AzSqlDatabaseFailoverGroup` includes a property for the **ReplicationState**: - -- **ReplicationState = 2** (CATCH_UP) indicates the database is synchronized and can be safely failed over. -- **ReplicationState = 0** (SEEDING) indicates that the database is not yet seeded, and an attempt to fail over will fail. - -### Test synchronization - -After **ReplicationState** is `2`, connect to each database or subset of databases using the secondary endpoint `.secondary.database.windows.net` and perform any query against the databases to ensure connectivity, proper security configuration, and data replication. - -### Initiate the move - -1. Connect to the target server using the secondary endpoint `.secondary.database.windows.net`. -1. Use [Switch-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/switch-azsqldatabasefailovergroup) to switch the secondary managed instance to be the primary with full synchronization. This operation will succeed or it will roll back. -1. Verify that the command has completed successfully by using `nslook up .secondary.database.windows.net` to ascertain that the DNS CNAME entry points to the target region IP address. If the switch command fails, the CNAME won't be updated. - -### Remove the source databases - -Once the move completes, remove the resources in the source region to avoid unnecessary charges. - -1. Delete the failover group using [Remove-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/remove-azsqldatabasefailovergroup). -1. Delete each source database using [Remove-AzSqlDatabase](/powershell/module/az.sql/remove-azsqldatabase) for each of the databases on the source server. This will automatically terminate geo-replication links. -1. 
Delete the source server using [Remove-AzSqlServer](/powershell/module/az.sql/remove-azsqlserver). -1. Remove the key vault, audit storage containers, event hub, Azure Active Directory (Azure AD) instance, and other dependent resources to stop being billed for them. - -## Move elastic pools - -### Verify prerequisites - -1. Create a target server for each source server. -1. Configure the firewall with the right exceptions using [PowerShell](scripts/create-and-configure-database-powershell.md). -1. Configure the servers with the correct logins. If you're not the subscription administrator or server administrator, work with the administrator to assign the permissions that you need. For more information, see [How to manage Azure SQL Database security after disaster recovery](active-geo-replication-security-configure.md). -1. If your databases are encrypted with transparent data encryption and use your own encryption key in Azure Key Vault, ensure that the correct encryption material is provisioned in the target region. -1. Create a target elastic pool for each source elastic pool, making sure the pool is created in the same service tier, with the same name and the same size. -1. If a database-level audit is enabled, disable it and enable server-level auditing instead. After failover, database-level auditing will require cross-region traffic, which is not desired, or possible after the move. -1. For server-level audits, ensure that: - - The storage container, Log Analytics, or event hub with the existing audit logs is moved to the target region. - - Audit configuration is configured at the target server. For more information, see [SQL Database auditing](/azure/azure-sql/database/auditing-overview). -1. If your instance has a long-term retention policy (LTR), the existing LTR backups will remain associated with the current server. 
Because the target server is different, you'll be able to access the older LTR backups in the source region using the source server, even if the server is deleted. - - > [!NOTE] - > This will be insufficient for moving between the sovereign cloud and a public region. Such a migration will require moving the LTR backups to the target server, which is not currently supported. - -### Prepare to move - -1. Create a separate [failover group](failover-group-add-elastic-pool-tutorial.md#3---create-the-failover-group) between each elastic pool on the source server and its counterpart elastic pool on the target server. -1. Add all the databases in the pool to the failover group. - - Replication of the added databases will be initiated automatically. For more information, see [Using failover groups with SQL Database](auto-failover-group-sql-db.md). - - > [!NOTE] - > While it is possible to create a failover group that includes multiple elastic pools, we strongly recommend that you create a separate failover group for each pool. If you have a large number of databases across multiple elastic pools that you need to move, you can run the preparation steps in parallel and then initiate the move step in parallel. This process will scale better and will take less time compared to having multiple elastic pools in the same failover group. - -### Monitor the preparation process - -You can periodically call [Get-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/get-azsqldatabasefailovergroup) to monitor replication of your databases from the source to the target. The output object of `Get-AzSqlDatabaseFailoverGroup` includes a property for the **ReplicationState**: - -- **ReplicationState = 2** (CATCH_UP) indicates the database is synchronized and can be safely failed over. -- **ReplicationState = 0** (SEEDING) indicates that the database is not yet seeded, and an attempt to fail over will fail. 
- -### Test synchronization - -Once **ReplicationState** is `2`, connect to each database or subset of databases using the secondary endpoint `.secondary.database.windows.net` and perform any query against the databases to ensure connectivity, proper security configuration, and data replication. - -### Initiate the move - -1. Connect to the target server using the secondary endpoint `.secondary.database.windows.net`. -1. Use [Switch-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/switch-azsqldatabasefailovergroup) to switch the secondary managed instance to be the primary with full synchronization. This operation will either succeed, or it will roll back. -1. Verify that the command has completed successfully by using `nslook up .secondary.database.windows.net` to ascertain that the DNS CNAME entry points to the target region IP address. If the switch command fails, the CNAME won't be updated. - -### Remove the source elastic pools - -Once the move completes, remove the resources in the source region to avoid unnecessary charges. - -1. Delete the failover group using [Remove-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/remove-azsqldatabasefailovergroup). -1. Delete each source elastic pool on the source server using [Remove-AzSqlElasticPool](/powershell/module/az.sql/remove-azsqlelasticpool). -1. Delete the source server using [Remove-AzSqlServer](/powershell/module/az.sql/remove-azsqlserver). -1. Remove the key vault, audit storage containers, event hub, Azure AD instance, and other dependent resources to stop being billed for them. - -## Move a managed instance - -### Verify prerequisites - -1. For each source managed instance, create a target instance of SQL Managed Instance of the same size in the target region. -1. Configure the network for a managed instance. For more information, see [network configuration](../managed-instance/how-to-content-reference-guide.md#network-configuration). -1. 
Configure the target master database with the correct logins. If you're not the subscription or SQL Managed Instance administrator, work with the administrator to assign the permissions that you need. -1. If your databases are encrypted with transparent data encryption and use your own encryption key in Azure Key Vault, ensure that the Azure Key Vault with identical encryption keys exists in both source and target regions. For more information, see [Transparent data encryption with customer-managed keys in Azure Key Vault](transparent-data-encryption-byok-overview.md). -1. If audit is enabled for the managed instance, ensure that: - - The storage container or event hub with the existing logs is moved to the target region. - - Audit is configured on the target instance. For more information, see [Auditing with SQL Managed Instance](../managed-instance/auditing-configure.md). -1. If your instance has a long-term retention policy (LTR), the existing LTR backups will remain associated with the current instance. Because the target instance is different, you'll be able to access the older LTR backups in the source region using the source instance, even if the instance is deleted. - - > [!NOTE] - > This will be insufficient for moving between the sovereign cloud and a public region. Such a migration will require moving the LTR backups to the target instance, which is not currently supported. - -### Prepare resources - -Create a failover group between each source managed instance and the corresponding target instance of SQL Managed Instance. - -Replication of all databases on each instance will be initiated automatically. For more information, see [Auto-failover groups](auto-failover-group-overview.md). - -### Monitor the preparation process - -You can periodically call [Get-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/get-azsqldatabasefailovergroup) to monitor replication of your databases from the source to the target. 
The output object of `Get-AzSqlDatabaseFailoverGroup` includes a property for the **ReplicationState**: - -- **ReplicationState = 2** (CATCH_UP) indicates the database is synchronized and can be safely failed over. -- **ReplicationState = 0** (SEEDING) indicates that the database isn't yet seeded, and an attempt to fail over will fail. - -### Test synchronization - -Once **ReplicationState** is `2`, connect to each database, or subset of databases using the secondary endpoint `.secondary.database.windows.net` and perform any query against the databases to ensure connectivity, proper security configuration, and data replication. - -### Initiate the move - -1. Connect to the target managed instance by using the secondary endpoint `.secondary.database.windows.net`. -1. Use [Switch-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/switch-azsqldatabasefailovergroup) to switch the secondary managed instance to be the primary with full synchronization. This operation will succeed, or it will roll back. -1. Verify that the command has completed successfully by using `nslook up .secondary.database.windows.net` to ascertain that the DNS CNAME entry points to the target region IP address. If the switch command fails, the CNAME won't be updated. - -### Remove the source managed instances - -Once the move finishes, remove the resources in the source region to avoid unnecessary charges. - -1. Delete the failover group using [Remove-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/remove-azsqldatabasefailovergroup). This will drop the failover group configuration and terminate geo-replication links between the two instances. -1. Delete the source managed instance using [Remove-AzSqlInstance](/powershell/module/az.sql/remove-azsqlinstance). -1. Remove any additional resources in the resource group, such as the virtual cluster, virtual network, and security group. 
- -## Next steps - -[Manage](manage-data-after-migrating-to-database.md) your database after it has been migrated. \ No newline at end of file diff --git a/articles/azure-sql/database/network-access-controls-overview.md b/articles/azure-sql/database/network-access-controls-overview.md deleted file mode 100644 index 3aaf2a9eeee77..0000000000000 --- a/articles/azure-sql/database/network-access-controls-overview.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -title: Network Access Controls -titleSuffix: Azure SQL Database & Azure Synapse Analytics -description: Overview of how to manage and control network access for Azure SQL Database and Azure Synapse Analytics. -services: sql-database -ms.service: sql-database -ms.subservice: security -ms.custom: sqldbrb=3, devx-track-azurepowershell -ms.devlang: -ms.topic: conceptual -author: rohitnayakmsft -ms.author: rohitna -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 03/09/2020 ---- - -# Azure SQL Database and Azure Synapse Analytics network access controls - -When you create a logical SQL server from the [Azure portal](single-database-create-quickstart.md) for Azure SQL Database and Azure Synapse Analytics, the result is a public endpoint in the format, *yourservername.database.windows.net*. 
- -You can use the following network access controls to selectively allow access to a database via the public endpoint: - -- Allow Azure Services: When set to ON, other resources within the Azure boundary, for example an Azure Virtual Machine, can access SQL Database -- IP firewall rules: Use this feature to explicitly allow connections from a specific IP address, for example from on-premises machines - -You can also allow private access to the database from [virtual networks](../../virtual-network/virtual-networks-overview.md) via: - -- Virtual network firewall rules: Use this feature to allow traffic from a specific virtual network within the Azure boundary -- Private Link: Use this feature to create a private endpoint for [logical SQL server](logical-servers.md) within a specific virtual network - -> [!IMPORTANT] -> This article does *not* apply to **SQL Managed Instance**. For more information about the networking configuration, see [connecting to Azure SQL Managed Instance](../managed-instance/connect-application-instance.md) . - -See the below video for a high-level explanation of these access controls and what they do: - -> [!VIDEO https://docs.microsoft.com/shows/Data-Exposed/Data-Exposed--SQL-Database-Connectivity-Explained/player?WT.mc_id=dataexposed-c9-niner] - -## Allow Azure services - -By default during creation of a new logical SQL server [from the Azure portal](single-database-create-quickstart.md), this setting is set to **OFF**. This setting appears when connectivity is allowed using public service endpoint. - -You can also change this setting via the firewall pane after the logical SQL server is created as follows. - -![Screenshot of manage server firewall][2] - -When set to **ON**, your server allows communications from all resources inside the Azure boundary, that may or may not be part of your subscription. - -In many cases, the **ON** setting is more permissive than what most customers want. 
You may want to set this setting to **OFF** and replace it with more restrictive IP firewall rules or virtual network firewall rules. - -However, doing so affects the following features that run on virtual machines in Azure that aren't part of your virtual network and hence connect to the database via an Azure IP address: - -### Import Export Service - -Import Export Service doesn't work when **Allow access to Azure services** is set to **OFF**. However you can work around the problem [by manually running sqlpackage.exe from an Azure VM or performing the export](./database-import-export-azure-services-off.md) directly in your code by using the DACFx API. - -### Data Sync - -To use the Data sync feature with **Allow access to Azure services** set to **OFF**, you need to create individual firewall rule entries to [add IP addresses](firewall-create-server-level-portal-quickstart.md) from the **Sql service tag** for the region hosting the **Hub** database. -Add these server-level firewall rules to the servers hosting both **Hub** and **Member** databases (which may be in different regions) - -Use the following PowerShell script to generate IP addresses corresponding to the SQL service tag for West US region - -```powershell -PS C:\> $serviceTags = Get-AzNetworkServiceTag -Location eastus2 -PS C:\> $sql = $serviceTags.Values | Where-Object { $_.Name -eq "Sql.WestUS" } -PS C:\> $sql.Properties.AddressPrefixes.Count -70 -PS C:\> $sql.Properties.AddressPrefixes -13.86.216.0/25 -13.86.216.128/26 -13.86.216.192/27 -13.86.217.0/25 -13.86.217.128/26 -13.86.217.192/27 -``` - -> [!TIP] -> Get-AzNetworkServiceTag returns the global range for SQL Service Tag despite specifying the Location parameter. Be sure to filter it to the region that hosts the Hub database used by your sync group - -Note that the output of the PowerShell script is in Classless Inter-Domain Routing (CIDR) notation. 
This needs to be converted to a format of Start and End IP address using [Get-IPrangeStartEnd.ps1](https://gallery.technet.microsoft.com/scriptcenter/Start-and-End-IP-addresses-bcccc3a9) like this: - -```powershell -PS C:\> Get-IPrangeStartEnd -ip 52.229.17.93 -cidr 26 -start end ------ --- -52.229.17.64 52.229.17.127 -``` - -You can use this additional PowerShell script to convert all the IP addresses from CIDR to Start and End IP address format. - -```powershell -PS C:\>foreach( $i in $sql.Properties.AddressPrefixes) {$ip,$cidr= $i.split('/') ; Get-IPrangeStartEnd -ip $ip -cidr $cidr;} -start end ------ --- -13.86.216.0 13.86.216.127 -13.86.216.128 13.86.216.191 -13.86.216.192 13.86.216.223 -``` - -You can now add these as distinct firewall rules and then set **Allow Azure services to access server** to OFF. - -## IP firewall rules - -Ip based firewall is a feature of the logical SQL server in Azure that prevents all access to your server until you explicitly [add IP addresses](firewall-create-server-level-portal-quickstart.md) of the client machines. - -## Virtual network firewall rules - -In addition to IP rules, the server firewall allows you to define *virtual network rules*. -To learn more, see [Virtual network service endpoints and rules for Azure SQL Database](vnet-service-endpoint-rule-overview.md) or watch this video: - -> [!VIDEO https://docs.microsoft.com/shows/Data-Exposed/Data-Exposed--Demo--Vnet-Firewall-Rules-for-SQL-Database/player?WT.mc_id=dataexposed-c9-niner] - -### Azure Networking terminology - -Be aware of the following Azure Networking terms as you explore Virtual network firewall rules - -**Virtual network:** You can have virtual networks associated with your Azure subscription - -**Subnet:** A virtual network contains **subnets**. Any Azure virtual machines (VMs) that you have are assigned to subnets. One subnet can contain multiple VMs or other compute nodes. 
Compute nodes that are outside of your virtual network can't access your virtual network unless you configure your security to allow access. - -**Virtual network service endpoint:** A [Virtual network service endpoint](../../virtual-network/virtual-network-service-endpoints-overview.md) is a subnet whose property values include one or more formal Azure service type names. In this article we're interested in the type name of **Microsoft.Sql**, which refers to the Azure service named SQL Database. - -**Virtual network rule:** A virtual network rule for your server is a subnet that is listed in the access control list (ACL) of your server. To be in the ACL for your database in SQL Database, the subnet must contain the **Microsoft.Sql** type name. A virtual network rule tells your server to accept communications from every node that is on the subnet. - -## IP vs. Virtual network firewall rules - -The Azure SQL Database firewall allows you to specify IP address ranges from which communications are accepted into SQL Database. This approach is fine for stable IP addresses that are outside the Azure private network. However, virtual machines (VMs) within the Azure private network are configured with *dynamic* IP addresses. Dynamic IP addresses can change when your VM is restarted and in turn invalidate the IP-based firewall rule. It would be folly to specify a dynamic IP address in a firewall rule, in a production environment. - -You can work around this limitation by obtaining a *static* IP address for your VM. For details, see [Create a virtual machine with a static public IP address using the Azure portal](../../virtual-network/ip-services/virtual-network-deploy-static-pip-arm-portal.md). However, the static IP approach can become difficult to manage, and it's costly when done at scale. - -Virtual network rules are easier alternative to establish and to manage access from a specific subnet that contains your VMs. 
- -> [!NOTE] -> You cannot yet have SQL Database on a subnet. If your server was a node on a subnet in your virtual network, all nodes within the virtual network could communicate with your SQL Database. In this case, your VMs could communicate with SQL Database without needing any virtual network rules or IP rules. - -## Private Link - -Private Link allows you to connect to a server via a **private endpoint**. A private endpoint is a private IP address within a specific [virtual network](../../virtual-network/virtual-networks-overview.md) and Subnet. - -## Next steps - -- For a quickstart on creating a server-level IP firewall rule, see [Create a database in SQL Database](single-database-create-quickstart.md). - -- For a quickstart on creating a server-level virtual network firewall rule, see [Virtual Network service endpoints and rules for Azure SQL Database](vnet-service-endpoint-rule-overview.md). - -- For help with connecting to a database in SQL Database from open source or third-party applications, see [Client quickstart code samples to SQL Database](/previous-versions/azure/ee336282(v=azure.100)). 
- -- For information on additional ports that you may need to open, see the **SQL Database: Outside vs inside** section of [Ports beyond 1433 for ADO.NET 4.5 and SQL Database](adonet-v12-develop-direct-route-ports.md) - -- For an overview of Azure SQL Database Connectivity, see [Azure SQL Connectivity Architecture](connectivity-architecture.md) - -- For an overview of Azure SQL Database security, see [Securing your database](security-overview.md) - - -[1]: media/quickstart-create-single-database/new-server2.png -[2]: media/quickstart-create-single-database/manage-server-firewall.png \ No newline at end of file diff --git a/articles/azure-sql/database/outbound-firewall-rule-overview.md b/articles/azure-sql/database/outbound-firewall-rule-overview.md deleted file mode 100644 index 6dc4fd7678e31..0000000000000 --- a/articles/azure-sql/database/outbound-firewall-rule-overview.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: Outbound firewall rules -description: Overview of the outbound firewall rules feature for Azure SQL Database and Azure Synapse Analytics. -author: rohitnayakmsft -ms.author: rohitna -titleSuffix: Azure SQL Database and Azure Synapse Analytics -ms.service: sql-database -ms.subservice: security -ms.topic: conceptual -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 4/6/2022 ---- - -# Outbound firewall rules for Azure SQL Database and Azure Synapse Analytics -[!INCLUDE[appliesto-sqldb-asa](../includes/appliesto-sqldb-asa-formerly-sqldw.md)] - -Outbound firewall rules limit network traffic from the Azure SQL [logical server](logical-servers.md) to a customer defined list of Azure Storage accounts and Azure SQL logical servers. Any attempt to access storage accounts or databases not in this list is denied. 
The following [Azure SQL Database](sql-database-paas-overview.md) features support this feature: - -- [Auditing](auditing-overview.md) -- [Vulnerability assessment](sql-vulnerability-assessment.md) -- [Import/Export service](database-import-export-azure-services-off.md) -- [OPENROWSET](/sql/t-sql/functions/openrowset-transact-sql) -- [Bulk Insert](/sql/t-sql/statements/bulk-insert-transact-sql) -- [Elastic query](elastic-query-overview.md) - -> [!IMPORTANT] -> This article applies to both Azure SQL Database and [dedicated SQL pool (formerly SQL DW)](../../synapse-analytics\sql-data-warehouse\sql-data-warehouse-overview-what-is.md) in Azure Synapse Analytics. These settings apply to all SQL Database and dedicated SQL pool (formerly SQL DW) databases associated with the server. For simplicity, the term 'database' refers to both databases in Azure SQL Database and Azure Synapse Analytics. Likewise, any references to 'server' is referring to the [logical SQL server](logical-servers.md) that hosts Azure SQL Database and dedicated SQL pool (formerly SQL DW) in Azure Synapse Analytics. This article does *not* apply to Azure SQL Managed Instance or dedicated SQL pools in Azure Synapse Analytics workspaces. - -> [!IMPORTANT] -> Outbound firewall rules are defined at the [logical SQL server](logical-servers.md). Geo-replication and Auto-failover groups require the same set of rules to be defined on the primary and all secondaries. - -## Set outbound firewall rules in the Azure portal - -1. Browse to the **Outbound networking** section in the **Firewalls and virtual networks** blade for your Azure SQL Database and select **Configure outbound networking restrictions**. - - ![Screenshot of Outbound Networking section][1] - - This will open up the following blade on the right-hand side: - - ![Screenshot of Outbound Networking blade with nothing selected][2] - -1. 
Select the check box titled **Restrict outbound networking** and then add the FQDN for the Storage accounts (or SQL Databases) using the **Add domain** button. - - ![Screenshot of Outbound Networking blade showing how to add FQDN][3] - -1. After you're done, you should see a screen similar to the one below. Select **OK** to apply these settings. - - ![Screenshot of of Outbound Networking blade after FQDNs are added][4] - -## Set outbound firewall rules using PowerShell - -> [!IMPORTANT] -> Azure SQL Database still supports the PowerShell Azure Resource Manager module, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. The following script requires the [Azure PowerShell module](/powershell/azure/install-az-ps). - -The following PowerShell script shows how to change the outbound networking setting (using the **RestrictOutboundNetworkAccess** property): - -```powershell -# Get current settings for Outbound Networking -(Get-AzSqlServer -ServerName -ResourceGroupName ).RestrictOutboundNetworkAccess - -# Update setting for Outbound Networking -$SecureString = ConvertTo-SecureString "" -AsPlainText -Force - -Set-AzSqlServer -ServerName -ResourceGroupName -SqlAdministratorPassword $SecureString -RestrictOutboundNetworkAccess "Enabled" -``` - -Use these PowerShell cmdlets to configure outbound firewall rules - -```powershell -# List all Outbound Firewall Rules -Get-AzSqlServerOutboundFirewallRule -ServerName -ResourceGroupName - -# Add an Outbound Firewall Rule -New-AzSqlServerOutboundFirewallRule -ServerName -ResourceGroupName -AllowedFQDN testOBFR1 - -# List a specific Outbound Firewall Rule -Get-AzSqlServerOutboundFirewallRule -ServerName -ResourceGroupName -AllowedFQDN - -#Delete an Outbound Firewall Rule -Remove-AzSqlServerOutboundFirewallRule -ServerName -ResourceGroupName -AllowedFQDN -``` - 
-## Set outbound firewall rules using the Azure CLI - -> [!IMPORTANT] -> All scripts in this section require the [Azure CLI](/cli/azure/install-azure-cli). - -### Azure CLI in a bash shell - -The following CLI script shows how to change the outbound networking setting (using the **RestrictOutboundNetworkAccess** property) in a bash shell: - -```azurecli-interactive -# Get current setting for Outbound Networking -az sql server show -n sql-server-name -g sql-server-group --query "RestrictOutboundNetworkAccess" - -# Update setting for Outbound Networking -az sql server update -n sql-server-name -g sql-server-group --set RestrictOutboundNetworkAccess="Enabled" -``` - -Use these CLI commands to configure outbound firewall rules - -```azurecli-interactive -# List a server's outbound firewall rules. -az sql server outbound-firewall-rule list -g sql-server-group -s sql-server-name - -# Create a new outbound firewall rule -az sql server outbound-firewall-rule create -g sql-server-group -s sql-server-name --outbound-rule-fqdn allowedFQDN - -# Show the details for an outbound firewall rule. -az sql server outbound-firewall-rule show -g sql-server-group -s sql-server-name --outbound-rule-fqdn allowedFQDN - -# Delete the outbound firewall rule. -az sql server outbound-firewall-rule delete -g sql-server-group -s sql-server-name --outbound-rule-fqdn allowedFQDN -``` - -## Next steps - -- For an overview of Azure SQL Database security, see [Securing your database](security-overview.md). -- For an overview of Azure SQL Database connectivity, see [Azure SQL Connectivity Architecture](connectivity-architecture.md). -- Learn more about [Azure SQL Database and Azure Synapse Analytics network access controls](network-access-controls-overview.md). -- Learn about [Azure Private Link for Azure SQL Database and Azure Synapse Analytics](private-endpoint-overview.md). 
- - -[1]: media/outbound-firewall-rules/Step1.jpg -[2]: media/outbound-firewall-rules/Step2.jpg -[3]: media/outbound-firewall-rules/Step3.jpg -[4]: media/outbound-firewall-rules/Step4.jpg diff --git a/articles/azure-sql/database/performance-guidance.md b/articles/azure-sql/database/performance-guidance.md deleted file mode 100644 index c95776bb9fd54..0000000000000 --- a/articles/azure-sql/database/performance-guidance.md +++ /dev/null @@ -1,281 +0,0 @@ ---- -title: Performance tuning guidance for applications and databases -titleSuffix: Azure SQL Database & Azure SQL Managed Instance -description: Learn about tuning database applications and databases for performance in Azure SQL Database and Azure SQL Managed Instance. -services: sql-database -ms.service: sql-db-mi -ms.subservice: performance -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: conceptual -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, mathoma -ms.date: 03/22/2022 ---- -# Tune applications and databases for performance in Azure SQL Database and Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Once you have identified a performance issue that you are facing with Azure SQL Database and Azure SQL Managed Instance, this article is designed to help you: - -- Tune your application and apply some best practices that can improve performance. -- Tune the database by changing indexes and queries to more efficiently work with data. - -This article assumes that you have already worked through the Azure SQL Database [database advisor recommendations](database-advisor-implement-performance-recommendations.md) and the Azure SQL Database [auto-tuning recommendations](automatic-tuning-overview.md), if applicable. It also assumes that you have reviewed the [overview of monitoring and tuning](monitor-tune-overview.md) and its related articles related to troubleshooting performance issues. 
Additionally, this article assumes that you do not have a CPU resources, running-related performance issue that can be resolved by increasing the compute size or service tier to provide more resources to your database. - -## Tune your application - -In traditional on-premises SQL Server, the process of initial capacity planning often is separated from the process of running an application in production. Hardware and product licenses are purchased first, and performance tuning is done afterward. When you use Azure SQL, it's a good idea to interweave the process of running an application and tuning it. With the model of paying for capacity on demand, you can tune your application to use the minimum resources needed now, instead of over-provisioning on hardware based on guesses of future growth plans for an application, which often are incorrect. Some customers might choose not to tune an application, and instead choose to over-provision hardware resources. This approach might be a good idea if you don't want to change a key application during a busy period. But, tuning an application can minimize resource requirements and lower monthly bills when you use the service tiers in Azure SQL Database and Azure SQL Managed Instance. - -### Application characteristics - -Although Azure SQL Database and Azure SQL Managed Instance service tiers are designed to improve performance stability and predictability for an application, some best practices can help you tune your application to better take advantage of the resources at a compute size. Although many applications have significant performance gains simply by switching to a higher compute size or service tier, some applications need additional tuning to benefit from a higher level of service. 
For increased performance, consider additional application tuning for applications that have these characteristics: - -- **Applications that have slow performance because of "chatty" behavior** - - Chatty applications make excessive data access operations that are sensitive to network latency. You might need to modify these kinds of applications to reduce the number of data access operations to the database. For example, you might improve application performance by using techniques like batching ad hoc queries or moving the queries to stored procedures. For more information, see [Batch queries](#batch-queries). - -- **Databases with an intensive workload that can't be supported by an entire single machine** - - Databases that exceed the resources of the highest Premium compute size might benefit from scaling out the workload. For more information, see [Cross-database sharding](#cross-database-sharding) and [Functional partitioning](#functional-partitioning). - -- **Applications that have suboptimal queries** - - Applications, especially those in the data access layer, that have poorly tuned queries might not benefit from a higher compute size. This includes queries that lack a WHERE clause, have missing indexes, or have outdated statistics. These applications benefit from standard query performance-tuning techniques. For more information, see [Missing indexes](#identifying-and-adding-missing-indexes) and [Query tuning and hinting](#query-tuning-and-hinting). - -- **Applications that have suboptimal data access design** - - Applications that have inherent data access concurrency issues, for example deadlocking, might not benefit from a higher compute size. Consider reducing round trips against the database by caching data on the client side with the Azure Caching service or another caching technology. See [Application tier caching](#application-tier-caching). 
- - To prevent deadlocks from reoccurring in Azure SQL Database, see [Analyze and prevent deadlocks in Azure SQL Database](analyze-prevent-deadlocks.md). For Azure SQL Managed Instance, refer to the [Deadlocks](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#deadlock_tools) of the [Transaction locking and row versioning guide](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide). - -## Tune your database - -In this section, we look at some techniques that you can use to tune database to gain the best performance for your application and run it at the lowest possible compute size. Some of these techniques match traditional SQL Server tuning best practices, but others are specific to Azure SQL Database and Azure SQL Managed Instance. In some cases, you can examine the consumed resources for a database to find areas to further tune and extend traditional SQL Server techniques to work in Azure SQL Database and Azure SQL Managed Instance. - -### Identifying and adding missing indexes - -A common problem in OLTP database performance relates to the physical database design. Often, database schemas are designed and shipped without testing at scale (either in load or in data volume). Unfortunately, the performance of a query plan might be acceptable on a small scale but degrade substantially under production-level data volumes. The most common source of this issue is the lack of appropriate indexes to satisfy filters or other restrictions in a query. Often, missing indexes manifests as a table scan when an index seek could suffice. 
- -In this example, the selected query plan uses a scan when a seek would suffice: - -```sql -DROP TABLE dbo.missingindex; -CREATE TABLE dbo.missingindex (col1 INT IDENTITY PRIMARY KEY, col2 INT); -DECLARE @a int = 0; -SET NOCOUNT ON; -BEGIN TRANSACTION - WHILE @a < 20000 - BEGIN - INSERT INTO dbo.missingindex(col2) VALUES (@a); - SET @a += 1; - END - COMMIT TRANSACTION; - GO -SELECT m1.col1 - FROM dbo.missingindex m1 INNER JOIN dbo.missingindex m2 ON(m1.col1=m2.col1) - WHERE m1.col2 = 4; -``` - -![A query plan with missing indexes](./media/performance-guidance/query_plan_missing_indexes.png) - -Azure SQL Database and Azure SQL Managed Instance can help you find and fix common missing index conditions. DMVs that are built into Azure SQL Database and Azure SQL Managed Instance look at query compilations in which an index would significantly reduce the estimated cost to run a query. During query execution, the database engine tracks how often each query plan is executed, and tracks the estimated gap between the executing query plan and the imagined one where that index existed. You can use these DMVs to quickly guess which changes to your physical database design might improve overall workload cost for a database and its real workload. 
- -You can use this query to evaluate potential missing indexes: - -```sql -SELECT - CONVERT (varchar, getdate(), 126) AS runtime - , mig.index_group_handle - , mid.index_handle - , CONVERT (decimal (28,1), migs.avg_total_user_cost * migs.avg_user_impact * - (migs.user_seeks + migs.user_scans)) AS improvement_measure - , 'CREATE INDEX missing_index_' + CONVERT (varchar, mig.index_group_handle) + '_' + - CONVERT (varchar, mid.index_handle) + ' ON ' + mid.statement + ' - (' + ISNULL (mid.equality_columns,'') - + CASE WHEN mid.equality_columns IS NOT NULL - AND mid.inequality_columns IS NOT NULL - THEN ',' ELSE '' END + ISNULL (mid.inequality_columns, '') + ')' - + ISNULL (' INCLUDE (' + mid.included_columns + ')', '') AS create_index_statement - , migs.* - , mid.database_id - , mid.[object_id] -FROM sys.dm_db_missing_index_groups AS mig - INNER JOIN sys.dm_db_missing_index_group_stats AS migs - ON migs.group_handle = mig.index_group_handle - INNER JOIN sys.dm_db_missing_index_details AS mid - ON mig.index_handle = mid.index_handle - ORDER BY migs.avg_total_user_cost * migs.avg_user_impact * (migs.user_seeks + migs.user_scans) DESC -``` - -In this example, the query resulted in this suggestion: - -```sql -CREATE INDEX missing_index_5006_5005 ON [dbo].[missingindex] ([col2]) -``` - -After it's created, that same SELECT statement picks a different plan, which uses a seek instead of a scan, and then executes the plan more efficiently: - -![A query plan with corrected indexes](./media/performance-guidance/query_plan_corrected_indexes.png) - -The key insight is that the IO capacity of a shared, commodity system is more limited than that of a dedicated server machine. There's a premium on minimizing unnecessary IO to take maximum advantage of the system in the resources of each compute size of the service tiers. 
Appropriate physical database design choices can significantly improve the latency for individual queries, improve the throughput of concurrent requests handled per scale unit, and minimize the costs required to satisfy the query. - -For more information about tuning indexes using missing index requests, see [Tune nonclustered indexes with missing index suggestions](/sql/relational-databases/indexes/tune-nonclustered-missing-index-suggestions). - -### Query tuning and hinting - -The query optimizer in Azure SQL Database and Azure SQL Managed Instance is similar to the traditional SQL Server query optimizer. Most of the best practices for tuning queries and understanding the reasoning model limitations for the query optimizer also apply to Azure SQL Database and Azure SQL Managed Instance. If you tune queries in Azure SQL Database and Azure SQL Managed Instance, you might get the additional benefit of reducing aggregate resource demands. Your application might be able to run at a lower cost than an un-tuned equivalent because it can run at a lower compute size. - -An example that is common in SQL Server and which also applies to Azure SQL Database and Azure SQL Managed Instance is how the query optimizer "sniffs" parameters. During compilation, the query optimizer evaluates the current value of a parameter to determine whether it can generate a more optimal query plan. Although this strategy often can lead to a query plan that is significantly faster than a plan compiled without known parameter values, currently it works imperfectly both in SQL Server, in Azure SQL Database, and Azure SQL Managed Instance. Sometimes the parameter is not sniffed, and sometimes the parameter is sniffed but the generated plan is suboptimal for the full set of parameter values in a workload. Microsoft includes query hints (directives) so that you can specify intent more deliberately and override the default behavior of parameter sniffing. 
Often, if you use hints, you can fix cases in which the default SQL Server, Azure SQL Database, and Azure SQL Managed Instance behavior is imperfect for a specific customer workload. - -The next example demonstrates how the query processor can generate a plan that is suboptimal both for performance and resource requirements. This example also shows that if you use a query hint, you can reduce query run time and resource requirements for your database: - -```sql -DROP TABLE psptest1; -CREATE TABLE psptest1(col1 int primary key identity, col2 int, col3 binary(200)); -DECLARE @a int = 0; -SET NOCOUNT ON; -BEGIN TRANSACTION - WHILE @a < 20000 - BEGIN - INSERT INTO psptest1(col2) values (1); - INSERT INTO psptest1(col2) values (@a); - SET @a += 1; - END - COMMIT TRANSACTION - CREATE INDEX i1 on psptest1(col2); -GO - -CREATE PROCEDURE psp1 (@param1 int) - AS - BEGIN - INSERT INTO t1 SELECT * FROM psptest1 - WHERE col2 = @param1 - ORDER BY col2; - END - GO - -CREATE PROCEDURE psp2 (@param2 int) - AS - BEGIN - INSERT INTO t1 SELECT * FROM psptest1 WHERE col2 = @param2 - ORDER BY col2 - OPTION (OPTIMIZE FOR (@param2 UNKNOWN)) - END - GO - -CREATE TABLE t1 (col1 int primary key, col2 int, col3 binary(200)); -GO -``` - -The setup code creates a table that has skewed data distribution. The optimal query plan differs based on which parameter is selected. Unfortunately, the plan caching behavior doesn't always recompile the query based on the most common parameter value. So, it's possible for a suboptimal plan to be cached and used for many values, even when a different plan might be a better plan choice on average. Then the query plan creates two stored procedures that are identical, except that one has a special query hint. 
- -```sql --- Prime Procedure Cache with scan plan -EXEC psp1 @param1=1; -TRUNCATE TABLE t1; - --- Iterate multiple times to show the performance difference -DECLARE @i int = 0; -WHILE @i < 1000 - BEGIN - EXEC psp1 @param1=2; - TRUNCATE TABLE t1; - SET @i += 1; - END -``` - -We recommend that you wait at least 10 minutes before you begin part 2 of the example, so that the results are distinct in the resulting telemetry data. - -```sql -EXEC psp2 @param2=1; -TRUNCATE TABLE t1; - -DECLARE @i int = 0; - WHILE @i < 1000 - BEGIN - EXEC psp2 @param2=2; - TRUNCATE TABLE t1; - SET @i += 1; - END -``` - -Each part of this example attempts to run a parameterized insert statement 1,000 times (to generate a sufficient load to use as a test data set). When it executes stored procedures, the query processor examines the parameter value that is passed to the procedure during its first compilation (parameter "sniffing"). The processor caches the resulting plan and uses it for later invocations, even if the parameter value is different. The optimal plan might not be used in all cases. Sometimes you need to guide the optimizer to pick a plan that is better for the average case rather than the specific case from when the query was first compiled. In this example, the initial plan generates a "scan" plan that reads all rows to find each value that matches the parameter: - -![Query tuning by using a scan plan](./media/performance-guidance/query_tuning_1.png) - -Because we executed the procedure by using the value 1, the resulting plan was optimal for the value 1 but was suboptimal for all other values in the table. The result likely isn't what you would want if you were to pick each plan randomly, because the plan performs more slowly and uses more resources. - -If you run the test with `SET STATISTICS IO` set to `ON`, the logical scan work in this example is done behind the scenes. 
You can see that there are 1,148 reads done by the plan (which is inefficient, if the average case is to return just one row): - -![Query tuning by using a logical scan](./media/performance-guidance/query_tuning_2.png) - -The second part of the example uses a query hint to tell the optimizer to use a specific value during the compilation process. In this case, it forces the query processor to ignore the value that is passed as the parameter, and instead to assume `UNKNOWN`. This refers to a value that has the average frequency in the table (ignoring skew). The resulting plan is a seek-based plan that is faster and uses fewer resources, on average, than the plan in part 1 of this example: - -![Query tuning by using a query hint](./media/performance-guidance/query_tuning_3.png) - -You can see the effect in the **sys.resource_stats** table (there is a delay from the time that you execute the test and when the data populates the table). For this example, part 1 executed during the 22:25:00 time window, and part 2 executed at 22:35:00. The earlier time window used more resources in that time window than the later one (because of plan efficiency improvements). - -```sql -SELECT TOP 1000 * -FROM sys.resource_stats -WHERE database_name = 'resource1' -ORDER BY start_time DESC -``` - -![Query tuning example results](./media/performance-guidance/query_tuning_4.png) - -> [!NOTE] -> Although the volume in this example is intentionally small, the effect of suboptimal parameters can be substantial, especially on larger databases. The difference, in extreme cases, can be between seconds for fast cases and hours for slow cases. - -You can examine **sys.resource_stats** to determine whether the resource for a test uses more or fewer resources than another test. When you compare data, separate the timing of tests so that they are not in the same 5-minute window in the **sys.resource_stats** view. 
The goal of the exercise is to minimize the total amount of resources used, and not to minimize the peak resources. Generally, optimizing a piece of code for latency also reduces resource consumption. Make sure that the changes you make to an application are necessary, and that the changes don't negatively affect the customer experience for someone who might be using query hints in the application. - -If a workload has a set of repeating queries, often it makes sense to capture and validate the optimality of your plan choices because it drives the minimum resource size unit required to host the database. After you validate it, occasionally reexamine the plans to help you make sure that they have not degraded. You can learn more about [query hints (Transact-SQL)](/sql/t-sql/queries/hints-transact-sql-query). - -### Very large database architectures - -Before the release of [Hyperscale](service-tier-hyperscale.md) service tier for single databases in Azure SQL Database, customers used to hit capacity limits for individual databases. These capacity limits still exist for pooled databases in Azure SQL Database elastic pools and instance databases in Azure SQL Managed Instances. The following two sections discuss two options for solving problems with very large databases in Azure SQL Database and Azure SQL Managed Instance when you cannot use the Hyperscale service tier. - -### Cross-database sharding - -Because Azure SQL Database and Azure SQL Managed Instance runs on commodity hardware, the capacity limits for an individual database are lower than for a traditional on-premises SQL Server installation. Some customers use sharding techniques to spread database operations over multiple databases when the operations don't fit inside the limits of an individual database in Azure SQL Database and Azure SQL Managed Instance. 
Most customers who use sharding techniques in Azure SQL Database and Azure SQL Managed Instance split their data on a single dimension across multiple databases. For this approach, you need to understand that OLTP applications often perform transactions that apply to only one row or to a small group of rows in the schema. - -> [!NOTE] -> Azure SQL Database now provides a library to assist with sharding. For more information, see [Elastic Database client library overview](elastic-database-client-library.md). - -For example, if a database has customer name, order, and order details (like the traditional example Northwind database that ships with SQL Server), you could split this data into multiple databases by grouping a customer with the related order and order detail information. You can guarantee that the customer's data stays in an individual database. The application would split different customers across databases, effectively spreading the load across multiple databases. With sharding, customers not only can avoid the maximum database size limit, but Azure SQL Database and Azure SQL Managed Instance also can process workloads that are significantly larger than the limits of the different compute sizes, as long as each individual database fits into its service tier limits. - -Although database sharding doesn't reduce the aggregate resource capacity for a solution, it's highly effective at supporting very large solutions that are spread over multiple databases. Each database can run at a different compute size to support very large, "effective" databases with high resource requirements. - -#### Functional partitioning - -Users often combine many functions in an individual database. For example, if an application has logic to manage inventory for a store, that database might have logic associated with inventory, tracking purchase orders, stored procedures, and indexed or materialized views that manage end-of-month reporting. 
This technique makes it easier to administer the database for operations like backup, but it also requires you to size the hardware to handle the peak load across all functions of an application. - -If you use a scale-out architecture in Azure SQL Database and Azure SQL Managed Instance, it's a good idea to split different functions of an application into different databases. By using this technique, each application scales independently. As an application becomes busier (and the load on the database increases), the administrator can choose independent compute sizes for each function in the application. At the limit, with this architecture, an application can be larger than a single commodity machine can handle because the load is spread across multiple machines. - -### Batch queries - -For applications that access data by using high-volume, frequent, ad hoc querying, a substantial amount of response time is spent on network communication between the application tier and the database tier. Even when both the application and the database are in the same data center, the network latency between the two might be magnified by a large number of data access operations. To reduce the network round trips for the data access operations, consider using the option to either batch the ad hoc queries, or to compile them as stored procedures. If you batch the ad hoc queries, you can send multiple queries as one large batch in a single trip to the database. If you compile ad hoc queries in a stored procedure, you could achieve the same result as if you batch them. Using a stored procedure also gives you the benefit of increasing the chances of caching the query plans in the database so you can use the stored procedure again. - -Some applications are write-intensive. Sometimes you can reduce the total IO load on a database by considering how to batch writes together. 
Often, this is as simple as using explicit transactions instead of auto-commit transactions in stored procedures and ad hoc batches. For an evaluation of different techniques you can use, see [Batching techniques for database applications in Azure](../performance-improve-use-batching.md). Experiment with your own workload to find the right model for batching. Be sure to understand that a model might have slightly different transactional consistency guarantees. Finding the right workload that minimizes resource use requires finding the right combination of consistency and performance trade-offs. - -### Application-tier caching - -Some database applications have read-heavy workloads. Caching layers might reduce the load on the database and might potentially reduce the compute size required to support a database by using Azure SQL Database and Azure SQL Managed Instance. With [Azure Cache for Redis](https://azure.microsoft.com/services/cache/), if you have a read-heavy workload, you can read the data once (or perhaps once per application-tier machine, depending on how it is configured), and then store that data outside of your database. This is a way to reduce database load (CPU and read IO), but there is an effect on transactional consistency because the data being read from the cache might be out of sync with the data in the database. Although in many applications some level of inconsistency is acceptable, that's not true for all workloads. You should fully understand any application requirements before you implement an application-tier caching strategy. - -## Get configuration and design tips - -If you use Azure SQL Database, you can execute an open-source T-SQL [script for improving database configuration and design in Azure SQL DB](https://aka.ms/sqldbtips). The script will analyze your database on demand and provide tips to improve database performance and health. 
Some tips suggest configuration and operational changes based on best practices, while other tips recommend design changes suitable for your workload, such as enabling advanced database engine features. - -To learn more about the script and get started, visit the [Azure SQL Tips wiki](https://aka.ms/sqldbtipswiki) page. - -## Next steps - -- Learn about the [DTU-based purchasing model](service-tiers-dtu.md) -- Learn more about the [vCore-based purchasing model](service-tiers-vcore.md) -- Read [What is an Azure elastic pool?](elastic-pool-overview.md) -- Discover [When to consider an elastic pool](elastic-pool-overview.md) -- Read about [Monitoring Microsoft Azure SQL Database and Azure SQL Managed Instance performance using dynamic management views](monitoring-with-dmvs.md) -- Learn to [Diagnose and troubleshoot high CPU on Azure SQL Database](high-cpu-diagnose-troubleshoot.md) -- [Tune nonclustered indexes with missing index suggestions](/sql/relational-databases/indexes/tune-nonclustered-missing-index-suggestions) -- Video: [Data Loading Best Practices on Azure SQL Database](/shows/data-exposed/data-loading-best-practices-on-azure-sql-database?WT.mc_id=dataexposed-c9-niner) \ No newline at end of file diff --git a/articles/azure-sql/database/planned-maintenance.md b/articles/azure-sql/database/planned-maintenance.md deleted file mode 100644 index 5083a960b06b5..0000000000000 --- a/articles/azure-sql/database/planned-maintenance.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Plan for Azure maintenance events -description: Learn how to prepare for planned maintenance events in Azure SQL Database and Azure SQL Managed Instance. 
-services: sql-database -ms.service: sql-db-mi -ms.subservice: service-overview -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: aamalvea -ms.author: aamalvea -ms.reviewer: kendralittle, mathoma -ms.date: 03/07/2022 ---- - -# Plan for Azure maintenance events in Azure SQL Database and Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Learn how to prepare for planned maintenance events on your database in Azure SQL Database and Azure SQL Managed Instance. - -## What is a planned maintenance event? - -To keep Azure SQL Database and Azure SQL Managed Instance services secure, compliant, stable, and performant, updates are being performed through the service components almost continuously. Thanks to the modern and robust service architecture and innovative technologies like [hot patching](https://aka.ms/azuresqlhotpatching), majority of updates are fully transparent and non-impactful in terms of service availability. Still, few types of updates cause short service interrupts and require special treatment. - -During planned maintenance, members of the database quorum will go offline one at a time, with the intent that there is one responding primary replica. For Business Critical and Premium databases, at least one secondary replica will also be online to ensure no client downtime. When the primary replica needs to be brought offline, a reconfiguration process will occur. For Business Critical and Premium databases one of the secondary replicas will become the new primary replica. For General Purpose, Standard, and Basic databases the primary replica will move to another stateless compute node with sufficient free capacity. - -## What to expect during a planned maintenance event - -Maintenance event can produce single or multiple reconfigurations, depending on the constellation of the primary and secondary replicas at the beginning of the maintenance event. 
On average, 1.7 reconfigurations occur per planned maintenance event. Reconfigurations generally finish within 30 seconds. The average is eight seconds. If already connected, your application must reconnect to the new primary replica of your database. If a new connection is attempted while the database is undergoing a reconfiguration before the new primary replica is online, you get error 40613 (Database Unavailable): *"Database '{databasename}' on server '{servername}' is not currently available. Please retry the connection later."* If your database has a long-running query, this query will be interrupted during a reconfiguration and will need to be restarted. - -## How to simulate a planned maintenance event - -Ensuring that your client application is resilient to maintenance events prior to deploying to production will help mitigate the risk of application faults and will contribute to application availability for your end users.You can test behavior of your client application during planned maintenance events by [Testing Application Fault Resiliency](./high-availability-sla.md#testing-application-fault-resiliency) via PowerShell, CLI or REST API. Also see [initiating manual failover](https://aka.ms/mifailover-techblog) for Managed Instance. It will produce identical behavior as maintenance event bringing primary replica offline. - -## Retry logic - -Any client production application that connects to a cloud database service should implement a robust connection [retry logic](troubleshoot-common-connectivity-issues.md#retry-logic-for-transient-errors). This will help make reconfigurations transparent to the end users, or at least minimize negative effects. - -### Service Health Alert - -If you want to receive alerts for service issues or planned maintenance activities, you can use Service Health alerts in the Azure portal with appropriate event type and action groups. 
For more information, see this [Receive alerts on Azure service notifications](../../service-health/alerts-activity-log-service-notifications-portal.md#create-service-health-alert-using-azure-portal). - -## Resource health - -If your database is experiencing log-on failures, check the [Resource Health](../../service-health/resource-health-overview.md#get-started) window in the [Azure portal](https://portal.azure.com) for the current status. The Health History section contains the downtime reason for each event (when available). - -## Maintenance window feature - -The [maintenance window feature](maintenance-window.md) allows for the configuration of predictable maintenance window schedules for eligible Azure SQL databases and SQL managed instances. [Maintenance window advance notifications](../database/advance-notifications.md) are available for databases configured to use a non-default [maintenance window](maintenance-window.md). Maintenance windows and advance notifications for maintenance windows are generally available for Azure SQL Database. For Azure SQL Managed Instance, maintenance windows are generally available but advance notifications are in public preview. - - -## Next steps - -- Learn more about [Resource Health](resource-health-to-troubleshoot-connectivity.md) for Azure SQL Database and Azure SQL Managed Instance. -- For more information about retry logic, see [Retry logic for transient errors](troubleshoot-common-connectivity-issues.md#retry-logic-for-transient-errors). -- Configure maintenance window schedules with the [Maintenance window](maintenance-window.md) feature. 
\ No newline at end of file diff --git a/articles/azure-sql/database/policy-reference.md b/articles/azure-sql/database/policy-reference.md deleted file mode 100644 index 0188101e11e4f..0000000000000 --- a/articles/azure-sql/database/policy-reference.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: Built-in policy definitions for Azure SQL Database -description: Lists Azure Policy built-in policy definitions for Azure SQL Database and SQL Managed Instance. These built-in policy definitions provide common approaches to managing your Azure resources. -ms.date: 03/08/2022 -ms.topic: reference -author: LitKnd -ms.author: kendralittle - -ms.service: sql-database -ms.subservice: service-overview -ms.custom: subject-policy-reference ---- -# Azure Policy built-in definitions for Azure SQL Database & SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -This page is an index of [Azure Policy](../../governance/policy/overview.md) built-in policy -definitions for Azure SQL Database and SQL Managed Instance. For additional Azure Policy built-ins for other services, see -[Azure Policy built-in definitions](../../governance/policy/samples/built-in-policies.md). - -The name of each built-in policy definition links to the policy definition in the Azure portal. Use -the link in the **Version** column to view the source on the -[Azure Policy GitHub repo](https://github.com/Azure/azure-policy). - -## Azure SQL Database & SQL Managed Instance - -[!INCLUDE [azure-policy-reference-service-sqldatabase](../../../includes/policy/reference/byrp/microsoft.sql.md)] - -## Limitations -- Azure Policy applicable to a Azure SQL Database creation is not enforced when using T-SQL or SSMS. - -## Next steps - -- See the built-ins on the [Azure Policy GitHub repo](https://github.com/Azure/azure-policy). -- Review the [Azure Policy definition structure](../../governance/policy/concepts/definition-structure.md). 
-- Review [Understanding policy effects](../../governance/policy/concepts/effects.md). diff --git a/articles/azure-sql/database/powershell-script-content-guide.md b/articles/azure-sql/database/powershell-script-content-guide.md deleted file mode 100644 index aa9b3db04feac..0000000000000 --- a/articles/azure-sql/database/powershell-script-content-guide.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Azure PowerShell script examples -description: Use Azure PowerShell script examples to help you create and manage Azure SQL Database and Azure SQL Managed Instance resources. -services: sql-database -ms.service: sql-db-mi -ms.subservice: development -ms.custom: sqldbrb=2 -ms.devlang: PowerShell -ms.topic: sample -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 03/25/2019 ---- - -# Azure PowerShell samples for Azure SQL Database and Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Azure SQL Database and Azure SQL Managed Instance enable you to configure your databases, instances, and pools using Azure PowerShell. - -[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use the PowerShell locally, this tutorial requires AZ PowerShell 1.4.0 or later. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. - -## [Azure SQL Database](#tab/single-database) - -The following table includes links to sample Azure PowerShell scripts for Azure SQL Database. 
- -|Link|Description| -|---|---| -|**Create and configure single databases and elastic pools**|| -| [Create a single database and configure a server-level firewall rule](scripts/create-and-configure-database-powershell.md?toc=%2fpowershell%2fmodule%2ftoc.json) | This PowerShell script creates a single database and configures a server-level IP firewall rule. | -| [Create elastic pools and move pooled databases](scripts/move-database-between-elastic-pools-powershell.md?toc=%2fpowershell%2fmodule%2ftoc.json) | This PowerShell script creates elastic pools, moves pooled databases, and changes compute sizes.| -|**Configure geo-replication and failover**|| -| [Configure and fail over a single database using active geo-replication](scripts/setup-geodr-and-failover-database-powershell.md?toc=%2fpowershell%2fmodule%2ftoc.json)| This PowerShell script configures active geo-replication for a single database and fails it over to the secondary replica. | -| [Configure and fail over a pooled database using active geo-replication](scripts/setup-geodr-and-failover-elastic-pool-powershell.md?toc=%2fpowershell%2fmodule%2ftoc.json)| This PowerShell script configures active geo-replication for a database in an elastic pool and fails it over to the secondary replica. | -|**Configure a failover group**|| -| [Configure a failover group for a single database](scripts/add-database-to-failover-group-powershell.md?toc=%2fpowershell%2fmodule%2ftoc.json) | This PowerShell script creates a database and a failover group, adds the database to the failover group, and tests failover to the secondary server. | -| [Configure a failover group for an elastic pool](scripts/add-elastic-pool-to-failover-group-powershell.md?toc=%2fpowershell%2fmodule%2ftoc.json) | This PowerShell script creates a database, adds it to an elastic pool, adds the elastic pool to the failover group, and tests failover to the secondary server. 
| -|**Scale a single database and an elastic pool**|| -| [Scale a single database](scripts/monitor-and-scale-database-powershell.md?toc=%2fpowershell%2fmodule%2ftoc.json) | This PowerShell script monitors the performance metrics of a single database, scales it to a higher compute size, and creates an alert rule on one of the performance metrics. | -| [Scale an elastic pool](scripts/monitor-and-scale-pool-powershell.md?toc=%2fpowershell%2fmodule%2ftoc.json) | This PowerShell script monitors the performance metrics of an elastic pool, scales it to a higher compute size, and creates an alert rule on one of the performance metrics. | -| **Auditing and threat detection** | -| [Configure auditing and threat-detection](scripts/auditing-threat-detection-powershell-configure.md?toc=%2fpowershell%2fmodule%2ftoc.json)| This PowerShell script configures auditing and threat-detection policies for a database. | -| **Restore, copy, and import a database**|| -| [Restore a database](scripts/restore-database-powershell.md?toc=%2fpowershell%2fmodule%2ftoc.json)| This PowerShell script restores a database from a geo-redundant backup and restores a deleted database to the latest backup. | -| [Copy a database to a new server](scripts/copy-database-to-new-server-powershell.md?toc=%2fpowershell%2fmodule%2ftoc.json)| This PowerShell script creates a copy of an existing database in a new server. | -| [Import a database from a bacpac file](scripts/import-from-bacpac-powershell.md?toc=%2fpowershell%2fmodule%2ftoc.json)| This PowerShell script imports a database into Azure SQL Database from a bacpac file. | -| **Sync data between databases**|| -| [Sync data between databases](scripts/sql-data-sync-sync-data-between-sql-databases.md?toc=%2fpowershell%2fmodule%2ftoc.json) | This PowerShell script configures Data Sync to sync between multiple databases in Azure SQL Database. 
| -| [Sync data between SQL Database and SQL Server on-premises](scripts/sql-data-sync-sync-data-between-azure-onprem.md?toc=%2fpowershell%2fmodule%2ftoc.json) | This PowerShell script configures Data Sync to sync between a database in Azure SQL Database and a SQL Server on-premises database. | -| [Update the SQL Data Sync sync schema](scripts/update-sync-schema-in-sync-group.md?toc=%2fpowershell%2fmodule%2ftoc.json) | This PowerShell script adds or removes items from the Data Sync sync schema. | - - -Learn more about the [Single-database Azure PowerShell API](single-database-manage.md#powershell). - -## [Azure SQL Managed Instance](#tab/managed-instance) - -The following table includes links to sample Azure PowerShell scripts for Azure SQL Managed Instance. - -|Link|Description| -|---|---| -|**Create and configure managed instances**|| -| [Create and manage a managed instance](../managed-instance/scripts/create-configure-managed-instance-powershell.md) | This PowerShell script shows you how to create and manage a managed instance using Azure PowerShell. | -| [Create and manage a managed instance using the Azure Resource Manager template](../managed-instance/create-template-quickstart.md?toc=%2fpowershell%2fmodule%2ftoc.json) | This PowerShell script shows you how to create and manage a managed instance using Azure PowerShell and the Azure Resource Manager template.| -| [Restore database to a managed instance in another geo-region](../managed-instance/scripts/restore-geo-backup.md) | This PowerShell script takes a backup of one database and restores it to another region. This is known as a geo-restore disaster-recovery scenario. 
| -| **Configure transparent data encryption**|| -| [Manage transparent data encryption in a managed instance using your own key from Azure Key Vault](../managed-instance/scripts/transparent-data-encryption-byok-powershell.md?toc=%2fpowershell%2fmodule%2ftoc.json)| This PowerShell script configures transparent data encryption in a Bring Your Own Key scenario for Azure SQL Managed Instance, using a key from Azure Key Vault.| -|**Configure a failover group**|| -| [Configure a failover group for a managed instance](../managed-instance/scripts/add-to-failover-group-powershell.md?toc=%2fpowershell%2fmodule%2ftoc.json) | This PowerShell script creates two managed instances, adds them to a failover group, and then tests failover from the primary managed instance to the secondary managed instance. | - - -Learn more about [PowerShell cmdlets for Azure SQL Managed Instance](../managed-instance/api-references-create-manage-instance.md#powershell-create-and-configure-managed-instances). - ---- - -## Additional resources - -The examples listed on this page use the [PowerShell cmdlets](/powershell/module/az.sql/) for creating and managing Azure SQL resources. Additional cmdlets for running queries and performing many database tasks are located in the [sqlserver](/powershell/module/sqlserver/) module. For more information, see [SQL Server PowerShell](/sql/powershell/sql-server-powershell/). \ No newline at end of file diff --git a/articles/azure-sql/database/private-endpoint-overview.md b/articles/azure-sql/database/private-endpoint-overview.md deleted file mode 100644 index f78abef127410..0000000000000 --- a/articles/azure-sql/database/private-endpoint-overview.md +++ /dev/null @@ -1,214 +0,0 @@ ---- -title: Azure Private Link -description: Overview of Private endpoint feature. 
-author: rohitnayakmsft -ms.author: rohitna -titleSuffix: Azure SQL Database and Azure Synapse Analytics -ms.service: sql-database -ms.subservice: security -ms.topic: overview -ms.custom: sqldbrb=1, fasttrack-edit -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 01/20/2022 ---- - -# Azure Private Link for Azure SQL Database and Azure Synapse Analytics -[!INCLUDE[appliesto-sqldb-asa](../includes/appliesto-sqldb-asa-formerly-sqldw.md)] - -Private Link allows you to connect to various PaaS services in Azure via a **private endpoint**. For a list of PaaS services that support Private Link functionality, go to the [Private Link Documentation](../../private-link/index.yml) page. A private endpoint is a private IP address within a specific [VNet](../../virtual-network/virtual-networks-overview.md) and subnet. - -> [!IMPORTANT] -> This article applies to both Azure SQL Database and [dedicated SQL pool (formerly SQL DW)](../../synapse-analytics\sql-data-warehouse\sql-data-warehouse-overview-what-is.md) in Azure Synapse Analytics. These settings apply to all SQL Database and dedicated SQL pool (formerly SQL DW) databases associated with the server. For simplicity, the term 'database' refers to both databases in Azure SQL Database and Azure Synapse Analytics. Likewise, any references to 'server' is referring to the [logical SQL server](logical-servers.md) that hosts Azure SQL Database and dedicated SQL pool (formerly SQL DW) in Azure Synapse Analytics. This article does *not* apply to Azure SQL Managed Instance or dedicated SQL pools in Azure Synapse Analytics workspaces. 
- -## How to set up Private Link - -### Creation Process -Private Endpoints can be created using the Azure portal, PowerShell, or the Azure CLI: -- [The portal](../../private-link/create-private-endpoint-portal.md) -- [PowerShell](../../private-link/create-private-endpoint-powershell.md) -- [CLI](../../private-link/create-private-endpoint-cli.md) - -### Approval process -Once the network admin creates the Private Endpoint (PE), the SQL admin can manage the Private Endpoint Connection (PEC) to SQL Database. - -1. Navigate to the server resource in the Azure portal as per steps shown in the screenshot below - - - (1) Select the Private endpoint connections in the left pane - - (2) Shows a list of all Private Endpoint Connections (PECs) - - (3) Corresponding Private Endpoint (PE) created -![Screenshot of all PECs][3] - -1. Select an individual PEC from the list by selecting it. -![Screenshot selected PEC][6] - -1. The SQL admin can choose to approve or reject a PEC and optionally add a short text response. -![Screenshot of PEC approval][4] - -1. After approval or rejection, the list will reflect the appropriate state along with the response text. -![Screenshot of all PECs after approval][5] - -1. Finally clicking on the private endpoint name - ![Screenshot of PEC details][7] - - leads to the Network Interface details - ![Screenshot of NIC details][8] - - which finally leads to the IP address for the private endpoint - ![Screenshot of Private IP][9] - -> [!IMPORTANT] -> When you add a private endpoint connection, public routing to your Azure SQL logical server isn't blocked by default. In the **Firewall and virtual networks** pane, the setting **Deny public network access** is not selected by default. To disable public network access, ensure that you select **Deny public network access**. 
- -## Disable public access to your Azure SQL logical server - -For this scenario, assume you want to disable all public access to your Azure SQL logical server and allow connections only from your virtual network. - -First, ensure that your private endpoint connections are enabled and configured. Then, to disable public access to your logical server: - -1. Go to the **Firewalls and virtual network** pane of your Azure SQL logical server. -1. Select the **Deny public network access** checkbox. - -![Screenshot that shows selecting the Deny public network access option.](./media/private-endpoint/pec-deny-public-access.png) - -## Test connectivity to SQL Database from an Azure VM in same virtual network -For this scenario, assume you've created an Azure Virtual Machine (VM) running a recent version of Windows in the same virtual network as the private endpoint. - -1. [Start a Remote Desktop (RDP) session and connect to the virtual machine](../../virtual-machines/windows/connect-logon.md#connect-to-the-virtual-machine). - -1. You can then do some basic connectivity checks to ensure that the VM is connecting to SQL Database via the private endpoint using the following tools: - 1. Telnet - 1. Psping - 1. Nmap - 1. SQL Server Management Studio (SSMS) - -### Check Connectivity using Telnet - -[Telnet Client](/previous-versions/windows/it-pro/windows-server-2008-R2-and-2008/cc754293%28v%3dws.10%29) is a Windows feature that can be used to test connectivity. Depending on the version of the Windows OS, you may need to enable this feature explicitly. - -Open a Command Prompt window after you have installed Telnet. Run the Telnet command and specify the IP address and private endpoint of the database in SQL Database. 
- -``` ->telnet 10.9.0.4 1433 -``` - -When Telnet connects successfully, you'll see a blank screen at the command window like the below image: - - ![Diagram of telnet][2] - -### Check Connectivity using Psping - -[Psping](/sysinternals/downloads/psping) can be used as follows to check that the private endpoint is listening for connections on port 1433. - -Run psping as follows by providing the FQDN for logical SQL server and port 1433: - -``` ->psping.exe mysqldbsrvr.database.windows.net:1433 -... -TCP connect to 10.9.0.4:1433: -5 iterations (warmup 1) ping test: -Connecting to 10.9.0.4:1433 (warmup): from 10.6.0.4:49953: 2.83ms -Connecting to 10.9.0.4:1433: from 10.6.0.4:49954: 1.26ms -Connecting to 10.9.0.4:1433: from 10.6.0.4:49955: 1.98ms -Connecting to 10.9.0.4:1433: from 10.6.0.4:49956: 1.43ms -Connecting to 10.9.0.4:1433: from 10.6.0.4:49958: 2.28ms -``` - -The output show that Psping could ping the private IP address associated with the private endpoint. - -### Check connectivity using Nmap - -Nmap (Network Mapper) is a free and open-source tool used for network discovery and security auditing. For more information and the download link, visit https://nmap.org. You can use this tool to ensure that the private endpoint is listening for connections on port 1433. - -Run Nmap as follows by providing the address range of the subnet that hosts the private endpoint. - -``` ->nmap -n -sP 10.9.0.0/24 -... -Nmap scan report for 10.9.0.4 -Host is up (0.00s latency). -Nmap done: 256 IP addresses (1 host up) scanned in 207.00 seconds -``` -The result shows that one IP address is up; which corresponds to the IP address for the private endpoint. - -### Check connectivity using SQL Server Management Studio (SSMS) -> [!NOTE] -> Use the **Fully Qualified Domain Name (FQDN)** of the server in connection strings for your clients (`.database.windows.net`). 
Any login attempts made directly to the IP address or using the private link FQDN (`.privatelink.database.windows.net`) shall fail. This behavior is by design, since private endpoint routes traffic to the SQL Gateway in the region and the correct FQDN needs to be specified for logins to succeed. - -Follow the steps here to use [SSMS to connect to the SQL Database](connect-query-ssms.md). After you connect to the SQL Database using SSMS, the following query shall reflect client_net_address that matches the private IP address of the Azure VM you are connecting from: - -```` -select client_net_address from sys.dm_exec_connections -where session_id=@@SPID -```` - -## Limitations -Connections to private endpoint only support **Proxy** as the [connection policy](connectivity-architecture.md#connection-policy) - - -## On-premises connectivity over private peering - -When customers connect to the public endpoint from on-premises machines, their IP address needs to be added to the IP-based firewall using a [Server-level firewall rule](firewall-create-server-level-portal-quickstart.md). While this model works well for allowing access to individual machines for dev or test workloads, it's difficult to manage in a production environment. - -With Private Link, customers can enable cross-premises access to the private endpoint using [ExpressRoute](../../expressroute/expressroute-introduction.md), private peering, or VPN tunneling. Customers can then disable all access via the public endpoint and not use the IP-based firewall to allow any IP addresses. - -## Use cases of Private Link for Azure SQL Database - -Clients can connect to the Private endpoint from the same virtual network, peered virtual network in same region, or via virtual network to virtual network connection across regions. Additionally, clients can connect from on-premises using ExpressRoute, private peering, or VPN tunneling. Below is a simplified diagram showing the common use cases. 
- - ![Diagram of connectivity options][1] - -In addition, services that are not running directly in the virtual network but are integrated with it (for example, App Service web apps or Functions) can also achieve private connectivity to the database. For more information on this specific use case, see the [Web app with private connectivity to Azure SQL database](/azure/architecture/example-scenario/private-web-app/private-web-app) architecture scenario. - -## Connecting from an Azure VM in Peered Virtual Network - -Configure [virtual network peering](../../virtual-network/tutorial-connect-virtual-networks-powershell.md) to establish connectivity to the SQL Database from an Azure VM in a peered virtual network. - -## Connecting from an Azure VM in virtual network to virtual network environment - -Configure [virtual network to virtual network VPN gateway connection](../../vpn-gateway/vpn-gateway-howto-vnet-vnet-resource-manager-portal.md) to establish connectivity to a database in SQL Database from an Azure VM in a different region or subscription. - -## Connecting from an on-premises environment over VPN - -To establish connectivity from an on-premises environment to the database in SQL Database, choose and implement one of the options: -- [Point-to-Site connection](../../vpn-gateway/vpn-gateway-howto-point-to-site-rm-ps.md) -- [Site-to-Site VPN connection](../../vpn-gateway/vpn-gateway-create-site-to-site-rm-powershell.md) -- [ExpressRoute circuit](../../expressroute/expressroute-howto-linkvnet-portal-resource-manager.md) - -Consider [DNS configuration scenarios](../../private-link/private-endpoint-dns.md#dns-configuration-scenarios) as well, as the FQDN of the service can resolve to the public IP address. - -## Connecting from Azure Synapse Analytics to Azure Storage using Polybase and the COPY statement - -PolyBase and the COPY statement is commonly used to load data into Azure Synapse Analytics from Azure Storage accounts. 
If the Azure Storage account that you're loading data from limits access only to a set of virtual network subnets via Private Endpoints, Service Endpoints, or IP-based firewalls, the connectivity from PolyBase and the COPY statement to the account will break. For enabling both import and export scenarios with Azure Synapse Analytics connecting to Azure Storage that's secured to a virtual network, follow the steps provided [here](vnet-service-endpoint-rule-overview.md#impact-of-using-virtual-network-service-endpoints-with-azure-storage). - -## Data exfiltration prevention - -Data exfiltration in Azure SQL Database is when a user, such as a database admin is able extract data from one system and move it another location or system outside the organization. For example, the user moves the data to a storage account owned by a third party. - -Consider a scenario with a user running SQL Server Management Studio (SSMS) inside an Azure virtual machine connecting to a database in SQL Database. This database is in the West US data center. The example below shows how to limit access with public endpoints on SQL Database using network access controls. - -1. Disable all Azure service traffic to SQL Database via the public endpoint by setting Allow Azure Services to **OFF**. Ensure no IP addresses are allowed in the server and database level firewall rules. For more information, see [Azure SQL Database and Azure Synapse Analytics network access controls](network-access-controls-overview.md). -1. Only allow traffic to the database in SQL Database using the Private IP address of the VM. For more information, see the articles on [Service Endpoint](vnet-service-endpoint-rule-overview.md) and [virtual network firewall rules](firewall-configure.md). -1. 
On the Azure VM, narrow down the scope of outgoing connection by using [Network Security Groups (NSGs)](../../virtual-network/manage-network-security-group.md) and Service Tags as follows - - Specify an NSG rule to allow traffic for Service Tag = SQL.WestUs - only allowing connection to SQL Database in West US - - Specify an NSG rule (with a **higher priority**) to deny traffic for Service Tag = SQL - denying connections to SQL Database in all regions - -At the end of this setup, the Azure VM can connect only to a database in SQL Database in the West US region. However, the connectivity isn't restricted to a single database in SQL Database. The VM can still connect to any database in the West US region, including the databases that aren't part of the subscription. While we've reduced the scope of data exfiltration in the above scenario to a specific region, we haven't eliminated it altogether. - -With Private Link, customers can now set up network access controls like NSGs to restrict access to the private endpoint. Individual Azure PaaS resources are then mapped to specific private endpoints. A malicious insider can only access the mapped PaaS resource (for example a database in SQL Database) and no other resource. - -## Next steps - -- For an overview of Azure SQL Database security, see [Securing your database](security-overview.md) -- For an overview of Azure SQL Database connectivity, see [Azure SQL Connectivity Architecture](connectivity-architecture.md) -- You may also be interested in the [Web app with private connectivity to Azure SQL database](/azure/architecture/example-scenario/private-web-app/private-web-app) architecture scenario, which connects a web application outside of the virtual network to the private endpoint of a database. 
- - -[1]: media/private-endpoint/pe-connect-overview.png -[2]: media/private-endpoint/telnet-result.png -[3]: media/private-endpoint/pec-list-before.png -[4]: media/private-endpoint/pec-approve.png -[5]: media/private-endpoint/pec-list-after.png -[6]: media/private-endpoint/pec-select.png -[7]: media/private-endpoint/pec-click.png -[8]: media/private-endpoint/pec-nic-click.png -[9]: media/private-endpoint/pec-ip-display.png \ No newline at end of file diff --git a/articles/azure-sql/database/purchasing-models.md b/articles/azure-sql/database/purchasing-models.md deleted file mode 100644 index 8895716fef086..0000000000000 --- a/articles/azure-sql/database/purchasing-models.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: Purchasing models -titleSuffix: Azure SQL Database -description: "Learn about the purchasing models that are available for Azure SQL Database: the vCore purchasing model and the DTU purchasing model." -services: sql-database -ms.service: sql-database -ms.subservice: service-overview -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 04/06/2022 ---- -# Compare vCore and DTU-based purchasing models of Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Azure SQL Database lets you easily purchase a fully managed platform as a service (PaaS) database engine that fits your performance and cost needs. Depending on the deployment model you've chosen for Azure SQL Database, you can select the purchasing model that works for you: - -- [Virtual core (vCore)-based purchasing model](service-tiers-sql-database-vcore.md) (recommended). This purchasing model provides a choice between a provisioned compute tier and a serverless compute tier. With the provisioned compute tier, you choose the exact amount of compute resources that are always provisioned for your workload. 
With the serverless compute tier, you specify the autoscaling of the compute resources over a configurable compute range. The serverless compute tier automatically pauses databases during inactive periods when only storage is billed and automatically resumes databases when activity returns. The vCore unit price per unit of time is lower in the provisioned compute tier than it is in the serverless compute tier. The [Hyperscale service tier](service-tier-hyperscale.md) is available for single databases that are using the [vCore-based purchasing model](service-tiers-vcore.md). -- [Database transaction unit (DTU)-based purchasing model](service-tiers-dtu.md). This purchasing model provides bundled compute and storage packages balanced for common workloads. - -## Purchasing models - -There are two purchasing models: - -- [vCore-based purchasing model](service-tiers-vcore.md) is available for both [Azure SQL Database](sql-database-paas-overview.md) and [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md). The [Hyperscale service tier](service-tier-hyperscale.md) is available for single databases that are using the [vCore-based purchasing model](service-tiers-vcore.md). -- [DTU-based purchasing model](service-tiers-dtu.md) is available for [Azure SQL Database](single-database-manage.md). - -The following table and chart compares and contrasts the vCore-based and the DTU-based purchasing models: - -|**Purchasing model**|**Description**|**Best for**| -|---|---|---| -|DTU-based|This model is based on a bundled measure of compute, storage, and I/O resources. Compute sizes are expressed in DTUs for single databases and in elastic database transaction units (eDTUs) for elastic pools. 
For more information about DTUs and eDTUs, see [What are DTUs and eDTUs?](purchasing-models.md#dtu-purchasing-model).|Customers who want simple, preconfigured resource options| -|vCore-based|This model allows you to independently choose compute and storage resources. The vCore-based purchasing model also allows you to use [Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/) for SQL Server to save costs.|Customers who value flexibility, control, and transparency| - - -:::image type="content" source="./media/purchasing-models/pricing-model.png" alt-text="Pricing model comparison" lightbox="./media/purchasing-models/pricing-model.png"::: - - -## vCore purchasing model - -A virtual core (vCore) represents a logical CPU and offers you the option to choose between generations of hardware and the physical characteristics of the hardware (for example, the number of cores, the memory, and the storage size). The vCore-based purchasing model gives you flexibility, control, transparency of individual resource consumption, and a straightforward way to translate on-premises workload requirements to the cloud. This model allows you to choose compute, memory, and storage resources based on your workload needs. - -In the vCore-based purchasing model for SQL Database, you can choose between the General Purpose and Business Critical service tiers. Review [service tiers](service-tiers-sql-database-vcore.md#service-tiers) to learn more. For single databases, you can also choose the [Hyperscale service tier](service-tier-hyperscale.md). - -In the vCore-based purchasing model, your costs depend on the choice and usage of: - -- Service tier -- Hardware configuration -- Compute resources (the number of vCores and the amount of memory) -- Reserved database storage -- Actual backup storage - -## DTU purchasing model - -The DTU-based purchasing model uses a database transaction unit (DTU) to calculate and bundle compute costs. 
A database transaction unit (DTU) represents a blended measure of CPU, memory, reads, and writes. The DTU-based purchasing model offers a set of preconfigured bundles of compute resources and included storage to drive different levels of application performance. If you prefer the simplicity of a preconfigured bundle and fixed payments each month, the DTU-based model might be more suitable for your needs. - -In the DTU-based purchasing model, you can choose between the basic, standard, and premium service tiers for Azure SQL Database. Review [DTU service tiers](service-tiers-dtu.md#compare-service-tiers) to learn more. - - -To convert from the DTU-based purchasing model to the vCore-based purchasing model, see [Migrate from DTU to vCore](migrate-dtu-to-vcore.md). - - -## Compute costs - -Compute costs are calculated differently based on each purchasing model. - -### DTU compute costs - -In the DTU purchasing model, DTUs are offered in preconfigured bundles of compute resources and included storage to drive different levels of application performance. You are billed by the number of DTUs you allocate to your database for your application. - -### vCore compute costs - -In the vCore-based purchasing model, choose between the provisioned compute tier, or the [serverless compute tier](serverless-tier-overview.md). In the provisioned compute tier, the compute cost reflects the total compute capacity that is provisioned for the application. In the serverless compute tier, compute resources are auto-scaled based on workload capacity and billed for the amount of compute used, per second. - -For single databases, compute resources, I/O, and data and log storage are charged per database. For elastic pools, these resources are charged per pool. However, backup storage is always charged per database. 
- -Since three additional replicas are automatically allocated in the Business Critical service tier, the price is approximately 2.7 times higher than it is in the General Purpose service tier. Likewise, the higher storage price per GB in the Business Critical service tier reflects the higher IO limits and lower latency of the local SSD storage. - -## Storage costs - -Storage costs are calculated differently based on each purchasing model. - -### DTU storage costs - -Storage is included in the price of the DTU. It's possible to add extra storage in the standard and premium tiers. See the [Azure SQL Database pricing options](https://azure.microsoft.com/pricing/details/sql-database/single/) for details on provisioning extra storage. [Long-term backup retention](long-term-retention-overview.md) is not included, and is billed separately. - -### vCore storage costs - -Different types of storage are billed differently. For data storage, you're charged for the provisioned storage based upon the maximum database or pool size you select. The cost doesn't change unless you reduce or increase that maximum. Backup storage is associated with automated backups of your databases and is allocated dynamically. Increasing your backup retention period may increase the backup storage that's consumed by your databases. - -By default, seven days of automated backups of your databases are copied to a storage account. This storage is used by full backups, differential backups, and transaction log backups. The size of differential and transaction log backups depends on the rate of change of the database. A minimum storage amount equal to 100 percent of the maximum data size for the database is provided at no extra charge. Additional consumption of backup storage is charged in GB per month. - -The cost of backup storage is the same for the Business Critical service tier and the General Purpose service tier because both tiers use standard storage for backups. 
- -For more information about storage prices, see the [pricing](https://azure.microsoft.com/pricing/details/sql-database/single/) page. - -## Frequently asked questions (FAQs) - -### Do I need to take my application offline to convert from a DTU-based service tier to a vCore-based service tier? - -No. You don't need to take the application offline. The new service tiers offer a simple online-conversion method that's similar to the existing process of upgrading databases from the standard to the premium service tier and the other way around. You can start this conversion by using the Azure portal, PowerShell, the Azure CLI, T-SQL, or the REST API. See [Manage single databases](single-database-scale.md) and [Manage elastic pools](elastic-pool-overview.md). - -### Can I convert a database from a service tier in the vCore-based purchasing model to a service tier in the DTU-based purchasing model? - -Yes, you can easily convert your database to any supported performance objective by using the Azure portal, PowerShell, the Azure CLI, T-SQL, or the REST API. See [Manage single databases](single-database-scale.md) and [Manage elastic pools](elastic-pool-overview.md). - -## Next steps - -- For more information about the vCore-based purchasing model, see [vCore-based purchasing model](service-tiers-vcore.md). -- For more information about the DTU-based purchasing model, see [DTU-based purchasing model](service-tiers-dtu.md). \ No newline at end of file diff --git a/articles/azure-sql/database/query-performance-insight-use.md b/articles/azure-sql/database/query-performance-insight-use.md deleted file mode 100644 index e996d7233d88a..0000000000000 --- a/articles/azure-sql/database/query-performance-insight-use.md +++ /dev/null @@ -1,277 +0,0 @@ ---- -title: Query Performance Insight -description: Query performance monitoring identifies the most CPU-consuming and long-running queries for single and pooled databases in Azure SQL Database. 
-services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: NikaKinska -ms.author: nnikolic -ms.reviewer: kendralittle, mathoma, wiassaf -ms.date: 12/14/2021 ---- -# Query Performance Insight for Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Query Performance Insight provides intelligent query analysis for single and pooled databases. It helps identify the top resource consuming and long-running queries in your workload. This helps you find the queries to optimize to improve overall workload performance and efficiently use the resource that you are paying for. Query Performance Insight helps you spend less time troubleshooting database performance by providing: - -* Deeper insight into your databases resource (DTU) consumption -* Details on top database queries by CPU, duration, and execution count (potential tuning candidates for performance improvements) -* The ability to drill down into details of a query, to view the query text and history of resource utilization -* Annotations that show performance recommendations from [database advisors](database-advisor-implement-performance-recommendations.md) - -![Query Performance Insight](./media/query-performance-insight-use/opening-title.png) - -## Prerequisites - -Query Performance Insight requires that [Query Store](/sql/relational-databases/performance/monitoring-performance-by-using-the-query-store) is active on your database. It's automatically enabled for all databases in Azure SQL Database by default. If Query Store is not running, the Azure portal will prompt you to enable it. - -> [!NOTE] -> If the "Query Store is not properly configured on this database" message appears in the portal, see [Optimizing the Query Store configuration](#optimize-the-query-store-configuration). 
- -## Permissions - -You need the following [Azure role-based access control (Azure RBAC)](../../role-based-access-control/overview.md) permissions to use Query Performance Insight: - -* **Reader**, **Owner**, **Contributor**, **SQL DB Contributor**, or **SQL Server Contributor** permissions are required to view the top resource-consuming queries and charts. -* **Owner**, **Contributor**, **SQL DB Contributor**, or **SQL Server Contributor** permissions are required to view query text. - -## Use Query Performance Insight - -Query Performance Insight is easy to use: - -1. Open the [Azure portal](https://portal.azure.com/) and find a database that you want to examine. -2. From the left-side menu, open **Intelligent Performance** > **Query Performance Insight**. - - ![Query Performance Insight on the menu](./media/query-performance-insight-use/tile.png) - -3. On the first tab, review the list of top resource-consuming queries. -4. Select an individual query to view its details. -5. Open **Intelligent Performance** > **Performance recommendations** and check if any performance recommendations are available. For more information on built-in performance recommendations, see [Azure SQL Database Advisor](database-advisor-implement-performance-recommendations.md). -6. Use sliders or zoom icons to change the observed interval. - - ![Performance dashboard](./media/query-performance-insight-use/performance.png) - -> [!NOTE] -> For Azure SQL Database to render the information in Query Performance Insight, Query Store needs to capture a couple hours of data. If the database has no activity or if Query Store was not active during a certain period, the charts will be empty when Query Performance Insight displays that time range. You can enable Query Store at any time if it's not running. For more information, see [Best practices with Query Store](/sql/relational-databases/performance/best-practice-with-the-query-store). 
-> - -For database performance recommendations, select [Recommendations](database-advisor-implement-performance-recommendations.md) on the Query Performance Insight navigation blade. - -![The Recommendations tab](./media/query-performance-insight-use/ia.png) - -## Review top CPU-consuming queries - -By default, Query Performance Insight shows the top five CPU-consuming queries when you first open it. - -1. Select or clear individual queries to include or exclude them from the chart by using check boxes. - - The top line shows overall DTU percentage for the database. The bars show CPU percentage that the selected queries consumed during the selected interval. For example, if **Past week** is selected, each bar represents a single day. - - ![Top queries](./media/query-performance-insight-use/top-queries.png) - - > [!IMPORTANT] - > The DTU line shown is aggregated to a maximum consumption value in one-hour periods. It's meant for a high-level comparison only with query execution statistics. In some cases, DTU utilization might seem too high compared to executed queries, but this might not be the case. - > - > For example, if a query maxed out DTU to 100% for a few minutes only, the DTU line in Query Performance Insight will show the entire hour of consumption as 100% (the consequence of the maximum aggregated value). - > - > For a finer comparison (up to one minute), consider creating a custom DTU utilization chart: - > - > 1. In the Azure portal, select **Azure SQL Database** > **Monitoring**. - > 2. Select **Metrics**. - > 3. Select **+Add chart**. - > 4. Select the DTU percentage on the chart. - > 5. In addition, select **Last 24 hours** on the upper-left menu and change it to one minute. - > - > Use the custom DTU chart with a finer level of details to compare with the query execution chart. - - The bottom grid shows aggregated information for the visible queries: - - * Query ID, which is a unique identifier for the query in the database. 
- * CPU per query during an observable interval, which depends on the aggregation function. - * Duration per query, which also depends on the aggregation function. - * Total number of executions for a specific query. - -2. If your data becomes stale, select the **Refresh** button. - -3. Use sliders and zoom buttons to change the observation interval and investigate consumption spikes: - - ![Sliders and zoom buttons for changing the interval](./media/query-performance-insight-use/zoom.png) - -4. Optionally, you can select the **Custom** tab to customize the view for: - - * Metric (CPU, duration, execution count). - * Time interval (last 24 hours, past week, or past month). - * Number of queries. - * Aggregation function. - - ![Custom tab](./media/query-performance-insight-use/custom-tab.png) - -5. Select the **Go >** button to see the customized view. - - > [!IMPORTANT] - > Query Performance Insight is limited to displaying the top 5-20 consuming queries, depending on your selection. Your database can run many more queries beyond the top ones shown, and these queries will not be included on the chart. - > - > There might exist a database workload type in which lots of smaller queries, beyond the top ones shown, run frequently and use the majority of DTU. These queries don't appear on the performance chart. - > - > For example, a query might have consumed a substantial amount of DTU for a while, although its total consumption in the observed period is less than the other top-consuming queries. In such a case, resource utilization of this query would not appear on the chart. - > - > If you need to understand top query executions beyond the limitations of Query Performance Insight, consider using [Azure SQL Insights](../../azure-monitor/insights/sql-insights-overview.md) for advanced database performance monitoring and troubleshooting. - > - -## View individual query details - -To view query details: - -1. Select any query in the list of top queries. 
- - ![List of top queries](./media/query-performance-insight-use/details.png) - - A detailed view opens. It shows the CPU consumption, duration, and execution count over time. - -2. Select the chart features for details. - - * The top chart shows a line with the overall database DTU percentage. The bars are the CPU percentage that the selected query consumed. - * The second chart shows the total duration of the selected query. - * The bottom chart shows the total number of executions by the selected query. - - ![Query details](./media/query-performance-insight-use/query-details.png) - -3. Optionally, use sliders, use zoom buttons, or select **Settings** to customize how query data is displayed, or to pick a different time range. - - > [!IMPORTANT] - > Query Performance Insight does not capture any DDL queries. In some cases, it might not capture all ad hoc queries. - > - > In case your database is scope locked, query details blade will not be able to load. - > - -## Review top queries per duration - -Two metrics in Query Performance Insight can help you find potential bottlenecks: duration and execution count. - -Long-running queries have the greatest potential for locking resources longer, blocking other users, and limiting scalability. They're also the best candidates for optimization. For more information, see [Understand and resolve Azure SQL blocking problems](understand-resolve-blocking.md). - -To identify long-running queries: - -1. Open the **Custom** tab in Query Performance Insight for the selected database. -2. Change the metrics to **duration**. -3. Select the number of queries and the observation interval. -4. Select the aggregation function: - - * **Sum** adds up all query execution time for the whole observation interval. - * **Max** finds queries in which execution time was maximum for the whole observation interval. - * **Avg** finds the average execution time of all query executions and shows you the top ones for these averages. 
- - ![Query duration](./media/query-performance-insight-use/top-duration.png) - -5. Select the **Go >** button to see the customized view. - - > [!IMPORTANT] - > Adjusting the query view does not update the DTU line. The DTU line always shows the maximum consumption value for the interval. - > - > To understand database DTU consumption with more detail (up to one minute), consider creating a custom chart in the Azure portal: - > - > 1. Select **Azure SQL Database** > **Monitoring**. - > 2. Select **Metrics**. - > 3. Select **+Add chart**. - > 4. Select the DTU percentage on the chart. - > 5. In addition, select **Last 24 hours** on the upper-left menu and change it to one minute. - > - > We recommend that you use the custom DTU chart to compare with the query performance chart. - > - -## Review top queries per execution count - -A user application that uses the database might get slow, even though a high number of executions might not be affecting the database itself and resource usage is low. - -In some cases, a high execution count can lead to more network round trips. Round trips affect performance. They're subject to network latency and to downstream server latency. - -For example, many data-driven websites heavily access the database for every user request. Although connection pooling helps, the increased network traffic and processing load on the server can slow performance. In general, keep round trips to a minimum. - -To identify frequently executed ("chatty") queries: - -1. Open the **Custom** tab in Query Performance Insight for the selected database. -2. Change the metrics to **execution count**. -3. Select the number of queries and the observation interval. -4. Select the **Go >** button to see the customized view. 
- - ![Query execution count](./media/query-performance-insight-use/top-execution.png) - -## Understand performance tuning annotations - -While exploring your workload in Query Performance Insight, you might notice icons with a vertical line on top of the chart. - -These icons are annotations. They show performance recommendations from [Azure SQL Database Advisor](database-advisor-implement-performance-recommendations.md). By hovering over an annotation, you can get summarized information on performance recommendations. - - ![Query annotation](./media/query-performance-insight-use/annotation.png) - -If you want to understand more or apply the advisor's recommendation, select the icon to open details of the recommended action. If this is an active recommendation, you can apply it right away from the portal. - - ![Query annotation details](./media/query-performance-insight-use/annotation-details.png) - -In some cases, due to the zoom level, it's possible that annotations close to each other are collapsed into a single annotation. Query Performance Insight represents this as a group annotation icon. Selecting the group annotation icon opens a new blade that lists the annotations. - -Correlating queries and performance-tuning actions might help you to better understand your workload. - -## Optimize the Query Store configuration - -While using Query Performance Insight, you might see the following Query Store error messages: - -* "Query Store is not properly configured on this database. Click here to learn more." -* "Query Store is not properly configured on this database. Click here to change settings." - -These messages usually appear when Query Store can't collect new data. - -The first case happens when Query Store is in the read-only state and parameters are set optimally. You can fix this by increasing the size of the data store, or by clearing Query Store. (If you clear Query Store, all previously collected telemetry will be lost.) 
- - ![Query Store details](./media/query-performance-insight-use/qds-off.png) - -The second case happens when Query Store is not enabled, or parameters are not set optimally. You can change the retention and capture policy, and also enable Query Store, by running the following commands provided from [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms) or the Azure portal. - -### Recommended retention and capture policy - -There are two types of retention policies: - -* **Size based**: If this policy is set to **AUTO**, it will clean data automatically when near maximum size is reached. -* **Time based**: By default, this policy is set to 30 days. If Query Store runs out of space, it will delete query information older than 30 days. - -You can set the capture policy to: - -* **All**: Query Store captures all queries. -* **Auto**: Query Store ignores infrequent queries and queries with insignificant compile and execution duration. Thresholds for execution count, compile duration, and runtime duration are internally determined. This is the default option. -* **None**: Query Store stops capturing new queries, but runtime statistics for already captured queries are still collected. - -We recommend setting all policies to **AUTO** and the cleaning policy to 30 days by executing the following commands from [SSMS](/sql/ssms/download-sql-server-management-studio-ssms) or the Azure portal. (Replace `YourDB` with the database name.) - -```sql - ALTER DATABASE [YourDB] - SET QUERY_STORE (SIZE_BASED_CLEANUP_MODE = AUTO); - - ALTER DATABASE [YourDB] - SET QUERY_STORE (CLEANUP_POLICY = (STALE_QUERY_THRESHOLD_DAYS = 30)); - - ALTER DATABASE [YourDB] - SET QUERY_STORE (QUERY_CAPTURE_MODE = AUTO); -``` - -Increase the size of Query Store by connecting to a database through [SSMS](/sql/ssms/download-sql-server-management-studio-ssms) or the Azure portal and running the following query. (Replace `YourDB` with the database name.) 
- -```SQL - ALTER DATABASE [YourDB] - SET QUERY_STORE (MAX_STORAGE_SIZE_MB = 1024); -``` - -Applying these settings will eventually make Query Store collect telemetry for new queries. If you need Query Store to be operational right away, you can optionally choose to clear Query Store by running the following query through SSMS or the Azure portal. (Replace `YourDB` with the database name.) - -> [!NOTE] -> Running the following query will delete all previously collected monitored telemetry in Query Store. - -```SQL - ALTER DATABASE [YourDB] SET QUERY_STORE CLEAR; -``` - -## Next steps - -Consider using [Azure SQL Analytics](../../azure-monitor/insights/azure-sql.md) for advanced performance monitoring of a large fleet of single and pooled databases, elastic pools, managed instances and instance databases. diff --git a/articles/azure-sql/database/quickstart-content-reference-guide.md b/articles/azure-sql/database/quickstart-content-reference-guide.md deleted file mode 100644 index 603297e9c85ce..0000000000000 --- a/articles/azure-sql/database/quickstart-content-reference-guide.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Single database quickstart content reference -description: 'Find a content reference of all the quickstarts that help you quickly get started with Azure SQL Database.' -services: sql-database -ms.service: sql-database -ms.subservice: service-overview -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: guide -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 07/29/2019 ---- -# Getting started with single databases in Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -[A single database](../index.yml) is fully managed platform as a service (PaaS) database as a service (DbaaS) that is ideal storage engine for the modern cloud-born applications. In this section, you'll learn how to quickly configure and create a single database in Azure SQL Database. 
- -## Quickstart overview - -In this section, you'll see an overview of available articles that can help you to quickly get started with single databases. The following quickstarts enable you to quickly create a single database, configure a server-level firewall rule, and then import a database into the new single database using a `.bacpac` file: - -- [Create a single database using the Azure portal](single-database-create-quickstart.md). -- After creating the database, you would need to [secure your database by configuring firewall rules](firewall-create-server-level-portal-quickstart.md). -- If you have an existing database on SQL Server that you want to migrate to Azure SQL Database, you should install [Data Migration Assistant (DMA)](https://www.microsoft.com/download/details.aspx?id=53595) that will analyze your databases on SQL Server and find any issue that could block migration. If you don't find any issue, you can export your database as `.bacpac` file and [import it using the Azure portal or SqlPackage](database-import.md). - - -## Automating management operations - -You can use PowerShell or the Azure CLI to create, configure, and scale your database. - -- [Create and configure a single database using PowerShell](scripts/create-and-configure-database-powershell.md) or [Azure CLI](scripts/create-and-configure-database-cli.md) -- [Update your single database and scale resources using PowerShell](scripts/monitor-and-scale-database-powershell.md) or [Azure CLI](scripts/monitor-and-scale-database-cli.md) - -## Migrating to a single database with minimal downtime - -These quickstarts enable you to quickly create or import your database to Azure using a `.bacpac` file. However, `.bacpac` and `.dacpac` files are designed to quickly move databases across different versions of SQL Server and within Azure SQL, or to implement continuous integration in your DevOps pipeline. 
However, this method is not designed for migration of your production databases with minimal downtime, because you would need to stop adding new data, wait for the export of the source database to a `.bacpac` file to complete, and then wait for the import into Azure SQL Database to complete. All of this waiting results in downtime of your application, especially for large databases. To move your production database, you need a better way to migrate that guarantees minimal downtime of migration. For this, use the [Data Migration Service (DMS)](../../dms/tutorial-sql-server-to-azure-sql.md?toc=%2fazure%2fsql-database%2ftoc.json) to migrate your database with minimal downtime. DMS accomplishes this by incrementally pushing the changes made in your source database to the single database being restored. This way, you can quickly switch your application from source to target database with minimal downtime. - -## Hands-on learning modules - -The following Microsoft Learn modules help you learn for free about Azure SQL Database. - -- [Provision a database in SQL Database to store application data](/learn/modules/provision-azure-sql-db/) -- [Develop and configure an ASP.NET application that queries a database in Azure SQL Database](/learn/modules/develop-app-that-queries-azure-sql/) -- [Secure your database in Azure SQL Database](/learn/modules/secure-your-azure-sql-database/) - -## Next steps - -- Find a [high-level list of supported features in Azure SQL Database](features-comparison.md). -- Learn how to make your [database more secure](secure-database-tutorial.md). -- Find more advanced how-to's in [how to use a single database in Azure SQL Database](how-to-content-reference-guide.md). -- Find more sample scripts written in [PowerShell](powershell-script-content-guide.md) and [the Azure CLI](az-cli-script-samples-content-guide.md). -- Learn more about the [management API](single-database-manage.md) that you can use to configure your databases. 
-- [Identify the right Azure SQL Database or Azure SQL Managed Instance SKU for your on-premises database](/sql/dma/dma-sku-recommend-sql-db/). \ No newline at end of file diff --git a/articles/azure-sql/database/quota-increase-request.md b/articles/azure-sql/database/quota-increase-request.md deleted file mode 100644 index a81bd0575110d..0000000000000 --- a/articles/azure-sql/database/quota-increase-request.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: Request a quota increase -description: This page describes how to create a support request to increase the quotas for Azure SQL Database and Azure SQL Managed Instance. -services: sql-database -ms.service: sql-db-mi -ms.subservice: deployment-configuration -ms.topic: how-to -author: sachinpMSFT -ms.author: sachinp -ms.reviewer: kendralittle, mathoma -ms.date: 04/06/2022 ---- - -# Request quota increases for Azure SQL Database and SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -This article explains how to request a quota increase for Azure SQL Database and Azure SQL Managed Instance. It also explains how to enable subscription access to a region and how to request enabling specific hardware in a region. - -## Create a new support request - -Use the following steps to create a new support request from the Azure portal for SQL Database. - -1. On the [Azure portal](https://portal.azure.com) menu, select **Help + support**. - - ![The Help + support link](./media/quota-increase-request/help-plus-support.png) - -1. In **Help + support**, select **New support request**. - - ![Create a new support request](./media/quota-increase-request/new-support-request.png) - -1. For **Issue type**, select **Service and subscription limits (quotas)**. - - ![Select an issue type](./media/quota-increase-request/select-quota-issue-type.png) - -1. For **Subscription**, select the subscription whose quota you want to increase. 
- - ![Select a subscription for an increased quota](./media/quota-increase-request/select-subscription-support-request.png) - -1. For **Quota type**, select one of the following quota types: - - - **SQL Database** for single database and elastic pool quotas. - - **SQL Database Managed Instance** for managed instances. - - Then select **Next: Solutions >>**. - - ![Select a quota type](./media/quota-increase-request/select-quota-type.png) - -1. In the **Details** window, select **Enter details** to enter additional information. - - ![Enter details link](./media/quota-increase-request/provide-details-link.png) - -Clicking **Enter details** displays the **Quota details** window that allows you to add additional information. The following sections describe the different options for **SQL Database** and **SQL Managed Instance** quota types. - -## SQL Database quota types - -The following sections describe the quota increase options for the **SQL Database** quota types: - -- Database transaction units (DTUs) per server -- Servers per subscription -- Region access for subscriptions or specific hardware - -### Database transaction units (DTUs) per server - -Use the following steps to request an increase in the DTUs per server. - -1. Select the **Database transaction units (DTUs) per server** quota type. - -1. In the **Resource** list, select the resource to target. - -1. In the **New quota** field, enter the new DTU limit that you are requesting. - - ![DTU quota details](./media/quota-increase-request/quota-details-dtus.png) - -For more information, see [Resource limits for single databases using the DTU purchasing model](resource-limits-dtu-single-databases.md) and [Resources limits for elastic pools using the DTU purchasing model](resource-limits-dtu-elastic-pools.md). - -### Servers per subscription - -Use the following steps to request an increase in the number of servers per subscription. - -1. Select the **Servers per subscription** quota type. - -1. 
In the **Location** list, select the Azure region to use. The quota is per subscription in each region. - -1. In the **New quota** field, enter your request for the maximum number of servers in that region. - - ![Servers quota details](./media/quota-increase-request/quota-details-servers.png) - -For more information, see [SQL Database resource limits and resource governance](resource-limits-logical-server.md). - -### Enable subscription access to a region - -Some offer types are not available in every region. You may see an error such as the following: - -`Your subscription does not have access to create a server in the selected region. For the latest information about region availability for your subscription, go to aka.ms/sqlcapacity. Please try another region or create a support ticket to request access.` - -If your subscription needs access in a particular region, select the **Region access** option. In your request, specify the offering and SKU details that you want to enable for the region. To explore the offering and SKU options, see [Azure SQL Database pricing](https://azure.microsoft.com/pricing/details/sql-database/single/). - -1. Select the **Region access** quota type. - -1. In the **Select a location** list, select the Azure region to use. The quota is per subscription in each region. - -1. Enter the **Purchase Model**, and **Expected Consumption** details. - - ![Request region access](./media/quota-increase-request/quota-request.png) - -### Request enabling specific hardware in a region - -If the hardware you want to use is not available in your region, you may request it using the following steps. For more information on hardware regional availability, see [Hardware configurations for SQL Database](./service-tiers-sql-database-vcore.md#hardware-configuration) or [Hardware configurations for SQL Managed Instance](../managed-instance/service-tiers-managed-instance-vcore.md#hardware-configurations). - -1. Select the **Other quota request** quota type. 
- -1. In the **Description** field, state your request, including the name of the hardware and the name of the region you need it in. - - ![Request hardware in a new region](./media/quota-increase-request/hardware-in-new-region.png) - -## Submit your request - -The final step is to fill in the remaining details of your SQL Database quota request. Then select **Next: Review + create>>**, and after reviewing the request details, click **Create** to submit the request. - -## Next steps - -After you submit your request, it will be reviewed. You will be contacted with an answer based on the information you provided in the form. - -For more information about other Azure limits, see [Azure subscription and service limits, quotas, and constraints](../../azure-resource-manager/management/azure-subscription-service-limits.md). diff --git a/articles/azure-sql/database/read-scale-out.md b/articles/azure-sql/database/read-scale-out.md deleted file mode 100644 index ef0195e98dd26..0000000000000 --- a/articles/azure-sql/database/read-scale-out.md +++ /dev/null @@ -1,203 +0,0 @@ ---- -title: Read queries on replicas -description: Azure SQL provides the ability to use the capacity of read-only replicas for read workloads, called Read Scale-Out. 
-services: sql-database -ms.service: sql-database -ms.subservice: scale-out -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: -ms.topic: conceptual -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 1/20/2022 ---- -# Use read-only replicas to offload read-only query workloads -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -As part of [High Availability architecture](high-availability-sla.md#premium-and-business-critical-service-tier-locally-redundant-availability), each single database, elastic pool database, and managed instance in the Premium and Business Critical service tier is automatically provisioned with a primary read-write replica and several secondary read-only replicas. The secondary replicas are provisioned with the same compute size as the primary replica. The *read scale-out* feature allows you to offload read-only workloads using the compute capacity of one of the read-only replicas, instead of running them on the read-write replica. This way, some read-only workloads can be isolated from the read-write workloads, and will not affect their performance. The feature is intended for the applications that include logically separated read-only workloads, such as analytics. In the Premium and Business Critical service tiers, applications could gain performance benefits using this additional capacity at no extra cost. - -The *read scale-out* feature is also available in the Hyperscale service tier when at least one [secondary replica](service-tier-hyperscale-replicas.md) is added. Hyperscale secondary [named replicas](service-tier-hyperscale-replicas.md#named-replica-in-preview) provide independent scaling, access isolation, workload isolation, massive read scale-out, and other benefits. 
Multiple secondary [HA replicas](service-tier-hyperscale-replicas.md#high-availability-replica) can be used for load-balancing read-only workloads that require more resources than available on one secondary HA replica. - -The High Availability architecture of Basic, Standard, and General Purpose service tiers does not include any replicas. The *read scale-out* feature is not available in these service tiers. However, [geo-replicas](active-geo-replication-overview.md) can provide similar functionality in these service tiers. - -The following diagram illustrates the feature for Premium and Business Critical databases and managed instances. - -![Readonly replicas](./media/read-scale-out/business-critical-service-tier-read-scale-out.png) - -The *read scale-out* feature is enabled by default on new Premium, Business Critical, and Hyperscale databases. - -> [!NOTE] -> Read scale-out is always enabled in the Business Critical service tier of Managed Instance, and for Hyperscale databases with at least one secondary replica. - -If your SQL connection string is configured with `ApplicationIntent=ReadOnly`, the application will be redirected to a read-only replica of that database or managed instance. For information on how to use the `ApplicationIntent` property, see [Specifying Application Intent](/sql/relational-databases/native-client/features/sql-server-native-client-support-for-high-availability-disaster-recovery#specifying-application-intent). - -If you wish to ensure that the application connects to the primary replica regardless of the `ApplicationIntent` setting in the SQL connection string, you must explicitly disable read scale-out when creating the database or when altering its configuration. For example, if you upgrade your database from Standard or General Purpose tier to Premium or Business Critical and want to make sure all your connections continue to go to the primary replica, disable read scale-out. 
For details on how to disable it, see [Enable and disable read scale-out](#enable-and-disable-read-scale-out). - -> [!NOTE] -> Query Store and SQL Profiler features are not supported on read-only replicas. - -## Data consistency - -Data changes made on the primary replica are persisted on read-only replicas synchronously or asynchronously depending on replica type. However, for all replica types, reads from a read-only replica are always asynchronous with respect to the primary. Within a session connected to a read-only replica, reads are always transactionally consistent. Because data propagation latency is variable, different replicas can return data at slightly different points in time relative to the primary and each other. If a read-only replica becomes unavailable and a session reconnects, it may connect to a replica that is at a different point in time than the original replica. Likewise, if an application changes data using a read-write session on the primary and immediately reads it using a read-only session on a read-only replica, it is possible that the latest changes will not be immediately visible. - -Typical data propagation latency between the primary replica and read-only replicas varies in the range from tens of milliseconds to single-digit seconds. However, there is no fixed upper bound on data propagation latency. Conditions such as high resource utilization on the replica can increase latency substantially. Applications that require guaranteed data consistency across sessions, or require committed data to be readable immediately should use the primary replica. - -> [!NOTE] -> To monitor data propagation latency, see [Monitoring and troubleshooting read-only replica](#monitoring-and-troubleshooting-read-only-replicas). 
- -## Connect to a read-only replica - -When you enable read scale-out for a database, the `ApplicationIntent` option in the connection string provided by the client dictates whether the connection is routed to the write replica or to a read-only replica. Specifically, if the `ApplicationIntent` value is `ReadWrite` (the default value), the connection will be directed to the read-write replica. This is identical to the behavior when `ApplicationIntent` is not included in the connection string. If the `ApplicationIntent` value is `ReadOnly`, the connection is routed to a read-only replica. - -For example, the following connection string connects the client to a read-only replica (replacing the items in the angle brackets with the correct values for your environment and dropping the angle brackets): - -```sql -Server=tcp:.database.windows.net;Database=;ApplicationIntent=ReadOnly;User ID=;Password=;Trusted_Connection=False; Encrypt=True; -``` - -Either of the following connection strings connects the client to a read-write replica (replacing the items in the angle brackets with the correct values for your environment and dropping the angle brackets): - -```sql -Server=tcp:.database.windows.net;Database=;ApplicationIntent=ReadWrite;User ID=;Password=;Trusted_Connection=False; Encrypt=True; - -Server=tcp:.database.windows.net;Database=;User ID=;Password=;Trusted_Connection=False; Encrypt=True; -``` - -## Verify that a connection is to a read-only replica - -You can verify whether you are connected to a read-only replica by running the following query in the context of your database. It will return READ_ONLY when you are connected to a read-only replica. - -```sql -SELECT DATABASEPROPERTYEX(DB_NAME(), 'Updateability'); -``` - -> [!NOTE] -> In Premium and Business Critical service tiers, only one of the read-only replicas is accessible at any given time. Hyperscale supports multiple read-only replicas. 
- -## Monitoring and troubleshooting read-only replicas - -When connected to a read-only replica, Dynamic Management Views (DMVs) reflect the state of the replica, and can be queried for monitoring and troubleshooting purposes. The database engine provides multiple views to expose a wide variety of monitoring data. - -The following views are commonly used for replica monitoring and troubleshooting: - -| Name | Purpose | -|:---|:---| -|[sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database)| Provides resource utilization metrics for the last hour, including CPU, data IO, and log write utilization relative to service objective limits.| -|[sys.dm_os_wait_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-os-wait-stats-transact-sql)| Provides aggregate wait statistics for the database engine instance. | -|[sys.dm_database_replica_states](/sql/relational-databases/system-dynamic-management-views/sys-dm-database-replica-states-azure-sql-database)| Provides replica health state and synchronization statistics. Redo queue size and redo rate serve as indicators of data propagation latency on the read-only replica. | -|[sys.dm_os_performance_counters](/sql/relational-databases/system-dynamic-management-views/sys-dm-os-performance-counters-transact-sql)| Provides database engine performance counters.| -|[sys.dm_exec_query_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-query-stats-transact-sql)| Provides per-query execution statistics such as number of executions, CPU time used, etc.| -|[sys.dm_exec_query_plan()](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-query-plan-transact-sql)| Provides cached query plans. 
| -|[sys.dm_exec_sql_text()](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-sql-text-transact-sql)| Provides query text for a cached query plan.| -|[sys.dm_exec_query_profiles](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-query-plan-stats-transact-sql)| Provides real time query progress while queries are in execution.| -|[sys.dm_exec_query_plan_stats()](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-query-plan-stats-transact-sql)| Provides the last known actual execution plan including runtime statistics for a query.| -|[sys.dm_io_virtual_file_stats()](/sql/relational-databases/system-dynamic-management-views/sys-dm-io-virtual-file-stats-transact-sql)| Provides storage IOPS, throughput, and latency statistics for all database files. | - -> [!NOTE] -> The `sys.resource_stats` and `sys.elastic_pool_resource_stats` DMVs in the logical master database return resource utilization data of the primary replica. - -### Monitoring read-only replicas with Extended Events - -An extended event session cannot be created when connected to a read-only replica. However, in Azure SQL Database, the definitions of database-scoped [Extended Event](xevent-db-diff-from-svr.md) sessions created and altered on the primary replica replicate to read-only replicas, including geo-replicas, and capture events on read-only replicas. - -An extended event session on a read-only replica that is based on a session definition from the primary replica can be started and stopped independently of the primary replica. When an extended event session is dropped on the primary replica, it is also dropped on all read-only replicas. - -### Transaction isolation level on read-only replicas - -Queries that run on read-only replicas are always mapped to the [snapshot](/dotnet/framework/data/adonet/sql/snapshot-isolation-in-sql-server) transaction isolation level. 
Snapshot isolation uses row versioning to avoid blocking scenarios where readers block writers. - -In rare cases, if a snapshot isolation transaction accesses object metadata that has been modified in another concurrent transaction, it may receive error [3961](/sql/relational-databases/errors-events/mssqlserver-3961-database-engine-error), "Snapshot isolation transaction failed in database '%.*ls' because the object accessed by the statement has been modified by a DDL statement in another concurrent transaction since the start of this transaction. It is disallowed because the metadata is not versioned. A concurrent update to metadata can lead to inconsistency if mixed with snapshot isolation." - -### Long-running queries on read-only replicas - -Queries running on read-only replicas need to access metadata for the objects referenced in the query (tables, indexes, statistics, etc.) In rare cases, if object metadata is modified on the primary replica while a query holds a lock on the same object on the read-only replica, the query can [block](/sql/database-engine/availability-groups/windows/troubleshoot-primary-changes-not-reflected-on-secondary#BKMK_REDOBLOCK) the process that applies changes from the primary replica to the read-only replica. If such a query were to run for a long time, it would cause the read-only replica to be significantly out of sync with the primary replica. For replicas that are potential failover targets (secondary replicas in Premium and Business Critical service tiers, Hyperscale HA replicas, and all geo-replicas), this would also delay database recovery if a failover were to occur, causing longer than expected downtime. - -If a long-running query on a read-only replica directly or indirectly causes this kind of blocking, it may be automatically terminated to avoid excessive data latency and potential database availability impact. 
The session will receive error 1219, "Your session has been disconnected because of a high priority DDL operation", or error 3947, "The transaction was aborted because the secondary compute failed to catch up redo. Retry the transaction." - -> [!NOTE] -> If you receive error 3961, 1219, or 3947 when running queries against a read-only replica, retry the query. Alternatively, avoid operations that modify object metadata (schema changes, index maintenance, statistics updates, etc.) on the primary replica while long-running queries execute on secondary replicas. - -> [!TIP] -> In Premium and Business Critical service tiers, when connected to a read-only replica, the `redo_queue_size` and `redo_rate` columns in the [sys.dm_database_replica_states](/sql/relational-databases/system-dynamic-management-views/sys-dm-database-replica-states-azure-sql-database) DMV may be used to monitor data synchronization process, serving as indicators of data propagation latency on the read-only replica. -> - -## Enable and disable read scale-out - -Read scale-out is enabled by default on Premium, Business Critical, and Hyperscale service tiers. Read scale-out cannot be enabled in Basic, Standard, or General Purpose service tiers. Read scale-out is automatically disabled on Hyperscale databases configured with zero secondary replicas. - -You can disable and re-enable read scale-out on single databases and elastic pool databases in the Premium or Business Critical service tiers using the following methods. - -> [!NOTE] -> For single databases and elastic pool databases, the ability to disable read scale-out is provided for backward compatibility. Read scale-out cannot be disabled on Business Critical managed instances. - -### Azure portal - -You can manage the read scale-out setting on the **Configure** database blade. - -### PowerShell - -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported, but all future development is for the Az.Sql module. 
The Azure Resource Manager module will continue to receive bug fixes until at least December 2020. The arguments for the commands in the Az module and in the Azure Resource Manager modules are substantially identical. For more information about their compatibility, see [Introducing the new Azure PowerShell Az module](/powershell/azure/new-azureps-module-az). - -Managing read scale-out in Azure PowerShell requires the December 2016 Azure PowerShell release or newer. For the newest PowerShell release, see [Azure PowerShell](/powershell/azure/install-az-ps). - -You can disable or re-enable read scale-out in Azure PowerShell by invoking the [Set-AzSqlDatabase](/powershell/module/az.sql/set-azsqldatabase) cmdlet and passing in the desired value (`Enabled` or `Disabled`) for the `-ReadScale` parameter. - -To disable read scale-out on an existing database (replacing the items in the angle brackets with the correct values for your environment and dropping the angle brackets): - -```powershell -Set-AzSqlDatabase -ResourceGroupName -ServerName -DatabaseName -ReadScale Disabled -``` - -To disable read scale-out on a new database (replacing the items in the angle brackets with the correct values for your environment and dropping the angle brackets): - -```powershell -New-AzSqlDatabase -ResourceGroupName -ServerName -DatabaseName -ReadScale Disabled -Edition Premium -``` - -To re-enable read scale-out on an existing database (replacing the items in the angle brackets with the correct values for your environment and dropping the angle brackets): - -```powershell -Set-AzSqlDatabase -ResourceGroupName -ServerName -DatabaseName -ReadScale Enabled -``` - -### REST API - -To create a database with read scale-out disabled, or to change the setting for an existing database, use the following method with the `readScale` property set to `Enabled` or `Disabled`, as in the following sample request. 
- -```rest -Method: PUT -URL: https://management.azure.com/subscriptions/{SubscriptionId}/resourceGroups/{GroupName}/providers/Microsoft.Sql/servers/{ServerName}/databases/{DatabaseName}?api-version= 2014-04-01-preview -Body: { - "properties": { - "readScale":"Disabled" - } -} -``` - -For more information, see [Databases - Create or update](/rest/api/sql/databases/createorupdate). - -## Using the `tempdb` database on a read-only replica - -The `tempdb` database on the primary replica is not replicated to the read-only replicas. Each replica has its own `tempdb` database that is created when the replica is created. This ensures that `tempdb` is updateable and can be modified during your query execution. If your read-only workload depends on using `tempdb` objects, you should create these objects as part of the same workload, while connected to a read-only replica. - -## Using read scale-out with geo-replicated databases - -Geo-replicated secondary databases have the same High Availability architecture as primary databases. If you're connecting to the geo-replicated secondary database with read scale-out enabled, your sessions with `ApplicationIntent=ReadOnly` will be routed to one of the high availability replicas in the same way they are routed on the primary writeable database. The sessions without `ApplicationIntent=ReadOnly` will be routed to the primary replica of the geo-replicated secondary, which is also read-only. - -In this fashion, creating a geo-replica can provide multiple additional read-only replicas for a read-write primary database. Each additional geo-replica provides another set of read-only replicas. Geo-replicas can be created in any Azure region, including the region of the primary database. - -> [!NOTE] -> There is no automatic round-robin or any other load-balanced routing between the replicas of a geo-replicated secondary database, with the exception of a Hyperscale geo-replica with more than one HA replica. 
In that case, sessions with read-only intent are distributed over all HA replicas of a geo-replica. - -## Feature support on read-only replicas - -A list of the behavior of some features on read-only replicas is below: -* Auditing on read-only replicas is automatically enabled. For further details about the hierarchy of the storage folders, naming conventions, and log format, see [SQL Database Audit Log Format](audit-log-format.md). -* [Query Performance Insight](query-performance-insight-use.md) relies on data from the [Query Store](/sql/relational-databases/performance/monitoring-performance-by-using-the-query-store), which currently does not track activity on the read-only replica. Query Performance Insight will not show queries which execute on the read-only replica. -* Automatic tuning relies on the Query Store, as detailed in the [Automatic tuning paper](https://www.microsoft.com/en-us/research/uploads/prod/2019/02/autoindexing_azuredb.pdf). Automatic tuning only works for workloads running on the primary replica. - -## Next steps - -- For information about SQL Database Hyperscale offering, see [Hyperscale service tier](service-tier-hyperscale.md). diff --git a/articles/azure-sql/database/recovery-using-backups.md b/articles/azure-sql/database/recovery-using-backups.md deleted file mode 100644 index bd267f1795a3a..0000000000000 --- a/articles/azure-sql/database/recovery-using-backups.md +++ /dev/null @@ -1,269 +0,0 @@ ---- -title: Restore a database from a backup -titleSuffix: Azure SQL Database & SQL Managed Instance -description: Learn about point-in-time restore, which enables you to roll back a database in Azure SQL Database or an instance in Azure SQL Managed Instance up to 35 days. 
-services: sql-database -ms.service: sql-db-mi -ms.subservice: backup-restore -ms.custom: -ms.devlang: -ms.topic: conceptual -author: SudhirRaparla -ms.author: nvraparl -ms.reviewer: kendralittle, mathoma, danil -ms.date: 04/18/2022 ---- -# Recover using automated database backups - Azure SQL Database & SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -The following options are available for database recovery by using [automated database backups](automated-backups-overview.md). You can: - -- Create a new database on the same server, recovered to a specified point in time within the retention period. -- Create a database on the same server, recovered to the deletion time for a deleted database. -- Create a new database on any server in the same region, recovered to the point of the most recent backups. -- Create a new database on any server in any other region, recovered to the point of the most recent replicated backups. Cross-region and cross-subscription point-in-time restore for SQL Managed Instance isn't currently supported. - -If you configured [backup long-term retention](long-term-retention-overview.md), you can also create a new database from any long-term retention backup on any server. - -> [!IMPORTANT] -> You can't overwrite an existing database during restore. - -When you're using the Standard or Premium service tier, your database restore might incur an extra storage cost. The extra cost is incurred when the maximum size of the restored database is greater than the amount of storage included with the target database's service tier and performance level. For pricing details of extra storage, see the [SQL Database pricing page](https://azure.microsoft.com/pricing/details/sql-database/). If the actual amount of used space is less than the amount of storage included, you can avoid this extra cost by setting the maximum database size to the included amount. 
- -## Recovery time - -The recovery time to restore a database by using automated database backups is affected by several factors: - -- The size of the database. -- The compute size of the database. -- The number of transaction logs involved. -- The amount of activity that needs to be replayed to recover to the restore point. -- The network bandwidth if the restore is to a different region. -- The number of concurrent restore requests being processed in the target region. - -For a large or very active database, the restore might take several hours. If there is a prolonged outage in a region, it's possible that a high number of geo-restore requests will be initiated for disaster recovery. When there are many requests, the recovery time for individual databases can increase. Most database restores finish in less than 12 hours. - -For a single subscription, there are limitations on the number of concurrent restore requests. These limitations apply to any combination of point-in-time restores, geo-restores, and restores from long-term retention backup. - -> [!TIP] -> For Azure SQL Managed Instance system updates will take precedence over database restores in progress. All pending restores in case of a system update on Managed Instance will be suspended and resumed once the update has been applied. This system behavior might prolong the time of restores and might be especially impactful to long-running restores. To achieve a predictable time of database restores, consider configuring [maintenance window](maintenance-window.md) allowing scheduling of system updates at a specific day/time, and consider running database restores outside of the scheduled maintenance window day/time. 
- -| **Deployment option** | **Max # of concurrent requests being processed** | **Max # of concurrent requests being submitted** | -| :--- | --: | --: | -|**Single database (per subscription)**|30|100| -|**Elastic pool (per pool)**|4|2000| - - -There isn't a built-in method to restore the entire server. For an example of how to accomplish this task, see [Azure SQL Database: Full server recovery](https://gallery.technet.microsoft.com/Azure-SQL-Database-Full-82941666). - -> [!IMPORTANT] -> To recover by using automated backups, you must be a member of the SQL Server Contributor role or SQL Managed Instance Contributor role (depending on the recovery destination) in the subscription, or you must be the subscription owner. For more information, see [Azure RBAC: Built-in roles](../../role-based-access-control/built-in-roles.md). You can recover by using the Azure portal, PowerShell, or the REST API. You can't use Transact-SQL. - -## Point-in-time restore - -You can restore a standalone, pooled, or instance database to an earlier point in time by using the Azure portal, [PowerShell](/powershell/module/az.sql/restore-azsqldatabase), or the [REST API](/rest/api/sql/databases/createorupdate#creates-a-database-from-pointintimerestore.). The request can specify any service tier or compute size for the restored database. Ensure that you have sufficient resources on the server to which you are restoring the database. - -When complete, the restore creates a new database on the same server as the original database. The restored database is charged at normal rates, based on its service tier and compute size. You don't incur charges until the database restore is complete. - -You generally restore a database to an earlier point for recovery purposes. You can treat the restored database as a replacement for the original database or use it as a data source to update the original database. 
- -> [!IMPORTANT] -> You can only run restore on the same server, cross-server restoration is not supported by Point-in-time restore. - -- **Database replacement** - - If you intend the restored database to be a replacement for the original database, you should specify the original database's compute size and service tier. You can then rename the original database and give the restored database the original name by using the [ALTER DATABASE](/sql/t-sql/statements/alter-database-azure-sql-database) command in T-SQL. - -- **Data recovery** - - If you plan to retrieve data from the restored database to recover from a user or application error, you need to write and execute a data recovery script that extracts data from the restored database and applies to the original database. Although the restore operation may take a long time to complete, the restoring database is visible in the database list throughout the restore process. If you delete the database during the restore, the restore operation will be canceled and you will not be charged for the database that did not complete the restore. - -### Point-in-time restore by using Azure portal - -You can recover a single or instance database to a point in time from the overview blade of the database you want to restore in the Azure portal. - -#### SQL Database - -To recover a database to a point in time by using the Azure portal, open the database overview page and select **Restore** on the toolbar. Choose the backup source, and select the point-in-time backup point from which a new database will be created. - - ![Screenshot of database restore options for SQL Database.](./media/recovery-using-backups/pitr-backup-sql-database-annotated.png) - -#### SQL Managed Instance - -To recover a managed instance database to a point in time by using the Azure portal, open the database overview page, and select **Restore** on the toolbar. Choose the point-in-time backup point from which a new database will be created. 
- - ![Screenshot of database restore options for SQL managed instance.](./media/recovery-using-backups/pitr-backup-managed-instance-annotated.png) - -> [!TIP] -> To programmatically restore a database from a backup, see [Programmatic recovery using automated backups](recovery-using-backups.md). - -## Deleted database restore - -You can restore a deleted database to the deletion time, or an earlier point in time, on the same server or the same managed instance. You can accomplish this through the Azure portal, [PowerShell](/powershell/module/az.sql/restore-azsqldatabase), or the [REST (createMode=Restore)](/rest/api/sql/databases/createorupdate). You restore a deleted database by creating a new database from the backup. - -> [!IMPORTANT] -> If you delete a server or managed instance, all its databases are also deleted and can't be recovered. You can't restore a deleted server or managed instance. - -### Deleted database restore by using the Azure portal - -You restore deleted databases from the Azure portal from the server or managed instance resource. - -> [!TIP] -> It may take several minutes for recently deleted databases to appear on the **Deleted databases** page in Azure portal, or when displaying deleted databases [programmatically](#programmatic-recovery-using-automated-backups). - -#### SQL Database - -To recover a deleted database to the deletion time by using the Azure portal, open the server overview page, and select **Deleted databases**. Select a deleted database that you want to restore, and type the name for the new database that will be created with data restored from the backup. - - ![Screenshot of restore deleted database](./media/recovery-using-backups/restore-deleted-sql-database-annotated.png) - -#### SQL Managed Instance - -To recover a managed database by using the Azure portal, open the managed instance overview page, and select **Deleted databases**. 
Select a deleted database that you want to restore, and type the name for the new database that will be created with data restored from the backup. - - ![Screenshot of restore deleted Azure SQL Managed Instance database](./media/recovery-using-backups/restore-deleted-sql-managed-instance-annotated.png) - -### Deleted database restore by using PowerShell - -Use the following sample scripts to restore a deleted database for either SQL Database or SQL Managed Instance by using PowerShell. - -#### SQL Database - -For a sample PowerShell script showing how to restore a deleted database in Azure SQL Database, see [Restore a database using PowerShell](scripts/restore-database-powershell.md). - -#### SQL Managed Instance - -For a sample PowerShell script showing how to restore a deleted instance database, see [Restore deleted instance database using PowerShell](../managed-instance/point-in-time-restore.md#restore-a-deleted-database) - -> [!TIP] -> To programmatically restore a deleted database, see [Programmatically performing recovery using automated backups](recovery-using-backups.md). - -## Geo-restore - -> [!IMPORTANT] -> - Geo-restore is available only for SQL databases or managed instances configured with geo-redundant [backup storage](automated-backups-overview.md#backup-storage-redundancy). If you are not currently using geo-replicated backups for a database, you can change this by [configuring backup storage redundancy](automated-backups-overview.md#configure-backup-storage-redundancy). -> - Geo-restore can be performed on SQL databases or managed instances residing in the same subscription only. - -You can restore a database on any SQL Database server or an instance database on any managed instance in any Azure region from the most recent geo-replicated backups. Geo-restore uses a geo-replicated backup as its source. You can request geo-restore even if the database or datacenter is inaccessible due to an outage. 
- -Geo-restore is the default recovery option when your database is unavailable because of an incident in the hosting region. You can restore the database to a server in any other region. There is a delay between when a backup is taken and when it is geo-replicated to an Azure blob in a different region. As a result, the restored database can be up to one hour behind the original database. The following illustration shows a database restore from the last available backup in another region. - -![Graphic of geo-restore](./media/recovery-using-backups/geo-restore-2.png) - -### Geo-restore by using the Azure portal - -From the Azure portal, you create a new single or managed instance database and select an available geo-restore backup. The newly created database contains the geo-restored backup data. - -#### SQL Database - -To geo-restore a single database from the Azure portal in the region and server of your choice, follow these steps: - -1. From **Dashboard**, select **Add** > **Create SQL Database**. On the **Basics** tab, enter the required information. -2. Select **Additional settings**. -3. For **Use existing data**, select **Backup**. -4. For **Backup**, select a backup from the list of available geo-restore backups. - - ![Screenshot of Create SQL Database options](./media/recovery-using-backups/geo-restore-azure-sql-database-list-annotated.png) - -Complete the process of creating a new database from the backup. When you create a database in Azure SQL Database, it contains the restored geo-restore backup. - -#### SQL Managed Instance - -To geo-restore a managed instance database from the Azure portal to an existing managed instance in a region of your choice, select a managed instance on which you want a database to be restored. Follow these steps: - -1. Select **New database**. -2. Type a desired database name. -3. Under **Use existing data**, select **Backup**. -4. Select a backup from the list of available geo-restore backups. 
- - ![Screenshot of New database options](./media/recovery-using-backups/geo-restore-sql-managed-instance-list-annotated.png) - -Complete the process of creating a new database. When you create the instance database, it contains the restored geo-restore backup. - -### Geo-restore by using PowerShell - -#### SQL Database - -For a PowerShell script that shows how to perform geo-restore for a single database, see [Use PowerShell to restore a single database to an earlier point in time](scripts/restore-database-powershell.md). - -#### SQL Managed Instance - -For a PowerShell script that shows how to perform geo-restore for a managed instance database, see [Use PowerShell to restore a managed instance database to another geo-region](../managed-instance/scripts/restore-geo-backup.md). - -### Geo-restore considerations - -You can't perform a point-in-time restore on a geo-secondary database. You can do so only on a primary database. For detailed information about using geo-restore to recover from an outage, see [Recover from an outage](disaster-recovery-guidance.md#recover-using-geo-restore). - -> [!IMPORTANT] -> Geo-restore is the most basic disaster-recovery solution available in SQL Database and SQL Managed Instance. It relies on automatically created geo-replicated backups with a recovery point objective (RPO) up to 1 hour and an estimated recovery time of up to 12 hours. It doesn't guarantee that the target region will have the capacity to restore your databases after a regional outage, because a sharp increase of demand is likely. If your application uses relatively small databases and is not critical to the business, geo-restore is an appropriate disaster-recovery solution. -> -> For business-critical applications that require large databases and must ensure business continuity, use [Auto-failover groups](auto-failover-group-overview.md). It offers a much lower RPO and recovery time objective, and the capacity is always guaranteed. 
-> -> For more information about business continuity choices, see [Overview of business continuity](business-continuity-high-availability-disaster-recover-hadr-overview.md). - -## Programmatic recovery using automated backups - -You can also use Azure PowerShell or the REST API for recovery. The following tables describe the set of commands available. - -### PowerShell - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported by SQL Database and SQL Managed Instance, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). Arguments for the commands in the Az module and in Azure Resource Manager modules are to a great extent identical. - -> [!NOTE] -> Restore points represent a period between the earliest restore point and the latest log backup point. Information on latest restore point is currently unavailable on Azure PowerShell. - -#### SQL Database - -To restore a standalone or pooled database, see [Restore-AzSqlDatabase](/powershell/module/az.sql/restore-azsqldatabase). - - | Cmdlet | Description | - | --- | --- | - | [Get-AzSqlDatabase](/powershell/module/az.sql/get-azsqldatabase) |Gets one or more databases. | - | [Get-AzSqlDeletedDatabaseBackup](/powershell/module/az.sql/get-azsqldeleteddatabasebackup) | Gets a deleted database that you can restore. | - | [Get-AzSqlDatabaseGeoBackup](/powershell/module/az.sql/get-azsqldatabasegeobackup) |Gets a geo-redundant backup of a database. | - | [Restore-AzSqlDatabase](/powershell/module/az.sql/restore-azsqldatabase) |Restores a database. | - - > [!TIP] - > For a sample PowerShell script that shows how to perform a point-in-time restore of a database, see [Restore a database by using PowerShell](scripts/restore-database-powershell.md). 
- -#### SQL Managed Instance - -To restore a managed instance database, see [Restore-AzSqlInstanceDatabase](/powershell/module/az.sql/restore-azsqlinstancedatabase). - - | Cmdlet | Description | - | --- | --- | - | [Get-AzSqlInstance](/powershell/module/az.sql/get-azsqlinstance) |Gets one or more managed instances. | - | [Get-AzSqlInstanceDatabase](/powershell/module/az.sql/get-azsqlinstancedatabase) | Gets an instance database. | - | [Restore-AzSqlInstanceDatabase](/powershell/module/az.sql/restore-azsqlinstancedatabase) |Restores an instance database. | - -### REST API - -To restore a database by using the REST API: - -| API | Description | -| --- | --- | -| [REST (createMode=Recovery)](/rest/api/sql/databases) |Restores a database. | -| [Get Create or Update Database Status](/rest/api/sql/operations) |Returns the status during a restore operation. | - -### Azure CLI - -#### SQL Database - -To restore a database by using the Azure CLI, see [az sql db restore](/cli/azure/sql/db#az-sql-db-restore). - -#### SQL Managed Instance - -To restore a managed instance database by using the Azure CLI, see [az sql midb restore](/cli/azure/sql/midb#az-sql-midb-restore). - -## Summary - -Automatic backups protect your databases from user and application errors, accidental database deletion, and prolonged outages. This built-in capability is available for all service tiers and compute sizes. - -## Next steps - -- [Business continuity overview](business-continuity-high-availability-disaster-recover-hadr-overview.md) -- [SQL Database automated backups](automated-backups-overview.md) -- [Long-term retention](long-term-retention-overview.md) -- To learn about faster recovery options, see [Active geo-replication](active-geo-replication-overview.md) or [Auto-failover groups](auto-failover-group-overview.md). 
diff --git a/articles/azure-sql/database/replication-to-sql-database.md b/articles/azure-sql/database/replication-to-sql-database.md deleted file mode 100644 index 527cd7ab6f721..0000000000000 --- a/articles/azure-sql/database/replication-to-sql-database.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -title: Azure SQL Server replication to Azure SQL Database -description: You can configure a database in Azure SQL Database as the push subscriber in a one-way transactional or snapshot replication topology. -services: sql-database -ms.service: sql-database -ms.subservice: replication -ms.custom: seo-lt-2019, sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: ferno-ms -ms.author: ferno -ms.reviewer: kendralittle, mathoma -ms.date: 04/28/2020 ---- -# Replication to Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -You can configure an Azure SQL Database as the push subscriber in a one-way transactional or snapshot replication topology. - -> [!NOTE] -> This article describes the use of [transactional replication](/sql/relational-databases/replication/transactional/transactional-replication) in Azure SQL Database. It is unrelated to [active geo-replication](./active-geo-replication-overview.md), an Azure SQL Database feature that allows you to create complete readable replicas of individual databases. - -## Supported configurations - -- Azure SQL Database can only be the push subscriber of a SQL Server publisher and distributor. -- The SQL Server instance acting as publisher and/or distributor can be an instance of [SQL Server running on-premises](https://www.microsoft.com/sql-server/sql-server-downloads), an [Azure SQL Managed Instance](../managed-instance/instance-create-quickstart.md), or an instance of [SQL Server running on an Azure virtual machine in the cloud](../virtual-machines/windows/sql-vm-create-portal-quickstart.md). -- The distribution database and the replication agents cannot be placed on a database in Azure SQL Database. 
-- [Snapshot](/sql/relational-databases/replication/snapshot-replication) and [one-way transactional](/sql/relational-databases/replication/transactional/transactional-replication) replication are supported. Peer-to-peer transactional replication and merge replication are not supported. - -### Versions - -To successfully replicate to a database in Azure SQL Database, SQL Server publishers and distributors must be using (at least) one of the following versions: - -Publishing to any Azure SQL Database from a SQL Server database is supported by the following versions of SQL Server: - -- SQL Server 2016 and greater -- SQL Server 2014 [RTM CU10 (12.0.4427.24)](https://support.microsoft.com/help/3094220/cumulative-update-10-for-sql-server-2014) or [SP1 CU3 (12.0.2556.4)](https://support.microsoft.com/help/3094221/cumulative-update-3-for-sql-server-2014-service-pack-1) -- SQL Server 2012 [SP2 CU8 (11.0.5634.1)](https://support.microsoft.com/help/3082561/cumulative-update-8-for-sql-server-2012-sp2) or [SP3 (11.0.6020.0)](https://www.microsoft.com/download/details.aspx?id=49996) - -> [!NOTE] -> Attempting to configure replication using an unsupported version can result in error number MSSQL_REPL20084 (The process could not connect to Subscriber.) and MSSQL_REPL40532 (Cannot open server \ requested by the login. The login failed.). - -To use all the features of Azure SQL Database, you must be using the latest versions of [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) and [SQL Server Data Tools](/sql/ssdt/download-sql-server-data-tools-ssdt). 
- -### Types of replication - -There are different [types of replication](/sql/relational-databases/replication/types-of-replication): - -| Replication | Azure SQL Database | Azure SQL Managed Instance | -| :----| :------------- | :--------------- | -| [**Standard Transactional**](/sql/relational-databases/replication/transactional/transactional-replication) | Yes (only as subscriber) | Yes | -| [**Snapshot**](/sql/relational-databases/replication/snapshot-replication) | Yes (only as subscriber) | Yes| -| [**Merge replication**](/sql/relational-databases/replication/merge/merge-replication) | No | No| -| [**Peer-to-peer**](/sql/relational-databases/replication/transactional/peer-to-peer-transactional-replication) | No | No| -| [**Bidirectional**](/sql/relational-databases/replication/transactional/bidirectional-transactional-replication) | No | Yes| -| [**Updatable subscriptions**](/sql/relational-databases/replication/transactional/updatable-subscriptions-for-transactional-replication) | No | No| - - -## Remarks - -- Only push subscriptions to Azure SQL Database are supported. -- Replication can be configured by using [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) or by executing Transact-SQL statements on the publisher. You cannot configure replication by using the Azure portal. -- Replication can only use SQL Server authentication logins to connect to Azure SQL Database. -- Replicated tables must have a primary key. -- You must have an existing Azure subscription. -- The Azure SQL Database subscriber can be in any region. -- A single publication on SQL Server can support both Azure SQL Database and SQL Server (on-premises and SQL Server in an Azure virtual machine) subscribers. -- Replication management, monitoring, and troubleshooting must be performed from SQL Server rather than Azure SQL Database. -- Only `@subscriber_type = 0` is supported in **sp_addsubscription** for SQL Database. 
-- Azure SQL Database does not support bi-directional, immediate, updatable, or peer-to-peer replication. - -## Replication Architecture - -![Diagram shows the replication architecture with Azure SQL Database, which contains several subscriber clusters in different regions, and on-premises Azure virtual machines, which contains a Publisher, Logread executable, and distributor executables that connect to remote clusters.](./media/replication-to-sql-database/replication-to-sql-database.png) - -## Scenarios - -### Typical Replication Scenario - -1. Create a transactional replication publication on a SQL Server database. -2. On SQL Server use the **New Subscription Wizard** or Transact-SQL statements to create a push to subscription to Azure SQL Database. -3. With single and pooled databases in Azure SQL Database, the initial data set is a snapshot that is created by the Snapshot Agent and distributed and applied by the Distribution Agent. With a SQL Managed Instance publisher, you can also use a database backup to seed the Azure SQL Database subscriber. - -### Data migration scenario - -1. Use transactional replication to replicate data from a SQL Server database to Azure SQL Database. -2. Redirect the client or middle-tier applications to update the database copy. -3. Stop updating the SQL Server version of the table and remove the publication. 
- -## Limitations - -The following options are not supported for Azure SQL Database subscriptions: - -- Copy file groups association -- Copy table partitioning schemes -- Copy index partitioning schemes -- Copy user defined statistics -- Copy default bindings -- Copy rule bindings -- Copy fulltext indexes -- Copy XML XSD -- Copy XML indexes -- Copy permissions -- Copy spatial indexes -- Copy filtered indexes -- Copy data compression attribute -- Copy sparse column attribute -- Convert filestream to MAX data types -- Convert hierarchyid to MAX data types -- Convert spatial to MAX data types -- Copy extended properties - -### Limitations to be determined - -- Copy collation -- Execution in a serialized transaction of the SP - -## Examples - -Create a publication and a push subscription. For more information, see: - -- [Create a Publication](/sql/relational-databases/replication/publish/create-a-publication) -- [Create a Push Subscription](/sql/relational-databases/replication/create-a-push-subscription/) by using the server name as the subscriber (for example **N'azuresqldbdns.database.windows.net'**) and the Azure SQL Database name as the destination database (for example **AdventureWorks**). 
- -## See Also - -- [Transactional replication](../managed-instance/replication-transactional-overview.md) -- [Create a Publication](/sql/relational-databases/replication/publish/create-a-publication) -- [Create a Push Subscription](/sql/relational-databases/replication/create-a-push-subscription/) -- [Types of Replication](/sql/relational-databases/replication/types-of-replication) -- [Monitoring (Replication)](/sql/relational-databases/replication/monitor/monitoring-replication) -- [Initialize a Subscription](/sql/relational-databases/replication/initialize-a-subscription) \ No newline at end of file diff --git a/articles/azure-sql/database/reserved-capacity-overview.md b/articles/azure-sql/database/reserved-capacity-overview.md deleted file mode 100644 index 92fc83003411d..0000000000000 --- a/articles/azure-sql/database/reserved-capacity-overview.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: Save compute costs with reserved capacity -titleSuffix: Azure SQL Database & SQL Managed Instance -description: Learn how to buy Azure SQL Database and SQL Managed Instance reserved capacity to save on your compute costs. -services: sql-database -ms.service: sql-db-mi -ms.subservice: service-overview -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: conceptual -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 04/06/2022 ---- -# Save costs for resources with reserved capacity - Azure SQL Database & SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Save money with Azure SQL Database and SQL Managed Instance by committing to a reservation for compute resources compared to pay-as-you-go prices. With reserved capacity, you make a commitment for SQL Database and/or SQL Managed Instance use for a period of one or three years to get a significant discount on the compute costs. To purchase reserved capacity, you need to specify the Azure region, deployment type, performance tier, and term. 
- -You do not need to assign the reservation to a specific database or managed instance. Matching existing deployments that are already running or ones that are newly deployed automatically get the benefit. By purchasing a reservation, you commit to usage for the compute costs for a period of one or three years. As soon as you buy a reservation, the compute charges that match the reservation attributes are no longer charged at the pay-as-you go rates. - -A reservation applies to both primary and billable secondary compute replicas, but does not cover software, networking, or storage charges associated with the service. At the end of the reservation term, the billing benefit expires and the database or managed instance is billed at the pay-as-you go price. Reservations do not automatically renew. For pricing information, see the [reserved capacity offering](https://azure.microsoft.com/pricing/details/sql-database/managed/). - -You can buy reserved capacity in the [Azure portal](https://portal.azure.com). Pay for the reservation [up front or with monthly payments](../../cost-management-billing/reservations/prepare-buy-reservation.md). To buy reserved capacity: - -- You must be in the owner role for at least one Enterprise or individual subscription with pay-as-you-go rates. -- For Enterprise subscriptions, **Add Reserved Instances** must be enabled in the [EA portal](https://ea.azure.com). Or, if that setting is disabled, you must be an EA Admin on the subscription. Reserved capacity. - -For more information about how enterprise customers and Pay-As-You-Go customers are charged for reservation purchases, see [Understand Azure reservation usage for your Enterprise enrollment](../../cost-management-billing/reservations/understand-reserved-instance-usage-ea.md) and [Understand Azure reservation usage for your Pay-As-You-Go subscription](../../cost-management-billing/reservations/understand-reserved-instance-usage.md). 
- -> [!NOTE] -> Purchasing reserved capacity does not pre-allocate or reserve specific infrastructure resources (virtual machines or nodes) for your use. - -## Determine correct size before purchase - -The size of reservation should be based on the total amount of compute used by the existing or soon-to-be-deployed database or managed instance within a specific region and using the same performance tier and hardware configuration. - -For example, let's suppose that you are running one general purpose, Gen5 – 16 vCore elastic pool and two business critical Gen5 – 4 vCore single databases. Further, let's supposed that you plan to deploy within the next month an additional general purpose Gen5 – 16 vCore elastic pool and one business critical Gen5 – 32 vCore elastic pool. Also, let's suppose that you know that you will need these resources for at least 1 year. In this case, you should purchase a 32 (2x16) vCores 1-year reservation for single database/elastic pool general purpose - Gen5 and a 40 (2x4 + 32) vCore 1-year reservation for single database/elastic pool business critical - Gen5. - -## Buy reserved capacity - -1. Sign in to the [Azure portal](https://portal.azure.com). -2. Select **All services** > **Reservations**. -3. Select **Add** and then in the **Purchase Reservations** pane, select **SQL Database** to purchase a new reservation for SQL Database. -4. Fill in the required fields. Existing databases in SQL Database and SQL Managed Instance that match the attributes you select qualify to get the reserved capacity discount. The actual number of databases or managed instances that get the discount depends on the scope and quantity selected. - - ![Screenshot before submitting the reserved capacity purchase](./media/reserved-capacity-overview/sql-reserved-vcores-purchase.png) - - The following table describes required fields. - - | Field | Description| - |------------|--------------| - |Subscription|The subscription used to pay for the capacity reservation. 
The payment method on the subscription is charged the upfront costs for the reservation. The subscription type must be an enterprise agreement (offer number MS-AZR-0017P or MS-AZR-0148P) or an individual agreement with pay-as-you-go pricing (offer number MS-AZR-0003P or MS-AZR-0023P). For an enterprise subscription, the charges are deducted from the enrollment's Azure Prepayment (previously called monetary commitment) balance or charged as overage. For an individual subscription with pay-as-you-go pricing, the charges are billed to the credit card or invoice payment method on the subscription.| - |Scope |The vCore reservation's scope can cover one subscription or multiple subscriptions (shared scope). If you select

    **Shared**, the vCore reservation discount is applied to the database or managed instance running in any subscriptions within your billing context. For enterprise customers, the shared scope is the enrollment and includes all subscriptions within the enrollment. For Pay-As-You-Go customers, the shared scope is all Pay-As-You-Go subscriptions created by the account administrator.

    **Single subscription**, the vCore reservation discount is applied to the databases or managed instances in this subscription.

    **Single resource group**, the reservation discount is applied to the instances of databases or managed instances in the selected subscription and the selected resource group within that subscription.

    **Management group**, the reservation discount is applied to the matching resource in the list of subscriptions that are a part of both the management group and billing scope.| - |Region |The Azure region that's covered by the capacity reservation.| - |Deployment Type|The SQL resource type that you want to buy the reservation for.| - |Performance Tier|The service tier for the databases or managed instances. | - |Term |One year or three years.| - |Quantity |The amount of compute resources being purchased within the capacity reservation. The quantity is a number of vCores in the selected Azure region and Performance tier that are being reserved and will get the billing discount. For example, if you run or plan to run multiple databases with the total compute capacity of Gen5 16 vCores in the East US region, then you would specify the quantity as 16 to maximize the benefit for all the databases. | - -1. Review the cost of the capacity reservation in the **Costs** section. -1. Select **Purchase**. -1. Select **View this Reservation** to see the status of your purchase. - -## Cancel, exchange, or refund reservations - -You can cancel, exchange, or refund reservations with certain limitations. For more information, see [Self-service exchanges and refunds for Azure Reservations](../../cost-management-billing/reservations/exchange-and-refund-azure-reservations.md). - -## vCore size flexibility - -vCore size flexibility helps you scale up or down within a performance tier and region, without losing the reserved capacity benefit. Reserved capacity also provides you with the flexibility to temporarily move your hot databases in and out of elastic pools (within the same region and performance tier) as part of your normal operations without losing the reserved capacity benefit. By keeping an unapplied buffer in your reservation, you can effectively manage the performance spikes without exceeding your budget. 
- -## Limitation - -You cannot reserve DTU-based (basic, standard, or premium) databases in SQL Database. Reserved capacity pricing is only supported for features and products that are in General Availability state. - -## Need help? Contact us - -If you have questions or need help, [create a support request](https://portal.azure.com/#blade/Microsoft_Azure_Support/HelpAndSupportBlade/newsupportrequest). - -## Next steps - -The vCore reservation discount is applied automatically to the number of databases or managed instances that match the capacity reservation scope and attributes. You can update the scope of the capacity reservation through the [Azure portal](https://portal.azure.com), PowerShell, Azure CLI, or the API. - -- For information on Azure SQL Database service tiers for the vCore model, see [vCore model overview - Azure SQL Database](service-tiers-sql-database-vcore.md). -- For information on Azure SQL Managed Instance service tiers for the vCore model, see [vCore model overview - Azure SQL Managed Instance](../managed-instance/service-tiers-managed-instance-vcore.md). - -To learn how to manage the capacity reservation, see [manage reserved capacity](../../cost-management-billing/reservations/manage-reserved-vm-instance.md). 
- -To learn more about Azure Reservations, see the following articles: - -- [What are Azure Reservations?](../../cost-management-billing/reservations/save-compute-costs-reservations.md) -- [Manage Azure Reservations](../../cost-management-billing/reservations/manage-reserved-vm-instance.md) -- [Understand Azure Reservations discount](../../cost-management-billing/reservations/understand-reservation-charges.md) -- [Understand reservation usage for your Pay-As-You-Go subscription](../../cost-management-billing/reservations/understand-reserved-instance-usage.md) -- [Understand reservation usage for your Enterprise enrollment](../../cost-management-billing/reservations/understand-reserved-instance-usage-ea.md) -- [Azure Reservations in Partner Center Cloud Solution Provider (CSP) program](/partner-center/azure-reservations) diff --git a/articles/azure-sql/database/resource-graph-samples.md b/articles/azure-sql/database/resource-graph-samples.md deleted file mode 100644 index 63f6aa8c0e4d3..0000000000000 --- a/articles/azure-sql/database/resource-graph-samples.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Azure Resource Graph sample queries for Azure SQL Database -description: Sample Azure Resource Graph queries for Azure SQL Database showing use of resource types and tables to access Azure SQL Database related resources and properties. -ms.date: 03/08/2022 -ms.topic: sample -author: LitKnd -ms.author: kendralittle -ms.service: sql-database -ms.subservice: service-overview -ms.custom: subject-resourcegraph-sample ---- -# Azure Resource Graph sample queries for Azure SQL Database - -This page is a collection of [Azure Resource Graph](../../governance/resource-graph/overview.md) -sample queries for Azure SQL Database. 
For a complete list of Azure Resource Graph samples, see -[Resource Graph samples by Category](../../governance/resource-graph/samples/samples-by-category.md) -and [Resource Graph samples by Table](../../governance/resource-graph/samples/samples-by-table.md). - -## Sample queries - -[!INCLUDE [azure-resource-graph-samples-cat-sql](../../../includes/resource-graph/samples/bycat/azure-sql.md)] - -## Next steps - -- Learn more about the [query language](../../governance/resource-graph/concepts/query-language.md). -- Learn more about how to [explore resources](../../governance/resource-graph/concepts/explore-resources.md). -- See samples of [Starter language queries](../../governance/resource-graph/samples/starter.md). -- See samples of [Advanced language queries](../../governance/resource-graph/samples/advanced.md). diff --git a/articles/azure-sql/database/resource-health-to-troubleshoot-connectivity.md b/articles/azure-sql/database/resource-health-to-troubleshoot-connectivity.md deleted file mode 100644 index 8d71a6f04ec7c..0000000000000 --- a/articles/azure-sql/database/resource-health-to-troubleshoot-connectivity.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Use Azure Resource Health to monitor database health -description: Use Azure Resource Health to monitor Azure SQL Database and Azure SQL Managed Instance health, helps you diagnose and get support when an Azure issue impacts your SQL resources. 
-services: sql-database -ms.service: sql-db-mi -ms.subservice: performance -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: conceptual -author: AlainDormehlMSFT -ms.author: aldorme -ms.reviewer: kendralittle, mathoma, wiassaf -ms.date: 03/24/2021 ---- -# Use Resource Health to troubleshoot connectivity for Azure SQL Database and Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -[Resource Health](../../service-health/resource-health-overview.md#get-started) for Azure SQL Database and Azure SQL Managed Instance helps you diagnose and get support when an Azure issue impacts your SQL resources. It informs you about the current and past health of your resources and helps you mitigate issues. Resource Health provides technical support when you need help with Azure service issues. - -![Overview](./media/resource-health-to-troubleshoot-connectivity/sql-resource-health-overview.jpg) - -## Health checks - -Resource Health determines the health of your SQL resource by examining the success and failure of logins to the resource. Currently, Resource Health for your SQL Database resource only examines login failures due to system error and not user error. The Resource Health status is updated every 1 to 2 minutes. - -## Health states - -### Available - -A status of **Available** means that Resource Health has not detected login failures due to system errors on your SQL resource. - -![Available](./media/resource-health-to-troubleshoot-connectivity/sql-resource-health-available.jpg) - -### Degraded - -A status of **Degraded** means that Resource Health has detected a majority of successful logins, but some failures as well. These are most likely transient login errors. To reduce the impact of connection issues caused by transient login errors, implement [retry logic](troubleshoot-common-connectivity-issues.md#retry-logic-for-transient-errors) in your code. 
- -![Degraded](./media/resource-health-to-troubleshoot-connectivity/sql-resource-health-degraded.jpg) - -### Unavailable - -A status of **Unavailable** means that Resource Health has detected consistent login failures to your SQL resource. If your resource remains in this state for an extended period of time, contact support. - -![Unavailable](./media/resource-health-to-troubleshoot-connectivity/sql-resource-health-unavailable.jpg) - -### Unknown - -The health status of **Unknown** indicates that Resource Health hasn't received information about this resource for more than 10 minutes. Although this status isn't a definitive indication of the state of the resource, it is an important data point in the troubleshooting process. If the resource is running as expected, the status of the resource will change to Available after a few minutes. If you're experiencing problems with the resource, the Unknown health status might suggest that an event in the platform is affecting the resource. - -![Unknown](./media/resource-health-to-troubleshoot-connectivity/sql-resource-health-unknown.jpg) - -## Historical information - -You can access up to 14 days of health history in the Health History section of Resource Health. The section will also contain the downtime reason (when available) for the downtimes reported by Resource Health. Currently, Azure shows the downtime for your database resource at a two-minute granularity. The actual downtime is likely less than a minute. The average is 8 seconds. - -### Downtime reasons - -When your database experiences downtime, analysis is performed to determine a reason. When available, the downtime reason is reported in the Health History section of Resource Health. Downtime reasons are typically published within 45 minutes after an event. - -#### Planned maintenance - -The Azure infrastructure periodically performs planned maintenance – the upgrade of hardware or software components in the datacenter. 
While the database undergoes maintenance, Azure SQL may terminate some existing connections and refuse new ones. The login failures experienced during planned maintenance are typically transient, and [retry logic](troubleshoot-common-connectivity-issues.md#retry-logic-for-transient-errors) helps reduce the impact. If you continue to experience login errors, contact support. - -#### Reconfiguration - -Reconfigurations are considered transient conditions and are expected from time to time. These events can be triggered by load balancing or software/hardware failures. Any client production application that connects to a cloud database should implement a robust connection [retry logic](troubleshoot-common-connectivity-issues.md#retry-logic-for-transient-errors), as it would help mitigate these situations and should generally make the errors transparent to the end user. - -## Next steps - -- Learn more about [retry logic for transient errors](troubleshoot-common-connectivity-issues.md#retry-logic-for-transient-errors). -- [Troubleshoot, diagnose, and prevent SQL connection errors](troubleshoot-common-connectivity-issues.md). -- Learn more about [configuring Resource Health alerts](../../service-health/resource-health-alert-arm-template-guide.md). -- Get an overview of [Resource Health](../../service-health/resource-health-overview.md). -- Review [Resource Health FAQ](../../service-health/resource-health-faq.yml). diff --git a/articles/azure-sql/database/resource-limits-dtu-elastic-pools.md b/articles/azure-sql/database/resource-limits-dtu-elastic-pools.md deleted file mode 100644 index 143e3159ecf62..0000000000000 --- a/articles/azure-sql/database/resource-limits-dtu-elastic-pools.md +++ /dev/null @@ -1,209 +0,0 @@ ---- -title: DTU resource limits elastic pools -description: This page describes some common DTU resource limits for elastic pools in Azure SQL Database. 
-services: sql-database -ms.service: sql-database -ms.subservice: elastic-pools -ms.custom: seo-lt-2019 sqldbrb=1 references_regions -ms.devlang: -ms.topic: reference -author: dimitri-furman -ms.author: dfurman -ms.reviewer: kendralittle, mathoma -ms.date: 04/13/2022 ---- -# Resources limits for elastic pools using the DTU purchasing model -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article provides the detailed resource limits for databases in Azure SQL Database that are within an elastic pool using the DTU purchasing model. - -* For DTU purchasing model limits for single databases on a server, see [Overview of resource limits on a server](resource-limits-logical-server.md). -* For DTU purchasing model resource limits for Azure SQL Database, see [DTU resource limits single databases](resource-limits-dtu-single-databases.md) and [DTU resource limits elastic pools](resource-limits-dtu-elastic-pools.md). -* For vCore resource limits, see [vCore resource limits - Azure SQL Database](resource-limits-vcore-single-databases.md) and [vCore resource limits - elastic pools](resource-limits-vcore-elastic-pools.md). -* For more information regarding the different purchasing models, see [Purchasing models and service tiers](purchasing-models.md). - -Each read-only replica has its own resources such as DTUs, workers, and sessions. Each read-only replica is subject to the resource limits detailed later in this article. - -## Elastic pool: Storage sizes and compute sizes - -For Azure SQL Database elastic pools, the following tables show the resources available at each service tier and compute size. 
You can set the service tier, compute size, and storage amount using: - -* [Transact-SQL](elastic-pool-scale.md) via [ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql#overview-sql-database) -* [Azure portal](elastic-pool-manage.md#azure-portal) -* [PowerShell](elastic-pool-manage.md#powershell) -* [Azure CLI](elastic-pool-manage.md#azure-cli) -* [REST API](elastic-pool-manage.md#rest-api) - - -> [!IMPORTANT] -> For scaling guidance and considerations, see [Scale an elastic pool](elastic-pool-scale.md) - -The resource limits of individual databases in elastic pools are generally the same as for single databases outside of pools based on DTUs and the service tier. For example, the max concurrent workers for an S2 database is 120 workers. So, the max concurrent workers for a database in a Standard pool is also 120 workers if the max DTU per database in the pool is 50 DTUs (which is equivalent to S2). - -For the same number of DTUs, resources provided to an elastic pool may exceed the resources provided to a single database outside of an elastic pool. This means it is possible for the eDTU utilization of an elastic pool to be less than the summation of DTU utilization across databases within the pool, depending on workload patterns. For example, in an extreme case with only one database in an elastic pool where database DTU utilization is 100%, it is possible for pool eDTU utilization to be 50% for certain workload patterns. This can happen even if max DTU per database remains at the maximum supported value for the given pool size. - -> [!NOTE] -> The storage per pool resource limit in each of the following tables do not include tempdb and log storage. 
- -### Basic elastic pool limits - -| eDTUs per pool | **50** | **100** | **200** | **300** | **400** | **800** | **1200** | **1600** | -|:---|---:|---:|---:| ---: | ---: | ---: | ---: | ---: | -| Included storage per pool (GB) | 5 | 10 | 20 | 29 | 39 | 78 | 117 | 156 | -| Max storage per pool (GB) | 5 | 10 | 20 | 29 | 39 | 78 | 117 | 156 | -| Max In-Memory OLTP storage per pool (GB) | N/A | N/A | N/A | N/A | N/A | N/A | N/A | N/A | -| Max number DBs per pool 1 | 100 | 200 | 500 | 500 | 500 | 500 | 500 | 500 | -| Max concurrent workers per pool 2 | 100 | 200 | 400 | 600 | 800 | 1600 | 2400 | 3200 | -| Max concurrent sessions per pool 2 | 30000 | 30000 | 30000 | 30000 |30000 | 30000 | 30000 | 30000 | -| Min DTU per database choices | 0, 5 | 0, 5 | 0, 5 | 0, 5 | 0, 5 | 0, 5 | 0, 5 | 0, 5 | -| Max DTU per database choices | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | -| Max storage per database (GB) | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | - - -1 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -2 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. 
- -### Standard elastic pool limits - -| eDTUs per pool | **50** | **100** | **200** | **300** | **400** | **800**| -|:---|---:|---:|---:| ---: | ---: | ---: | -| Included storage per pool (GB) 1 | 50 | 100 | 200 | 300 | 400 | 800 | -| Max storage per pool (GB) | 500 | 750 | 1024 | 1280 | 1536 | 2048 | -| Max In-Memory OLTP storage per pool (GB) | N/A | N/A | N/A | N/A | N/A | N/A | -| Max number DBs per pool 2 | 100 | 200 | 500 | 500 | 500 | 500 | -| Max concurrent workers per pool 3 | 100 | 200 | 400 | 600 | 800 | 1600 | -| Max concurrent sessions per pool 3 | 30000 | 30000 | 30000 | 30000 | 30000 | 30000 | -| Min DTU per database choices | 0, 10, 20, 50 | 0, 10, 20, 50, 100 | 0, 10, 20, 50, 100, 200 | 0, 10, 20, 50, 100, 200, 300 | 0, 10, 20, 50, 100, 200, 300, 400 | 0, 10, 20, 50, 100, 200, 300, 400, 800 | -| Max DTU per database choices | 10, 20, 50 | 10, 20, 50, 100 | 10, 20, 50, 100, 200 | 10, 20, 50, 100, 200, 300 | 10, 20, 50, 100, 200, 300, 400 | 10, 20, 50, 100, 200, 300, 400, 800 | -| Max storage per database (GB) | 1024 | 1024 | 1024 | 1024 | 1024 | 1024 | - - -1 See [SQL Database pricing options](https://azure.microsoft.com/pricing/details/sql-database/elastic/) for details on additional cost incurred due to any extra storage provisioned. - -2 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -3 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. 
- -### Standard elastic pool limits (continued) - -| eDTUs per pool | **1200** | **1600** | **2000** | **2500** | **3000** | -|:---|---:|---:|---:| ---: | ---: | -| Included storage per pool (GB) 1 | 1200 | 1600 | 2000 | 2500 | 3000 | -| Max storage per pool (GB) | 2560 | 3072 | 3584 | 4096 | 4096 | -| Max In-Memory OLTP storage per pool (GB) | N/A | N/A | N/A | N/A | N/A | -| Max number DBs per pool 2 | 500 | 500 | 500 | 500 | 500 | -| Max concurrent workers per pool 3 | 2400 | 3200 | 4000 | 5000 | 6000 | -| Max concurrent sessions per pool 3 | 30000 | 30000 | 30000 | 30000 | 30000 | -| Min DTU per database choices | 0, 10, 20, 50, 100, 200, 300, 400, 800, 1200 | 0, 10, 20, 50, 100, 200, 300, 400, 800, 1200, 1600 | 0, 10, 20, 50, 100, 200, 300, 400, 800, 1200, 1600, 2000 | 0, 10, 20, 50, 100, 200, 300, 400, 800, 1200, 1600, 2000, 2500 | 0, 10, 20, 50, 100, 200, 300, 400, 800, 1200, 1600, 2000, 2500, 3000 | -| Max DTU per database choices | 10, 20, 50, 100, 200, 300, 400, 800, 1200 | 10, 20, 50, 100, 200, 300, 400, 800, 1200, 1600 | 10, 20, 50, 100, 200, 300, 400, 800, 1200, 1600, 2000 | 10, 20, 50, 100, 200, 300, 400, 800, 1200, 1600, 2000, 2500 | 10, 20, 50, 100, 200, 300, 400, 800, 1200, 1600, 2000, 2500, 3000 | -| Max storage per database (GB) | 1024 | 1536 | 1792 | 2304 | 2816 | - - -1 See [SQL Database pricing options](https://azure.microsoft.com/pricing/details/sql-database/elastic/) for details on additional cost incurred due to any extra storage provisioned. - -2 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -3 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. 
If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. - -### Premium elastic pool limits - -| eDTUs per pool | **125** | **250** | **500** | **1000** | **1500**| -|:---|---:|---:|---:| ---: | ---: | -| Included storage per pool (GB) 1 | 250 | 500 | 750 | 1024 | 1536 | -| Max storage per pool (GB) | 1024 | 1024 | 1024 | 1024 | 1536 | -| Max In-Memory OLTP storage per pool (GB) | 1 | 2 | 4 | 10 | 12 | -| Max number DBs per pool 2 | 50 | 100 | 100 | 100 | 100 | -| Max concurrent workers per pool (requests) 3 | 200 | 400 | 800 | 1600 | 2400 | -| Max concurrent sessions per pool 3 | 30000 | 30000 | 30000 | 30000 | 30000 | -| Min eDTUs per database | 0, 25, 50, 75, 125 | 0, 25, 50, 75, 125, 250 | 0, 25, 50, 75, 125, 250, 500 | 0, 25, 50, 75, 125, 250, 500, 1000 | 0, 25, 50, 75, 125, 250, 500, 1000| -| Max eDTUs per database | 25, 50, 75, 125 | 25, 50, 75, 125, 250 | 25, 50, 75, 125, 250, 500 | 25, 50, 75, 125, 250, 500, 1000 | 25, 50, 75, 125, 250, 500, 1000| -| Max storage per database (GB) | 1024 | 1024 | 1024 | 1024 | 1536 | - - -1 See [SQL Database pricing options](https://azure.microsoft.com/pricing/details/sql-database/elastic/) for details on additional cost incurred due to any extra storage provisioned. - -2 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -3 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. 
If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. - -### Premium elastic pool limits (continued) - -| eDTUs per pool | **2000** | **2500** | **3000** | **3500** | **4000**| -|:---|---:|---:|---:| ---: | ---: | -| Included storage per pool (GB) 1 | 2048 | 2560 | 3072 | 3548 | 4096 | -| Max storage per pool (GB) | 2048 | 2560 | 3072 | 3548 | 4096| -| Max In-Memory OLTP storage per pool (GB) | 16 | 20 | 24 | 28 | 32 | -| Max number DBs per pool 2 | 100 | 100 | 100 | 100 | 100 | -| Max concurrent workers per pool 3 | 3200 | 4000 | 4800 | 5600 | 6400 | -| Max concurrent sessions per pool 3 | 30000 | 30000 | 30000 | 30000 | 30000 | -| Min DTU per database choices | 0, 25, 50, 75, 125, 250, 500, 1000, 1750 | 0, 25, 50, 75, 125, 250, 500, 1000, 1750 | 0, 25, 50, 75, 125, 250, 500, 1000, 1750 | 0, 25, 50, 75, 125, 250, 500, 1000, 1750 | 0, 25, 50, 75, 125, 250, 500, 1000, 1750, 4000 | -| Max DTU per database choices | 25, 50, 75, 125, 250, 500, 1000, 1750 | 25, 50, 75, 125, 250, 500, 1000, 1750 | 25, 50, 75, 125, 250, 500, 1000, 1750 | 25, 50, 75, 125, 250, 500, 1000, 1750 | 25, 50, 75, 125, 250, 500, 1000, 1750, 4000 | -| Max storage per database (GB) | 2048 | 2560 | 3072 | 3584 | 4096 | - - -1 See [SQL Database pricing options](https://azure.microsoft.com/pricing/details/sql-database/elastic/) for details on additional cost incurred due to any extra storage provisioned. - -2 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -3 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). 
For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. - -> [!IMPORTANT] -> More than 1 TB of storage in the Premium tier is currently available in all regions except: China East, China North, Germany Central, and Germany Northeast. In these regions, the storage max in the Premium tier is limited to 1 TB. For more information, see [P11-P15 current limitations](single-database-scale.md#p11-and-p15-constraints-when-max-size-greater-than-1-tb). - -If all DTUs of an elastic pool are used, then each database in the pool receives an equal amount of resources to process queries. The SQL Database service provides resource sharing fairness between databases by ensuring equal slices of compute time. Elastic pool resource sharing fairness is in addition to any amount of resource otherwise guaranteed to each database when the DTU min per database is set to a non-zero value. - -> [!NOTE] -> For additional information on storage limits in the Premium service tier, see [Storage space governance](resource-limits-logical-server.md#storage-space-governance). - -### Database properties for pooled databases - -For each elastic pool, you can optionally specify per database minimum and maximum DTUs to modify resource consumption patterns within the pool. Specified min and max values apply to all databases in the pool. Customizing min and max DTUs for individual databases in the pool is not supported. - -You can also set maximum storage per database, for example to prevent a database from consuming all pool storage. This setting can be configured independently for each database. 
- -The following table describes per database properties for pooled databases. - -| Property | Description | -|:--- |:--- | -| Max DTUs per database |The maximum number of DTUs that any database in the pool may use, if available based on utilization by other databases in the pool. Max DTUs per database is not a resource guarantee for a database. If the workload in each database does not need all available pool resources to perform adequately, consider setting max DTUs per database to prevent a single database from monopolizing pool resources. Some degree of over-committing is expected since the pool generally assumes hot and cold usage patterns for databases, where all databases are not simultaneously peaking. | -| Min DTUs per database |The minimum number of DTUs reserved for any database in the pool. Consider setting a min DTUs per database when you want to guarantee resource availability for each database regardless of resource consumption by other databases in the pool. The min DTUs per database may be set to 0, and is also the default value. This property is set to anywhere between 0 and the average DTUs utilization per database.| -| Max storage per database |The maximum database size set by the user for a database in a pool. Pooled databases share allocated pool storage, so the size a database can reach is limited to the smaller of remaining pool storage and maximum database size. Maximum database size refers to the maximum size of the data files and does not include the space used by the log file. | - - -> [!IMPORTANT] -> Because resources in an elastic pool are finite, setting min DTUs per database to a value greater than 0 implicitly limits resource utilization by each database. If, at a point in time, most databases in a pool are idle, resources reserved to satisfy the min DTUs guarantee are not available to databases active at that point in time. 
-> -> Additionally, setting min DTUs per database to a value greater than 0 implicitly limits the number of databases that can be added to the pool. For example, if you set the min DTUs to 100 in a 400 DTU pool, it means that you will not be able to add more than 4 databases to the pool, because 100 DTUs are reserved for each database. -> - -While the per database properties are expressed in DTUs, they also govern consumption of other resource types, such as data IO, log IO, buffer pool memory, and worker threads. As you adjust min and max per database DTUs values, reservations and limits for all resource types are adjusted proportionally. - -Min and max per database DTU values apply to resource consumption by user workloads, but not to resource consumption by internal processes. For example, for a database with a per database max DTU set to half of the pool eDTU, user workload cannot consume more than one half of the buffer pool memory. However, this database can still take advantage of pages in the buffer pool that were loaded by internal processes. For more information, see [Resource consumption by user workloads and internal processes](resource-limits-logical-server.md#resource-consumption-by-user-workloads-and-internal-processes). 
- -## Tempdb sizes - -The following table lists tempdb sizes for single databases in Azure SQL Database: - -|Service-level objective|Maximum `tempdb` data file size (GB)|Number of `tempdb` data files|Maximum `tempdb` data size (GB)| -|---|---:|---:|---:| -|Basic Elastic Pools (all DTU configurations)|13.9|12|166.7| -|Standard Elastic Pools (50 eDTU)|13.9|12|166.7| -|Standard Elastic Pools (100 eDTU)|32|1|32| -|Standard Elastic Pools (200 eDTU)|32|2|64| -|Standard Elastic Pools (300 eDTU)|32|3|96| -|Standard Elastic Pools (400 eDTU)|32|3|96| -|Standard Elastic Pools (800 eDTU)|32|6|192| -|Standard Elastic Pools (1200 eDTU)|32|10|320| -|Standard Elastic Pools (1600-3000 eDTU)|32|12|384| -|Premium Elastic Pools (all DTU configurations)|13.9|12|166.7| - - -## Next steps - -* For vCore resource limits for a single database, see [resource limits for single databases using the vCore purchasing model](resource-limits-vcore-single-databases.md) -* For DTU resource limits for a single database, see [resource limits for single databases using the DTU purchasing model](resource-limits-dtu-single-databases.md) -* For vCore resource limits for elastic pools, see [resource limits for elastic pools using the vCore purchasing model](resource-limits-vcore-elastic-pools.md) -* For resource limits for managed instances in Azure SQL Managed Instance, see [SQL Managed Instance resource limits](../managed-instance/resource-limits.md). -* For information about general Azure limits, see [Azure subscription and service limits, quotas, and constraints](../../azure-resource-manager/management/azure-subscription-service-limits.md). -* For information about resource limits on a logical SQL server, see [overview of resource limits on a logical SQL server](resource-limits-logical-server.md) for information about limits at the server and subscription levels. 
\ No newline at end of file diff --git a/articles/azure-sql/database/resource-limits-dtu-single-databases.md b/articles/azure-sql/database/resource-limits-dtu-single-databases.md deleted file mode 100644 index 7a63e3613dd31..0000000000000 --- a/articles/azure-sql/database/resource-limits-dtu-single-databases.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: DTU resource limits single databases -description: This page describes some common DTU resource limits for single databases in Azure SQL Database. -services: sql-database -ms.service: sql-database -ms.subservice: service-overview -ms.custom: references_regions, seo-lt-2019, sqldbrb=1 -ms.devlang: -ms.topic: reference -author: dimitri-furman -ms.author: dfurman -ms.reviewer: kendralittle, mathoma -ms.date: 01/31/2022 ---- -# Resource limits for single databases using the DTU purchasing model - Azure SQL Database - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article provides the detailed resource limits for Azure SQL Database single databases using the DTU purchasing model. - -* For DTU purchasing model limits for single databases on a server, see [Overview of resource limits on a server](resource-limits-logical-server.md). -* For DTU purchasing model resource limits for Azure SQL Database, see [DTU resource limits single databases](resource-limits-dtu-single-databases.md) and [DTU resource limits elastic pools](resource-limits-dtu-elastic-pools.md). -* For vCore resource limits, see [vCore resource limits - Azure SQL Database](resource-limits-vcore-single-databases.md) and [vCore resource limits - elastic pools](resource-limits-vcore-elastic-pools.md). -* For more information regarding the different purchasing models, see [Purchasing models and service tiers](purchasing-models.md). - -Each read-only replica has its own resources such as DTUs, workers, and sessions. Each read-only replica is subject to the resource limits detailed later in this article. 
- - -## Single database: Storage sizes and compute sizes - -The following tables show the resources available for a single database at each service tier and compute size. You can set the service tier, compute size, and storage amount for a single database using: - -* [Transact-SQL](single-database-manage.md#transact-sql-t-sql) via [ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql#overview-sql-database) -* [Azure portal](single-database-manage.md#the-azure-portal) -* [PowerShell](single-database-manage.md#powershell) -* [Azure CLI](single-database-manage.md#azure-cli) -* [REST API](single-database-manage.md#rest-api) - -> [!IMPORTANT] -> For scaling guidance and considerations, see [Scale a single database](single-database-scale.md) - -### Basic service tier - -| **Compute size** | **Basic** | -| :--- | --: | -| Max DTUs | 5 | -| Included storage (GB) | 2 | -| Max storage (GB) | 2 | -| Max in-memory OLTP storage (GB) |N/A | -| Max concurrent workers | 30 | -| Max concurrent sessions | 300 | - - -> [!IMPORTANT] -> The Basic service tier provides less than one vCore (CPU). For CPU-intensive workloads, a service tier of S3 or greater is recommended. -> ->Regarding data storage, the Basic service tier is placed on Standard Page Blobs. Standard Page Blobs use hard disk drive (HDD)-based storage media and are best suited for development, testing, and other infrequently accessed workloads that are less sensitive to performance variability. 
-> - -### Standard service tier - -| **Compute size** | **S0** | **S1** | **S2** | **S3** | -| :--- |---:| ---:|---:|---:| -| Max DTUs | 10 | 20 | 50 | 100 | -| Included storage (GB) 1 | 250 | 250 | 250 | 250 | -| Max storage (GB) | 250 | 250 | 250 | 1024 | -| Max in-memory OLTP storage (GB) | N/A | N/A | N/A | N/A | -| Max concurrent workers | 60 | 90 | 120 | 200 | -| Max concurrent sessions |600 | 900 | 1200 | 2400 | - - -1 See [SQL Database pricing options](https://azure.microsoft.com/pricing/details/sql-database/single/) for details on additional cost incurred due to any extra storage provisioned. - -> [!IMPORTANT] -> The Standard S0, S1 and S2 tiers provide less than one vCore (CPU). For CPU-intensive workloads, a service tier of S3 or greater is recommended. -> ->Regarding data storage, the Standard S0 and S1 service tiers are placed on Standard Page Blobs. Standard Page Blobs use hard disk drive (HDD)-based storage media and are best suited for development, testing, and other infrequently accessed workloads that are less sensitive to performance variability. -> - -### Standard service tier (continued) - -| **Compute size** | **S4** | **S6** | **S7** | **S9** | **S12** | -| :--- |---:| ---:|---:|---:|---:| -| Max DTUs | 200 | 400 | 800 | 1600 | 3000 | -| Included storage (GB) 1 | 250 | 250 | 250 | 250 | 250 | -| Max storage (GB) | 1024 | 1024 | 1024 | 1024 | 1024 | -| Max in-memory OLTP storage (GB) | N/A | N/A | N/A | N/A |N/A | -| Max concurrent workers | 400 | 800 | 1600 | 3200 |6000 | -| Max concurrent sessions |4800 | 9600 | 19200 | 30000 |30000 | - - -1 See [SQL Database pricing options](https://azure.microsoft.com/pricing/details/sql-database/single/) for details on additional cost incurred due to any extra storage provisioned. 
- -### Premium service tier - -| **Compute size** | **P1** | **P2** | **P4** | **P6** | **P11** | **P15** | -| :--- |---:|---:|---:|---:|---:|---:| -| Max DTUs | 125 | 250 | 500 | 1000 | 1750 | 4000 | -| Included storage (GB) 1 | 500 | 500 | 500 | 500 | 4096 2 | 4096 2 | -| Max storage (GB) | 1024 | 1024 | 1024 | 1024 | 4096 2 | 4096 2 | -| Max in-memory OLTP storage (GB) | 1 | 2 | 4 | 8 | 14 | 32 | -| Max concurrent workers | 200 | 400 | 800 | 1600 | 2800 | 6400 | -| Max concurrent sessions | 30000 | 30000 | 30000 | 30000 | 30000 | 30000 | - - -1 See [SQL Database pricing options](https://azure.microsoft.com/pricing/details/sql-database/single/) for details on additional cost incurred due to any extra storage provisioned. - -2 From 1024 GB up to 4096 GB in increments of 256 GB. - -> [!IMPORTANT] -> More than 1 TB of storage in the Premium tier is currently available in all regions except: China East, China North, Germany Central, and Germany Northeast. In these regions, the storage max in the Premium tier is limited to 1 TB. For more information, see [P11-P15 current limitations](single-database-scale.md#p11-and-p15-constraints-when-max-size-greater-than-1-tb). - -> [!NOTE] -> For additional information on storage limits in the Premium service tier, see [Storage space governance](resource-limits-logical-server.md#storage-space-governance). 
- -## Tempdb sizes - -The following table lists tempdb sizes for single databases in Azure SQL Database: - -|Service-level objective|Maximum `tempdb` data file size (GB)|Number of `tempdb` data files|Maximum `tempdb` data size (GB)| -|---|---:|---:|---:| -|Basic|13.9|1|13.9| -|S0|13.9|1|13.9| -|S1|13.9|1|13.9| -|S2|13.9|1|13.9| -|S3|32|1|32 -|S4|32|2|64| -|S6|32|3|96| -|S7|32|6|192| -|S9|32|12|384| -|S12|32|12|384| -|P1|13.9|12|166.7| -|P2|13.9|12|166.7| -|P4|13.9|12|166.7| -|P6|13.9|12|166.7| -|P11|13.9|12|166.7| -|P15|13.9|12|166.7| - - -## Next steps - -- For vCore resource limits for a single database, see [resource limits for single databases using the vCore purchasing model](resource-limits-vcore-single-databases.md) -- For vCore resource limits for elastic pools, see [resource limits for elastic pools using the vCore purchasing model](resource-limits-vcore-elastic-pools.md) -- For DTU resource limits for elastic pools, see [resource limits for elastic pools using the DTU purchasing model](resource-limits-dtu-elastic-pools.md) -- For resource limits for managed instances in Azure SQL Managed Instance, see [SQL Managed Instance resource limits](../managed-instance/resource-limits.md). -- For information about general Azure limits, see [Azure subscription and service limits, quotas, and constraints](../../azure-resource-manager/management/azure-subscription-service-limits.md). -- For information about resource limits on a logical SQL server, see [overview of resource limits on a logical SQL server](resource-limits-logical-server.md) for information about limits at the server and subscription levels. 
\ No newline at end of file diff --git a/articles/azure-sql/database/resource-limits-logical-server.md b/articles/azure-sql/database/resource-limits-logical-server.md deleted file mode 100644 index 1458454d1dcd3..0000000000000 --- a/articles/azure-sql/database/resource-limits-logical-server.md +++ /dev/null @@ -1,249 +0,0 @@ ---- -title: Resource management in Azure SQL Database -description: This article provides an overview of resource management in Azure SQL Database with information about what happens when resource limits are reached. -services: sql-database -ms.service: sql-database -ms.subservice: service-overview -ms.custom: -ms.devlang: -ms.topic: reference -author: dimitri-furman -ms.author: dfurman -ms.reviewer: kendralittle, mathoma -ms.date: 03/31/2022 ---- - -# Resource management in Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!div class="op_single_selector"] -> * [Azure SQL Database](resource-limits-logical-server.md) -> * [Azure SQL Managed Instance](../managed-instance/resource-limits.md) - -This article provides an overview of resource management in Azure SQL Database. It provides information on what happens when resource limits are reached, and describes resource governance mechanisms that are used to enforce these limits. - -For specific resource limits per pricing tier (also known as service objective) for single databases, refer to either [DTU-based single database resource limits](resource-limits-dtu-single-databases.md) or [vCore-based single database resource limits](resource-limits-vcore-single-databases.md). For elastic pool resource limits, refer to either [DTU-based elastic pool resource limits](resource-limits-dtu-elastic-pools.md) or [vCore-based elastic pool resource limits](resource-limits-vcore-elastic-pools.md). 
- -> [!TIP] -> For Azure Synapse Analytics dedicated SQL pool limits, see [capacity limits](../../synapse-analytics/sql-data-warehouse/sql-data-warehouse-service-capacity-limits.md) and [memory and concurrency limits](../../synapse-analytics/sql-data-warehouse/memory-concurrency-limits.md). - -## Logical server limits - - - -| Resource | Limit | -| :--- | :--- | -| Databases per [logical server](logical-servers.md) | 5000 | -| Default number of logical servers per subscription in a region | 20 | -| Max number of logical servers per subscription in a region | 250 | -| DTU / eDTU quota per logical server | 54,000 | -| vCore quota per logical server | 540 | -| Max elastic pools per logical server | Limited by number of DTUs or vCores. For example, if each pool is 1000 DTUs, then a server can support 54 pools.| - -> [!IMPORTANT] -> As the number of databases approaches the limit per logical server, the following can occur: -> -> - Increasing latency in running queries against the master database. This includes views of resource utilization statistics such as `sys.resource_stats`. -> - Increasing latency in management operations and rendering portal viewpoints that involve enumerating databases in the server. - -> [!NOTE] -> To obtain more DTU/eDTU quota, vCore quota, or more logical servers than the default number, submit a new support request in the Azure portal. For more information, see [Request quota increases for Azure SQL Database](quota-increase-request.md). - -## What happens when resource limits are reached - -### Compute CPU - -When database compute CPU utilization becomes high, query latency increases, and queries can even time out. Under these conditions, queries may be queued by the service and are provided resources for execution as resources become free. -When encountering high compute utilization, mitigation options include: - -- Increasing the compute size of the database or elastic pool to provide the database with more compute resources. 
See [Scale single database resources](single-database-scale.md) and [Scale elastic pool resources](elastic-pool-scale.md). -- Optimizing queries to reduce CPU resource utilization of each query. For more information, see [Query Tuning/Hinting](performance-guidance.md#query-tuning-and-hinting). - -### Storage - -When data space used reaches the maximum data size limit, either at the database level or at the elastic pool level, inserts and updates that increase data size fail and clients receive an [error message](troubleshoot-common-errors-issues.md). SELECT and DELETE statements remain unaffected. - -In Premium and Business Critical service tiers, clients also receive an error message if combined storage consumption by data, transaction log, and `tempdb` for a single database or an elastic pool exceeds maximum local storage size. For more information, see [Storage space governance](#storage-space-governance). - -When encountering high space utilization, mitigation options include: - -- Increase maximum data size of the database or elastic pool, or scale up to a service objective with a higher maximum data size limit. See [Scale single database resources](single-database-scale.md) and [Scale elastic pool resources](elastic-pool-scale.md). -- If the database is in an elastic pool, then alternatively the database can be moved outside of the pool, so that its storage space isn't shared with other databases. -- Shrink a database to reclaim unused space. In elastic pools, shrinking a database provides more storage for other databases in the pool. For more information, see [Manage file space in Azure SQL Database](file-space-manage.md). -- Check if high space utilization is due to a spike in the size of Persistent Version Store (PVS). PVS is a part of each database, and is used to implement [Accelerated Database Recovery](../accelerated-database-recovery.md). 
To determine current PVS size, see [PVS troubleshooting](/sql/relational-databases/accelerated-database-recovery-management#troubleshooting). A common reason for large PVS size is a transaction that is open for a long time (hours), preventing cleanup of older row versions in PVS. -- For databases and elastic pools in Premium and Business Critical service tiers that consume large amounts of storage, you may receive an out-of-space error even though used space in the database or elastic pool is below its maximum data size limit. This may happen if `tempdb` or transaction log files consume a large amount of storage toward the maximum local storage limit. [Fail over](high-availability-sla.md#testing-application-fault-resiliency) the database or elastic pool to reset `tempdb` to its initial smaller size, or [shrink](file-space-manage.md#shrinking-transaction-log-file) transaction log to reduce local storage consumption. - -### Sessions, workers, and requests - -Sessions, workers, and requests are defined as follows: - -- A session represents a process connected to the database engine. -- A request is the logical representation of a query or batch. A request is issued by a client connected to a session. Over time, multiple requests may be issued on the same session. -- A worker thread, also known as a worker or thread, is a logical representation of an operating system thread. A request may have many workers when executed with a parallel query execution plan, or a single worker when executed with a serial (single threaded) execution plan. Workers are also required to support activities outside of requests: for example, a worker is required to process a login request as a session connects. - -For more information about these concepts, see the [Thread and Task Architecture Guide](/sql/relational-databases/thread-and-task-architecture-guide). - -The maximum numbers of sessions and workers are determined by the service tier and compute size.
New requests are rejected when session or worker limits are reached, and clients receive an error message. While the number of connections can be controlled by the application, the number of concurrent workers is often harder to estimate and control. This is especially true during peak load periods when database resource limits are reached and workers pile up due to longer running queries, large blocking chains, or excessive query parallelism. - -> [!NOTE] -> The initial offering of Azure SQL Database supported only single threaded queries. At that time, the number of requests was always equivalent to the number of workers. Error message 10928 in Azure SQL Database contains the wording "The request limit for the database is *N* and has been reached" for backwards compatibility purposes. The limit reached is actually the number of workers. If your max degree of parallelism (MAXDOP) setting is equal to zero or is greater than one, the number of workers may be much higher than the number of requests, and the limit may be reached much sooner than when MAXDOP is equal to one. Learn more about error 10928 in [Resource governance errors](troubleshoot-common-errors-issues.md#resource-governance-errors). - -You can mitigate approaching or hitting worker or session limits by: - -- Increasing the service tier or compute size of the database or elastic pool. See [Scale single database resources](single-database-scale.md) and [Scale elastic pool resources](elastic-pool-scale.md). -- Optimizing queries to reduce resource utilization if the cause of increased workers is contention for compute resources. For more information, see [Query Tuning/Hinting](performance-guidance.md#query-tuning-and-hinting). -- Optimizing the query workload to reduce the number of occurrences and duration of query blocking. For more information, see [Understand and resolve Azure SQL blocking problems](understand-resolve-blocking.md). 
-- Reducing the [MAXDOP](configure-max-degree-of-parallelism.md) setting when appropriate. - -Find worker and session limits for Azure SQL Database by service tier and compute size: - -- [Resource limits for single databases using the vCore purchasing model](resource-limits-vcore-single-databases.md) -- [Resource limits for elastic pools using the vCore purchasing model](resource-limits-vcore-elastic-pools.md) -- [Resource limits for single databases using the DTU purchasing model](resource-limits-dtu-single-databases.md) -- [Resources limits for elastic pools using the DTU purchasing model](resource-limits-dtu-elastic-pools.md) - -Learn more about troubleshooting specific errors for session or worker limits in [Resource governance errors](troubleshoot-common-errors-issues.md#resource-governance-errors). - -### Memory - -Unlike other resources (CPU, workers, storage), reaching the memory limit does not negatively impact query performance, and does not cause errors and failures. As described in detail in [Memory Management Architecture Guide](/sql/relational-databases/memory-management-architecture-guide), the database engine often uses all available memory, by design. Memory is used primarily for caching data, to avoid slower storage access. Thus, higher memory utilization usually improves query performance due to faster reads from memory, rather than slower reads from storage. - -After database engine startup, as the workload starts reading data from storage, the database engine aggressively caches data in memory. After this initial ramp-up period, it is common and expected to see the `avg_memory_usage_percent` and `avg_instance_memory_percent` columns in [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database) to be close or equal to 100%, particularly for databases that are not idle, and do not fully fit in memory. 
- -Besides the data cache, memory is used in other components of the database engine. When there is demand for memory and all available memory has been used by the data cache, the database engine will dynamically reduce data cache size to make memory available to other components, and will dynamically grow data cache when other components release memory. - -In rare cases, a sufficiently demanding workload may cause an insufficient memory condition, leading to out-of-memory errors. This can happen at any level of memory utilization between 0% and 100%. This is more likely to occur on smaller compute sizes that have proportionally smaller memory limits, and/or with workloads using more memory for query processing, such as in [dense elastic pools](elastic-pool-resource-management.md). - -When encountering out-of-memory errors, mitigation options include: -- Review the details of the OOM condition in [sys.dm_os_out_of_memory_events](/sql/relational-databases/system-dynamic-management-views/sys-dm-os-out-of-memory-events). -- Increasing the service tier or compute size of the database or elastic pool. See [Scale single database resources](single-database-scale.md) and [Scale elastic pool resources](elastic-pool-scale.md). -- Optimizing queries and configuration to reduce memory utilization. Common solutions are described in the following table. - -|Solution|Description| -| :----- | :----- | -|Reduce the size of memory grants|For more information about memory grants, see the [Understanding SQL Server memory grant](https://techcommunity.microsoft.com/t5/sql-server/understanding-sql-server-memory-grant/ba-p/383595) blog post. A common solution for avoiding excessively large memory grants is keeping [statistics](/sql/relational-databases/statistics/statistics) up to date. This results in more accurate estimates of memory consumption by the query engine, avoiding unnecessarily large memory grants.

    By default, in databases using compatibility level 140 and above, the database engine may automatically adjust memory grant size using [Batch mode memory grant feedback](/sql/relational-databases/performance/intelligent-query-processing#batch-mode-memory-grant-feedback). Similarly, in databases using compatibility level 150 and above, the database engine also uses [Row mode memory grant feedback](/sql/relational-databases/performance/intelligent-query-processing#row-mode-memory-grant-feedback), for more common row mode queries. This built-in functionality helps avoid out-of-memory errors due to unnecessarily large memory grants.| -|Reduce the size of query plan cache|The database engine caches query plans in memory, to avoid compiling a query plan for every query execution. To avoid query plan cache bloat caused by caching plans that are only used once, make sure to use parameterized queries, and consider enabling OPTIMIZE_FOR_AD_HOC_WORKLOADS [database-scoped configuration](/sql/t-sql/statements/alter-database-scoped-configuration-transact-sql).| -|Reduce the size of lock memory|The database engine uses memory for [locks](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#Lock_Engine). When possible, avoid large transactions that may acquire a large number of locks and cause high lock memory consumption.| - -## Resource consumption by user workloads and internal processes - -Azure SQL Database requires compute resources to implement core service features such as high availability and disaster recovery, database backup and restore, monitoring, Query Store, Automatic tuning, etc. The system sets aside a certain limited portion of the overall resources for these internal processes using [resource governance](#resource-governance) mechanisms, making the remainder of resources available for user workloads. At times when internal processes aren't using compute resources, the system makes them available to user workloads. 
- -Total CPU and memory consumption by user workloads and internal processes is reported in the [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database) and [sys.resource_stats](/sql/relational-databases/system-catalog-views/sys-resource-stats-azure-sql-database) views, in `avg_instance_cpu_percent` and `avg_instance_memory_percent` columns. This data is also reported via the `sqlserver_process_core_percent` and `sqlserver_process_memory_percent` Azure Monitor metrics, for [single databases](../../azure-monitor/essentials/metrics-supported.md#microsoftsqlserversdatabases) and [elastic pools](../../azure-monitor/essentials/metrics-supported.md#microsoftsqlserverselasticpools) at the pool level. - -CPU and memory consumption by user workloads in each database is reported in the [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database) and [sys.resource_stats](/sql/relational-databases/system-catalog-views/sys-resource-stats-azure-sql-database) views, in `avg_cpu_percent` and `avg_memory_usage_percent` columns. For elastic pools, pool-level resource consumption is reported in the [sys.elastic_pool_resource_stats](/sql/relational-databases/system-catalog-views/sys-elastic-pool-resource-stats-azure-sql-database) view. User workload CPU consumption is also reported via the `cpu_percent` Azure Monitor metric, for [single databases](../../azure-monitor/essentials/metrics-supported.md#microsoftsqlserversdatabases) and [elastic pools](../../azure-monitor/essentials/metrics-supported.md#microsoftsqlserverselasticpools) at the pool level. 
- -A more detailed breakdown of recent resource consumption by user workloads and internal processes is reported in the [sys.dm_resource_governor_resource_pools_history_ex](/sql/relational-databases/system-dynamic-management-views/sys-dm-resource-governor-resource-pools-history-ex-azure-sql-database) and [sys.dm_resource_governor_workload_groups_history_ex](/sql/relational-databases/system-dynamic-management-views/sys-dm-resource-governor-workload-groups-history-ex-azure-sql-database) views. For details on resource pools and workload groups referenced in these views, see [Resource governance](#resource-governance). These views report on resource utilization by user workloads and specific internal processes in the associated resource pools and workload groups. - -In the context of performance monitoring and troubleshooting, it's important to consider both **user CPU consumption** (`avg_cpu_percent`, `cpu_percent`), and **total CPU consumption** by user workloads and internal processes (`avg_instance_cpu_percent`,`sqlserver_process_core_percent`). - -**User CPU consumption** is calculated as a percentage of the user workload limits in each service objective. **User CPU utilization** at 100% indicates that the user workload has reached the limit of the service objective. However, when **total CPU consumption** reaches the 70-100% range, it's possible to see user workload throughput flattening out and query latency increasing, even if reported **user CPU consumption** remains significantly below 100%. This is more likely to occur when using smaller service objectives with a moderate allocation of compute resources, but relatively intense user workloads, such as in [dense elastic pools](elastic-pool-resource-management.md). This can also occur with smaller service objectives when internal processes temporarily require additional resources, for example when creating a new replica of the database, or backing up the database. 
- -When **total CPU consumption** is high, mitigation options are the same as noted in the [Compute CPU](#compute-cpu) section, and include service objective increase and/or user workload optimization. - -## Resource governance - -To enforce resource limits, Azure SQL Database uses a resource governance implementation that is based on SQL Server [Resource Governor](/sql/relational-databases/resource-governor/resource-governor), modified and extended to run in the cloud. In SQL Database, multiple [resource pools](/sql/relational-databases/resource-governor/resource-governor-resource-pool) and [workload groups](/sql/relational-databases/resource-governor/resource-governor-workload-group), with resource limits set at both pool and group levels, provide a [balanced Database-as-a-Service](https://azure.microsoft.com/blog/resource-governance-in-azure-sql-database/). User workload and internal workloads are classified into separate resource pools and workload groups. User workload on the primary and readable secondary replicas, including geo-replicas, is classified into the `SloSharedPool1` resource pool and `UserPrimaryGroup.DBId[N]` workload groups, where `[N]` stands for the database ID value. In addition, there are multiple resource pools and workload groups for various internal workloads. - -In addition to using Resource Governor to govern resources within the database engine, Azure SQL Database also uses Windows [Job Objects](/windows/win32/procthread/job-objects) for process level resource governance, and Windows [File Server Resource Manager (FSRM)](/windows-server/storage/fsrm/fsrm-overview) for storage quota management. - -Azure SQL Database resource governance is hierarchical in nature. 
From top to bottom, limits are enforced at the OS level and at the storage volume level using operating system resource governance mechanisms and Resource Governor, then at the resource pool level using Resource Governor, and then at the workload group level using Resource Governor. Resource governance limits in effect for the current database or elastic pool are reported in the [sys.dm_user_db_resource_governance](/sql/relational-databases/system-dynamic-management-views/sys-dm-user-db-resource-governor-azure-sql-database) view. - -### Data I/O governance - -Data I/O governance is a process in Azure SQL Database used to limit both read and write physical I/O against data files of a database. IOPS limits are set for each service level to minimize the "noisy neighbor" effect, to provide resource allocation fairness in a multi-tenant service, and to stay within the capabilities of the underlying hardware and storage. - -For single databases, workload group limits are applied to all storage I/O against the database. For elastic pools, workload group limits apply to each database in the pool. Additionally, the resource pool limit additionally applies to the cumulative I/O of the elastic pool. In `tempdb`, I/O is subject to workload group limits, with the exception of Basic, Standard, and General Purpose service tier, where higher `tempdb` I/O limits apply. In general, resource pool limits may not be achievable by the workload against a database (either single or pooled), because workload group limits are lower than resource pool limits and limit IOPS/throughput sooner. However, pool limits may be reached by the combined workload against multiple databases in the same pool. - -For example, if a query generates 1000 IOPS without any I/O resource governance, but the workload group maximum IOPS limit is set to 900 IOPS, the query won't be able to generate more than 900 IOPS. 
However, if the resource pool maximum IOPS limit is set to 1500 IOPS, and the total I/O from all workload groups associated with the resource pool exceeds 1500 IOPS, then the I/O of the same query may be reduced below the workload group limit of 900 IOPS. - -The IOPS and throughput max values returned by the [sys.dm_user_db_resource_governance](/sql/relational-databases/system-dynamic-management-views/sys-dm-user-db-resource-governor-azure-sql-database) view act as limits/caps, not as guarantees. Further, resource governance doesn't guarantee any specific storage latency. The best achievable latency, IOPS, and throughput for a given user workload depend not only on I/O resource governance limits, but also on the mix of I/O sizes used, and on the capabilities of the underlying storage. SQL Database uses I/Os that vary in size between 512 KB and 4 MB. For the purposes of enforcing IOPS limits, every I/O is accounted regardless of its size, with the exception of databases with data files in Azure Storage. In that case, IOs larger than 256 KB are accounted as multiple 256-KB I/Os, to align with Azure Storage I/O accounting. - -For Basic, Standard, and General Purpose databases, which use data files in Azure Storage, the `primary_group_max_io` value may not be achievable if a database doesn't have enough data files to cumulatively provide this number of IOPS, or if data isn't distributed evenly across files, or if the performance tier of underlying blobs limits IOPS/throughput below the resource governance limits. Similarly, with small log IOs generated by frequent transaction commits, the `primary_max_log_rate` value may not be achievable by a workload due to the IOPS limit on the underlying Azure Storage blob. For databases using Azure Premium Storage, Azure SQL Database uses sufficiently large storage blobs to obtain needed IOPS/throughput, regardless of database size. For larger databases, multiple data files are created to increase total IOPS/throughput capacity.
- -Resource utilization values such as `avg_data_io_percent` and `avg_log_write_percent`, reported in the [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database), [sys.resource_stats](/sql/relational-databases/system-catalog-views/sys-resource-stats-azure-sql-database), and [sys.elastic_pool_resource_stats](/sql/relational-databases/system-catalog-views/sys-elastic-pool-resource-stats-azure-sql-database) views, are calculated as percentages of maximum resource governance limits. Therefore, when factors other than resource governance limit IOPS/throughput, it's possible to see IOPS/throughput flattening out and latencies increasing as the workload increases, even though reported resource utilization remains below 100%. - -To determine read and write IOPS, throughput, and latency per database file, use the [sys.dm_io_virtual_file_stats()](/sql/relational-databases/system-dynamic-management-views/sys-dm-io-virtual-file-stats-transact-sql) function. This function surfaces all I/O against the database, including background I/O that isn't accounted towards `avg_data_io_percent`, but uses IOPS and throughput of the underlying storage, and can impact observed storage latency. The function reports additional latency that may be introduced by I/O resource governance for reads and writes, in the `io_stall_queued_read_ms` and `io_stall_queued_write_ms` columns respectively. - -### Transaction log rate governance - -Transaction log rate governance is a process in Azure SQL Database used to limit high ingestion rates for workloads such as bulk insert, SELECT INTO, and index builds. These limits are tracked and enforced at the subsecond level to the rate of log record generation, limiting throughput regardless of how many IOs may be issued against data files. Transaction log generation rates currently scale linearly up to a point that is hardware-dependent and service tier-dependent. 
- -Log rates are set such that they can be achieved and sustained in a variety of scenarios, while the overall system can maintain its functionality with minimized impact to the user load. Log rate governance ensures that transaction log backups stay within published recoverability SLAs. This governance also prevents an excessive backlog on secondary replicas, which could otherwise lead to longer than expected downtime during failovers. - -The actual physical IOs to transaction log files are not governed or limited. As log records are generated, each operation is evaluated and assessed for whether it should be delayed in order to maintain a maximum desired log rate (MB/s). The delays aren't added when the log records are flushed to storage; rather, log rate governance is applied during log rate generation itself. - -The actual log generation rates imposed at run time may also be influenced by feedback mechanisms, temporarily reducing the allowable log rates so the system can stabilize. Log file space management (to avoid running into out-of-log-space conditions) and data replication mechanisms can temporarily decrease the overall system limits.
- -Log rate governor traffic shaping is surfaced via the following wait types (exposed in the [sys.dm_exec_requests](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-requests-transact-sql) and [sys.dm_os_wait_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-os-wait-stats-transact-sql) views): - -| Wait Type | Notes | -| :--- | :--- | -| LOG_RATE_GOVERNOR | Database limiting | -| POOL_LOG_RATE_GOVERNOR | Pool limiting | -| INSTANCE_LOG_RATE_GOVERNOR | Instance level limiting | -| HADR_THROTTLE_LOG_RATE_SEND_RECV_QUEUE_SIZE | Feedback control, availability group physical replication in Premium/Business Critical not keeping up | -| HADR_THROTTLE_LOG_RATE_LOG_SIZE | Feedback control, limiting rates to avoid an out of log space condition | -| HADR_THROTTLE_LOG_RATE_MISMATCHED_SLO | Geo-replication feedback control, limiting log rate to avoid high data latency and unavailability of geo-secondaries| - -When encountering a log rate limit that is hampering desired scalability, consider the following options: - -- Scale up to a higher service level in order to get the maximum log rate of a service tier, or switch to a different service tier. The [Hyperscale](service-tier-hyperscale.md) service tier provides 100 MB/s log rate regardless of chosen service level. -- If data being loaded is transient, such as staging data in an ETL process, it can be loaded into `tempdb` (which is minimally logged). -- For analytic scenarios, load into a clustered [columnstore](/sql/relational-databases/indexes/columnstore-indexes-overview) table, or a table with indexes that use [data compression](/sql/relational-databases/data-compression/data-compression). This reduces the required log rate. This technique does increase CPU utilization and is only applicable to data sets that benefit from clustered columnstore indexes or data compression. 
- -### Storage space governance - -In Premium and Business Critical service tiers, customer data including *data files*, *transaction log files*, and *tempdb files* is stored on the local SSD storage of the machine hosting the database or elastic pool. Local SSD storage provides high IOPS and throughput, and low I/O latency. In addition to customer data, local storage is used for the operating system, management software, monitoring data and logs, and other files necessary for system operation. - -The size of local storage is finite and depends on hardware capabilities, which determine the **maximum local storage** limit, or local storage set aside for customer data. This limit is set to maximize customer data storage, while ensuring safe and reliable system operation. To find the **maximum local storage** value for each service objective, see resource limits documentation for [single databases](resource-limits-vcore-single-databases.md) and [elastic pools](resource-limits-vcore-elastic-pools.md). - -You can also find this value, and the amount of local storage currently used by a given database or elastic pool, using the following query: - -```sql -SELECT server_name, database_name, slo_name, user_data_directory_space_quota_mb, user_data_directory_space_usage_mb -FROM sys.dm_user_db_resource_governance -WHERE database_id = DB_ID(); -``` - -|Column|Description| -| :----- | :----- | -|`server_name`|Logical server name| -|`database_name`|Database name| -|`slo_name`|Service objective name, including hardware generation| -|`user_data_directory_space_quota_mb`|**Maximum local storage**, in MB| -|`user_data_directory_space_usage_mb`|Current local storage consumption by data files, transaction log files, and `tempdb` files, in MB. Updated every five minutes.| - -This query should be executed in the user database, not in the master database. For elastic pools, the query can be executed in any database in the pool. Reported values apply to the entire pool. 
- -> [!IMPORTANT] -> In Premium and Business Critical service tiers, if the workload attempts to increase combined local storage consumption by data files, transaction log files, and `tempdb` files over the **maximum local storage** limit, an out-of-space error will occur. - -Local SSD storage is also used by databases in service tiers other than Premium and Business Critical for the tempdb database and Hyperscale RBPEX cache. As databases are created, deleted, and increase or decrease in size, total local storage consumption on a machine fluctuates over time. If the system detects that available local storage on a machine is low, and a database or an elastic pool is at risk of running out of space, it will move the database or elastic pool to a different machine with sufficient local storage available. - -This move occurs in an online fashion, similarly to a database scaling operation, and has a similar [impact](single-database-scale.md#impact), including a short (seconds) failover at the end of the operation. This failover terminates open connections and rolls back transactions, potentially impacting applications using the database at that time. - -Because all data is copied to local storage volumes on different machines, moving larger databases in Premium and Business Critical service tiers may require a substantial amount of time. During that time, if local space consumption by a database or an elastic pool, or by the `tempdb` database grows rapidly, the risk of running out of space increases. The system initiates database movement in a balanced fashion to minimize out-of-space errors while avoiding unnecessary failovers. - -## Tempdb sizes - -Size limits for `tempdb` in Azure SQL Database depend on the purchasing and deployment model. 
- -To learn more, review `tempdb` size limits for: -- vCore purchasing model: [single databases](resource-limits-vcore-single-databases.md), [pooled databases](resource-limits-vcore-elastic-pools.md) -- DTU purchasing model: [single databases](resource-limits-dtu-single-databases.md#tempdb-sizes), [pooled databases](resource-limits-dtu-elastic-pools.md#tempdb-sizes). - -## Next steps - -- For information about general Azure limits, see [Azure subscription and service limits, quotas, and constraints](../../azure-resource-manager/management/azure-subscription-service-limits.md). -- For information about DTUs and eDTUs, see [DTUs and eDTUs](purchasing-models.md#dtu-purchasing-model). -- For information about `tempdb` size limits, see [single vCore databases](resource-limits-vcore-single-databases.md), [pooled vCore databases](resource-limits-vcore-elastic-pools.md), [single DTU databases](resource-limits-dtu-single-databases.md#tempdb-sizes), and [pooled DTU databases](resource-limits-dtu-elastic-pools.md#tempdb-sizes). diff --git a/articles/azure-sql/database/resource-limits-vcore-elastic-pools.md b/articles/azure-sql/database/resource-limits-vcore-elastic-pools.md deleted file mode 100644 index 2b2177af872a7..0000000000000 --- a/articles/azure-sql/database/resource-limits-vcore-elastic-pools.md +++ /dev/null @@ -1,589 +0,0 @@ ---- -title: Elastic pool vCore resource limits -description: This page describes some common vCore resource limits for elastic pools in Azure SQL Database. 
-services: sql-database -ms.service: sql-database -ms.subservice: elastic-pools -ms.custom: sqldbrb=1, references_regions -ms.devlang: -ms.topic: reference -author: dimitri-furman -ms.author: dfurman -ms.reviewer: kendralittle, mathoma -ms.date: 04/25/2022 ---- -# Resource limits for elastic pools using the vCore purchasing model -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article provides the detailed resource limits for Azure SQL Database elastic pools and pooled databases using the vCore purchasing model. - -* For DTU purchasing model limits for single databases on a server, see [Overview of resource limits on a server](resource-limits-logical-server.md). -* For DTU purchasing model resource limits for Azure SQL Database, see [DTU resource limits single databases](resource-limits-dtu-single-databases.md) and [DTU resource limits elastic pools](resource-limits-dtu-elastic-pools.md). -* For vCore resource limits, see [vCore resource limits - Azure SQL Database](resource-limits-vcore-single-databases.md) and [vCore resource limits - elastic pools](resource-limits-vcore-elastic-pools.md). -* For more information regarding the different purchasing models, see [Purchasing models and service tiers](purchasing-models.md). - -> [!IMPORTANT] -> Under some circumstances, you may need to shrink a database to reclaim unused space. For more information, see [Manage file space in Azure SQL Database](file-space-manage.md). - -Each read-only replica of an elastic pool has its own resources, such as vCores, memory, data IOPS, TempDB, workers, and sessions. Each read-only replica is subject to elastic pool resource limits detailed later in this article. 
- -You can set the service tier, compute size (service objective), and storage amount using: - -* [Transact-SQL](elastic-pool-scale.md) via [ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql#overview-sql-database) -* [Azure portal](elastic-pool-manage.md#azure-portal) -* [PowerShell](elastic-pool-manage.md#powershell) -* [Azure CLI](elastic-pool-manage.md#azure-cli) -* [REST API](elastic-pool-manage.md#rest-api) - -> [!IMPORTANT] -> For scaling guidance and considerations, see [Scale an elastic pool](elastic-pool-scale.md). - -If all vCores of an elastic pool are busy, then each database in the pool receives an equal amount of compute resources to process queries. Azure SQL Database provides resource sharing fairness between databases by ensuring equal slices of compute time. Elastic pool resource sharing fairness is in addition to any amount of resource otherwise guaranteed to each database when the vCore min per database is set to a non-zero value. - -## General purpose - provisioned compute - Gen4 - - - -> [!IMPORTANT] -> New Gen4 databases are no longer supported in the Australia East or Brazil South regions. - -### General purpose service tier: Generation 4 compute platform (part 1) - -|Compute size (service objective)|GP_Gen4_1|GP_Gen4_2|GP_Gen4_3|GP_Gen4_4|GP_Gen4_5|GP_Gen4_6 -|:--- | --: |--: |--: |--: |--: |--: | -|Compute generation|Gen4|Gen4|Gen4|Gen4|Gen4|Gen4| -|vCores|1|2|3|4|5|6| -|Memory (GB)|7|14|21|28|35|42| -|Max number DBs per pool 1|100|200|500|500|500|500| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A|N/A|N/A| -|Max data size (GB)|512|756|1536|1536|1536|2048| -|Max log size 2|154|227|461|461|461|614| -|TempDB max data size (GB)|32|64|96|128|160|192| -|Storage type|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage| -|IO latency (approximate)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)| -|Max data IOPS per pool 3 |400|800|1200|1600|2000|2400| -|Max log rate per pool (MBps)|6|12|18|24|30|36| -|Max concurrent workers per pool4 |210|420|630|840|1050|1260| -|Max concurrent logins per pool 4 |210|420|630|840|1050|1260| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000| -|Min/max elastic pool vCore choices per database|0, 0.25, 0.5, 1|0, 0.25, 0.5, 1, 2|0, 0.25, 0.5, 1...3|0, 0.25, 0.5, 1...4|0, 0.25, 0.5, 1...5|0, 0.25, 0.5, 1...6| -|Number of replicas|1|1|1|1|1|1| -|Multi-AZ|N/A|N/A|N/A|N/A|N/A|N/A| -|Read Scale-out|N/A|N/A|N/A|N/A|N/A|N/A| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -2 For documented max data size values. Reducing max data size reduces max log size proportionally. - -3 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -4 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. 
- -### General purpose service tier: Generation 4 compute platform (part 2) - -|Compute size (service objective)|GP_Gen4_7|GP_Gen4_8|GP_Gen4_9|GP_Gen4_10|GP_Gen4_16|GP_Gen4_24| -|:--- | --: |--: |--: |--: |--: |--: | -|Compute generation|Gen4|Gen4|Gen4|Gen4|Gen4|Gen4| -|vCores|7|8|9|10|16|24| -|Memory (GB)|49|56|63|70|112|159.5| -|Max number DBs per pool 1|500|500|500|500|500|500| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A|N/A|N/A| -|Max data size (GB)|2048|2048|2048|2048|3584|4096| -|Max log size (GB) 2|614|614|614|614|1075|1229| -|TempDB max data size (GB)|224|256|288|320|512|768| -|Storage type|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage| -|IO latency (approximate)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)| -|Max data IOPS per pool 3|2800|3200|3600|4000|6400|9600| -|Max log rate per pool (MBps)|42|48|54|60|62.5|62.5| -|Max concurrent workers per pool 4|1470|1680|1890|2100|3360|5040| -|Max concurrent logins pool 4|1470|1680|1890|2100|3360|5040| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000| -|Min/max elastic pool vCore choices per database|0, 0.25, 0.5, 1...7|0, 0.25, 0.5, 1...8|0, 0.25, 0.5, 1...9|0, 0.25, 0.5, 1...10|0, 0.25, 0.5, 1...10, 16|0, 0.25, 0.5, 1...10, 16, 24| -|Number of replicas|1|1|1|1|1|1| -|Multi-AZ|N/A|N/A|N/A|N/A|N/A|N/A| -|Read Scale-out|N/A|N/A|N/A|N/A|N/A|N/A| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -2 For documented max data size values. Reducing max data size reduces max log size proportionally. - -3 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -4 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. 
- -## General purpose - provisioned compute - Gen5 - -### General purpose service tier: Generation 5 compute platform (part 1) - -|Compute size (service objective)|GP_Gen5_2|GP_Gen5_4|GP_Gen5_6|GP_Gen5_8|GP_Gen5_10|GP_Gen5_12|GP_Gen5_14| -|:--- | --: |--: |--: |--: |---: | --: |--: | -|Compute generation|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5| -|vCores|2|4|6|8|10|12|14| -|Memory (GB)|10.4|20.8|31.1|41.5|51.9|62.3|72.7| -|Max number DBs per pool 1|100|200|500|500|500|500|500| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A|N/A|N/A|N/A| -|Max data size (GB)|512|756|1536|2048|2048|2048|2048| -|Max log size (GB) 2|154|227|461|461|461|614|614| -|TempDB max data size (GB)|64|128|192|256|320|384|448| -|Storage type|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage| -|IO latency (approximate)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)| -|Max data IOPS per pool 3|800|1600|2400|3200|4000|4800|5600| -|Max log rate per pool (MBps)|12|24|36|48|60|62.5|62.5| -|Max concurrent workers per pool 4|210|420|630|840|1050|1260|1470| -|Max concurrent logins per pool 4|210|420|630|840|1050|1260|1470| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000|30,000| -|Min/max elastic pool vCore choices per database|0, 0.25, 0.5, 1, 2|0, 0.25, 0.5, 1...4|0, 0.25, 0.5, 1...6|0, 0.25, 0.5, 1...8|0, 0.25, 0.5, 1...10|0, 0.25, 0.5, 1...12|0, 0.25, 0.5, 1...14| -|Number of replicas|1|1|1|1|1|1|1| -|Multi-AZ|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|Read Scale-out|N/A|N/A|N/A|N/A|N/A|N/A|N/A| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -2 For documented max data size values. Reducing max data size reduces max log size proportionally. - -3 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -4 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. 
- -### General purpose service tier: Generation 5 compute platform (part 2) - -|Compute size (service objective)|GP_Gen5_16|GP_Gen5_18|GP_Gen5_20|GP_Gen5_24|GP_Gen5_32|GP_Gen5_40|GP_Gen5_80| -|:--- | --: |--: |--: |--: |---: | --: |--: | -|Compute generation|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5| -|vCores|16|18|20|24|32|40|80| -|Memory (GB)|83|93.4|103.8|124.6|166.1|207.6|415.2| -|Max number DBs per pool 1|500|500|500|500|500|500|500| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A|N/A|N/A|N/A| -|Max data size (GB)|2048|3072|3072|3072|4096|4096|4096| -|Max log size (GB) 2|614|922|922|922|1229|1229|1229| -|TempDB max data size (GB)|512|576|640|768|1024|1280|2560| -|Storage type|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage| -|IO latency (approximate)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)| -|Max data IOPS per pool 3 |6,400|7,200|8,000|9,600|12,800|16,000|16,000| -|Max log rate per pool (MBps)|62.5|62.5|62.5|62.5|62.5|62.5|62.5| -|Max concurrent workers per pool 4|1680|1890|2100|2520|3360|4200|8400| -|Max concurrent logins per pool 4|1680|1890|2100|2520|3360|4200|8400| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000|30,000| -|Min/max elastic pool vCore choices per database|0, 0.25, 0.5, 1...16|0, 0.25, 0.5, 1...18|0, 0.25, 0.5, 1...20|0, 0.25, 0.5, 1...20, 24|0, 0.25, 0.5, 1...20, 24, 32|0, 0.25, 0.5, 1...16, 24, 32, 40|0, 0.25, 0.5, 1...16, 24, 32, 40, 80| -|Number of replicas|1|1|1|1|1|1|1| -|Multi-AZ|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|Read Scale-out|N/A|N/A|N/A|N/A|N/A|N/A|N/A| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -2 For documented max data size values. Reducing max data size reduces max log size proportionally. - -3 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -4 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. 
- -## General purpose - provisioned compute - Fsv2-series - -### Fsv2-series compute generation (part 1) - -|Compute size (service objective)|GP_Fsv2_8|GP_Fsv2_10|GP_Fsv2_12|GP_Fsv2_14| GP_Fsv2_16| -|:---| ---:|---:|---:|---:|---:| -|Compute generation|Fsv2-series|Fsv2-series|Fsv2-series|Fsv2-series|Fsv2-series| -|vCores|8|10|12|14|16| -|Memory (GB)|15.1|18.9|22.7|26.5|30.2| -|Max number DBs per pool 1|500|500|500|500|500| -|Columnstore support|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A|N/A| -|Max data size (GB)|1024|1024|1024|1024|1536| -|Max log size (GB) 2|336|336|336|336|512| -|TempDB max data size (GB)|37|46|56|65|74| -|Storage type|Remote SSD|Remote SSD|Remote SSD|Remote SSD|Remote SSD| -|IO latency (approximate)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)| -|Max data IOPS per pool 3|2560|3200|3840|4480|5120| -|Max log rate per pool (MBps)|48|60|62.5|62.5|62.5| -|Max concurrent workers per pool 4|400|500|600|700|800| -|Max concurrent logins per pool 4|800|1000|1200|1400|1600| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000| -|Min/max elastic pool vCore choices per database|0-8|0-10|0-12|0-14|0-16| -|Number of replicas|1|1|1|1|1| -|Multi-AZ|N/A|N/A|N/A|N/A|N/A| -|Read Scale-out|N/A|N/A|N/A|N/A|N/A| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -2 For documented max data size values. Reducing max data size reduces max log size proportionally. - -3 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -4 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. 
- -### Fsv2-series compute generation (part 2) - -|Compute size (service objective)|GP_Fsv2_18|GP_Fsv2_20|GP_Fsv2_24|GP_Fsv2_32| GP_Fsv2_36|GP_Fsv2_72| -|:---| ---:|---:|---:|---:|---:|---:| -|Compute generation|Fsv2-series|Fsv2-series|Fsv2-series|Fsv2-series|Fsv2-series|Fsv2-series| -|vCores|18|20|24|32|36|72| -|Memory (GB)|34.0|37.8|45.4|60.5|68.0|136.0| -|Max number DBs per pool 1|500|500|500|500|500|500| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A|N/A|N/A| -|Max data size (GB)|1536|1536|1536|3072|3072|4096| -|Max log size (GB) 2|512|512|512|1024|1024|1024| -|TempDB max data size (GB)|83|93|111|148|167|333| -|Storage type|Remote SSD|Remote SSD|Remote SSD|Remote SSD|Remote SSD|Remote SSD| -|IO latency (approximate)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)| -|Max data IOPS per pool 3|5760|6400|7680|10240|11520|12800| -|Max log rate per pool (MBps)|62.5|62.5|62.5|62.5|62.5|62.5| -|Max concurrent workers per pool 4|900|1000|1200|1600|1800|3600| -|Max concurrent logins per pool 4|1800|2000|2400|3200|3600|7200| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000| -|Min/max elastic pool vCore choices per database|0-18|0-20|0-24|0-32|0-36|0-72| -|Number of replicas|1|1|1|1|1|1| -|Multi-AZ|N/A|N/A|N/A|N/A|N/A|N/A| -|Read Scale-out|N/A|N/A|N/A|N/A|N/A|N/A| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -2 For documented max data size values. Reducing max data size reduces max log size proportionally. - -3 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -4 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. 
- -## General purpose - provisioned compute - DC-series - -|Compute size (service objective)|GP_DC_2|GP_DC_4|GP_DC_6|GP_DC_8| -|:--- | --: |--: |--: |--: | -|Compute generation|DC|DC|DC|DC| -|vCores|2|4|6|8| -|Memory (GB)|9|18|27|36| -|Max number DBs per pool 1|100|400|400|400| -|Columnstore support|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A| -|Max data size (GB)|756|1536|2048|2048| -|Max log size (GB) 2|227|461|614|614| -|TempDB max data size (GB)|64|128|192|256| -|Storage type|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage|Premium (Remote) Storage| -|IO latency (approximate)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)| -|Max data IOPS per pool 3|800|1600|2400|3200| -|Max log rate per pool (MBps)|12|24|36|48| -|Max concurrent workers per pool 4|168|336|504|672| -|Max concurrent logins per pool 4|168|336|504|672| -|Max concurrent sessions|30,000|30,000|30,000|30,000| -|Min/max elastic pool vCore choices per database|2|2...4|2...6|2...8| -|Number of replicas|1|1|1|1| -|Multi-AZ|N/A|N/A|N/A|N/A| -|Read Scale-out|N/A|N/A|N/A|N/A| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size| - -1 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -2 For documented max data size values. Reducing max data size reduces max log size proportionally. - -3 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -4 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. - -## Business critical - provisioned compute - Gen4 - -> [!IMPORTANT] -> New Gen4 databases are no longer supported in the Australia East or Brazil South regions. 
- -### Business critical service tier: Generation 4 compute platform (part 1) - -|Compute size (service objective)|BC_Gen4_2|BC_Gen4_3|BC_Gen4_4|BC_Gen4_5|BC_Gen4_6| -|:--- | --: |--: |--: |--: |--: | -|Compute generation|Gen4|Gen4|Gen4|Gen4|Gen4| -|vCores|2|3|4|5|6| -|Memory (GB)|14|21|28|35|42| -|Max number DBs per pool 1|50|100|100|100|100| -|Columnstore support|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|2|3|4|5|6| -|Storage type|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD| -|Max data size (GB)|1024|1024|1024|1024|1024| -|Max log size (GB) 2|307|307|307|307|307| -|TempDB max data size (GB)|64|96|128|160|192| -|[Max local storage size](resource-limits-logical-server.md#storage-space-governance) (GB)|1356|1356|1356|1356|1356| -|IO latency (approximate)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)| -|Max data IOPS per pool 3|9,000|13,500|18,000|22,500|27,000| -|Max log rate per pool (MBps)|20|30|40|50|60| -|Max concurrent workers per pool 4|420|630|840|1050|1260| -|Max concurrent logins per pool 4|420|630|840|1050|1260| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000| -|Min/max elastic pool vCore choices per database|0, 0.25, 0.5, 1, 2|0, 0.25, 0.5, 1...3|0, 0.25, 0.5, 1...4|0, 0.25, 0.5, 1...5|0, 0.25, 0.5, 1...6| -|Number of replicas|4|4|4|4|4| -|Multi-AZ|Yes|Yes|Yes|Yes|Yes| -|Read Scale-out|Yes|Yes|Yes|Yes|Yes| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -2 For documented max data size values. Reducing max data size reduces max log size proportionally. - -3 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -4 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. 
- -### Business critical service tier: Generation 4 compute platform (part 2) - -|Compute size (service objective)|BC_Gen4_7|BC_Gen4_8|BC_Gen4_9|BC_Gen4_10|BC_Gen4_16|BC_Gen4_24| -|:--- | --: |--: |--: |--: |--: |--: | -|Compute generation|Gen4|Gen4|Gen4|Gen4|Gen4|Gen4| -|vCores|7|8|9|10|16|24| -|Memory (GB)|49|56|63|70|112|159.5| -|Max number DBs per pool 1|100|100|100|100|100|100| -|Columnstore support|N/A|N/A|N/A|N/A|N/A|N/A| -|In-memory OLTP storage (GB)|7|8|9.5|11|20|36| -|Storage type|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD| -|Max data size (GB)|1024|1024|1024|1024|1024|1024| -|Max log size (GB) 2|307|307|307|307|307|307| -|TempDB max data size (GB)|224|256|288|320|512|768| -|[Max local storage size](resource-limits-logical-server.md#storage-space-governance) (GB)|1356|1356|1356|1356|1356|1356| -|IO latency (approximate)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)| -|Max data IOPS per pool 3|31,500|36,000|40,500|45,000|72,000|96,000| -|Max log rate per pool (MBps)|70|80|80|80|80|80| -|Max concurrent workers per pool 4|1470|1680|1890|2100|3360|5040| -|Max concurrent logins per pool 4|1470|1680|1890|2100|3360|5040| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000| -|Min/max elastic pool vCore choices per database|0, 0.25, 0.5, 1...7|0, 0.25, 0.5, 1...8|0, 0.25, 0.5, 1...9|0, 0.25, 0.5, 1...10|0, 0.25, 0.5, 1...10, 16|0, 0.25, 0.5, 1...10, 16, 24| -|Number of replicas|4|4|4|4|4|4| -|Multi-AZ|Yes|Yes|Yes|Yes|Yes|Yes| -|Read Scale-out|Yes|Yes|Yes|Yes|Yes|Yes| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -2 For documented max data size values. Reducing max data size reduces max log size proportionally. - -3 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -4 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. 
- -## Business critical - provisioned compute - Gen5 - -### Business critical service tier: Generation 5 compute platform (part 1) - -|Compute size (service objective)|BC_Gen5_4|BC_Gen5_6|BC_Gen5_8|BC_Gen5_10|BC_Gen5_12|BC_Gen5_14| -|:--- | --: |--: |--: |--: |---: | --: |--: | -|Compute generation|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5| -|vCores|4|6|8|10|12|14| -|Memory (GB)|20.8|31.1|41.5|51.9|62.3|72.7| -|Max number DBs per pool 1|50|100|100|100|100|100| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|3.14|4.71|6.28|8.65|11.02|13.39| -|Max data size (GB)|1024|1536|2048|2048|3072|3072| -|Max log size (GB) 2|307|307|461|461|922|922| -|TempDB max data size (GB)|128|192|256|320|384|448| -|[Max local storage size](resource-limits-logical-server.md#storage-space-governance) (GB)|4829|4829|4829|4829|4829|4829| -|Storage type|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD| -|IO latency (approximate)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)| -|Max data IOPS per pool 3|18,000|27,000|36,000|45,000|54,000|63,000| -|Max log rate per pool (MBps)|60|90|120|120|120|120| -|Max concurrent workers per pool 4|420|630|840|1050|1260|1470| -|Max concurrent logins per pool 4|420|630|840|1050|1260|1470| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000| -|Min/max elastic pool vCore choices per database|0, 0.25, 0.5, 1...4|0, 0.25, 0.5, 1...6|0, 0.25, 0.5, 1...8|0, 0.25, 0.5, 1...10|0, 0.25, 0.5, 1...12|0, 0.25, 0.5, 1...14| -|Number of replicas|4|4|4|4|4|4| -|Multi-AZ|Yes|Yes|Yes|Yes|Yes|Yes| -|Read Scale-out|Yes|Yes|Yes|Yes|Yes|Yes| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -2 For documented max data size values. Reducing max data size reduces max log size proportionally. - -3 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -4 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. 
- -### Business critical service tier: Generation 5 compute platform (part 2) - -|Compute size (service objective)|BC_Gen5_16|BC_Gen5_18|BC_Gen5_20|BC_Gen5_24|BC_Gen5_32|BC_Gen5_40|BC_Gen5_80| -|:--- | --: |--: |--: |--: |---: | --: |--: | -|Compute generation|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5| -|vCores|16|18|20|24|32|40|80| -|Memory (GB)|83|93.4|103.8|124.6|166.1|207.6|415.2| -|Max number DBs per pool 1|100|100|100|100|100|100|100| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|15.77|18.14|20.51|25.25|37.94|52.23|131.68| -|Max data size (GB)|3072|3072|3072|4096|4096|4096|4096| -|Max log size (GB) 2|922|922|922|1229|1229|1229|1229| -|TempDB max data size (GB)|512|576|640|768|1024|1280|2560| -|[Max local storage size](resource-limits-logical-server.md#storage-space-governance) (GB)|4829|4829|4829|4829|4829|4829|4829| -|Storage type|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD| -|IO latency (approximate)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)| -|Max data IOPS per pool 3|72,000|81,000|90,000|108,000|144,000|180,000|256,000| -|Max log rate per pool (MBps)|120|120|120|120|120|120|120| -|Max concurrent workers per pool 4|1680|1890|2100|2520|3360|4200|8400| -|Max concurrent logins per pool 4|1680|1890|2100|2520|3360|4200|8400| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000|30,000| -|Min/max elastic pool vCore choices per database|0, 0.25, 0.5, 1...16|0, 0.25, 0.5, 1...18|0, 0.25, 0.5, 1...20|0, 0.25, 0.5, 1...20, 24|0, 0.25, 0.5, 1...20, 24, 32|0, 0.25, 0.5, 1...20, 24, 32, 40|0, 0.25, 0.5, 1...20, 24, 32, 40, 80| -|Number of replicas|4|4|4|4|4|4|4| -|Multi-AZ|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|Read Scale-out|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -2 For documented max data size values. Reducing max data size reduces max log size proportionally. - -3 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -4 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. 
- -## Business critical - provisioned compute - M-series - -### M-series compute generation (part 1) - -|Compute size (service objective)|BC_M_8|BC_M_10|BC_M_12|BC_M_14|BC_M_16|BC_M_18| -|:---| ---:|---:|---:|---:|---:|---:| -|Compute generation|M-series|M-series|M-series|M-series|M-series|M-series| -|vCores|8|10|12|14|16|18| -|Memory (GB)|235.4|294.3|353.2|412.0|470.9|529.7| -|Max number DBs per pool 1|100|100|100|100|100|100| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|64|80|96|112|128|150| -|Max data size (GB)|512|640|768|896|1024|1152| -|Max log size (GB) 2|171|213|256|299|341|384| -|TempDB max data size (GB)|256|320|384|448|512|576| -|[Max local storage size](resource-limits-logical-server.md#storage-space-governance) (GB)|13836|13836|13836|13836|13836|13836| -|Storage type|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD| -|IO latency (approximate)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)| -|Max data IOPS per pool 3|12,499|15,624|18,748|21,873|24,998|28,123| -|Max log rate per pool (MBps)|48|60|72|84|96|108| -|Max concurrent workers per pool 4|800|1,000|1,200|1,400|1,600|1,800| -|Max concurrent logins per pool 4|800|1,000|1,200|1,400|1,600|1,800| -|Max concurrent sessions|30000|30000|30000|30000|30000|30000| -|Min/max elastic pool vCore choices per database|0-8|0-10|0-12|0-14|0-16|0-18| -|Number of replicas|4|4|4|4|4|4| -|Multi-AZ|No|No|No|No|No|No| -|Read Scale-out|Yes|Yes|Yes|Yes|Yes|Yes| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -2 For documented max data size values. Reducing max data size reduces max log size proportionally. - -3 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -4 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. 
- -### M-series compute generation (part 2) - -|Compute size (service objective)|BC_M_20|BC_M_24|BC_M_32|BC_M_64|BC_M_128| -|:---| ---:|---:|---:|---:|---:| -|Compute generation|M-series|M-series|M-series|M-series|M-series| -|vCores|20|24|32|64|128| -|Memory (GB)|588.6|706.3|941.8|1883.5|3767.0| -|Max number DBs per pool 1|100|100|100|100|100| -|Columnstore support|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|172|216|304|704|1768| -|Max data size (GB)|1280|1536|2048|4096|4096| -|Max log size (GB) 2|427|512|683|1024|1024| -|TempDB max data size (GB)|640|768|1024|2048|4096| -|[Max local storage size](resource-limits-logical-server.md#storage-space-governance) (GB)|13836|13836|13836|13836|13836| -|Storage type|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD| -|IO latency (approximate)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)| -|Max data IOPS per pool 3|31,248|37,497|49,996|99,993|160,000| -|Max log rate per pool (MBps)|120|144|192|264|264| -|Max concurrent workers per pool 4|2,000|2,400|3,200|6,400|12,800| -|Max concurrent logins per pool 4|2,000|2,400|3,200|6,400|12,800| -|Max concurrent sessions|30000|30000|30000|30000|30000| -|Number of replicas|4|4|4|4|4| -|Multi-AZ|No|No|No|No|No| -|Read Scale-out|Yes|Yes|Yes|Yes|Yes| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -2 For documented max data size values. Reducing max data size reduces max log size proportionally. - -3 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -4 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. 
- -## Business critical - provisioned compute - DC-series - -|Compute size (service objective)|BC_DC_2|BC_DC_4|BC_DC_6|BC_DC_8| -|:--- | --: |--: |--: |--: | -|Compute generation|DC|DC|DC|DC| -|vCores|2|4|6|8| -|Memory (GB)|9|18|27|36| -|Max number DBs per pool 1|50|100|100|100| -|Columnstore support|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|1.7|3.7|5.9|8.2| -|Max data size (GB)|768|768|768|768| -|Max log size (GB) 2|230|230|230|230| -|TempDB max data size (GB)|64|128|192|256| -|[Max local storage size](resource-limits-logical-server.md#storage-space-governance) (GB)|1406|1406|1406|1406| -|Storage type|Local SSD|Local SSD|Local SSD|Local SSD| -|IO latency (approximate)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)| -|Max data IOPS per pool 3|15750|31500|47250|56000| -|Max log rate per pool (MBps)|20|60|90|120| -|Max concurrent workers per pool 4|168|336|504|672| -|Max concurrent logins per pool 4|168|336|504|672| -|Max concurrent sessions|30,000|30,000|30,000|30,000| -|Min/max elastic pool vCore choices per database|2|2...4|2...6|2...8| -|Number of replicas|4|4|4|4| -|Multi-AZ|No|No|No|No| -|Read Scale-out|Yes|Yes|Yes|Yes| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size| - -1 See [Resource management in dense elastic pools](elastic-pool-resource-management.md) for additional considerations. - -2 For documented max data size values. Reducing max data size reduces max log size proportionally. - -3 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -4 For the max concurrent workers for any individual database, see [Single database resource limits](resource-limits-vcore-single-databases.md). For example, if the elastic pool is using Gen5 and the max vCore per database is set at 2, then the max concurrent workers value is 200. If max vCore per database is set to 0.5, then the max concurrent workers value is 50 since on Gen5 there are a max of 100 concurrent workers per vCore. For other max vCore settings per database that are less 1 vCore or less, the number of max concurrent workers is similarly rescaled. - -## Database properties for pooled databases - -For each elastic pool, you can optionally specify per database minimum and maximum vCores to modify resource consumption patterns within the pool. Specified min and max values apply to all databases in the pool. Customizing min and max vCores for individual databases in the pool is not supported. - -You can also set maximum storage per database, for example to prevent a database from consuming all pool storage. 
This setting can be configured independently for each database. - -The following table describes per database properties for pooled databases. - -| Property | Description | -|:--- |:--- | -| Max vCores per database |The maximum number of vCores that any database in the pool may use, if available based on utilization by other databases in the pool. Max vCores per database is not a resource guarantee for a database. If the workload in each database does not need all available pool resources to perform adequately, consider setting max vCores per database to prevent a single database from monopolizing pool resources. Some degree of over-committing is expected since the pool generally assumes hot and cold usage patterns for databases, where all databases are not simultaneously peaking. | -| Min vCores per database |The minimum number of vCores reserved for any database in the pool. Consider setting a min vCores per database when you want to guarantee resource availability for each database regardless of resource consumption by other databases in the pool. The min vCores per database may be set to 0, and is also the default value. This property is set to anywhere between 0 and the average vCores utilization per database.| -| Max storage per database |The maximum database size set by the user for a database in a pool. Pooled databases share allocated pool storage, so the size a database can reach is limited to the smaller of remaining pool storage and maximum database size. Maximum database size refers to the maximum size of the data files and does not include the space used by the log file. | - - -> [!IMPORTANT] -> Because resources in an elastic pool are finite, setting min vCores per database to a value greater than 0 implicitly limits resource utilization by each database. If, at a point in time, most databases in a pool are idle, resources reserved to satisfy the min vCores guarantee are not available to databases active at that point in time. 
-> -> Additionally, setting min vCores per database to a value greater than 0 implicitly limits the number of databases that can be added to the pool. For example, if you set the min vCores to 2 in a 20 vCore pool, it means that you will not be able to add more than 10 databases to the pool, because 2 vCores are reserved for each database. -> - -Even though the per database properties are expressed in vCores, they also govern consumption of other resource types, such as data IO, log IO, buffer pool memory, and worker threads. As you adjust min and max per database vCore values, reservations and limits for all resource types are adjusted proportionally. - -Min and max per database vCore values apply to resource consumption by user workloads, but not to resource consumption by internal processes. For example, for a database with a per database max vCores set to half of the pool vCores, user workload cannot consume more than one half of the buffer pool memory. However, this database can still take advantage of pages in the buffer pool that were loaded by internal processes. For more information, see [Resource consumption by user workloads and internal processes](resource-limits-logical-server.md#resource-consumption-by-user-workloads-and-internal-processes). - -> [!NOTE] -> The resource limits of individual databases in elastic pools are generally the same as for single databases outside of pools that have the same compute size (service objective). For example, the max concurrent workers for an GP_Gen4_1 database is 200 workers. So, the max concurrent workers for a database in a GP_Gen4_1 pool is also 200 workers. Note, the total number of concurrent workers in GP_Gen4_1 pool is 210. 
- -## Next steps - -- For vCore resource limits for a single database, see [resource limits for single databases using the vCore purchasing model](resource-limits-vcore-single-databases.md) -- For DTU resource limits for a single database, see [resource limits for single databases using the DTU purchasing model](resource-limits-dtu-single-databases.md) -- For DTU resource limits for elastic pools, see [resource limits for elastic pools using the DTU purchasing model](resource-limits-dtu-elastic-pools.md) -- For resource limits for managed instances, see [managed instance resource limits](../managed-instance/resource-limits.md). -- For information about general Azure limits, see [Azure subscription and service limits, quotas, and constraints](../../azure-resource-manager/management/azure-subscription-service-limits.md). -- For information about resource limits on a logical SQL server, see [overview of resource limits on a logical SQL server](resource-limits-logical-server.md) for information about limits at the server and subscription levels. diff --git a/articles/azure-sql/database/resource-limits-vcore-single-databases.md b/articles/azure-sql/database/resource-limits-vcore-single-databases.md deleted file mode 100644 index a4752149c2ff6..0000000000000 --- a/articles/azure-sql/database/resource-limits-vcore-single-databases.md +++ /dev/null @@ -1,713 +0,0 @@ ---- -title: Single database vCore resource limits -description: This page describes some common vCore resource limits for a single database in Azure SQL Database. 
-services: sql-database -ms.service: sql-database -ms.subservice: service-overview -ms.custom: sqldbrb=1, references_regions -ms.devlang: -ms.topic: reference -author: dimitri-furman -ms.author: dfurman -ms.reviewer: kendralittle, mathoma -ms.date: 04/25/2022 ---- -# Resource limits for single databases using the vCore purchasing model -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article provides the detailed resource limits for single databases in Azure SQL Database using the vCore purchasing model. - -* For DTU purchasing model limits for single databases on a server, see [Overview of resource limits on a server](resource-limits-logical-server.md). -* For DTU purchasing model resource limits for Azure SQL Database, see [DTU resource limits single databases](resource-limits-dtu-single-databases.md) and [DTU resource limits elastic pools](resource-limits-dtu-elastic-pools.md). -* For elastic pool vCore resource limits, [vCore resource limits - elastic pools](resource-limits-vcore-elastic-pools.md). -* For more information regarding the different purchasing models, see [Purchasing models and service tiers](purchasing-models.md). - -> [!IMPORTANT] -> Under some circumstances, you may need to shrink a database to reclaim unused space. For more information, see [Manage file space in Azure SQL Database](file-space-manage.md). - -Each read-only replica of a database has its own resources, such as vCores, memory, data IOPS, tempdb, workers, and sessions. Each read-only replica is subject to the resource limits detailed later in this article. 
- -You can set the service tier, compute size (service objective), and storage amount for a single database using: - -* [Transact-SQL](single-database-manage.md#transact-sql-t-sql) via [ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql#overview-sql-database) -* [Azure portal](single-database-manage.md#the-azure-portal) -* [PowerShell](single-database-manage.md#powershell) -* [Azure CLI](single-database-manage.md#azure-cli) -* [REST API](single-database-manage.md#rest-api) - -> [!IMPORTANT] -> For scaling guidance and considerations, see [Scale a single database](single-database-scale.md). - -## General purpose - serverless compute - Gen5 - - - -The [serverless compute tier](serverless-tier-overview.md) is currently available on Gen5 hardware only. - -### Gen5 compute generation (part 1) - -|Compute size (service objective)|GP_S_Gen5_1|GP_S_Gen5_2|GP_S_Gen5_4|GP_S_Gen5_6|GP_S_Gen5_8| -|:--- | --: |--: |--: |--: |--: | -|Compute generation|Gen5|Gen5|Gen5|Gen5|Gen5| -|Min-max vCores|0.5-1|0.5-2|0.5-4|0.75-6|1.0-8| -|Min-max memory (GB)|2.02-3|2.05-6|2.10-12|2.25-18|3.00-24| -|Min-max auto-pause delay (minutes)|60-10080|60-10080|60-10080|60-10080|60-10080| -|Columnstore support|Yes 1|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A|N/A| -|Max data size (GB)|512|1024|1024|1024|2048| -|Max log size (GB) 2|154|307|307|307|461| -|Tempdb max data size (GB)|32|64|128|192|256| -|Storage type|Remote SSD|Remote SSD|Remote SSD|Remote SSD|Remote SSD| -|IO latency (approximate)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)| -|Max data IOPS 3|320|640|1280|1920|2560| -|Max log rate (MBps)|4.5|9|18|27|36| -|Max concurrent workers|75|150|300|450|600| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000| -|Number of replicas|1|1|1|1|1| -|Multi-AZ|Yes|Yes|Yes|Yes|Yes| -|Read Scale-out|N/A|N/A|N/A|N/A|N/A| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 Service objectives with smaller max vCore configurations may have insufficient memory for creating and using columnstore indexes. If encountering performance problems with columnstore, increase the max vCore configuration to increase the max memory available. - -2 For documented max data size values. Reducing max data size reduces max log size proportionally. - -3 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -### Gen5 compute generation (part 2) - -|Compute size (service objective)|GP_S_Gen5_10|GP_S_Gen5_12|GP_S_Gen5_14|GP_S_Gen5_16| -|:--- | --: |--: |--: |--: | -|Compute generation|Gen5|Gen5|Gen5|Gen5| -|Min-max vCores|1.25-10|1.50-12|1.75-14|2.00-16| -|Min-max memory (GB)|3.75-30|4.50-36|5.25-42|6.00-48| -|Min-max auto-pause delay (minutes)|60-10080|60-10080|60-10080|60-10080| -|Columnstore support|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A| -|Max data size (GB)|2048|3072|3072|3072| -|Max log size (GB) 1|461|461|461|922| -|Tempdb max data size (GB)|320|384|448|512| -|Storage type|Remote SSD|Remote SSD|Remote SSD|Remote SSD| -|IO latency (approximate)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)| -|Max data IOPS 2|3200|3840|4480|5120| -|Max log rate (MBps)|45|50|50|50| -|Max concurrent workers|750|900|1050|1200| -|Max concurrent sessions|30,000|30,000|30,000|30,000| -|Number of replicas|1|1|1|1| -|Multi-AZ|Yes|Yes|Yes|Yes| -|Read Scale-out|N/A|N/A|N/A|N/A| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size| - -1 For documented max data size values. Reducing max data size reduces max log size proportionally. - -2 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -### Gen5 compute generation (part 3) - -|Compute size (service objective)|GP_S_Gen5_18|GP_S_Gen5_20|GP_S_Gen5_24|GP_S_Gen5_32|GP_S_Gen5_40| -|:--- | --: |--: |--: |--: |--:| -|Compute generation|Gen5|Gen5|Gen5|Gen5|Gen5| -|Min-max vCores|2.25-18|2.5-20|3-24|4-32|5-40| -|Min-max memory (GB)|6.75-54|7.5-60|9-72|12-96|15-120| -|Min-max auto-pause delay (minutes)|60-10080|60-10080|60-10080|60-10080|60-10080| -|Columnstore support|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A|N/A| -|Max data size (GB)|3072|3072|4096|4096|4096| -|Max log size (GB) 1|922|922|1024|1024|1024| -|Tempdb max data size (GB)|576|640|768|1024|1280| -|Storage type|Remote SSD|Remote SSD|Remote SSD|Remote SSD|Remote SSD| -|IO latency (approximate)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)| -|Max data IOPS 2|5760|6400|7680|10240|12800| -|Max log rate (MBps)|50|50|50|50|50| -|Max concurrent workers|1350|1500|1800|2400|3000| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000| -|Number of replicas|1|1|1|1|1| -|Multi-AZ|Yes|Yes|Yes|Yes|Yes| -|Read Scale-out|N/A|N/A|N/A|N/A|N/A| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 For documented max data size values. Reducing max data size reduces max log size proportionally. - -2 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - - -## Hyperscale - provisioned compute - Gen4 - -### Gen4 compute generation (part 1) - -|Compute size (service objective)|HS_Gen4_1|HS_Gen4_2|HS_Gen4_3|HS_Gen4_4|HS_Gen4_5|HS_Gen4_6| -|:--- | --: |--: |--: |---: | --: |--: | -|Compute generation|Gen4|Gen4|Gen4|Gen4|Gen4|Gen4| -|vCores|1|2|3|4|5|6| -|Memory (GB)|7|14|21|28|35|42| -|[RBPEX](hyperscale-architecture.md#compute) Size|3X Memory|3X Memory|3X Memory|3X Memory|3X Memory|3X Memory| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A|N/A|N/A| -|Max data size (TB)|100 |100 |100 |100 |100 |100| -|Max log size (TB)|Unlimited |Unlimited |Unlimited |Unlimited |Unlimited |Unlimited | -|Tempdb max data size (GB)|32|64|96|128|160|192| -|Storage type| [Note 1](#notes) |[Note 1](#notes)|[Note 1](#notes) |[Note 1](#notes) |[Note 1](#notes) |[Note 1](#notes) | -|Max local SSD IOPS 1|4000 |8000 |12000 |16000 |20000 |24000 | -|Max log rate (MBps)|100 |100 |100 |100 |100 |100 | -|IO latency (approximate)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)| -|Max concurrent workers|200|400|600|800|1000|1200| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000| -|Secondary replicas|0-4|0-4|0-4|0-4|0-4|0-4| 
-|Multi-AZ|N/A|N/A|N/A|N/A|N/A|N/A| -|Read Scale-out|Yes|Yes|Yes|Yes|Yes|Yes| -|Backup storage retention|7 days|7 days|7 days|7 days|7 days|7 days| - - -1 Besides local SSD IO, workloads will use remote [page server](hyperscale-architecture.md#page-server) IO. Effective IOPS will depend on workload. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance), and [Data IO in resource utilization statistics](hyperscale-performance-diagnostics.md#data-io-in-resource-utilization-statistics). - -### Gen4 compute generation (part 2) - -|Compute size (service objective)|HS_Gen4_7|HS_Gen4_8|HS_Gen4_9|HS_Gen4_10|HS_Gen4_16|HS_Gen4_24| -|:--- | ---: |--: |--: | --: |--: |--: | -|Compute generation|Gen4|Gen4|Gen4|Gen4|Gen4|Gen4| -|vCores|7|8|9|10|16|24| -|Memory (GB)|49|56|63|70|112|159.5| -|[RBPEX](hyperscale-architecture.md#compute) Size|3X Memory|3X Memory|3X Memory|3X Memory|3X Memory|3X Memory| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A|N/A|N/A| -|Max data size (TB)|100 |100 |100 |100 |100 |100 | -|Max log size (TB)|Unlimited |Unlimited |Unlimited |Unlimited |Unlimited |Unlimited | -|Tempdb max data size (GB)|224|256|288|320|512|768| -|Storage type| [Note 1](#notes) |[Note 1](#notes) |[Note 1](#notes) |[Note 1](#notes) |[Note 1](#notes) |[Note 1](#notes) | -|Max local SSD IOPS 1|28000 |32000 |36000 |40000 |64000 |76800 | -|Max log rate (MBps)|100 |100 |100 |100 |100 |100 | -|IO latency (approximate)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)| -|Max concurrent workers|1400|1600|1800|2000|3200|4800| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000| -|Secondary replicas|0-4|0-4|0-4|0-4|0-4|0-4| -|Multi-AZ|N/A|N/A|N/A|N/A|N/A|N/A| -|Read Scale-out|Yes|Yes|Yes|Yes|Yes|Yes| -|Backup storage retention|7 days|7 days|7 days|7 days|7 days|7 days| - - -1 Besides local SSD IO, workloads will use remote [page 
server](hyperscale-architecture.md#page-server) IO. Effective IOPS will depend on workload. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance), and [Data IO in resource utilization statistics](hyperscale-performance-diagnostics.md#data-io-in-resource-utilization-statistics). - -## Hyperscale - provisioned compute - Gen5 - -### Gen5 compute generation (part 1) - -|Compute size (service objective)|HS_Gen5_2|HS_Gen5_4|HS_Gen5_6|HS_Gen5_8|HS_Gen5_10|HS_Gen5_12|HS_Gen5_14| -|:--- | --: |--: |--: |--: |---: | --: |--: |--: | -|Compute generation|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5| -|vCores|2|4|6|8|10|12|14| -|Memory (GB)|10.4|20.8|31.1|41.5|51.9|62.3|72.7| -|[RBPEX](hyperscale-architecture.md#compute) Size|3X Memory|3X Memory|3X Memory|3X Memory|3X Memory|3X Memory|3X Memory| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A|N/A|N/A|N/A| -|Max data size (TB)|100 |100 |100 |100 |100 |100 |100| -|Max log size (TB)|Unlimited |Unlimited |Unlimited |Unlimited |Unlimited |Unlimited |Unlimited | -|Tempdb max data size (GB)|64|128|192|256|320|384|448| -|Storage type| [Note 1](#notes) |[Note 1](#notes)|[Note 1](#notes) |[Note 1](#notes) |[Note 1](#notes) |[Note 1](#notes) |[Note 1](#notes) | -|Max local SSD IOPS 1|8000 |16000 |24000 |32000 |40000 |48000 |56000 | -|Max log rate (MBps)|100 |100 |100 |100 |100 |100 |100 | -|IO latency (approximate)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)| -|Max concurrent workers|200|400|600|800|1000|1200|1400| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000|30,000| -|Secondary replicas|0-4|0-4|0-4|0-4|0-4|0-4|0-4| -|Multi-AZ|[Available in preview](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview)|[Available in preview](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview)|[Available in 
preview](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview)|[Available in preview](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview)|[Available in preview](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview)|[Available in preview](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview)|[Available in preview](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview)| -|Read Scale-out|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|Backup storage retention|7 days|7 days|7 days|7 days|7 days|7 days|7 days| - - -1 Besides local SSD IO, workloads will use remote [page server](hyperscale-architecture.md#page-server) IO. Effective IOPS will depend on workload. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance), and [Data IO in resource utilization statistics](hyperscale-performance-diagnostics.md#data-io-in-resource-utilization-statistics). 
- -### Gen5 compute generation (part 2) - -|Compute size (service objective)|HS_Gen5_16|HS_Gen5_18|HS_Gen5_20|HS_Gen5_24|HS_Gen5_32|HS_Gen5_40|HS_Gen5_80| -|:--- | --: |--: |--: |--: |---: |--: |--: | -|Compute generation|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5| -|vCores|16|18|20|24|32|40|80| -|Memory (GB)|83|93.4|103.8|124.6|166.1|207.6|415.2| -|[RBPEX](hyperscale-architecture.md#compute) Size|3X Memory|3X Memory|3X Memory|3X Memory|3X Memory|3X Memory|3X Memory| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A|N/A|N/A|N/A| -|Max data size (TB)|100 |100 |100 |100 |100 |100 |100 | -|Max log size (TB)|Unlimited |Unlimited |Unlimited |Unlimited |Unlimited |Unlimited |Unlimited | -|Tempdb max data size (GB)|512|576|640|768|1024|1280|2560| -|Storage type| [Note 1](#notes) |[Note 1](#notes)|[Note 1](#notes)|[Note 1](#notes) |[Note 1](#notes) |[Note 1](#notes) |[Note 1](#notes) | -|Max local SSD IOPS 1|64000 |72000 |80000 |96000 |128000 |160000 |204800 | -|Max log rate (MBps)|100 |100 |100 |100 |100 |100 |100 | -|IO latency (approximate)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)| -|Max concurrent workers|1600|1800|2000|2400|3200|4000|8000| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000|30,000| -|Secondary replicas|0-4|0-4|0-4|0-4|0-4|0-4|0-4| -|Multi-AZ|[Available in preview](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview)|[Available in preview](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview)|[Available in preview](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview)|[Available in preview](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview)|[Available in preview](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview)|[Available in 
preview](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview)|[Available in preview](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview)| -|Read Scale-out|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|Backup storage retention|7 days|7 days|7 days|7 days|7 days|7 days|7 days| - - -1 Besides local SSD IO, workloads will use remote [page server](hyperscale-architecture.md#page-server) IO. Effective IOPS will depend on workload. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance), and [Data IO in resource utilization statistics](hyperscale-performance-diagnostics.md#data-io-in-resource-utilization-statistics). - -#### Notes - -**Note 1**: Hyperscale is a multi-tiered architecture with separate compute and storage components: [Hyperscale Service Tier Architecture](service-tier-hyperscale.md#distributed-functions-architecture) - -**Note 2**: Latency is 1-2 ms for data on local compute replica SSD, which caches most used data pages. Higher latency for data retrieved from page servers. 
- -## Hyperscale - provisioned compute - DC-series - -|Compute size (service objective)|HS_DC_2|HS_DC_4|HS_DC_6|HS_DC_8| -|:--- | --: |--: |--: |--: |---: | -|Compute generation|DC-series|DC-series|DC-series|DC-series| -|vCores|2|4|6|8| -|Memory (GB)|9|18|27|36| -|[RBPEX](hyperscale-architecture.md#compute) Size|3X Memory|3X Memory|3X Memory|3X Memory| -|Columnstore support|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A| -|Max data size (TB)|100 |100 |100 |100 | -|Max log size (TB)|Unlimited |Unlimited |Unlimited |Unlimited | -|Tempdb max data size (GB)|64|128|192|256| -|Storage type| [Note 1](#notes) |[Note 1](#notes)|[Note 1](#notes) |[Note 1](#notes) | -|Max local SSD IOPS 1|14000|28000|42000|44800| -|Max log rate (MBps)|100 |100 |100 |100 | -|IO latency (approximate)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)|[Note 2](#notes)| -|Max concurrent workers|160|320|480|640| -|Max concurrent sessions|30,000|30,000|30,000|30,000| -|Secondary replicas|0-4|0-4|0-4|0-4| -|Multi-AZ|N/A|N/A|N/A|N/A| -|Read Scale-out|Yes|Yes|Yes|Yes| -|Backup storage retention|7 days|7 days|7 days|7 days| - - -1 Besides local SSD IO, workloads will use remote [page server](hyperscale-architecture.md#page-server) IO. Effective IOPS will depend on workload. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance), and [Data IO in resource utilization statistics](hyperscale-performance-diagnostics.md#data-io-in-resource-utilization-statistics). - -### Notes - -**Note 1**: Hyperscale is a multi-tiered architecture with separate compute and storage components: [Hyperscale Service Tier Architecture](service-tier-hyperscale.md#distributed-functions-architecture) - -**Note 2**: Latency is 1-2 ms for data on local compute replica SSD, which caches most used data pages. Higher latency for data retrieved from page servers. 
- -## General purpose - provisioned compute - Gen4 - -> [!IMPORTANT] -> New Gen4 databases are no longer supported in the Australia East or Brazil South regions. - -### Gen4 compute generation (part 1) - -|Compute size (service objective)|GP_Gen4_1|GP_Gen4_2|GP_Gen4_3|GP_Gen4_4|GP_Gen4_5|GP_Gen4_6 -|:--- | --: |--: |--: |--: |--: |--: | -|Compute generation|Gen4|Gen4|Gen4|Gen4|Gen4|Gen4| -|vCores|1|2|3|4|5|6| -|Memory (GB)|7|14|21|28|35|42| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A|N/A|N/A| -|Max data size (GB)|1024|1024|1536|1536|1536|3072| -|Max log size (GB) 1|307|307|461|461|461|922| -|Tempdb max data size (GB)|32|64|96|128|160|192| -|Storage type|Remote SSD|Remote SSD|Remote SSD|Remote SSD|Remote SSD|Remote SSD| -|IO latency (approximate)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)| -|Max data IOPS 2|320|640|960|1280|1600|1920| -|Max log rate (MBps)|4.5|9|13.5|18|22.5|27| -|Max concurrent workers|200|400|600|800|1000|1200| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000| -|Number of replicas|1|1|1|1|1|1| -|Multi-AZ|N/A|N/A|N/A|N/A|N/A|N/A| -|Read Scale-out|N/A|N/A|N/A|N/A|N/A|N/A| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 For documented max data size values. Reducing max data size reduces max log size proportionally. - -2 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -### Gen4 compute generation (part 2) - -|Compute size (service objective)|GP_Gen4_7|GP_Gen4_8|GP_Gen4_9|GP_Gen4_10|GP_Gen4_16|GP_Gen4_24 -|:--- | --: |--: |--: |--: |--: |--: | -|Compute generation|Gen4|Gen4|Gen4|Gen4|Gen4|Gen4| -|vCores|7|8|9|10|16|24| -|Memory (GB)|49|56|63|70|112|159.5| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A|N/A|N/A| -|Max data size (GB)|3072|3072|3072|3072|4096|4096| -|Max log size (GB) 1|922|922|922|922|1229|1229| -|Tempdb max data size (GB)|224|256|288|320|512|768| -|Storage type|Remote SSD|Remote SSD|Remote SSD|Remote SSD|Remote SSD|Remote SSD| -|IO latency (approximate)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read) -|Max data IOPS 2|2240|2560|2880|3200|5120|7680| -|Max log rate (MBps)|31.5|36|40.5|45|50|50| -|Max concurrent workers|1400|1600|1800|2000|3200|4800| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000| -|Number of replicas|1|1|1|1|1|1| -|Multi-AZ|N/A|N/A|N/A|N/A|N/A|N/A| -|Read Scale-out|N/A|N/A|N/A|N/A|N/A|N/A| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 For documented max data size values. Reducing max data size reduces max log size proportionally. - -2 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -## General purpose - provisioned compute - Gen5 - -### Gen5 compute generation (part 1) - -|Compute size (service objective)|GP_Gen5_2|GP_Gen5_4|GP_Gen5_6|GP_Gen5_8|GP_Gen5_10|GP_Gen5_12|GP_Gen5_14| -|:--- | --: |--: |--: |--: |---: | --: |--: | -|Compute generation|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5| -|vCores|2|4|6|8|10|12|14| -|Memory (GB)|10.4|20.8|31.1|41.5|51.9|62.3|72.7| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A|N/A|N/A|N/A| -|Max data size (GB)|1024|1024|1536|2048|2048|3072|3072| -|Max log size (GB) 1|307|307|461|461|461|922|922| -|Tempdb max data size (GB)|64|128|192|256|320|384|384| -|Storage type|Remote SSD|Remote SSD|Remote SSD|Remote SSD|Remote SSD|Remote SSD|Remote SSD| -|IO latency (approximate)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)| -|Max data IOPS 2|640|1280|1920|2560|3200|3840|4480| -|Max log rate (MBps)|9|18|27|36|45|50|50| -|Max concurrent workers|200|400|600|800|1000|1200|1400| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000|30,000| -|Number of replicas|1|1|1|1|1|1|1| -|Multi-AZ|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|Read Scale-out|N/A|N/A|N/A|N/A|N/A|N/A|N/A| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 For documented max data size values. Reducing max data size reduces max log size proportionally. - -2 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -### Gen5 compute generation (part 2) - -|Compute size (service objective)|GP_Gen5_16|GP_Gen5_18|GP_Gen5_20|GP_Gen5_24|GP_Gen5_32|GP_Gen5_40|GP_Gen5_80| -|:--- | --: |--: |--: |--: |---: | --: |--: | -|Compute generation|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5| -|vCores|16|18|20|24|32|40|80| -|Memory (GB)|83|93.4|103.8|124.6|166.1|207.6|415.2| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A|N/A|N/A|N/A| -|Max data size (GB)|3072|3072|3072|4096|4096|4096|4096| -|Max log size (GB) 1|922|922|922|1024|1024|1024|1024| -|Tempdb max data size (GB)|512|576|640|768|1024|1280|2560| -|Storage type|Remote SSD|Remote SSD|Remote SSD|Remote SSD|Remote SSD|Remote SSD|Remote SSD| -|IO latency (approximate)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)| -|Max data IOPS 2|5120|5760|6400|7680|10240|12800|12800| -|Max log rate (MBps)|50|50|50|50|50|50|50| -|Max concurrent workers|1600|1800|2000|2400|3200|4000|8000| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000|30,000| -|Number of replicas|1|1|1|1|1|1|1| -|Multi-AZ|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|Read Scale-out|N/A|N/A|N/A|N/A|N/A|N/A|N/A| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 For documented max data size values. Reducing max data size reduces max log size proportionally. - -2 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -## General purpose - provisioned compute - Fsv2-series - -### Fsv2-series compute generation (part 1) - -|Compute size (service objective)|GP_Fsv2_8|GP_Fsv2_10|GP_Fsv2_12|GP_Fsv2_14| GP_Fsv2_16| -|:---| ---:|---:|---:|---:|---:| -|Compute generation|Fsv2-series|Fsv2-series|Fsv2-series|Fsv2-series|Fsv2-series| -|vCores|8|10|12|14|16| -|Memory (GB)|15.1|18.9|22.7|26.5|30.2| -|Columnstore support|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A|N/A| -|Max data size (GB)|1024|1024|1024|1024|1536| -|Max log size (GB) 1|336|336|336|336|512| -|Tempdb max data size (GB)|37|46|56|65|74| -|Storage type|Remote SSD|Remote SSD|Remote SSD|Remote SSD|Remote SSD| -|IO latency (approximate)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)| -|Max data IOPS 2|2560|3200|3840|4480|5120| -|Max log rate (MBps)|36|45|50|50|50| -|Max concurrent workers|400|500|600|700|800| -|Max concurrent logins|800|1000|1200|1400|1600| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000| -|Number of replicas|1|1|1|1|1| -|Multi-AZ|N/A|N/A|N/A|N/A|N/A| -|Read Scale-out|N/A|N/A|N/A|N/A|N/A| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 For documented max data size values. Reducing max data size reduces max log size proportionally. - -2 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -### Fsv2-series compute generation (part 2) - -|Compute size (service objective)|GP_Fsv2_18|GP_Fsv2_20|GP_Fsv2_24|GP_Fsv2_32| GP_Fsv2_36|GP_Fsv2_72| -|:---| ---:|---:|---:|---:|---:|---:| -|Compute generation|Fsv2-series|Fsv2-series|Fsv2-series|Fsv2-series|Fsv2-series|Fsv2-series| -|vCores|18|20|24|32|36|72| -|Memory (GB)|34.0|37.8|45.4|60.5|68.0|136.0| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A|N/A|N/A| -|Max data size (GB)|1536|1536|1536|3072|3072|4096| -|Max log size (GB) 1|512|512|512|1024|1024|1024| -|Tempdb max data size (GB)|83|93|111|148|167|333| -|Storage type|Remote SSD|Remote SSD|Remote SSD|Remote SSD|Remote SSD|Remote SSD| -|IO latency (approximate)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)| -|Max data IOPS 2|5760|6400|7680|10240|11520|12800| -|Max log rate (MBps)|50|50|50|50|50|50| -|Max concurrent workers|900|1000|1200|1600|1800|3600| -|Max concurrent logins|1800|2000|2400|3200|3600|7200| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000| -|Number of replicas|1|1|1|1|1|1| -|Multi-AZ|N/A|N/A|N/A|N/A|N/A|N/A| -|Read Scale-out|N/A|N/A|N/A|N/A|N/A|N/A| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 For documented max data size values. Reducing max data size reduces max log size proportionally. - -2 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -## General purpose - provisioned compute - DC-series - -|Compute size (service objective)|GP_DC_2|GP_DC_4|GP_DC_6|GP_DC_8| -|:---| ---:|---:|---:|---:| -|Compute generation|DC-series|DC-series|DC-series|DC-series| -|vCores|2|4|6|8| -|Memory (GB)|9|18|27|36| -|Columnstore support|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|N/A|N/A|N/A|N/A| -|Max data size (GB)|1024|1536|3072|3072| -|Max log size (GB) 1|307|461|922|922| -|Tempdb max data size (GB)|64|128|192|256| -|Storage type|Remote SSD|Remote SSD|Remote SSD|Remote SSD| -|IO latency (approximate)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)|5-7 ms (write)
    5-10 ms (read)| -|Max data IOPS 2|640|1280|1920|2560| -|Max log rate (MBps)|9|18|27|36| -|Max concurrent workers|160|320|480|640| -|Max concurrent sessions|30,000|30,000|30,000|30,000| -|Number of replicas|1|1|1|1| -|Multi-AZ|N/A|N/A|N/A|N/A| -|Read Scale-out|N/A|N/A|N/A|N/A| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size| - -1 For documented max data size values. Reducing max data size reduces max log size proportionally. - -2 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -## Business critical - provisioned compute - Gen4 - -> [!IMPORTANT] -> New Gen4 databases are no longer supported in the Australia East or Brazil South regions. - -### Gen4 compute generation (part 1) - -|Compute size (service objective)|BC_Gen4_1|BC_Gen4_2|BC_Gen4_3|BC_Gen4_4|BC_Gen4_5|BC_Gen4_6| -|:--- | --: |--: |--: |--: |--: |--: | -|Compute generation|Gen4|Gen4|Gen4|Gen4|Gen4|Gen4| -|vCores|1|2|3|4|5|6| -|Memory (GB)|7|14|21|28|35|42| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|1|2|3|4|5|6| -|Storage type|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD| -|Max data size (GB)|1024|1024|1024|1024|1024|1024| -|Max log size (GB) 1|307|307|307|307|307|307| -|Tempdb max data size (GB)|32|64|96|128|160|192| -|[Max local storage size](resource-limits-logical-server.md#storage-space-governance) (GB)|1356|1356|1356|1356|1356|1356| -|IO latency (approximate)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)| -|Max data IOPS 2|4,000|8,000|12,000|16,000|20,000|24,000| -|Max log rate (MBps)|8|16|24|32|40|48| -|Max concurrent workers|200|400|600|800|1000|1200| -|Max concurrent logins|200|400|600|800|1000|1200| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000| -|Number of replicas|4|4|4|4|4|4| -|Multi-AZ|Yes|Yes|Yes|Yes|Yes|Yes| -|Read Scale-out|Yes|Yes|Yes|Yes|Yes|Yes| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 For documented max data size values. Reducing max data size reduces max log size proportionally. - -2 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -### Gen4 compute generation (part 2) - -|Compute size (service objective)|BC_Gen4_7|BC_Gen4_8|BC_Gen4_9|BC_Gen4_10|BC_Gen4_16|BC_Gen4_24| -|:--- | --: |--: |--: |--: |--: |--: | -|Compute generation|Gen4|Gen4|Gen4|Gen4|Gen4|Gen4| -|vCores|7|8|9|10|16|24| -|Memory (GB)|49|56|63|70|112|159.5| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|7|8|9.5|11|20|36| -|Storage type|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD| -|Max data size (GB)|1024|1024|1024|1024|1024|1024| -|Max log size (GB) 1|307|307|307|307|307|307| -|Tempdb max data size (GB)|224|256|288|320|512|768| -|[Max local storage size](resource-limits-logical-server.md#storage-space-governance) (GB)|1356|1356|1356|1356|1356|1356| -|IO latency (approximate)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)| -|Max data IOPS 2|28,000|32,000|36,000|40,000|64,000|76,800| -|Max log rate (MBps)|56|64|64|64|64|64| -|Max concurrent workers|1400|1600|1800|2000|3200|4800| -|Max concurrent logins|1400|1600|1800|2000|3200|4800| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000| -|Number of replicas|4|4|4|4|4|4| -|Multi-AZ|Yes|Yes|Yes|Yes|Yes|Yes| -|Read Scale-out|Yes|Yes|Yes|Yes|Yes|Yes| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 For documented max data size values. Reducing max data size reduces max log size proportionally. - -2 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -## Business critical - provisioned compute - Gen5 - -### Gen5 compute generation (part 1) - -|Compute size (service objective)|BC_Gen5_2|BC_Gen5_4|BC_Gen5_6|BC_Gen5_8|BC_Gen5_10|BC_Gen5_12|BC_Gen5_14| -|:--- | --: |--: |--: |--: |---: | --: |--: | -|Compute generation|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5| -|vCores|2|4|6|8|10|12|14| -|Memory (GB)|10.4|20.8|31.1|41.5|51.9|62.3|72.7| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|1.57|3.14|4.71|6.28|8.65|11.02|13.39| -|Max data size (GB)|1024|1024|1536|2048|2048|3072|3072| -|Max log size (GB) 1|307|307|461|461|461|922|922| -|Tempdb max data size (GB)|64|128|192|256|320|384|448| -|[Max local storage size](resource-limits-logical-server.md#storage-space-governance) (GB)|4829|4829|4829|4829|4829|4829|4829| -|Storage type|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD| -|IO latency (approximate)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)| -|Max data IOPS 2|8000|16,000|24,000|32,000|40,000|48,000|56,000| -|Max log rate (MBps)|24|48|72|96|96|96|96| -|Max concurrent workers|200|400|600|800|1000|1200|1400| -|Max concurrent logins|200|400|600|800|1000|1200|1400| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000|30,000| -|Number of replicas|4|4|4|4|4|4|4| -|Multi-AZ|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|Read Scale-out|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 For documented max data size values. Reducing max data size reduces max log size proportionally. - -2 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -### Gen5 compute generation (part 2) - -|Compute size (service objective)|BC_Gen5_16|BC_Gen5_18|BC_Gen5_20|BC_Gen5_24|BC_Gen5_32|BC_Gen5_40|BC_Gen5_80| -|:--- | --: |--: |--: |--: |---: | --: |--: | -|Compute generation|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5|Gen5| -|vCores|16|18|20|24|32|40|80| -|Memory (GB)|83|93.4|103.8|124.6|166.1|207.6|415.2| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|15.77|18.14|20.51|25.25|37.94|52.23|131.64| -|Max data size (GB)|3072|3072|3072|4096|4096|4096|4096| -|Max log size (GB) 1|922|922|922|1024|1024|1024|1024| -|Tempdb max data size (GB)|512|576|640|768|1024|1280|2560| -|[Max local storage size](resource-limits-logical-server.md#storage-space-governance) (GB)|4829|4829|4829|4829|4829|4829|4829| -|Storage type|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD| -|IO latency (approximate)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)| -|Max data IOPS 2|64,000|72,000|80,000|96,000|128,000|160,000|204,800| -|Max log rate (MBps)|96|96|96|96|96|96|96| -|Max concurrent workers|1600|1800|2000|2400|3200|4000|8000| -|Max concurrent logins|1600|1800|2000|2400|3200|4000|8000| -|Max concurrent sessions|30,000|30,000|30,000|30,000|30,000|30,000|30,000| -|Number of replicas|4|4|4|4|4|4|4| -|Multi-AZ|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|Read Scale-out|Yes|Yes|Yes|Yes|Yes|Yes|Yes| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 For documented max data size values. Reducing max data size reduces max log size proportionally. - -2 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -## Business critical - provisioned compute - M-series - -### M-series compute generation (part 1) - -|Compute size (service objective)|BC_M_8|BC_M_10|BC_M_12|BC_M_14|BC_M_16|BC_M_18| -|:---| ---:|---:|---:|---:|---:|---:| -|Compute generation|M-series|M-series|M-series|M-series|M-series|M-series| -|vCores|8|10|12|14|16|18| -|Memory (GB)|235.4|294.3|353.2|412.0|470.9|529.7| -|Columnstore support|Yes|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|64|80|96|112|128|150| -|Max data size (GB)|512|640|768|896|1024|1152| -|Max log size (GB) 1|171|213|256|299|341|384| -|Tempdb max data size (GB)|256|320|384|448|512|576| -|[Max local storage size](resource-limits-logical-server.md#storage-space-governance) (GB)|13836|13836|13836|13836|13836|13836| -|Storage type|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD| -|IO latency (approximate)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)| -|Max data IOPS 2|12,499|15,624|18,748|21,873|24,998|28,123| -|Max log rate (MBps)|48|60|72|84|96|108| -|Max concurrent workers|800|1,000|1,200|1,400|1,600|1,800| -|Max concurrent logins|800|1,000|1,200|1,400|1,600|1,800| -|Max concurrent sessions|30000|30000|30000|30000|30000|30000| -|Number of replicas|4|4|4|4|4|4| -|Multi-AZ|No|No|No|No|No|No| -|Read Scale-out|Yes|Yes|Yes|Yes|Yes|Yes| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 For documented max data size values. Reducing max data size reduces max log size proportionally. - -2 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -### M-series compute generation (part 2) - -|Compute size (service objective)|BC_M_20|BC_M_24|BC_M_32|BC_M_64|BC_M_128| -|:---| ---:|---:|---:|---:|---:| -|Compute generation|M-series|M-series|M-series|M-series|M-series| -|vCores|20|24|32|64|128| -|Memory (GB)|588.6|706.3|941.8|1883.5|3767.0| -|Columnstore support|Yes|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|172|216|304|704|1768| -|Max data size (GB)|1280|1536|2048|4096|4096| -|Max log size (GB) 1|427|512|683|1024|1024| -|Tempdb max data size (GB)|640|768|1024|2048|4096| -|[Max local storage size](resource-limits-logical-server.md#storage-space-governance) (GB)|13836|13836|13836|13836|13836| -|Storage type|Local SSD|Local SSD|Local SSD|Local SSD|Local SSD| -|IO latency (approximate)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)| -|Max data IOPS 2|31,248|37,497|49,996|99,993|160,000| -|Max log rate (MBps)|120|144|192|264|264| -|Max concurrent workers|2,000|2,400|3,200|6,400|12,800| -|Max concurrent logins|2,000|2,400|3,200|6,400|12,800| -|Max concurrent sessions|30000|30000|30000|30000|30000| -|Number of replicas|4|4|4|4|4| -|Multi-AZ|No|No|No|No|No| -|Read Scale-out|Yes|Yes|Yes|Yes|Yes| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size|1X DB size| - -1 For documented max data size values. Reducing max data size reduces max log size proportionally. - -2 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -## Business critical - provisioned compute - DC-series - -|Compute size (service objective)|BC_DC_2|BC_DC_4|BC_DC_6|BC_DC_8| -|:--- | --: |--: |--: |--: | -|Compute generation|DC-series|DC-series|DC-series|DC-series| -|vCores|2|4|6|8| -|Memory (GB)|9|18|27|36| -|Columnstore support|Yes|Yes|Yes|Yes| -|In-memory OLTP storage (GB)|1.7|3.7|5.9|8.2| -|Max data size (GB)|768|768|768|768| -|Max log size (GB) 1|230|230|230|230| -|Tempdb max data size (GB)|64|128|192|256| -|[Max local storage size](resource-limits-logical-server.md#storage-space-governance) (GB)|1406|1406|1406|1406| -|Storage type|Local SSD|Local SSD|Local SSD|Local SSD| -|IO latency (approximate)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)|1-2 ms (write)
    1-2 ms (read)| -|Max data IOPS 2|14000|28000|42000|44800| -|Max log rate (MBps)|24|48|72|96| -|Max concurrent workers|200|400|600|800| -|Max concurrent logins|200|400|600|800| -|Max concurrent sessions|30,000|30,000|30,000|30,000| -|Number of replicas|4|4|4|4| -|Multi-AZ|No|No|No|No| -|Read Scale-out|No|No|No|No| -|Included backup storage|1X DB size|1X DB size|1X DB size|1X DB size| - -1 For documented max data size values. Reducing max data size reduces max log size proportionally. - -2 The maximum value for IO sizes ranging between 8 KB and 64 KB. Actual IOPS are workload-dependent. For details, see [Data IO Governance](resource-limits-logical-server.md#resource-governance). - -## Next steps - -- For DTU resource limits for a single database, see [resource limits for single databases using the DTU purchasing model](resource-limits-dtu-single-databases.md) -- For vCore resource limits for elastic pools, see [resource limits for elastic pools using the vCore purchasing model](resource-limits-vcore-elastic-pools.md) -- For DTU resource limits for elastic pools, see [resource limits for elastic pools using the DTU purchasing model](resource-limits-dtu-elastic-pools.md) -- For resource limits for SQL Managed Instance, see [SQL Managed Instance resource limits](../managed-instance/resource-limits.md). -- For information about general Azure limits, see [Azure subscription and service limits, quotas, and constraints](../../azure-resource-manager/management/azure-subscription-service-limits.md). -- For information about resource limits on a server, see [overview of resource limits on a server](resource-limits-logical-server.md) for information about limits at the server and subscription levels. 
diff --git a/articles/azure-sql/database/saas-dbpertenant-dr-geo-replication.md b/articles/azure-sql/database/saas-dbpertenant-dr-geo-replication.md deleted file mode 100644 index b79a438c6ea03..0000000000000 --- a/articles/azure-sql/database/saas-dbpertenant-dr-geo-replication.md +++ /dev/null @@ -1,313 +0,0 @@ ---- -title: Disaster Recovery for SaaS apps with Geo Replication -description: "Learn how to use Azure SQL Database geo-replicas to recover a multi-tenant SaaS app in the event of an outage" -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: seo-lt-2019, sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle -ms.date: 01/25/2019 ---- -# Disaster recovery for a multi-tenant SaaS application using database geo-replication -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this tutorial, you explore a full disaster recovery scenario for a multi-tenant SaaS application implemented using the database-per-tenant model. To protect the app from an outage, you use [_geo-replication_](active-geo-replication-overview.md) to create replicas for the catalog and tenant databases in an alternate recovery region. If an outage occurs, you quickly fail over to these replicas to resume normal business operations. On failover, the databases in the original region become secondary replicas of the databases in the recovery region. Once these replicas come back online they automatically catch up to the state of the databases in the recovery region. After the outage is resolved, you fail back to the databases in the original production region. - -This tutorial explores both the failover and failback workflows. 
You'll learn how to: -> [!div class="checklist"] -> -> * Sync database and elastic pool configuration info into the tenant catalog -> * Set up a recovery environment in an alternate region, comprising application, servers, and pools -> * Use _geo-replication_ to replicate the catalog and tenant databases to the recovery region -> * Fail over the application and catalog and tenant databases to the recovery region -> * Later, fail over the application, catalog and tenant databases back to the original region after the outage is resolved -> * Update the catalog as each tenant database is failed over to track the primary location of each tenant's database -> * Ensure the application and primary tenant database are always colocated in the same Azure region to reduce latency - - -Before starting this tutorial, make sure the following prerequisites are completed: -* The Wingtip Tickets SaaS database per tenant app is deployed. To deploy in less than five minutes, see [Deploy and explore the Wingtip Tickets SaaS database per tenant application](saas-dbpertenant-get-started-deploy.md) -* Azure PowerShell is installed. For details, see [Getting started with Azure PowerShell](/powershell/azure/get-started-azureps) - -## Introduction to the geo-replication recovery pattern - -![Recovery Architecture](./media/saas-dbpertenant-dr-geo-replication/recovery-architecture.png) - -Disaster recovery (DR) is an important consideration for many applications, whether for compliance reasons or business continuity. Should there be a prolonged service outage, a well-prepared DR plan can minimize business disruption. Using geo-replication provides the lowest RPO and RTO by maintaining database replicas in a recovery region that can be failed over to at short notice. 
- -A DR plan based on geo-replication comprises three distinct parts: -* Set-up - creation and maintenance of the recovery environment -* Recovery - failover of the app and databases to the recovery environment if an outage occurs, -* Repatriation - failover of the app and databases back to the original region once the application is resolved - -All parts have to be considered carefully, especially if operating at scale. Overall, the plan must accomplish several goals: - -* Setup - * Establish and maintain a mirror-image environment in the recovery region. Creating the elastic pools and replicating any databases in this recovery environment reserves capacity in the recovery region. Maintaining this environment includes replicating new tenant databases as they are provisioned. -* Recovery - * Where a scaled-down recovery environment is used to minimize day-to-day costs, pools and databases must be scaled up to acquire full operational capacity in the recovery region - * Enable new tenant provisioning in the recovery region as soon as possible - * Be optimized for restoring tenants in priority order - * Be optimized for getting tenants online as fast as possible by doing steps in parallel where practical - * Be resilient to failure, restartable, and idempotent - * Be possible to cancel the process in mid-flight if the original region comes back on-line. -* Repatriation - * Fail over databases from the recovery region to replicas in the original region with minimal impact to tenants: no data loss and minimum period off-line per tenant. - -In this tutorial, these challenges are addressed using features of Azure SQL Database and the Azure platform: - -* [Azure Resource Manager templates](../../azure-resource-manager/templates/quickstart-create-templates-use-the-portal.md), to reserve all needed capacity as quickly as possible. Azure Resource Manager templates are used to provision a mirror image of the production servers and elastic pools in the recovery region. 
-* [Geo-replication](active-geo-replication-overview.md), to create asynchronously replicated read-only secondaries for all databases. During an outage, you fail over to the replicas in the recovery region. After the outage is resolved, you fail back to the databases in the original region with no data loss. -* [Asynchronous](../../azure-resource-manager/management/async-operations.md) failover operations sent in tenant-priority order, to minimize failover time for large numbers of databases. -* [Shard management recovery features](elastic-database-recovery-manager.md), to change database entries in the catalog during recovery and repatriation. These features allow the app to connect to tenant databases regardless of location without reconfiguring the app. -* [SQL server DNS aliases](./dns-alias-overview.md), to enable seamless provisioning of new tenants regardless of which region the app is operating in. DNS aliases are also used to allow the catalog sync process to connect to the active catalog regardless of its location. - -## Get the disaster recovery scripts - -> [!IMPORTANT] -> Like all the Wingtip Tickets management scripts, the DR scripts are sample quality and are not to be used in production. - -The recovery scripts used in this tutorial and Wingtip application source code are available in the [Wingtip Tickets SaaS database per tenant GitHub repository](https://github.com/Microsoft/WingtipTicketsSaaS-DbPerTenant/). Check out the [general guidance](saas-tenancy-wingtip-app-guidance-tips.md) for steps to download and unblock the Wingtip Tickets management scripts. - -## Tutorial overview -In this tutorial, you first use geo-replication to create replicas of the Wingtip Tickets application and its databases in a different region. Then, you fail over to this region to simulate recovering from an outage. When complete, the application is fully functional in the recovery region. 
- -Later, in a separate repatriation step, you fail over the catalog and tenant databases in the recovery region to the original region. The application and databases stay available throughout repatriation. When complete, the application is fully functional in the original region. - -> [!Note] -> The application is recovered into the _paired region_ of the region in which the application is deployed. For more information, see [Azure paired regions](../../availability-zones/cross-region-replication-azure.md). - -## Review the healthy state of the application - -Before you start the recovery process, review the normal healthy state of the application. -1. In your web browser, open the Wingtip Tickets Events Hub (http://events.wingtip-dpt.<user>.trafficmanager.net - replace <user> with your deployment's user value). - * Scroll to the bottom of the page and notice the catalog server name and location in the footer. The location is the region in which you deployed the app. - *TIP: Hover the mouse over the location to enlarge the display.* - ![Events hub healthy state in original region](./media/saas-dbpertenant-dr-geo-replication/events-hub-original-region.png) - -2. Click on the Contoso Concert Hall tenant and open its event page. - * In the footer, notice the tenant server name. The location will be the same as the catalog server's location. - -3. In the [Azure portal](https://portal.azure.com), open the resource group in which the app is deployed - * Notice the region in which the servers are deployed. - -## Sync tenant configuration into catalog - -In this task, you start a process that syncs the configuration of the servers, elastic pools, and databases into the tenant catalog. The process keeps this information up-to-date in the catalog. The process works with the active catalog, whether in the original region or in the recovery region. 
The configuration information is used as part of the recovery process to ensure the recovery environment is consistent with the original environment, and then later during repatriation to ensure the original region is made consistent with any changes made in the recovery environment. The catalog is also used to keep track of the recovery state of tenant resources - -> [!IMPORTANT] -> For simplicity, the sync process and other long running recovery and repatriation processes are implemented in these tutorials as local PowerShell jobs or sessions that run under your client user login. The authentication tokens issued when you login will expire after several hours and the jobs will then fail. In a production scenario, long-running processes should be implemented as reliable Azure services of some kind, running under a service principal. See [Use Azure PowerShell to create a service principal with a certificate](../../active-directory/develop/howto-authenticate-service-principal-powershell.md). - -1. In the _PowerShell ISE_, open the ...\Learning Modules\UserConfig.psm1 file. Replace `` and `` on lines 10 and 11 with the value used when you deployed the app. Save the file! - -2. In the *PowerShell ISE*, open the ...\Learning Modules\Business Continuity and Disaster Recovery\DR-FailoverToReplica\Demo-FailoverToReplica.ps1 script and set: - * **$DemoScenario = 1**, Start a background job that syncs tenant server, and pool configuration info into the catalog - -3. Press **F5** to run the sync script. A new PowerShell session is opened to sync the configuration of tenant resources. -![Screenshot that shows the new PowerShell session that is opened to sync the configuration of tenant resources.](./media/saas-dbpertenant-dr-geo-replication/sync-process.png) - -Leave the PowerShell window running in the background and continue with the rest of the tutorial. - -> [!Note] -> The sync process connects to the catalog via a DNS alias. 
This alias is modified during restore and repatriation to point to the active catalog. The sync process keeps the catalog up-to-date with any database or pool configuration changes made in the recovery region. During repatriation, these changes are applied to the equivalent resources in the original region. - -## Create secondary database replicas in the recovery region - -In this task, you start a process that deploys a duplicate app instance and replicates the catalog and all tenant databases to a recovery region. - -> [!Note] -> This tutorial adds geo-replication protection to the Wingtip Tickets sample application. In a production scenario for an application that uses geo-replication, each tenant would be provisioned with a geo-replicated database from the outset. See [Designing highly available services using Azure SQL Database](designing-cloud-solutions-for-disaster-recovery.md#scenario-1-using-two-azure-regions-for-business-continuity-with-minimal-downtime) - -1. In the *PowerShell ISE*, open the ...\Learning Modules\Business Continuity and Disaster Recovery\DR-FailoverToReplica\Demo-FailoverToReplica.ps1 script and set the following values: - * **$DemoScenario = 2**, Create mirror image recovery environment and replicate catalog and tenant databases - -2. Press **F5** to run the script. A new PowerShell session is opened to create the replicas. -![Sync process](./media/saas-dbpertenant-dr-geo-replication/replication-process.png) - -## Review the normal application state - -At this point, the application is running normally in the original region and now is protected by geo-replication. Read-only secondary replicas, exist in the recovery region for all databases. - -1. In the Azure portal, look at your resource groups and note that a resource group has been created with -recovery suffix in the recovery region. - -2. Explore the resources in the recovery resource group. - -3. 
Click on the Contoso Concert Hall database on the _tenants1-dpt-<user>-recovery_ server. Click on Geo-Replication on the left side. - - ![Contoso Concert geo-replication link](./media/saas-dbpertenant-dr-geo-replication/contoso-geo-replication.png) - -In the Azure regions map, note the geo-replication link between the primary in the original region and the secondary in the recovery region. - -## Fail over the application into the recovery region - -### Geo-replication recovery process overview - -The recovery script performs the following tasks: - -1. Disables the Traffic Manager endpoint for the web app in the original region. Disabling the endpoint prevents users from connecting to the app in an invalid state should the original region come online during recovery. - -1. Uses a force failover of the catalog database in the recovery region to make it the primary database, and updates the _activecatalog_ alias to point to the recovery catalog server. - -1. Updates the _newtenant_ alias to point to the tenant server in the recovery region. Changing this alias ensures that the databases for any new tenants are provisioned in the recovery region. - -1. Marks all existing tenants in the recovery catalog as offline to prevent access to tenant databases before they are failed over. - -1. Updates the configuration of all elastic pools and replicated single databases in the recovery region to mirror their configuration in the original region. (This task is only needed if pools or replicated databases in the recovery environment are scaled down during normal operations to reduce costs). - -1. Enables the Traffic Manager endpoint for the web app in the recovery region. Enabling this endpoint allows the application to provision new tenants. At this stage, existing tenants are still offline. - -1. Submits batches of requests to force fail over databases in priority order. - * Batches are organized so that databases are failed over in parallel across all pools. 
- * Failover requests are submitted using asynchronous operations so they are submitted quickly and multiple requests can be processed concurrently. - - > [!Note] - > In an outage scenario, the primary databases in the original region are offline. Force fail over on the secondary breaks the connection to the primary without trying to apply any residual queued transactions. In a DR drill scenario like this tutorial, if there is any update activity at the time of failover there could be some data loss. Later, during repatriation, when you fail over databases in the recovery region back to the original region, a normal failover is used to ensure there is no data loss. - -1. Monitors the service to determine when databases have been failed over. Once a tenant database is failed over, it updates the catalog to record the recovery state of the tenant database and mark the tenant as online. - * Tenant databases can be accessed by the application as soon as they're marked online in the catalog. - * A sum of rowversion values in the tenant database is stored in the catalog. This value acts as a fingerprint that allows the repatriation process to determine if the database has been updated in the recovery region. - -### Run the script to fail over to the recovery region - -Now imagine there is an outage in the region in which the application is deployed and run the recovery script: - -1. In the *PowerShell ISE*, open the ...\Learning Modules\Business Continuity and Disaster Recovery\DR-FailoverToReplica\Demo-FailoverToReplica.ps1 script and set the following values: - * **$DemoScenario = 3**, Recover the app into a recovery region by failing over to replicas - -2. Press **F5** to run the script. - * The script opens in a new PowerShell window and then starts a series of PowerShell jobs that run in parallel. These jobs fail over tenant databases to the recovery region. 
- * The recovery region is the _paired region_ associated with the Azure region in which you deployed the application. For more information, see [Azure paired regions](../../availability-zones/cross-region-replication-azure.md). - -3. Monitor the status of the recovery process in the PowerShell window. - ![failover process](./media/saas-dbpertenant-dr-geo-replication/failover-process.png) - -> [!Note] -> To explore the code for the recovery jobs, review the PowerShell scripts in the ...\Learning Modules\Business Continuity and Disaster Recovery\DR-FailoverToReplica\RecoveryJobs folder. - -### Review the application state during recovery - -While the application endpoint is disabled in Traffic Manager, the application is unavailable. After the catalog is failed over to the recovery region and all the tenants marked offline, the application is brought back online. Although the application is available, each tenant appears offline in the events hub until its database is failed over. It's important to design your application to handle offline tenant databases. - -1. Promptly after the catalog database has been recovered, refresh the Wingtip Tickets Events Hub in your web browser. - * In the footer, notice that the catalog server name now has a _-recovery_ suffix and is located in the recovery region. - * Notice that tenants that are not yet restored, are marked as offline, and are not selectable. - - > [!Note] - > With only a few databases to recover, you may not be able to refresh the browser before recovery has completed, so you may not see the tenants while they are offline. - - ![Events hub offline](./media/saas-dbpertenant-dr-geo-replication/events-hub-offlinemode.png) - - * If you open an offline tenant's Events page directly, it displays a 'tenant offline' notification. 
For example, if Contoso Concert Hall is offline, try to open http://events.wingtip-dpt.<user>.trafficmanager.net/contosoconcerthall - ![Contoso Offline page](./media/saas-dbpertenant-dr-geo-replication/dr-in-progress-offline-contosoconcerthall.png) - -### Provision a new tenant in the recovery region -Even before all the existing tenant databases have failed over, you can provision new tenants in the recovery region. - -1. In the *PowerShell ISE*, open the ...\Learning Modules\Business Continuity and Disaster Recovery\DR-FailoverToReplica\Demo-FailoverToReplica.ps1 script and set the following property: - * **$DemoScenario = 4**, Provision a new tenant in the recovery region - -2. Press **F5** to run the script and provision the new tenant. - -3. The Hawthorn Hall events page opens in the browser when it completes. Note from the footer that the Hawthorn Hall database is provisioned in the recovery region. - ![Hawthorn Hall Events Page](./media/saas-dbpertenant-dr-geo-replication/hawthornhallevents.png) - -4. In the browser, refresh the Wingtip Tickets Events Hub page to see Hawthorn Hall included. - * If you provisioned Hawthorn Hall without waiting for the other tenants to restore, other tenants may still be offline. - - -## Review the recovered state of the application - -When the recovery process completes, the application and all tenants are fully functional in the recovery region. - -1. Once the display in the PowerShell console window indicates all the tenants are recovered, refresh the Events Hub. The tenants will all appear online, including the new tenant, Hawthorn Hall. - - ![recovered and new tenants in the events hub](./media/saas-dbpertenant-dr-geo-replication/events-hub-with-hawthorn-hall.png) - -2. In the [Azure portal](https://portal.azure.com), open the list of resource groups. - * Notice the resource group that you deployed, plus the recovery resource group, with the _-recovery_ suffix. 
The recovery resource group contains all the resources created during the recovery process, plus new resources created during the outage. - -3. Open the recovery resource group and notice the following items: - * The recovery versions of the catalog and tenants1 servers, with _-recovery_ suffix. The restored catalog and tenant databases on these servers all have the names used in the original region. - - * The _tenants2-dpt-<user>-recovery_ SQL server. This server is used for provisioning new tenants during the outage. - * The App Service named, _events-wingtip-dpt-<recoveryregion>-<user>_;, which is the recovery instance of the Events app. - - ![Azure recovery resources](./media/saas-dbpertenant-dr-geo-replication/resources-in-recovery-region.png) - -4. Open the _tenants2-dpt-<user>-recovery_ SQL server. Notice it contains the database _hawthornhall_ and the elastic pool, _Pool1_. The _hawthornhall_ database is configured as an elastic database in _Pool1_ elastic pool. - -5. Navigate back to the resource group and click on the Contoso Concert Hall database on the _tenants1-dpt-<user>-recovery_ server. Click on Geo-Replication on the left side. - - ![Contoso database after failover](./media/saas-dbpertenant-dr-geo-replication/contoso-geo-replication-after-failover.png) - -## Change tenant data -In this task, you update one of the tenant databases. - -1. In your browser, find the events list for the Contoso Concert Hall and note the last event name. -2. In the *PowerShell ISE*, in the ...\Learning Modules\Business Continuity and Disaster Recovery\DR-FailoverToReplica\Demo-FailoverToReplica.ps1 script, set the following value: - * **$DemoScenario = 5** Delete an event from a tenant in the recovery region -3. Press **F5** to execute the script -4. Refresh the Contoso Concert Hall events page (http://events.wingtip-dpt.<user>.trafficmanager.net/contosoconcerthall - substitute <user> with your deployment's user value) and notice that the last event has been deleted. 
- -## Repatriate the application to its original production region - -This task repatriates the application to its original region. In a real scenario, you would initiate repatriation when the outage is resolved. - -### Repatriation process overview - -![Repatriation Architecture](./media/saas-dbpertenant-dr-geo-replication/repatriation-architecture.png) - -The repatriation process: -1. Cancels any outstanding or in-flight database restore requests. -2. Updates the _newtenant_ alias to point to the tenants' server in the origin region. Changing this alias ensures that the databases for any new tenants will now be provisioned in the origin region. -3. Seeds any changed tenant data to the original region -4. Fails over tenant databases in priority order. - -Failover effectively moves the database to the original region. When the database fails over, any open connections are dropped and the database is unavailable for a few seconds. Applications should be written with retry logic to ensure they connect again. Although this brief disconnect is often not noticed, you may choose to repatriate databases out of business hours. - - -### Run the repatriation script -Now let's imagine the outage is resolved and run the repatriation script. - -1. In the *PowerShell ISE*, in the ...\Learning Modules\Business Continuity and Disaster Recovery\DR-FailoverToReplica\Demo-FailoverToReplica.ps1 script. - -2. Verify that the Catalog Sync process is still running in its PowerShell instance. If necessary, restart it by setting: - * **$DemoScenario = 1**, Start synchronizing tenant server, pool, and database configuration info into the catalog - * Press **F5** to run the script. - -3. Then to start the repatriation process, set: - * **$DemoScenario = 6**, Repatriate the app into its original region - * Press **F5** to run the recovery script in a new PowerShell window. Repatriation will take several minutes and can be monitored in the PowerShell window. 
- ![Repatriation process](./media/saas-dbpertenant-dr-geo-replication/repatriation-process.png) - -4. While the script is running, refresh the Events Hub page (http://events.wingtip-dpt.<user>.trafficmanager.net) - * Notice that all the tenants are online and accessible throughout this process. - -5. After the repatriation is complete, refresh the Events hub and open the events page for Hawthorn Hall. Notice that this database has been repatriated to the original region. - ![Events hub repatriated](./media/saas-dbpertenant-dr-geo-replication/events-hub-repatriated.png) - - -## Designing the application to ensure app and database are colocated -The application is designed so that it always connects from an instance in the same region as the tenant database. This design reduces latency between the application and the database. This optimization assumes the app-to-database interaction is chattier than the user-to-app interaction. - -Tenant databases may be spread across recovery and original regions for some time during repatriation. For each database, the app looks up the region in which the database is located by doing a DNS lookup on the tenant server name. In SQL Database, the server name is an alias. The aliased server name contains the region name. If the application isn't in the same region as the database, it redirects to the instance in the same region as the server. Redirecting to instance in the same region as the database minimizes latency between app and database. 
- -## Next steps - -In this tutorial you learned how to: -> [!div class="checklist"] -> -> * Sync database and elastic pool configuration info into the tenant catalog -> * Set up a recovery environment in an alternate region, comprising application, servers, and pools -> * Use _geo-replication_ to replicate the catalog and tenant databases to the recovery region -> * Fail over the application and catalog and tenant databases to the recovery region -> * Fail back the application, catalog and tenant databases to the original region after the outage is resolved - -You can learn more about the technologies Azure SQL Database provides to enable business continuity in the [Business Continuity Overview](business-continuity-high-availability-disaster-recover-hadr-overview.md) documentation. - -## Additional resources - -* [Additional tutorials that build upon the Wingtip SaaS application](saas-dbpertenant-wingtip-app-overview.md#sql-database-wingtip-saas-tutorials) \ No newline at end of file diff --git a/articles/azure-sql/database/saas-dbpertenant-dr-geo-restore.md b/articles/azure-sql/database/saas-dbpertenant-dr-geo-restore.md deleted file mode 100644 index 38dc574bf0b9e..0000000000000 --- a/articles/azure-sql/database/saas-dbpertenant-dr-geo-restore.md +++ /dev/null @@ -1,376 +0,0 @@ ---- -title: "SaaS apps: Geo-redundant backups for disaster recovery" -description: "Learn to use Azure SQL Database geo-redundant backups to recover a multitenant SaaS app in the event of an outage" -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: seo-lt-2019, sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle - -ms.date: 01/14/2019 ---- -# Use geo-restore to recover a multitenant SaaS application from database backups -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This tutorial explores a full disaster recovery scenario for a multitenant SaaS application implemented with the database per tenant 
model. You use [geo-restore](recovery-using-backups.md) to recover the catalog and tenant databases from automatically maintained geo-redundant backups into an alternate recovery region. After the outage is resolved, you use [geo-replication](active-geo-replication-overview.md) to repatriate changed databases to their original region. - -![Diagram shows an original and recovery regions, both of which have an app, catalog, original or mirror images of servers and pools, automatic backups to storage, with the recovery region accepting geo-replication of backup and having server and pool for new tenants.](./media/saas-dbpertenant-dr-geo-restore/geo-restore-architecture.png) - -Geo-restore is the lowest-cost disaster recovery solution for Azure SQL Database. However, restoring from geo-redundant backups can result in data loss of up to one hour. It can take considerable time, depending on the size of each database. - -> [!NOTE] -> Recover applications with the lowest possible RPO and RTO by using geo-replication instead of geo-restore. - -This tutorial explores both restore and repatriation workflows. You learn how to: -> [!div class="checklist"] -> -> * Sync database and elastic pool configuration info into the tenant catalog. -> * Set up a mirror image environment in a recovery region that includes application, servers, and pools. -> * Recover catalog and tenant databases by using geo-restore. -> * Use geo-replication to repatriate the tenant catalog and changed tenant databases after the outage is resolved. -> * Update the catalog as each database is restored (or repatriated) to track the current location of the active copy of each tenant's database. -> * Ensure that the application and tenant database are always co-located in the same Azure region to reduce latency. - - -Before you start this tutorial, complete the following prerequisites: -* Deploy the Wingtip Tickets SaaS database per tenant app. 
To deploy in less than five minutes, see [Deploy and explore the Wingtip Tickets SaaS database per tenant application](saas-dbpertenant-get-started-deploy.md). -* Install Azure PowerShell. For details, see [Getting started with Azure PowerShell](/powershell/azure/get-started-azureps). - -## Introduction to the geo-restore recovery pattern - -Disaster recovery (DR) is an important consideration for many applications, whether for compliance reasons or business continuity. If there's a prolonged service outage, a well-prepared DR plan can minimize business disruption. A DR plan based on geo-restore must accomplish several goals: - * Reserve all needed capacity in the chosen recovery region as quickly as possible to ensure that it's available to restore tenant databases. - * Establish a mirror image recovery environment that reflects the original pool and database configuration. - * Allow cancellation of the restore process in mid-flight if the original region comes back online. - * Enable tenant provisioning quickly so new tenant onboarding can restart as soon as possible. - * Be optimized to restore tenants in priority order. - * Be optimized to get tenants online as soon as possible by doing steps in parallel where practical. - * Be resilient to failure, restartable, and idempotent. - * Repatriate databases to their original region with minimal impact to tenants when the outage is resolved. - -> [!NOTE] -> The application is recovered into the paired region of the region in which the application is deployed. For more information, see [Azure paired regions](../../availability-zones/cross-region-replication-azure.md). - -This tutorial uses features of Azure SQL Database and the Azure platform to address these challenges: - -* [Azure Resource Manager templates](../../azure-resource-manager/templates/quickstart-create-templates-use-the-portal.md), to reserve all needed capacity as quickly as possible. 
Azure Resource Manager templates are used to provision a mirror image of the original servers and elastic pools in the recovery region. A separate server and pool are also created for provisioning new tenants. -* [Elastic Database Client Library](elastic-database-client-library.md) (EDCL), to create and maintain a tenant database catalog. The extended catalog includes periodically refreshed pool and database configuration information. -* [Shard management recovery features](elastic-database-recovery-manager.md) of the EDCL, to maintain database location entries in the catalog during recovery and repatriation. -* [Geo-restore](recovery-using-backups.md#geo-restore), to recover the catalog and tenant databases from automatically maintained geo-redundant backups. -* [Asynchronous restore operations](../../azure-resource-manager/management/async-operations.md), sent in tenant-priority order, are queued for each pool by the system and processed in batches so the pool isn't overloaded. These operations can be canceled before or during execution if necessary. -* [Geo-replication](active-geo-replication-overview.md), to repatriate databases to the original region after the outage. There is no data loss and minimal impact on the tenant when you use geo-replication. -* [SQL server DNS aliases](./dns-alias-overview.md), to allow the catalog sync process to connect to the active catalog regardless of its location. - -## Get the disaster recovery scripts - -The DR scripts used in this tutorial are available in the [Wingtip Tickets SaaS database per tenant GitHub repository](https://github.com/Microsoft/WingtipTicketsSaaS-DbPerTenant). Check out the [general guidance](saas-tenancy-wingtip-app-guidance-tips.md) for steps to download and unblock the Wingtip Tickets management scripts. - -> [!IMPORTANT] -> Like all the Wingtip Tickets management scripts, the DR scripts are sample quality and are not to be used in production. 
- -## Review the healthy state of the application -Before you start the recovery process, review the normal healthy state of the application. - -1. In your web browser, open the Wingtip Tickets events hub (http://events.wingtip-dpt.<user>.trafficmanager.net, replace <user> with your deployment's user value). - - Scroll to the bottom of the page and notice the catalog server name and location in the footer. The location is the region in which you deployed the app. - - > [!TIP] - > Hover the mouse over the location to enlarge the display. - - ![Events hub healthy state in original region](./media/saas-dbpertenant-dr-geo-restore/events-hub-original-region.png) - -2. Select the Contoso Concert Hall tenant and open its event page. - - In the footer, notice the tenant's server name. The location is the same as the catalog server's location. - - ![Contoso Concert Hall original region](./media/saas-dbpertenant-dr-geo-restore/contoso-original-location.png) - -3. In the [Azure portal](https://portal.azure.com), review and open the resource group in which you deployed the app. - - Notice the resources and the region in which the app service components and SQL Database is deployed. - -## Sync the tenant configuration into the catalog - -In this task, you start a process to sync the configuration of the servers, elastic pools, and databases into the tenant catalog. This information is used later to configure a mirror image environment in the recovery region. - -> [!IMPORTANT] -> For simplicity, the sync process and other long-running recovery and repatriation processes are implemented in these samples as local PowerShell jobs or sessions that run under your client user login. The authentication tokens issued when you log in expire after several hours, and the jobs will then fail. -> In a production scenario, long-running processes should be implemented as reliable Azure services of some kind, running under a service principal. 
See [Use Azure PowerShell to create a service principal with a certificate](../../active-directory/develop/howto-authenticate-service-principal-powershell.md). - -1. In the PowerShell ISE, open the ...\Learning Modules\UserConfig.psm1 file. Replace the resource group and user values on lines 10 and 11 with the values used when you deployed the app. Save the file. - -2. In the PowerShell ISE, open the ...\Learning Modules\Business Continuity and Disaster Recovery\DR-RestoreFromBackup\Demo-RestoreFromBackup.ps1 script. - - In this tutorial, you run each of the scenarios in this PowerShell script, so keep this file open. - -3. Set the following: - - $DemoScenario = 1: Start a background job that syncs tenant server and pool configuration info into the catalog. - -4. To run the sync script, select F5. - - This information is used later to ensure that recovery creates a mirror image of the servers, pools, and databases in the recovery region. - - ![Sync process](./media/saas-dbpertenant-dr-geo-restore/sync-process.png) - -Leave the PowerShell window running in the background and continue with the rest of this tutorial. - -> [!NOTE] -> The sync process connects to the catalog via a DNS alias. The alias is modified during restore and repatriation to point to the active catalog. The sync process keeps the catalog up to date with any database or pool configuration changes made in the recovery region. During repatriation, these changes are applied to the equivalent resources in the original region. - -## Geo-restore recovery process overview - -The geo-restore recovery process deploys the application and restores databases from backups into the recovery region. - -The recovery process does the following: - -1. Disables the Azure Traffic Manager endpoint for the web app in the original region. Disabling the endpoint prevents users from connecting to the app in an invalid state should the original region come online during recovery. - -2. 
Provisions a recovery catalog server in the recovery region, geo-restores the catalog database, and updates the activecatalog alias to point to the restored catalog server. Changing the catalog alias ensures that the catalog sync process always syncs to the active catalog. - -3. Marks all existing tenants in the recovery catalog as offline to prevent access to tenant databases before they are restored. - -4. Provisions an instance of the app in the recovery region and configures it to use the restored catalog in that region. To keep latency to a minimum, the sample app is designed to always connect to a tenant database in the same region. - -5. Provisions a server and elastic pool in which new tenants are provisioned. Creating these resources ensures that provisioning new tenants doesn't interfere with the recovery of existing tenants. - -6. Updates the new tenant alias to point to the server for new tenant databases in the recovery region. Changing this alias ensures that databases for any new tenants are provisioned in the recovery region. - -7. Provisions servers and elastic pools in the recovery region for restoring tenant databases. These servers and pools are a mirror image of the configuration in the original region. Provisioning pools up front reserves the capacity needed to restore all the databases. - - An outage in a region might place significant pressure on the resources available in the paired region. If you rely on geo-restore for DR, then reserving resources quickly is recommended. Consider geo-replication if it's critical that an application is recovered in a specific region. - -8. Enables the Traffic Manager endpoint for the web app in the recovery region. Enabling this endpoint allows the application to provision new tenants. At this stage, existing tenants are still offline. - -9. Submits batches of requests to restore databases in priority order. - - * Batches are organized so that databases are restored in parallel across all pools. 
- - * Restore requests are submitted asynchronously so they are submitted quickly and queued for execution in each pool. - - * Because restore requests are processed in parallel across all pools, it's better to distribute important tenants across many pools. - -10. Monitors the service to determine when databases are restored. After a tenant database is restored, it's marked online in the catalog, and a rowversion sum for the tenant database is recorded. - - * Tenant databases can be accessed by the application as soon as they're marked online in the catalog. - - * A sum of rowversion values in the tenant database is stored in the catalog. This sum acts as a fingerprint that allows the repatriation process to determine if the database was updated in the recovery region. - -## Run the recovery script - -> [!IMPORTANT] -> This tutorial restores databases from geo-redundant backups. Although these backups are typically available within 10 minutes, it can take up to an hour. The script pauses until they're available. - -Imagine there's an outage in the region in which the application is deployed, and run the recovery script: - -1. In the PowerShell ISE, in the ...\Learning Modules\Business Continuity and Disaster Recovery\DR-RestoreFromBackup\Demo-RestoreFromBackup.ps1 script, set the following value: - - $DemoScenario = 2: Recover the app into a recovery region by restoring from geo-redundant backups. - -2. To run the script, select F5. - - * The script opens in a new PowerShell window and then starts a set of PowerShell jobs that run in parallel. These jobs restore servers, pools, and databases to the recovery region. - - * The recovery region is the paired region associated with the Azure region in which you deployed the application. For more information, see [Azure paired regions](../../availability-zones/cross-region-replication-azure.md). - -3. Monitor the status of the recovery process in the PowerShell window. 
- - ![Screenshot that shows the PowerShell window where you can monitor the status of the recovery process.](./media/saas-dbpertenant-dr-geo-restore/dr-in-progress.png) - -> [!NOTE] -> To explore the code for the recovery jobs, review the PowerShell scripts in the ...\Learning Modules\Business Continuity and Disaster Recovery\DR-RestoreFromBackup\RecoveryJobs folder. - -## Review the application state during recovery -While the application endpoint is disabled in Traffic Manager, the application is unavailable. The catalog is restored, and all the tenants are marked offline. The application endpoint in the recovery region is then enabled, and the application is back online. Although the application is available, tenants appear offline in the events hub until their databases are restored. It's important to design your application to handle offline tenant databases. - -* After the catalog database has been recovered but before the tenants are back online, refresh the Wingtip Tickets events hub in your web browser. - - * In the footer, notice that the catalog server name now has a -recovery suffix and is located in the recovery region. - - * Notice that tenants that are not yet restored are marked as offline and are not selectable. - - ![Recovery process](./media/saas-dbpertenant-dr-geo-restore/events-hub-tenants-offline-in-recovery-region.png) - - * If you open a tenant's events page directly while the tenant is offline, the page displays a tenant offline notification. For example, if Contoso Concert Hall is offline, try to open http://events.wingtip-dpt.<user>.trafficmanager.net/contosoconcerthall. - - ![Screenshot that shows an offline events page.](./media/saas-dbpertenant-dr-geo-restore/dr-in-progress-offline-contosoconcerthall.png) - -## Provision a new tenant in the recovery region -Even before tenant databases are restored, you can provision new tenants in the recovery region. 
New tenant databases provisioned in the recovery region are repatriated with the recovered databases later. - -1. In the PowerShell ISE, in the ...\Learning Modules\Business Continuity and Disaster Recovery\DR-RestoreFromBackup\Demo-RestoreFromBackup.ps1 script, set the following property: - - $DemoScenario = 3: Provision a new tenant in the recovery region. - -2. To run the script, select F5. - -3. The Hawthorn Hall events page opens in the browser when provisioning finishes. - - Notice that the Hawthorn Hall database is located in the recovery region. - - ![Hawthorn Hall provisioned in the recovery region](./media/saas-dbpertenant-dr-geo-restore/hawthorn-hall-provisioned-in-recovery-region.png) - -4. In the browser, refresh the Wingtip Tickets events hub page to see Hawthorn Hall included. - - If you provisioned Hawthorn Hall without waiting for the other tenants to restore, other tenants might still be offline. - -## Review the recovered state of the application - -When the recovery process finishes, the application and all tenants are fully functional in the recovery region. - -1. After the display in the PowerShell console window indicates all the tenants are recovered, refresh the events hub. - - The tenants all appear online, including the new tenant, Hawthorn Hall. - - ![Recovered and new tenants in the events hub](./media/saas-dbpertenant-dr-geo-restore/events-hub-with-hawthorn-hall.png) - -2. Click on Contoso Concert Hall and open its events page. - - In the footer, notice that the database is located on the recovery server located in the recovery region. - - ![Contoso in the recovery region](./media/saas-dbpertenant-dr-geo-restore/contoso-recovery-location.png) - -3. In the [Azure portal](https://portal.azure.com), open the list of resource groups. - - Notice the resource group that you deployed, plus the recovery resource group, with the -recovery suffix. 
The recovery resource group contains all the resources created during the recovery process, plus new resources created during the outage. - -4. Open the recovery resource group and notice the following items: - - * The recovery versions of the catalog and tenants1 servers, with the -recovery suffix. The restored catalog and tenant databases on these servers all have the names used in the original region. - - * The tenants2-dpt-<user>-recovery SQL server. This server is used for provisioning new tenants during the outage. - - * The app service named events-wingtip-dpt-<recoveryregion>-<user>, which is the recovery instance of the events app. - - ![Contoso resources in the recovery region](./media/saas-dbpertenant-dr-geo-restore/resources-in-recovery-region.png) - -5. Open the tenants2-dpt-<user>-recovery SQL server. Notice that it contains the database hawthornhall and the elastic pool Pool1. The hawthornhall database is configured as an elastic database in the Pool1 elastic pool. - -## Change the tenant data -In this task, you update one of the restored tenant databases. The repatriation process copies restored databases that have been changed to the original region. - -1. In your browser, find the events list for the Contoso Concert Hall, scroll through the events, and notice the last event, Seriously Strauss. - -2. In the PowerShell ISE, in the ...\Learning Modules\Business Continuity and Disaster Recovery\DR-RestoreFromBackup\Demo-RestoreFromBackup.ps1 script, set the following value: - - $DemoScenario = 4: Delete an event from a tenant in the recovery region. - -3. To execute the script, select F5. - -4. Refresh the Contoso Concert Hall events page (http://events.wingtip-dpt.<user>.trafficmanager.net/contosoconcerthall), and notice that the event Seriously Strauss is missing. - -At this point in the tutorial, you have recovered the application, which is now running in the recovery region. 
You have provisioned a new tenant in the recovery region and modified data of one of the restored tenants. - -> [!NOTE] -> Other tutorials in the sample are not designed to run with the app in the recovery state. If you want to explore other tutorials, be sure to repatriate the application first. - -## Repatriation process overview - -The repatriation process reverts the application and its databases to its original region after an outage is resolved. - -![Geo-restore repatriation](./media/saas-dbpertenant-dr-geo-restore/geo-restore-repatriation.png) - -The process: - -1. Stops any ongoing restore activity and cancels any outstanding or in-flight database restore requests. - -2. Reactivates in the original region tenant databases that have not been changed since the outage. These databases include those not recovered yet and those recovered but not changed afterward. The reactivated databases are exactly as last accessed by their tenants. - -3. Provisions a mirror image of the new tenant's server and elastic pool in the original region. After this action is complete, the new tenant alias is updated to point to this server. Updating the alias causes new tenant onboarding to occur in the original region instead of the recovery region. - -4. Uses geo-replication to move the catalog to the original region from the recovery region. - -5. Updates pool configuration in the original region so it's consistent with changes that were made in the recovery region during the outage. - -6. Creates the required servers and pools to host any new databases created during the outage. - -7. Uses geo-replication to repatriate restored tenant databases that have been updated post-restore and all new tenant databases provisioned during the outage. - -8. Cleans up resources created in the recovery region during the restore process. - -To limit the number of tenant databases that need to be repatriated, steps 1 to 3 are done promptly. 
- -Step 4 is only done if the catalog in the recovery region has been modified during the outage. The catalog is updated if new tenants are created or if any database or pool configuration is changed in the recovery region. - -It's important that step 7 causes minimal disruption to tenants and no data is lost. To achieve this goal, the process uses geo-replication. - -Before each database is geo-replicated, the corresponding database in the original region is deleted. The database in the recovery region is then geo-replicated, creating a secondary replica in the original region. After replication is complete, the tenant is marked offline in the catalog, which breaks any connections to the database in the recovery region. The database is then failed over, causing any pending transactions to process on the secondary so no data is lost. - -On failover, the database roles are reversed. The secondary in the original region becomes the primary read-write database, and the database in the recovery region becomes a read-only secondary. The tenant entry in the catalog is updated to reference the database in the original region, and the tenant is marked online. At this point, repatriation of the database is complete. - -Applications should be written with retry logic to ensure that they reconnect automatically when connections are broken. When they use the catalog to broker the reconnection, they connect to the repatriated database in the original region. Although the brief disconnect is often not noticed, you might choose to repatriate databases out of business hours. - -After a database is repatriated, the secondary database in the recovery region can be deleted. The database in the original region then relies again on geo-restore for DR protection. - -In step 8, resources in the recovery region, including the recovery servers and pools, are deleted. - -## Run the repatriation script -Let's imagine the outage is resolved and run the repatriation script. 
- -If you've followed the tutorial, the script immediately reactivates Fabrikam Jazz Club and Dogwood Dojo in the original region because they're unchanged. It then repatriates the new tenant, Hawthorn Hall, and Contoso Concert Hall because it has been modified. The script also repatriates the catalog, which was updated when Hawthorn Hall was provisioned. - -1. In the PowerShell ISE, in the ...\Learning Modules\Business Continuity and Disaster Recovery\DR-RestoreFromBackup\Demo-RestoreFromBackup.ps1 script, verify that the Catalog Sync process is still running in its PowerShell instance. If necessary, restart it by setting: - - $DemoScenario = 1: Start synchronizing tenant server, pool, and database configuration info into the catalog. - - To run the script, select F5. - -2. Then to start the repatriation process, set: - - $DemoScenario = 5: Repatriate the app into its original region. - - To run the recovery script in a new PowerShell window, select F5. Repatriation takes several minutes and can be monitored in the PowerShell window. - -3. While the script is running, refresh the events hub page (http://events.wingtip-dpt.<user>.trafficmanager.net). - - Notice that all the tenants are online and accessible throughout this process. - -4. Select the Fabrikam Jazz Club to open it. If you didn't modify this tenant, notice from the footer that the server is already reverted to the original server. - -5. Open or refresh the Contoso Concert Hall events page. Notice from the footer that, initially, the database is still on the -recovery server. - -6. Refresh the Contoso Concert Hall events page when the repatriation process finishes, and notice that the database is now in your original region. - -7. Refresh the events hub again and open Hawthorn Hall. Notice that its database is also located in the original region. - -## Clean up recovery region resources after repatriation -After repatriation is complete, it's safe to delete the resources in the recovery region. 
- -> [!IMPORTANT] -> Delete these resources promptly to stop all billing for them. - -The restore process creates all the recovery resources in a recovery resource group. The cleanup process deletes this resource group and removes all references to the resources from the catalog. - -1. In the PowerShell ISE, in the ...\Learning Modules\Business Continuity and Disaster Recovery\DR-RestoreFromBackup\Demo-RestoreFromBackup.ps1 script, set: - - $DemoScenario = 6: Delete obsolete resources from the recovery region. - -2. To run the script, select F5. - -After cleaning up the scripts, the application is back where it started. At this point, you can run the script again or try out other tutorials. - -## Designing the application to ensure that the app and the database are co-located -The application is designed to always connect from an instance in the same region as the tenant's database. This design reduces latency between the application and the database. This optimization assumes the app-to-database interaction is chattier than the user-to-app interaction. - -Tenant databases might be spread across recovery and original regions for some time during repatriation. For each database, the app looks up the region in which the database is located by doing a DNS lookup on the tenant server name. The server name is an alias. The aliased server name contains the region name. If the application isn't in the same region as the database, it redirects to the instance in the same region as the server. Redirecting to the instance in the same region as the database minimizes latency between the app and the database. - -## Next steps - -In this tutorial, you learned how to: -> [!div class="checklist"] -> -> * Use the tenant catalog to hold periodically refreshed configuration information, which allows a mirror image recovery environment to be created in another region. -> * Recover databases into the recovery region by using geo-restore. 
-> * Update the tenant catalog to reflect restored tenant database locations. -> * Use a DNS alias to enable an application to connect to the tenant catalog throughout without reconfiguration. -> * Use geo-replication to repatriate recovered databases to their original region after an outage is resolved. - -Try the [Disaster recovery for a multitenant SaaS application using database geo-replication](./saas-dbpertenant-dr-geo-replication.md) tutorial to learn how to use geo-replication to dramatically reduce the time needed to recover a large-scale multitenant application. - -## Additional resources - -[Additional tutorials that build upon the Wingtip SaaS application](saas-dbpertenant-wingtip-app-overview.md#sql-database-wingtip-saas-tutorials) \ No newline at end of file diff --git a/articles/azure-sql/database/saas-dbpertenant-get-started-deploy.md b/articles/azure-sql/database/saas-dbpertenant-get-started-deploy.md deleted file mode 100644 index 9e33410f3a8f6..0000000000000 --- a/articles/azure-sql/database/saas-dbpertenant-get-started-deploy.md +++ /dev/null @@ -1,271 +0,0 @@ ---- -title: Database-per-tenant SaaS tutorial -description: Deploy and explore the Wingtip Tickets SaaS multitenant application that demonstrates the database-per-tenant pattern and other SaaS patterns by using Azure SQL Database. -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 01/25/2019 ---- -# Deploy and explore a multitenant SaaS app that uses the database-per-tenant pattern with Azure SQL Database - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this tutorial, you deploy and explore the Wingtip Tickets SaaS database-per-tenant application (Wingtip). The app uses a database-per-tenant pattern to store the data of multiple tenants. 
The app is designed to showcase features of Azure SQL Database that simplify how to enable SaaS scenarios. - -Five minutes after you select **Deploy to Azure**, you have a multitenant SaaS application. The app includes a database that runs in Azure SQL Database. The app is deployed with three sample tenants, each with its own database. All the databases are deployed into a SQL elastic pool. The app is deployed to your Azure subscription. You have full access to explore and work with the individual components of the app. The application C# source code and the management scripts are available in the [WingtipTicketsSaaS-DbPerTenant GitHub repo][github-wingtip-dpt]. - -In this tutorial, you learn: - -> [!div class="checklist"] -> - How to deploy the Wingtip SaaS application. -> - Where to get the application source code and management scripts. -> - About the servers, pools, and databases that make up the app. -> - How tenants are mapped to their data with the *catalog*. -> - How to provision a new tenant. -> - How to monitor tenant activity in the app. - -A [series of related tutorials](saas-dbpertenant-wingtip-app-overview.md#sql-database-wingtip-saas-tutorials) offers the opportunity to explore various SaaS design and management patterns. The tutorials build beyond this initial deployment. When you use the tutorials, you can examine the provided scripts to see how the different SaaS patterns are implemented. The scripts demonstrate how features of SQL Database simplify the development of SaaS applications. - -## Prerequisites - -To complete this tutorial, make sure Azure PowerShell is installed. For more information, see [Get started with Azure PowerShell](/powershell/azure/get-started-azureps). - -## Deploy the Wingtip Tickets SaaS application - -### Plan the names - -In the steps of this section, you provide a user value that is used to make sure resource names are globally unique. 
You also provide a name for the resource group that contains all the resources created by a deployment of the app. For a fictitious person named Ann Finley, we suggest: - -- **User**: *af1* is made up of Ann Finley's initials plus a digit. If you deploy the app a second time, use a different value. An example is af2. -- **Resource group**: *wingtip-dpt-af1* indicates this is the database-per-tenant app. Append the user name af1 to correlate the resource group name with the names of the resources it contains. - -Choose your names now, and write them down. - -### Steps - -1. To open the Wingtip Tickets SaaS database-per-tenant deployment template in the Azure portal, select **Deploy to Azure**. - - [![Image showing a button labeled "Deploy to Azure".](../../media/template-deployments/deploy-to-azure.svg)](https://aka.ms/deploywingtipdpt) - -1. Enter values in the template for the required parameters. - - > [!IMPORTANT] - > Some authentication and server firewalls are intentionally unsecured for demonstration purposes. We recommend that you create a new resource group. Don't use existing resource groups, servers, or pools. Don't use this application, scripts, or any deployed resources for production. Delete this resource group when you're finished with the application to stop related billing. - - - **Resource group**: Select **Create new**, and provide the unique name you chose earlier for the resource group. - - **Location**: Select a location from the drop-down list. - - **User**: Use the user name value you chose earlier. - -1. Deploy the application. - - a. Select to agree to the terms and conditions. - - b. Select **Purchase**. - -1. To monitor deployment status, select **Notifications** (the bell icon to the right of the search box). Deploying the Wingtip Tickets SaaS app takes approximately five minutes. 
- - ![Deployment succeeded](./media/saas-dbpertenant-get-started-deploy/succeeded.png) - -## Download and unblock the Wingtip Tickets management scripts - -While the application deploys, download the source code and management scripts. - -> [!IMPORTANT] -> Executable contents (scripts and DLLs) might be blocked by Windows when .zip files are downloaded from an external source and extracted. Follow the steps to unblock the .zip file before you extract the scripts. Unblocking makes sure the scripts are allowed to run. - -1. Browse to the [WingtipTicketsSaaS-DbPerTenant GitHub repo][github-wingtip-dpt]. -1. Select **Clone or download**. -1. Select **Download ZIP**, and then save the file. -1. Right-click the **WingtipTicketsSaaS-DbPerTenant-master.zip** file, and then select **Properties**. -1. On the **General** tab, select **Unblock** > **Apply**. -1. Select **OK**, and extract the files - -Scripts are located in the ...\\WingtipTicketsSaaS-DbPerTenant-master\\Learning Modules folder. - -## Update the user configuration file for this deployment - -Before you run any scripts, update the resource group and user values in the User Config file. Set these variables to the values you used during deployment. - -1. In the PowerShell ISE, open ...\\Learning Modules\\**UserConfig.psm1** -1. Update **ResourceGroupName** and **Name** with the specific values for your deployment (on lines 10 and 11 only). -1. Save the changes. - -These values are referenced in nearly every script. - -## Run the application - -The app showcases venues that host events. Venue types include concert halls, jazz clubs, and sports clubs. In Wingtip Tickets, venues are registered as tenants. Being a tenant gives a venue an easy way to list events and to sell tickets to their customers. Each venue gets a personalized website to list their events and to sell tickets. - -Internally in the app, each tenant gets a database deployed into an elastic pool. 
- -A central **Events Hub** page provides a list of links to the tenants in your deployment. - -1. Use the URL to open the Events Hub in your web browser: http://events.wingtip-dpt.<user>.trafficmanager.net. Substitute <user> with your deployment's user value. - - ![Events Hub](./media/saas-dbpertenant-get-started-deploy/events-hub.png) - -2. Select **Fabrikam Jazz Club** in the Events Hub. - - ![Events](./media/saas-dbpertenant-get-started-deploy/fabrikam.png) - -### Azure Traffic Manager - -The Wingtip application uses [*Azure Traffic Manager*](../../traffic-manager/traffic-manager-overview.md) to control the distribution of incoming requests. The URL to access the events page for a specific tenant uses the following format: - -- http://events.wingtip-dpt.<user>.trafficmanager.net/fabrikamjazzclub - - The parts of the preceding format are explained in the following table. - - | URL part | Description | - | :-------------- | :---------------- | - | events.wingtip-dpt | The events parts of the Wingtip app.

    *-dpt* distinguishes the *database-per-tenant* implementation of Wingtip Tickets from other implementations. Examples are the *single* app-per-tenant (*-sa*) or *multitenant database* (*-mt*) implementations. | - | .*<user>* | *af1* in the example. | - | .trafficmanager.net/ | Traffic Manager, base URL. | - | fabrikamjazzclub | Identifies the tenant named Fabrikam Jazz Club. | - - -- The tenant name is parsed from the URL by the events app. -- The tenant name is used to create a key. -- The key is used to access the catalog to obtain the location of the tenant's database. - - The catalog is implemented by using *shard map management*. -- The Events Hub uses extended metadata in the catalog to construct the list-of-events page URLs for each tenant. - -In a production environment, typically you create a CNAME DNS record to [*point a company internet domain*](../../traffic-manager/traffic-manager-point-internet-domain.md) to the Traffic Manager DNS name. - -> [!NOTE] -> It may not be immediately obvious what the use of the traffic manager is in this tutorial. The goal of this series of tutorials is to showcase patterns that can handle the scale of a complex production environment. In such a case, for example, you would have multiple web apps distributed across the globe, co-located with databases and you would need traffic manager to route between these instances. -Another set of tutorials that illustrates the use of traffic manager though are the [geo-restore](./saas-dbpertenant-dr-geo-restore.md) and the [geo-replication](./saas-dbpertenant-dr-geo-replication.md) tutorials. In these tutorials, traffic manager is used to help to switch over to a recovery instance of the SaaS app in the event of a regional outage. - -## Start generating load on the tenant databases - -Now that the app is deployed, let's put it to work. - -The *Demo-LoadGenerator* PowerShell script starts a workload that runs against all tenant databases. 
The real-world load on many SaaS apps is sporadic and unpredictable. To simulate this type of load, the generator produces a load with randomized spikes or bursts of activity on each tenant. The bursts occur at randomized intervals. It takes several minutes for the load pattern to emerge. Let the generator run for at least three or four minutes before you monitor the load. - -1. In the PowerShell ISE, open the ...\\Learning Modules\\Utilities\\*Demo-LoadGenerator.ps1* script. -2. Press F5 to run the script and start the load generator. Leave the default parameter values for now. -3. Sign in to your Azure account, and select the subscription you want to use, if necessary. - -The load generator script starts a background job for each database in the catalog and then stops. If you rerun the load generator script, it stops any background jobs that are running before it starts new ones. - -### Monitor the background jobs - -If you want to control and monitor the background jobs, use the following cmdlets: - -- `Get-Job` -- `Receive-Job` -- `Stop-Job` - -### Demo-LoadGenerator.ps1 actions - -*Demo-LoadGenerator.ps1* mimics an active workload of customer transactions. The following steps describe the sequence of actions that *Demo-LoadGenerator.ps1* initiates: - -1. *Demo-LoadGenerator.ps1* starts *LoadGenerator.ps1* in the foreground. - - - Both .ps1 files are stored under the folders Learning Modules\\Utilities\\. - -2. *LoadGenerator.ps1* loops through all tenant databases in the catalog. - -3. *LoadGenerator.ps1* starts a background PowerShell job for each tenant database: - - - By default, the background jobs run for 120 minutes. - - Each job causes a CPU-based load on one tenant database by executing *sp_CpuLoadGenerator*. The intensity and duration of the load varies depending on `$DemoScenario`. - - *sp_CpuLoadGenerator* loops around a SQL SELECT statement that causes a high CPU load. 
The time interval between issues of the SELECT varies according to parameter values to create a controllable CPU load. Load levels and intervals are randomized to simulate more realistic loads. - - This .sql file is stored under *WingtipTenantDB\\dbo\\StoredProcedures\\*. - -4. If `$OneTime = $false`, the load generator starts the background jobs and then continues to run. Every 10 seconds, it monitors for any new tenants that are provisioned. If you set `$OneTime = $true`, the LoadGenerator starts the background jobs and then stops running in the foreground. For this tutorial, leave `$OneTime = $false`. - - Use Ctrl-C or Stop Operation Ctrl-Break if you want to stop or restart the load generator. - - If you leave the load generator running in the foreground, use another PowerShell ISE instance to run other PowerShell scripts. - -  - -Before you continue with the next section, leave the load generator running in the job-invoking state. - -## Provision a new tenant - -The initial deployment creates three sample tenants. Now you create another tenant to see the impact on the deployed application. In the Wingtip app, the workflow to provision new tenants is explained in the [Provision and catalog tutorial](saas-dbpertenant-provision-and-catalog.md). In this phase, you create a new tenant, which takes less than one minute. - -1. Open a new PowerShell ISE. -2. Open ...\\Learning Modules\Provision and Catalog\\*Demo-ProvisionAndCatalog.ps1*. -3. To run the script, press F5. Leave the default values for now. - - > [!NOTE] - > Many Wingtip SaaS scripts use *$PSScriptRoot* to browse folders to call functions in other scripts. This variable is evaluated only when the full script is executed by pressing F5. Highlighting and running a selection with F8 can result in errors. To run the scripts, press F5. - -The new tenant database is: - -- Created in an SQL elastic pool. -- Initialized. -- Registered in the catalog. 
- -After successful provisioning, the *Events* site of the new tenant appears in your browser. - -![New tenant](./media/saas-dbpertenant-get-started-deploy/red-maple-racing.png) - -Refresh the Events Hub to make the new tenant appear in the list. - -## Explore the servers, pools, and tenant databases - -Now that you've started running a load against the collection of tenants, let's look at some of the resources that were deployed. - -1. In the [Azure portal](https://portal.azure.com), browse to your list of SQL servers. Then open the **catalog-dpt-<USER>** server. - - The catalog server contains two databases, **tenantcatalog** and **basetenantdb** (a template database that's copied to create new tenants). - - ![Screenshot shows a catalog server Overview page with the two databases.](./media/saas-dbpertenant-get-started-deploy/databases.png) - -2. Go back to your list of SQL servers. - -3. Open the **tenants1-dpt-<USER>** server that holds the tenant databases. - -4. See the following items: - - - Each tenant database is an **Elastic Standard** database in a 50-eDTU standard pool. - - The Red Maple Racing database is the tenant database you provisioned previously. - - ![Server with databases](./media/saas-dbpertenant-get-started-deploy/server.png) - -## Monitor the pool - -After *LoadGenerator.ps1* runs for several minutes, enough data should be available to start looking at some monitoring capabilities. These capabilities are built into pools and databases. - -Browse to the server **tenants1-dpt-<user>**, and select **Pool1** to view resource utilization for the pool. In the following charts, the load generator ran for one hour. - - ![Monitor pool](./media/saas-dbpertenant-get-started-deploy/monitor-pool.png) - -- The first chart, labeled **Resource utilization**, shows pool eDTU utilization. -- The second chart shows eDTU utilization of the five most active databases in the pool. 
- -The two charts illustrate that elastic pools and SQL Database are well suited to unpredictable SaaS application workloads. The charts show that four databases are each bursting to as much as 40 eDTUs, and yet all the databases are comfortably supported by a 50-eDTU pool. The 50-eDTU pool can support even heavier workloads. If the databases are provisioned as single databases, each one needs to be an S2 (50 DTU) to support the bursts. The cost of four single S2 databases is nearly three times the price of the pool. In real-world situations, SQL Database customers run up to 500 databases in 200 eDTU pools. For more information, see the [Performance monitoring tutorial](saas-dbpertenant-performance-monitoring.md). - -## Additional resources - -- For more information, see additional [tutorials that build on the Wingtip Tickets SaaS database-per-tenant application](saas-dbpertenant-wingtip-app-overview.md#sql-database-wingtip-saas-tutorials). -- To learn about elastic pools, see [What is an Azure SQL elastic pool?](elastic-pool-overview.md). -- To learn about elastic jobs, see [Manage scaled-out cloud databases](./elastic-jobs-overview.md). -- To learn about multitenant SaaS applications, see [Design patterns for multitenant SaaS applications](saas-tenancy-app-design-patterns.md). - -## Next steps - -In this tutorial you learned: - -> [!div class="checklist"] -> - How to deploy the Wingtip Tickets SaaS application. -> - About the servers, pools, and databases that make up the app. -> - How tenants are mapped to their data with the *catalog*. -> - How to provision new tenants. -> - How to view pool utilization to monitor tenant activity. -> - How to delete sample resources to stop related billing. - -Next, try the [Provision and catalog tutorial](saas-dbpertenant-provision-and-catalog.md). 
- - - -[github-wingtip-dpt]: https://github.com/Microsoft/WingtipTicketsSaaS-DbPerTenant \ No newline at end of file diff --git a/articles/azure-sql/database/saas-dbpertenant-log-analytics.md b/articles/azure-sql/database/saas-dbpertenant-log-analytics.md deleted file mode 100644 index ceadd4919db0b..0000000000000 --- a/articles/azure-sql/database/saas-dbpertenant-log-analytics.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: Azure Monitor logs with a multitenant app -description: Set up and use Azure Monitor logs with a multitenant Azure SQL Database SaaS app -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: seo-lt-2019, sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 01/25/2019 ---- -# Set up and use Azure Monitor logs with a multitenant Azure SQL Database SaaS app -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this tutorial, you set up and use [Azure Monitor logs](../../azure-monitor/logs/log-query-overview.md) to monitor elastic pools and databases. This tutorial builds on the [Performance monitoring and management tutorial](saas-dbpertenant-performance-monitoring.md). It shows how to use Azure Monitor logs to augment the monitoring and alerting provided in the Azure portal. Azure Monitor logs supports monitoring thousands of elastic pools and hundreds of thousands of databases. Azure Monitor logs provides a single monitoring solution, which can integrate monitoring of different applications and Azure services across multiple Azure subscriptions. - -[!INCLUDE [azure-monitor-log-analytics-rebrand](../../../includes/azure-monitor-log-analytics-rebrand.md)] - -In this tutorial you learn how to: - -> [!div class="checklist"] -> * Install and configure Azure Monitor logs. -> * Use Azure Monitor logs to monitor pools and databases. 
- -To complete this tutorial, make sure the following prerequisites are completed: - -* The Wingtip Tickets SaaS database-per-tenant app is deployed. To deploy in less than five minutes, see [Deploy and explore the Wingtip Tickets SaaS database-per-tenant application](./saas-dbpertenant-get-started-deploy.md). -* Azure PowerShell is installed. For more information, see [Get started with Azure PowerShell](/powershell/azure/get-started-azureps). - -See the [Performance monitoring and management tutorial](saas-dbpertenant-performance-monitoring.md) for a discussion of SaaS scenarios and patterns and how they affect the requirements on a monitoring solution. - -## Monitor and manage database and elastic pool performance with Azure Monitor logs - -For Azure SQL Database, monitoring and alerting is available on databases and pools in the Azure portal. This built-in monitoring and alerting is convenient, but it's also resource-specific. That means it's less well suited to monitor large installations or provide a unified view across resources and subscriptions. - -For high-volume scenarios, you can use Azure Monitor logs for monitoring and alerting. Azure Monitor is a separate Azure service that enables analytics over logs gathered in a workspace from potentially many services. Azure Monitor logs provides a built-in query language and data visualization tools that allow operational data analytics. The SQL Analytics solution provides several predefined elastic pool and database monitoring and alerting views and queries. Azure Monitor logs also provides a custom view designer. - -OMS workspaces are now referred to as Log Analytics workspaces. Log Analytics workspaces and analytics solutions open in the Azure portal. The Azure portal is the newer access point, but it might be what's behind the Operations Management Suite portal in some areas. - -### Create performance diagnostic data by simulating a workload on your tenants - -1. 
In the PowerShell ISE, open *..\\WingtipTicketsSaaS-MultiTenantDb-master\\Learning Modules\\Performance Monitoring and Management\\Demo-PerformanceMonitoringAndManagement.ps1*. Keep this script open because you might want to run several of the load generation scenarios during this tutorial. -1. If you haven't done so already, provision a batch of tenants to make the monitoring context more interesting. This process takes a few minutes. - - a. Set **$DemoScenario = 1**, _Provision a batch of tenants_. - - b. To run the script and deploy an additional 17 tenants, press F5. - -1. Now start the load generator to run a simulated load on all the tenants. - - a. Set **$DemoScenario = 2**, _Generate normal intensity load (approximately 30 DTU)_. - - b. To run the script, press F5. - -## Get the Wingtip Tickets SaaS database-per-tenant application scripts - -The Wingtip Tickets SaaS multitenant database scripts and application source code are available in the [WingtipTicketsSaaS-DbPerTenant](https://github.com/Microsoft/WingtipTicketsSaaS-DbPerTenant) GitHub repo. For steps to download and unblock the Wingtip Tickets PowerShell scripts, see the [general guidance](saas-tenancy-wingtip-app-guidance-tips.md). - -## Install and configure Log Analytics workspace and the Azure SQL Analytics solution - -Azure Monitor is a separate service that must be configured. Azure Monitor logs collects log data, telemetry, and metrics in a Log Analytics workspace. Just like other resources in Azure, a Log Analytics workspace must be created. The workspace doesn't need to be created in the same resource group as the applications it monitors. Doing so often makes the most sense though. For the Wingtip Tickets app, use a single resource group to make sure the workspace is deleted with the application. - -1. In the PowerShell ISE, open *..\\WingtipTicketsSaaS-MultiTenantDb-master\\Learning Modules\\Performance Monitoring and Management\\Log Analytics\\Demo-LogAnalytics.ps1*. -1. 
To run the script, press F5. - -Now you can open Azure Monitor logs in the Azure portal. It takes a few minutes to collect telemetry in the Log Analytics workspace and to make it visible. The longer you leave the system gathering diagnostic data, the more interesting the experience is. - -## Use Log Analytics workspace and the SQL Analytics solution to monitor pools and databases - - -In this exercise, open Log Analytics workspace in the Azure portal to look at the telemetry gathered for the databases and pools. - -1. Browse to the [Azure portal](https://portal.azure.com). Select **All services** to open Log Analytics workspace. Then, search for Log Analytics. - - ![Open Log Analytics workspace](./media/saas-dbpertenant-log-analytics/log-analytics-open.png) - -1. Select the workspace named _wtploganalytics-<user>_. - -1. Select **Overview** to open the log analytics solution in the Azure portal. - - ![Overview](./media/saas-dbpertenant-log-analytics/click-overview.png) - - > [!IMPORTANT] - > It might take a couple of minutes before the solution is active. - -1. Select the **Azure SQL Analytics** tile to open it. - - ![Overview tile](./media/saas-dbpertenant-log-analytics/overview.png) - -1. The views in the solution scroll sideways, with their own inner scroll bar at the bottom. Refresh the page if necessary. - -1. To explore the summary page, select the tiles or individual databases to open a drill-down explorer. - - ![Log analytics dashboard](./media/saas-dbpertenant-log-analytics/log-analytics-overview.png) - -1. Change the filter setting to modify the time range. For this tutorial, select **Last 1 hour**. - - ![Time filter](./media/saas-dbpertenant-log-analytics/log-analytics-time-filter.png) - -1. Select an individual database to explore the query usage and metrics for that database. - - ![Database analytics](./media/saas-dbpertenant-log-analytics/log-analytics-database.png) - -1. To see usage metrics, scroll the analytics page to the right. 
- - ![Database metrics](./media/saas-dbpertenant-log-analytics/log-analytics-database-metrics.png) - -1. Scroll the analytics page to the left, and select the server tile in the **Resource Info** list. - - ![Resource Info list](./media/saas-dbpertenant-log-analytics/log-analytics-resource-info.png) - - A page opens that shows the pools and databases on the server. - - ![Server with pools and databases](./media/saas-dbpertenant-log-analytics/log-analytics-server.png) - -1. Select a pool. On the pool page that opens, scroll to the right to see the pool metrics. - - ![Pool metrics](./media/saas-dbpertenant-log-analytics/log-analytics-pool-metrics.png) - - -1. Back in the Log Analytics workspace, select **OMS Portal** to open the workspace there. - - ![Log Analytics workspace](./media/saas-dbpertenant-log-analytics/log-analytics-workspace-oms-portal.png) - -In the Log Analytics workspace, you can explore the log and metric data further. - -Monitoring and alerting in Azure Monitor logs are based on queries over the data in the workspace, unlike the alerting defined on each resource in the Azure portal. By basing alerts on queries, you can define a single alert that looks over all databases, rather than defining one per database. Queries are limited only by the data available in the workspace. - -For more information on how to use Azure Monitor logs to query and set alerts, see [Work with alert rules in Azure Monitor logs](../../azure-monitor/alerts/alerts-metric.md). - -Azure Monitor logs for SQL Database charges based on the data volume in the workspace. In this tutorial, you created a free workspace, which is limited to 500 MB per day. After that limit is reached, data is no longer added to the workspace. - - -## Next steps - -In this tutorial you learned how to: - -> [!div class="checklist"] -> * Install and configure Azure Monitor logs. -> * Use Azure Monitor logs to monitor pools and databases. 
- -Try the [Tenant analytics tutorial](saas-tenancy-tenant-analytics.md). - -## Additional resources - -* [Additional tutorials that build on the initial Wingtip Tickets SaaS database-per-tenant application deployment](saas-dbpertenant-wingtip-app-overview.md#sql-database-wingtip-saas-tutorials) -* [Azure Monitor logs](../../azure-monitor/insights/azure-sql.md) \ No newline at end of file diff --git a/articles/azure-sql/database/saas-dbpertenant-performance-monitoring.md b/articles/azure-sql/database/saas-dbpertenant-performance-monitoring.md deleted file mode 100644 index 757e7bf90378e..0000000000000 --- a/articles/azure-sql/database/saas-dbpertenant-performance-monitoring.md +++ /dev/null @@ -1,246 +0,0 @@ ---- -title: "SaaS app: Monitor performance of many databases" -description: "Monitor and manage performance of Azure SQL Database in a multi-tenant SaaS app" -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: seo-lt-2019, sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 01/25/2019 ---- -# Monitor and manage performance of Azure SQL Database in a multi-tenant SaaS app -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this tutorial, several key performance management scenarios used in SaaS applications are explored. Using a load generator to simulate activity across all tenant databases, the built-in monitoring and alerting features of SQL Database and elastic pools are demonstrated. - -The Wingtip Tickets SaaS Database Per Tenant app uses a single-tenant data model, where each venue (tenant) has its own database. Like many SaaS applications, the anticipated tenant workload pattern is unpredictable and sporadic. In other words, ticket sales may occur at any time. To take advantage of this typical database usage pattern, tenant databases are deployed into elastic pools. 
Elastic pools optimize the cost of a solution by sharing resources across many databases. With this type of pattern, it's important to monitor database and pool resource usage to ensure that loads are reasonably balanced across pools. You also need to ensure that individual databases have adequate resources, and that pools are not hitting their [eDTU](purchasing-models.md#dtu-purchasing-model) limits. This tutorial explores ways to monitor and manage databases and pools, and how to take corrective action in response to variations in workload. - -In this tutorial you learn how to: - -> [!div class="checklist"] -> -> * Simulate usage on the tenant databases by running a provided load generator -> * Monitor the tenant databases as they respond to the increase in load -> * Scale up the Elastic pool in response to the increased database load -> * Provision a second Elastic pool to load balance database activity - - -To complete this tutorial, make sure the following prerequisites are completed: - -* The Wingtip Tickets SaaS Database Per Tenant app is deployed. To deploy in less than five minutes, see [Deploy and explore the Wingtip Tickets SaaS Database Per Tenant application](./saas-dbpertenant-get-started-deploy.md) -* Azure PowerShell is installed. For details, see [Getting started with Azure PowerShell](/powershell/azure/get-started-azureps) - -## Introduction to SaaS performance management patterns - -Managing database performance consists of compiling and analyzing performance data, and then reacting to this data by adjusting parameters to maintain an acceptable response time for your application. When hosting multiple tenants, Elastic pools are a cost-effective way to provide and manage resources for a group of databases with unpredictable workloads. With certain workload patterns, as few as two S3 databases can benefit from being managed in a pool. 
- -![application diagram](./media/saas-dbpertenant-performance-monitoring/app-diagram.png) - -Pools, and the databases in pools, should be monitored to ensure they stay within acceptable ranges of performance. Tune the pool configuration to meet the needs of the aggregate workload of all databases, ensuring that the pool eDTUs are appropriate for the overall workload. Adjust the per-database min and per-database max eDTU values to appropriate values for your specific application requirements. - -### Performance management strategies - -* To avoid having to manually monitor performance, it’s most effective to **set alerts that trigger when databases or pools stray out of normal ranges**. -* To respond to short-term fluctuations in the aggregate compute size of a pool, the **pool eDTU level can be scaled up or down**. If this fluctuation occurs on a regular or predictable basis, **scaling the pool can be scheduled to occur automatically**. For example, scale down when you know your workload is light, maybe overnight, or during weekends. -* To respond to longer-term fluctuations, or changes in the number of databases, **individual databases can be moved into other pools**. -* To respond to short-term increases in *individual* database load **individual databases can be taken out of a pool and assigned an individual compute size**. Once the load is reduced, the database can then be returned to the pool. When this is known in advance, databases can be moved preemptively to ensure the database always has the resources it needs, and to avoid impact on other databases in the pool. If this requirement is predictable, such as a venue experiencing a rush of ticket sales for a popular event, then this management behavior can be integrated into the application. - -The [Azure portal](https://portal.azure.com) provides built-in monitoring and alerting on most resources. Monitoring and alerting is available on databases and pools. 
This built-in monitoring and alerting is resource-specific, so it's convenient to use for small numbers of resources, but is not very convenient when working with many resources. - -For high-volume scenarios, where you're working with many resources, [Azure Monitor logs](./saas-dbpertenant-log-analytics.md) can be used. This is a separate Azure service that provides analytics over emitted logs gathered in a Log Analytics workspace. Azure Monitor logs can collect telemetry from many services and be used to query and set alerts. - -## Get the Wingtip Tickets SaaS Database Per Tenant application scripts - -The Wingtip Tickets SaaS Multi-tenant Database scripts and application source code are available in the [WingtipTicketsSaaS-DbPerTenant](https://github.com/Microsoft/WingtipTicketsSaaS-DbPerTenant) GitHub repo. Check out the [general guidance](saas-tenancy-wingtip-app-guidance-tips.md) for steps to download and unblock the Wingtip Tickets SaaS scripts. - -## Provision additional tenants - -While pools can be cost-effective with just two S3 databases, the more databases that are in the pool the more cost-effective the averaging effect becomes. For a good understanding of how performance monitoring and management works at scale, this tutorial requires you have at least 20 databases deployed. - -If you already provisioned a batch of tenants in a prior tutorial, skip to the [Simulate usage on all tenant databases](#simulate-usage-on-all-tenant-databases) section. - -1. In the **PowerShell ISE**, open …\\Learning Modules\\Performance Monitoring and Management\\*Demo-PerformanceMonitoringAndManagement.ps1*. Keep this script open as you'll run several scenarios during this tutorial. -1. Set **$DemoScenario** = **1**, **Provision a batch of tenants** -1. Press **F5** to run the script. - -The script will deploy 17 tenants in less than five minutes. 
- -The *New-TenantBatch* script uses a nested or linked set of [Resource Manager](../../azure-resource-manager/index.yml) templates that create a batch of tenants, which by default copies the database **basetenantdb** on the catalog server to create the new tenant databases, then registers these in the catalog, and finally initializes them with the tenant name and venue type. This is consistent with the way the app provisions a new tenant. Any changes made to *basetenantdb* are applied to any new tenants provisioned thereafter. See the [Schema Management tutorial](saas-tenancy-schema-management.md) to see how to make schema changes to *existing* tenant databases (including the *basetenantdb* database). - -## Simulate usage on all tenant databases - -The *Demo-PerformanceMonitoringAndManagement.ps1* script is provided that simulates a workload running against all tenant databases. The load is generated using one of the available load scenarios: - -| Demo | Scenario | -|:--|:--| -| 2 | Generate normal intensity load (approximately 40 DTU) | -| 3 | Generate load with longer and more frequent bursts per database| -| 4 | Generate load with higher DTU bursts per database (approximately 80 DTU)| -| 5 | Generate a normal load plus a high load on a single tenant (approximately 95 DTU)| -| 6 | Generate unbalanced load across multiple pools| - -The load generator applies a *synthetic* CPU-only load to every tenant database. The generator starts a job for each tenant database, which calls a stored procedure periodically that generates the load. The load levels (in eDTUs), duration, and intervals are varied across all databases, simulating unpredictable tenant activity. - -1. In the **PowerShell ISE**, open …\\Learning Modules\\Performance Monitoring and Management\\*Demo-PerformanceMonitoringAndManagement.ps1*. Keep this script open as you'll run several scenarios during this tutorial. -1. Set **$DemoScenario** = **2**, *Generate normal intensity load*. -1. 
Press **F5** to apply a load to all your tenant databases. - -Wingtip Tickets SaaS Database Per Tenant is a SaaS app, and the real-world load on a SaaS app is typically sporadic and unpredictable. To simulate this, the load generator produces a randomized load distributed across all tenants. Several minutes are needed for the load pattern to emerge, so run the load generator for 3-5 minutes before attempting to monitor the load in the following sections. - -> [!IMPORTANT] -> The load generator is running as a series of jobs in your local PowerShell session. Keep the *Demo-PerformanceMonitoringAndManagement.ps1* tab open! If you close the tab, or suspend your machine, the load generator stops. The load generator remains in a *job-invoking* state where it generates load on any new tenants that are provisioned after the generator is started. Use *Ctrl-C* to stop invoking new jobs and exit the script. The load generator will continue to run, but only on existing tenants. - -## Monitor resource usage using the Azure portal - -To monitor the resource usage that results from the load being applied, open the portal to the pool containing the tenant databases: - -1. Open the [Azure portal](https://portal.azure.com) and browse to the *tenants1-dpt-<USER>* server. -1. Scroll down and locate elastic pools and click **Pool1**. This pool contains all the tenant databases created so far. - -Observe the **Elastic pool monitoring** and **Elastic database monitoring** charts. - -The pool's resource utilization is the aggregate database utilization for all databases in the pool. The database chart shows the five hottest databases: - -![database chart](./media/saas-dbpertenant-performance-monitoring/pool1.png) - -Because there are additional databases in the pool beyond the top five, the pool utilization shows activity that is not reflected in the top five databases chart. 
For additional details, click **Database Resource Utilization**: - -![database resource utilization](./media/saas-dbpertenant-performance-monitoring/database-utilization.png) - - -## Set performance alerts on the pool - -Set an alert on the pool that triggers on \>75% utilization as follows: - -1. Open *Pool1* (on the *tenants1-dpt-\* server) in the [Azure portal](https://portal.azure.com). -1. Click **Alert Rules**, and then click **+ Add alert**: - - ![add alert](./media/saas-dbpertenant-performance-monitoring/add-alert.png) - -1. Provide a name, such as **High DTU**. -1. Set the following values: - * **Metric = eDTU percentage** - * **Condition = greater than** - * **Threshold = 75** - * **Period = Over the last 30 minutes** -1. Add an email address to the *Additional administrator email(s)* box and click **OK**. - - ![set alert](./media/saas-dbpertenant-performance-monitoring/alert-rule.png) - - -## Scale up a busy pool - -If the aggregate load level increases on a pool to the point that it maxes out the pool and reaches 100% eDTU usage, then individual database performance is affected, potentially slowing query response times for all databases in the pool. - -**Short-term**, consider scaling up the pool to provide additional resources, or removing databases from the pool (moving them to other pools, or out of the pool to a stand-alone service tier). - -**Longer term**, consider optimizing queries or index usage to improve database performance. Depending on the application's sensitivity to performance issues, it's best practice to scale a pool up before it reaches 100% eDTU usage. Use an alert to warn you in advance. - -You can simulate a busy pool by increasing the load produced by the generator. Causing the databases to burst more frequently, and for longer, increases the aggregate load on the pool without changing the requirements of the individual databases. Scaling up the pool is easily done in the portal or from PowerShell. This exercise uses the portal. 
- -1. Set *$DemoScenario* = **3**, _Generate load with longer and more frequent bursts per database_ to increase the intensity of the aggregate load on the pool without changing the peak load required by each database. -1. Press **F5** to apply a load to all your tenant databases. - -1. Go to **Pool1** in the Azure portal. - -Monitor the increased pool eDTU usage on the upper chart. It takes a few minutes for the new higher load to kick in, but you should quickly see the pool start to hit max utilization, and as the load steadies into the new pattern, it rapidly overloads the pool. - -1. To scale up the pool, click **Configure pool** at the top of the **Pool1** page. -1. Adjust the **Pool eDTU** setting to **100**. Changing the pool eDTU does not change the per-database settings (which is still 50 eDTU max per database). You can see the per-database settings on the right side of the **Configure pool** page. -1. Click **Save** to submit the request to scale the pool. - -Go back to **Pool1** > **Overview** to view the monitoring charts. Monitor the effect of providing the pool with more resources (although with few databases and a randomized load it’s not always easy to see conclusively until you run for some time). While you are looking at the charts bear in mind that 100% on the upper chart now represents 100 eDTUs, while on the lower chart 100% is still 50 eDTUs as the per-database max is still 50 eDTUs. - -Databases remain online and fully available throughout the process. At the last moment as each database is ready to be enabled with the new pool eDTU, any active connections are broken. Application code should always be written to retry dropped connections, and so will reconnect to the database in the scaled-up pool. - -## Load balance between pools - -As an alternative to scaling up the pool, create a second pool and move databases into it to balance the load between the two pools. To do this the new pool must be created on the same server as the first. - -1. 
In the [Azure portal](https://portal.azure.com), open the **tenants1-dpt-<USER>** server. -1. Click **+ New pool** to create a pool on the current server. -1. On the **Elastic pool** template: - - 1. Set **Name** to *Pool2*. - 1. Leave the pricing tier as **Standard Pool**. - 1. Click **Configure pool**. - 1. Set **Pool eDTU** to *50 eDTU*. - 1. Click **Add databases** to see a list of databases on the server that can be added to *Pool2*. - 1. Select any 10 databases to move these to the new pool, and then click **Select**. If you've been running the load generator, the service already knows that your performance profile requires a larger pool than the default 50 eDTU size and recommends starting with a 100 eDTU setting. - - ![recommendation](./media/saas-dbpertenant-performance-monitoring/configure-pool.png) - - 1. For this tutorial, leave the default at 50 eDTUs, and click **Select** again. - 1. Select **OK** to create the new pool and to move the selected databases into it. - -Creating the pool and moving the databases takes a few minutes. As databases are moved they remain online and fully accessible until the very last moment, at which point any open connections are closed. As long as you have some retry logic, clients will then connect to the database in the new pool. - -Browse to **Pool2** (on the *tenants1-dpt-\<user\>* server) to open the pool and monitor its performance. If you don't see it, wait for provisioning of the new pool to complete. - -You now see that resource usage on *Pool1* has dropped and that *Pool2* is now similarly loaded. - -## Manage performance of an individual database - -If an individual database in a pool experiences a sustained high load, depending on the pool configuration, it may tend to dominate the resources in the pool and impact other databases. If the activity is likely to continue for some time, the database can be temporarily moved out of the pool. 
This allows the database to have the extra resources it needs, and isolates it from the other databases. - -This exercise simulates the effect of Contoso Concert Hall experiencing a high load when tickets go on sale for a popular concert. - -1. In the **PowerShell ISE**, open the …\\*Demo-PerformanceMonitoringAndManagement.ps1* script. -1. Set **$DemoScenario = 5, Generate a normal load plus a high load on a single tenant (approximately 95 DTU).** -1. Set **$SingleTenantDatabaseName = contosoconcerthall** -1. Execute the script using **F5**. - - -1. In the [Azure portal](https://portal.azure.com), browse to the list of databases on the *tenants1-dpt-\* server. -1. Click on the **contosoconcerthall** database. -1. Click on the pool that **contosoconcerthall** is in. Locate the pool in the **Elastic pool** section. - -1. Inspect the **Elastic pool monitoring** chart and look for the increased pool eDTU usage. After a minute or two, the higher load should start to kick in, and you should quickly see that the pool hits 100% utilization. -2. Inspect the **Elastic database monitoring** display, which shows the hottest databases in the past hour. The *contosoconcerthall* database should soon appear as one of the five hottest databases. -3. Click on the **Elastic database monitoring** chart and it opens the **Database Resource Utilization** page where you can monitor any of the databases. This lets you isolate the display for the *contosoconcerthall* database. -4. From the list of databases, click **contosoconcerthall**. -5. Click **Pricing Tier (scale DTUs)** to open the **Configure performance** page where you can set a stand-alone compute size for the database. -6. Click on the **Standard** tab to open the scale options in the Standard tier. -7. Slide the **DTU slider** to the right to select **100** DTUs. Note this corresponds to the service objective, **S3**. -8. Click **Apply** to move the database out of the pool and make it a *Standard S3* database. -9. 
Once scaling is complete, monitor the effect on the contosoconcerthall database and Pool1 on the elastic pool and database blades. - -Once the high load on the contosoconcerthall database subsides you should promptly return it to the pool to reduce its cost. If it’s unclear when that will happen you could set an alert on the database that will trigger when its DTU usage drops below the per-database max on the pool. Moving a database into a pool is described in exercise 5. - -## Other performance management patterns - -**Pre-emptive scaling** -In the exercise above where you explored how to scale an isolated database, you knew which database to look for. If the management of Contoso Concert Hall had informed Wingtips of the impending ticket sale, the database could have been moved out of the pool preemptively. Otherwise, it would likely have required an alert on the pool or the database to spot what was happening. You wouldn’t want to learn about this from the other tenants in the pool complaining of degraded performance. And if the tenant can predict how long they will need additional resources you can set up an Azure Automation runbook to move the database out of the pool and then back in again on a defined schedule. - -**Tenant self-service scaling** -Because scaling is a task easily called via the management API, you can easily build the ability to scale tenant databases into your tenant-facing application, and offer it as a feature of your SaaS service. For example, let tenants self-administer scaling up and down, perhaps linked directly to their billing! - -**Scaling a pool up and down on a schedule to match usage patterns** - -Where aggregate tenant usage follows predictable usage patterns, you can use Azure Automation to scale a pool up and down on a schedule. For example, scale a pool down after 6pm and up again before 6am on weekdays when you know there is a drop in resource requirements. 
- - - -## Next steps - -In this tutorial you learn how to: - -> [!div class="checklist"] -> * Simulate usage on the tenant databases by running a provided load generator -> * Monitor the tenant databases as they respond to the increase in load -> * Scale up the Elastic pool in response to the increased database load -> * Provision a second Elastic pool to load balance the database activity - -[Restore a single tenant tutorial](saas-dbpertenant-restore-single-tenant.md) - - -## Additional resources - -* Additional [tutorials that build upon the Wingtip Tickets SaaS Database Per Tenant application deployment](saas-dbpertenant-wingtip-app-overview.md#sql-database-wingtip-saas-tutorials) -* [SQL Elastic pools](elastic-pool-overview.md) -* [Azure automation](../../automation/automation-intro.md) -* [Azure Monitor logs](./saas-dbpertenant-log-analytics.md) - Setting up and using Azure Monitor logs tutorial \ No newline at end of file diff --git a/articles/azure-sql/database/saas-dbpertenant-provision-and-catalog.md b/articles/azure-sql/database/saas-dbpertenant-provision-and-catalog.md deleted file mode 100644 index 81f96ed70f6ac..0000000000000 --- a/articles/azure-sql/database/saas-dbpertenant-provision-and-catalog.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -title: Provision new tenants in a multitenant app -description: Learn how to provision and catalog new tenants in an Azure SQL Database multitenant SaaS app -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: seo-lt-2019, sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 09/24/2018 ---- -# Learn how to provision new tenants and register them in the catalog -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this tutorial, you learn how to provision and catalog SaaS patterns. You also learn how they're implemented in the Wingtip Tickets SaaS database-per-tenant application. 
You create and initialize new tenant databases and register them in the application's tenant catalog. The catalog is a database that maintains the mapping between the SaaS application's many tenants and their data. The catalog plays an important role in directing application and management requests to the correct database. - -In this tutorial, you learn how to: - -> [!div class="checklist"] -> -> * Provision a single new tenant. -> * Provision a batch of additional tenants. - - -To complete this tutorial, make sure the following prerequisites are completed: - -* The Wingtip Tickets SaaS database-per-tenant app is deployed. To deploy it in less than five minutes, see [Deploy and explore the Wingtip Tickets SaaS database-per-tenant application](./saas-dbpertenant-get-started-deploy.md). -* Azure PowerShell is installed. For more information, see [Get started with Azure PowerShell](/powershell/azure/get-started-azureps). - -## Introduction to the SaaS catalog pattern - -In a database-backed multitenant SaaS application, it's important to know where information for each tenant is stored. In the SaaS catalog pattern, a catalog database is used to hold the mapping between each tenant and the database in which their data is stored. This pattern applies whenever tenant data is distributed across multiple databases. - -Each tenant is identified by a key in the catalog, which is mapped to the location of their database. In the Wingtip Tickets app, the key is formed from a hash of the tenant's name. This scheme allows the app to construct the key from the tenant name included in the application URL. Other tenant key schemes can be used. - -The catalog allows the name or location of the database to be changed with minimal impact on the application. In a multitenant database model, this capability also accommodates moving a tenant between databases. The catalog also can be used to indicate whether a tenant or database is offline for maintenance or other actions. 
This capability is explored in the [Restore single tenant tutorial](saas-dbpertenant-restore-single-tenant.md). - -The catalog also can store additional tenant or database metadata, such as the schema version, service plan, or SLAs offered to tenants. The catalog can store other information that enables application management, customer support, or DevOps. - -Beyond the SaaS application, the catalog can enable database tools. In the Wingtip Tickets SaaS database-per-tenant sample, the catalog is used to enable cross-tenant query, which is explored in the [Ad hoc reporting tutorial](saas-tenancy-cross-tenant-reporting.md). Cross-database job management is explored in the [Schema management](saas-tenancy-schema-management.md) and [Tenant analytics](saas-tenancy-tenant-analytics.md) tutorials. - -In the Wingtip Tickets SaaS samples, the catalog is implemented by using the Shard Management features of the [Elastic Database client library (EDCL)](elastic-database-client-library.md). The EDCL is available in Java and the .NET Framework. The EDCL enables an application to create, manage, and use a database-backed shard map. - -A shard map contains a list of shards (databases) and the mapping between keys (tenants) and shards. EDCL functions are used during tenant provisioning to create the entries in the shard map. They're used at run time by applications to connect to the correct database. EDCL caches connection information to minimize traffic to the catalog database and speed up the application. - -> [!IMPORTANT] -> The mapping data is accessible in the catalog database, but *don't edit it*. Edit mapping data by using Elastic Database Client Library APIs only. Directly manipulating the mapping data risks corrupting the catalog and isn't supported. - - -## Introduction to the SaaS provisioning pattern - -When you add a new tenant in a SaaS application that uses a single-tenant database model, you must provision a new tenant database. 
The database must be created in the appropriate location and service tier. It also must be initialized with the appropriate schema and reference data. And it must be registered in the catalog under the appropriate tenant key. - -Different approaches to database provisioning can be used. You can execute SQL scripts, deploy a bacpac, or copy a template database. - -Database provisioning needs to be part of your schema management strategy. You must make sure that new databases are provisioned with the latest schema. This requirement is explored in the [Schema management tutorial](saas-tenancy-schema-management.md). - -The Wingtip Tickets database-per-tenant app provisions new tenants by copying a template database named _basetenantdb_, which is deployed on the catalog server. Provisioning can be integrated into the application as part of a sign-up experience. It also can be supported offline by using scripts. This tutorial explores provisioning by using PowerShell. - -Provisioning scripts copy the _basetenantdb_ database to create a new tenant database in an elastic pool. The tenant database is created in the tenant server mapped to the _newtenant_ DNS alias. This alias maintains a reference to the server used to provision new tenants and is updated to point to a recovery tenant server in the disaster recovery tutorials ([DR using georestore](./saas-dbpertenant-dr-geo-restore.md), [DR using georeplication](./saas-dbpertenant-dr-geo-replication.md)). The scripts then initialize the database with tenant-specific information and register it in the catalog shard map. Tenant databases are given names based on the tenant name. This naming scheme isn't a critical part of the pattern. The catalog maps the tenant key to the database name, so any naming convention can be used. 
- - -## Get the Wingtip Tickets SaaS database-per-tenant application scripts - -The Wingtip Tickets SaaS scripts and application source code are available in the [WingtipTicketsSaaS-DbPerTenant](https://github.com/Microsoft/WingtipTicketsSaaS-DbPerTenant) GitHub repo. Check out the [general guidance](saas-tenancy-wingtip-app-guidance-tips.md) for steps to download and unblock the Wingtip Tickets SaaS scripts. - - -## Provision and catalog detailed walkthrough - -To understand how the Wingtip Tickets application implements new tenant provisioning, add a breakpoint and follow the workflow while you provision a tenant. - -1. In the PowerShell ISE, open ...\\Learning Modules\\ProvisionAndCatalog\\_Demo-ProvisionAndCatalog.ps1_ and set the following parameters: - - * **$TenantName** = the name of the new venue (for example, *Bushwillow Blues*). - * **$VenueType** = one of the predefined venue types: _blues, classicalmusic, dance, jazz, judo, motor racing, multipurpose, opera, rockmusic, soccer_. - * **$DemoScenario** = **1**, *Provision a single tenant*. - -2. To add a breakpoint, put your cursor anywhere on the line that says *New-Tenant `*. Then press F9. - - ![Screenshot shows a script with New-Tenant highlighted for adding a breakpoint.](./media/saas-dbpertenant-provision-and-catalog/breakpoint.png) - -3. To run the script, press F5. - -4. After the script execution stops at the breakpoint, press F11 to step into the code. - - ![Debugging](./media/saas-dbpertenant-provision-and-catalog/debug.png) - - - -Trace the script's execution by using the **Debug** menu options. Press F10 and F11 to step over or into the called functions. For more information about debugging PowerShell scripts, see [Tips on working with and debugging PowerShell scripts](/powershell/scripting/components/ise/how-to-debug-scripts-in-windows-powershell-ise). - - -You don't need to explicitly follow this workflow. It explains how to debug the script. 
- -* **Import the CatalogAndDatabaseManagement.psm1 module.** It provides a catalog and tenant-level abstraction over the [Shard Management](elastic-scale-shard-map-management.md) functions. This module encapsulates much of the catalog pattern and is worth exploring. -* **Import the SubscriptionManagement.psm1 module.** It contains functions for signing in to Azure and selecting the Azure subscription you want to work with. -* **Get configuration details.** Step into Get-Configuration by using F11, and see how the app config is specified. Resource names and other app-specific values are defined here. Don't change these values until you are familiar with the scripts. -* **Get the catalog object.** Step into Get-Catalog, which composes and returns a catalog object that's used in the higher-level script. This function uses Shard Management functions that are imported from **AzureShardManagement.psm1**. The catalog object is composed of the following elements: - - * $catalogServerFullyQualifiedName is constructed by using the standard stem plus your user name: _catalog-\<user\>.database.windows.net_. - * $catalogDatabaseName is retrieved from the config: *tenantcatalog*. - * $shardMapManager object is initialized from the catalog database. - * $shardMap object is initialized from the _tenantcatalog_ shard map in the catalog database. A catalog object is composed and returned. It's used in the higher-level script. -* **Calculate the new tenant key.** A hash function is used to create the tenant key from the tenant name. -* **Check if the tenant key already exists.** The catalog is checked to make sure the key is available. -* **The tenant database is provisioned with New-TenantDatabase.** Use F11 to step into how the database is provisioned by using an [Azure Resource Manager template](../../azure-resource-manager/templates/quickstart-create-templates-use-the-portal.md). 
- - The database name is constructed from the tenant name to make it clear which shard belongs to which tenant. You also can use other database naming conventions. A Resource Manager template creates a tenant database by copying a template database (_baseTenantDB_) on the catalog server. As an alternative, you can create a database and initialize it by importing a bacpac. Or you can execute an initialization script from a well-known location. - - The Resource Manager template is in the …\Learning Modules\Common\ folder: *tenantdatabasecopytemplate.json* - -* **The tenant database is further initialized.** The venue (tenant) name and the venue type are added. You also can do other initialization here. - -* **The tenant database is registered in the catalog.** It's registered with *Add-TenantDatabaseToCatalog* by using the tenant key. Use F11 to step into the details: - - * The catalog database is added to the shard map (the list of known databases). - * The mapping that links the key value to the shard is created. - * Additional metadata about the tenant (the venue's name) is added to the Tenants table in the catalog. The Tenants table isn't part of the Shard Management schema, and it isn't installed by the EDCL. This table illustrates how the catalog database can be extended to support additional application-specific data. - - -After provisioning completes, execution returns to the original *Demo-ProvisionAndCatalog* script. The **Events** page opens for the new tenant in the browser. - - ![Events page](./media/saas-dbpertenant-provision-and-catalog/new-tenant.png) - - -## Provision a batch of tenants - -This exercise provisions a batch of 17 tenants. We recommend that you provision this batch of tenants before starting other Wingtip Tickets SaaS database-per-tenant tutorials. There are more than just a few databases to work with. - -1. In the PowerShell ISE, open ...\\Learning Modules\\ProvisionAndCatalog\\*Demo-ProvisionAndCatalog.ps1*. 
Change the *$DemoScenario* parameter to 3: - - * **$DemoScenario** = **3**, *Provision a batch of tenants*. -2. To run the script, press F5. - -The script deploys a batch of additional tenants. It uses an [Azure Resource Manager template](../../azure-resource-manager/templates/quickstart-create-templates-use-the-portal.md) that controls the batch and delegates provisioning of each database to a linked template. Using templates in this way allows Azure Resource Manager to broker the provisioning process for your script. The templates provision databases in parallel and handle retries, if needed. The script is idempotent, so if it fails or stops for any reason, run it again. - -### Verify the batch of tenants that successfully deployed - -* In the [Azure portal](https://portal.azure.com), browse to your list of servers and open the *tenants1* server. Select **SQL databases**, and verify that the batch of 17 additional databases is now in the list. - - ![Database list](./media/saas-dbpertenant-provision-and-catalog/database-list.png) - - - -## Other provisioning patterns - -Other provisioning patterns not included in this tutorial: - -**Pre-provisioning databases**: The pre-provisioning pattern exploits the fact that databases in an elastic pool don't add extra cost. Billing is for the elastic pool, not the databases. Idle databases consume no resources. By pre-provisioning databases in a pool and allocating them when needed, you can reduce the time to add tenants. The number of databases pre-provisioned can be adjusted as needed to keep a buffer suitable for the anticipated provisioning rate. - -**Auto-provisioning**: In the auto-provisioning pattern, a provisioning service provisions servers, pools, and databases automatically, as needed. If you want, you can include pre-provisioning databases in elastic pools. If databases are decommissioned and deleted, gaps in elastic pools can be filled by the provisioning service. 
Such a service can be simple or complex, such as handling provisioning across multiple geographies and setting up geo-replication for disaster recovery. - -With the auto-provisioning pattern, a client application or script submits a provisioning request to a queue to be processed by the provisioning service. It then polls the service to determine completion. If pre-provisioning is used, requests are handled quickly. The service provisions a replacement database in the background. - - -## Next steps - -In this tutorial you learned how to: - -> [!div class="checklist"] -> -> * Provision a single new tenant. -> * Provision a batch of additional tenants. -> * Step into the details of provisioning tenants and registering them into the catalog. - -Try the [Performance monitoring tutorial](./saas-dbpertenant-performance-monitoring.md). - -## Additional resources - -* Additional [tutorials that build on the Wingtip Tickets SaaS database-per-tenant application](saas-dbpertenant-wingtip-app-overview.md#sql-database-wingtip-saas-tutorials) -* [Elastic database client library](elastic-database-client-library.md) -* [Debug scripts in the Windows PowerShell ISE](/powershell/scripting/components/ise/how-to-debug-scripts-in-windows-powershell-ise) \ No newline at end of file diff --git a/articles/azure-sql/database/saas-dbpertenant-restore-single-tenant.md b/articles/azure-sql/database/saas-dbpertenant-restore-single-tenant.md deleted file mode 100644 index bbff2fc594e9a..0000000000000 --- a/articles/azure-sql/database/saas-dbpertenant-restore-single-tenant.md +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: Restore a database in a multitenant SaaS app -description: Learn how to restore a single tenant's Azure SQL Database after accidentally deleting data -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: seo-lt-2019, sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 12/04/2018 ---- 
-# Restore a single tenant with a database-per-tenant SaaS application -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -The database-per-tenant model makes it easy to restore a single tenant to a prior point in time without affecting other tenants. - -In this tutorial, you learn two data recovery patterns: - -> [!div class="checklist"] -> * Restore a database into a parallel database (side by side). -> * Restore a database in place, replacing the existing database. - -| Pattern | Description | -|:--|:--| -| Restore into a parallel database | This pattern can be used for tasks such as review, auditing, and compliance to allow a tenant to inspect their data from an earlier point. The tenant's current database remains online and unchanged. | -| Restore in place | This pattern is typically used to recover a tenant to an earlier point, after a tenant accidentally deletes or corrupts data. The original database is taken off line and replaced with the restored database. | - - -To complete this tutorial, make sure the following prerequisites are completed: - -* The Wingtip SaaS app is deployed. To deploy in less than five minutes, see [Deploy and explore the Wingtip SaaS application](./saas-dbpertenant-get-started-deploy.md). -* Azure PowerShell is installed. For details, see [Get started with Azure PowerShell](/powershell/azure/get-started-azureps). - -## Introduction to the SaaS tenant restore patterns - -There are two simple patterns for restoring an individual tenant's data. Because tenant databases are isolated from each other, restoring one tenant has no impact on any other tenant's data. The Azure SQL Database point-in-time-restore (PITR) feature is used in both patterns. PITR always creates a new database. - -* **Restore in parallel**: In the first pattern, a new parallel database is created alongside the tenant's current database. The tenant is then given read-only access to the restored database. 
The restored data can be reviewed and potentially used to overwrite current data values. It's up to the app designer to determine how the tenant accesses the restored database and what options for recovery are provided. Simply allowing the tenant to review their data at an earlier point might be all that's required in some scenarios. - -* **Restore in place**: The second pattern is useful if data was lost or corrupted and the tenant wants to revert to an earlier point. The tenant is taken off line while the database is restored. The original database is deleted, and the restored database is renamed. The backup chain of the original database remains accessible after the deletion, so you can restore the database to an earlier point in time, if necessary. - -If the database uses [active geo-replication](active-geo-replication-overview.md) and restoring in parallel, we recommend that you copy any required data from the restored copy into the original database. If you replace the original database with the restored database, you need to reconfigure and resynchronize geo-replication. - -## Get the Wingtip Tickets SaaS database-per-tenant application scripts - -The Wingtip Tickets SaaS Multitenant Database scripts and application source code are available in the [WingtipTicketsSaaS-DbPerTenant](https://github.com/Microsoft/WingtipTicketsSaaS-DbPerTenant) GitHub repo. For steps to download and unblock the Wingtip Tickets SaaS scripts, see the [general guidance](saas-tenancy-wingtip-app-guidance-tips.md). - -## Before you start - -When a database is created, it can take 10 to 15 minutes before the first full backup is available to restore from. If you just installed the application, you might need to wait for a few minutes before you try this scenario. - -## Simulate a tenant accidentally deleting data - -To demonstrate these recovery scenarios, first "accidentally" delete an event in one of the tenant databases. - -### Open the Events app to review the current events - -1. 
Open the Events Hub (http://events.wtp.<user>.trafficmanager.net), and select **Contoso Concert Hall**. - - ![Events Hub](./media/saas-dbpertenant-restore-single-tenant/events-hub.png) - -2. Scroll the list of events, and make a note of the last event in the list. - - ![Last event appears](./media/saas-dbpertenant-restore-single-tenant/last-event.png) - -### "Accidentally" delete the last event - -1. In the PowerShell ISE, open ...\\Learning Modules\\Business Continuity and Disaster Recovery\\RestoreTenant\\*Demo-RestoreTenant.ps1*, and set the following value: - - * **$DemoScenario** = **1**, *Delete last event (with no ticket sales)*. -2. Press F5 to run the script and delete the last event. The following confirmation message appears: - - ```Console - Deleting last unsold event from Contoso Concert Hall ... - Deleted event 'Seriously Strauss' from Contoso Concert Hall venue. - ``` - -3. The Contoso events page opens. Scroll down and verify that the event is gone. If the event is still in the list, select **Refresh** and verify that it's gone. - ![Last event removed](./media/saas-dbpertenant-restore-single-tenant/last-event-deleted.png) - -## Restore a tenant database in parallel with the production database - -This exercise restores the Contoso Concert Hall database to a point in time before the event was deleted. This scenario assumes that you want to review the deleted data in a parallel database. - - The *Restore-TenantInParallel.ps1* script creates a parallel tenant database named *ContosoConcertHall\_old*, with a parallel catalog entry. This pattern of restore is best suited for recovering from a minor data loss. You also can use this pattern if you need to review data for compliance or auditing purposes. It's the recommended approach when you use [active geo-replication](active-geo-replication-overview.md). - -1. Complete the [Simulate a tenant accidentally deleting data](#simulate-a-tenant-accidentally-deleting-data) section. -2. 
In the PowerShell ISE, open ...\\Learning Modules\\Business Continuity and Disaster Recovery\\RestoreTenant\\_Demo-RestoreTenant.ps1_. -3. Set **$DemoScenario** = **2**, *Restore tenant in parallel*. -4. To run the script, press F5. - -The script restores the tenant database to a point in time before you deleted the event. The database is restored to a new database named _ContosoConcertHall\_old_. The catalog metadata that exists in this restored database is deleted, and then the database is added to the catalog by using a key constructed from the *ContosoConcertHall\_old* name. - -The demo script opens the events page for this new tenant database in your browser. Note from the URL ```http://events.wingtip-dpt.<user>.trafficmanager.net/contosoconcerthall_old``` that this page shows data from the restored database where *_old* is added to the name. - -Scroll the events listed in the browser to confirm that the event deleted in the previous section was restored. - -Exposing the restored tenant as an additional tenant, with its own Events app, is unlikely to be how you provide a tenant access to restored data. It serves to illustrate the restore pattern. Typically, you give read-only access to the old data and retain the restored database for a defined period. In the sample, you can delete the restored tenant entry after you're finished by running the _Remove restored tenant_ scenario. - -1. Set **$DemoScenario** = **4**, *Remove restored tenant*. -2. To run the script, press F5. -3. The *ContosoConcertHall\_old* entry is now deleted from the catalog. Close the events page for this tenant in your browser. - -## Restore a tenant in place, replacing the existing tenant database - -This exercise restores the Contoso Concert Hall tenant to a point before the event was deleted. The *Restore-TenantInPlace* script restores a tenant database to a new database and deletes the original. 
This restore pattern is best suited to recovering from serious data corruption, and the tenant might have to accommodate significant data loss. - -1. In the PowerShell ISE, open the **Demo-RestoreTenant.ps1** file. -2. Set **$DemoScenario** = **5**, *Restore tenant in place*. -3. To run the script, press F5. - -The script restores the tenant database to a point before the event was deleted. It first takes the Contoso Concert Hall tenant off line to prevent further updates. Then, a parallel database is created by restoring from the restore point. The restored database is named with a time stamp to make sure the database name doesn't conflict with the existing tenant database name. Next, the old tenant database is deleted, and the restored database is renamed to the original database name. Finally, Contoso Concert Hall is brought online to allow the app access to the restored database. - -You successfully restored the database to a point in time before the event was deleted. When the **Events** page opens, confirm that the last event was restored. - -After you restore the database, it takes another 10 to 15 minutes before the first full backup is available to restore from again. - -## Next steps - -In this tutorial, you learned how to: - -> [!div class="checklist"] -> * Restore a database into a parallel database (side by side). -> * Restore a database in place. - -Try the [Manage tenant database schema](saas-tenancy-schema-management.md) tutorial. 
- -## Additional resources - -* [Additional tutorials that build on the Wingtip SaaS application](saas-dbpertenant-wingtip-app-overview.md#sql-database-wingtip-saas-tutorials) -* [Overview of business continuity with Azure SQL Database](business-continuity-high-availability-disaster-recover-hadr-overview.md) -* [Learn about SQL Database backups](automated-backups-overview.md) \ No newline at end of file diff --git a/articles/azure-sql/database/saas-dbpertenant-wingtip-app-overview.md b/articles/azure-sql/database/saas-dbpertenant-wingtip-app-overview.md deleted file mode 100644 index 8780c73fc881a..0000000000000 --- a/articles/azure-sql/database/saas-dbpertenant-wingtip-app-overview.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Multitenant app example - Wingtip SaaS -description: Learn by using a sample multitenant application that uses Azure SQL Database, the Wingtip SaaS example -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 09/24/2018 ---- -# Introduction to a multitenant SaaS app that uses the database-per-tenant pattern with Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -The Wingtip SaaS application is a sample multitenant app. The app uses the database-per-tenant SaaS application pattern to service multiple tenants. The app showcases features of Azure SQL Database that enable SaaS scenarios by using several SaaS design and management patterns. To quickly get up and running, the Wingtip SaaS app deploys in less than five minutes. - -Application source code and management scripts are available in the [WingtipTicketsSaaS-DbPerTenant](https://github.com/Microsoft/WingtipTicketsSaaS-DbPerTenant) GitHub repo. Before you start, see the [general guidance](saas-tenancy-wingtip-app-guidance-tips.md) for steps to download and unblock the Wingtip Tickets management scripts. 
- -## Application architecture - -The Wingtip SaaS app uses the database-per-tenant model. It uses SQL elastic pools to maximize efficiency. For provisioning and mapping tenants to their data, a catalog database is used. The core Wingtip SaaS application uses a pool with three sample tenants, plus the catalog database. The catalog and tenant servers have been provisioned with DNS aliases. These aliases are used to maintain a reference to the active resources used by the Wingtip application. These aliases are updated to point to recovery resources in the disaster recovery tutorials. Completing many of the Wingtip SaaS tutorials results in add-ons to the initial deployment. Add-ons such as analytic databases and cross-database schema management are introduced. - - -![Wingtip SaaS architecture](./media/saas-dbpertenant-wingtip-app-overview/app-architecture.png) - - -As you go through the tutorials and work with the app, focus on the SaaS patterns as they relate to the data tier. In other words, focus on the data tier, and don't overanalyze the app itself. Understanding the implementation of these SaaS patterns is key to implementing these patterns in your applications. Also consider any necessary modifications for your specific business requirements. - -## SQL Database Wingtip SaaS tutorials - -After you deploy the app, explore the following tutorials that build on the initial deployment. These tutorials explore common SaaS patterns that take advantage of built-in features of SQL Database, Azure Synapse Analytics, and other Azure services. Tutorials include PowerShell scripts with detailed explanations. The explanations simplify understanding and implementation of the same SaaS management patterns in your applications. - - -| Tutorial | Description | -|:--|:--| -| [Guidance and tips for the SQL Database multitenant SaaS app example](saas-tenancy-wingtip-app-guidance-tips.md) | Download and run PowerShell scripts to prepare parts of the application. 
| -|[Deploy and explore the Wingtip SaaS application](./saas-dbpertenant-get-started-deploy.md)| Deploy and explore the Wingtip SaaS application with your Azure subscription. | -|[Provision and catalog tenants](./saas-dbpertenant-provision-and-catalog.md)| Learn how the application connects to tenants by using a catalog database, and how the catalog maps tenants to their data. | -|[Monitor and manage performance](./saas-dbpertenant-performance-monitoring.md)| Learn how to use monitoring features of SQL Database and set alerts when performance thresholds are exceeded. | -|[Monitor with Azure Monitor logs](./saas-dbpertenant-log-analytics.md) | Learn how to use [Azure Monitor logs](../../azure-monitor/logs/log-query-overview.md) to monitor large amounts of resources across multiple pools. | -|[Restore a single tenant](./saas-dbpertenant-restore-single-tenant.md)| Learn how to restore a tenant database to a prior point in time. Also learn how to restore to a parallel database, which leaves the existing tenant database online. | -|[Manage tenant database schema](saas-tenancy-schema-management.md)| Learn how to update schema and update reference data across all tenant databases. | -|[Run cross-tenant distributed queries](saas-tenancy-cross-tenant-reporting.md) | Create an ad hoc analytics database, and run real-time distributed queries across all tenants. | -|[Run analytics on extracted tenant data](saas-tenancy-tenant-analytics.md) | Extract tenant data into an analytics database or data warehouse for offline analytics queries. 
| - - -## Next steps - -- [General guidance and tips when you deploy and use the Wingtip Tickets SaaS app example](saas-tenancy-wingtip-app-guidance-tips.md) -- [Deploy the Wingtip SaaS application](./saas-dbpertenant-get-started-deploy.md) \ No newline at end of file diff --git a/articles/azure-sql/database/saas-multitenantdb-adhoc-reporting.md b/articles/azure-sql/database/saas-multitenantdb-adhoc-reporting.md deleted file mode 100644 index f9f807fc26216..0000000000000 --- a/articles/azure-sql/database/saas-multitenantdb-adhoc-reporting.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -title: Ad hoc reporting queries across multiple databases -description: "Run ad hoc reporting queries across multiple Azure SQL databases in a multi-tenant app example." -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 10/30/2018 ---- -# Run ad hoc analytics queries across multiple databases (Azure SQL Database) -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this tutorial, you run distributed queries across the entire set of tenant databases to enable ad hoc interactive reporting. These queries can extract insights buried in the day-to-day operational data of the Wingtip Tickets SaaS app. To do these extractions, you deploy an additional analytics database to the catalog server and use Elastic Query to enable distributed queries. - - -In this tutorial you learn: - -> [!div class="checklist"] -> -> * How to deploy an ad hoc reporting database -> * How to run distributed queries across all tenant databases - - -To complete this tutorial, make sure the following prerequisites are completed: - -* The Wingtip Tickets SaaS Multi-tenant Database app is deployed. 
To deploy in less than five minutes, see [Deploy and explore the Wingtip Tickets SaaS Multi-tenant Database application](saas-multitenantdb-get-started-deploy.md) -* Azure PowerShell is installed. For details, see [Getting started with Azure PowerShell](/powershell/azure/get-started-azureps) -* SQL Server Management Studio (SSMS) is installed. To download and install SSMS, see [Download SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms). - - -## Ad hoc reporting pattern - -![adhoc reporting pattern](./media/saas-multitenantdb-adhoc-reporting/adhocreportingpattern_shardedmultitenantDB.png) - -SaaS applications can analyze the vast amount of tenant data that is stored centrally in the cloud. The analyses reveal insights into the operation and usage of your application. These insights can guide feature development, usability improvements, and other investments in your apps and services. - -Accessing this data in a single multi-tenant database is easy, but not so easy when distributed at scale across potentially thousands of databases. One approach is to use [Elastic Query](elastic-query-overview.md), which enables querying across a distributed set of databases with common schema. These databases can be distributed across different resource groups and subscriptions. Yet one common login must have access to extract data from all the databases. Elastic Query uses a single *head* database in which external tables are defined that mirror tables or views in the distributed (tenant) databases. Queries submitted to this head database are compiled to produce a distributed query plan, with portions of the query pushed down to the tenant databases as needed. Elastic Query uses the shard map in the catalog database to determine the location of all tenant databases. Setup and query are straightforward using standard [Transact-SQL](/sql/t-sql/language-reference), and support ad hoc querying from tools like Power BI and Excel. 
- -By distributing queries across the tenant databases, Elastic Query provides immediate insight into live production data. However, as Elastic Query pulls data from potentially many databases, query latency can sometimes be higher than for equivalent queries submitted to a single multi-tenant database. Be sure to design queries to minimize the data that is returned. Elastic Query is often best suited for querying small amounts of real-time data, as opposed to building frequently used or complex analytics queries or reports. If queries do not perform well, look at the [execution plan](/sql/relational-databases/performance/display-an-actual-execution-plan) to see what part of the query has been pushed down to the remote database. And assess how much data is being returned. Queries that require complex analytical processing might be better served by saving the extracted tenant data into a database that is optimized for analytics queries. SQL Database and Azure Synapse Analytics could host such an analytics database. - -This pattern for analytics is explained in the [tenant analytics tutorial](saas-multitenantdb-tenant-analytics.md). - -## Get the Wingtip Tickets SaaS Multi-tenant Database application source code and scripts - -The Wingtip Tickets SaaS Multi-tenant Database scripts and application source code are available in the [WingtipTicketsSaaS-MultitenantDB](https://github.com/microsoft/WingtipTicketsSaaS-MultiTenantDB) GitHub repo. Check out the [general guidance](saas-tenancy-wingtip-app-guidance-tips.md) for steps to download and unblock the Wingtip Tickets SaaS scripts. - -## Create ticket sales data - -To run queries against a more interesting data set, create ticket sales data by running the ticket-generator. - -1. In the *PowerShell ISE*, open the ...\\Learning Modules\\Operational Analytics\\Adhoc Reporting\\*Demo-AdhocReporting.ps1* script and set the following values: - * **$DemoScenario** = 1, **Purchase tickets for events at all venues**. -2. 
Press **F5** to run the script and generate ticket sales. While the script is running, continue the steps in this tutorial. The ticket data is queried in the *Run ad hoc distributed queries* section, so wait for the ticket generator to complete. - -## Explore the tenant tables - -In the Wingtip Tickets SaaS Multi-tenant Database application, tenants are stored in a hybrid tenant management model - where tenant data is either stored in a multi-tenant database or a single tenant database and can be moved between the two. When querying across all tenant databases, it's important that Elastic Query can treat the data as if it is part of a single logical database sharded by tenant. - -To achieve this pattern, all tenant tables include a *VenueId* column that identifies which tenant the data belongs to. The *VenueId* is computed as a hash of the Venue name, but any approach could be used to introduce a unique value for this column. This approach is similar to the way the tenant key is computed for use in the catalog. Tables containing *VenueId* are used by Elastic Query to parallelize queries and push them down to the appropriate remote tenant database. This dramatically reduces the amount of data that is returned and results in an increase in performance especially when there are multiple tenants whose data is stored in single tenant databases. - -## Deploy the database used for ad hoc distributed queries - -This exercise deploys the *adhocreporting* database. This is the head database that contains the schema used for querying across all tenant databases. The database is deployed to the existing catalog server, which is the server used for all management-related databases in the sample app. - -1. Open ...\\Learning Modules\\Operational Analytics\\Adhoc Reporting\\*Demo-AdhocReporting.ps1* in the *PowerShell ISE* and set the following values: - * **$DemoScenario** = 2, **Deploy Ad hoc analytics database**. - -2. 
Press **F5** to run the script and create the *adhocreporting* database. - -In the next section, you add schema to the database so it can be used to run distributed queries. - -## Configure the 'head' database for running distributed queries - -This exercise adds schema (the external data source and external table definitions) to the ad hoc reporting database that enables querying across all tenant databases. - -1. Open SQL Server Management Studio, and connect to the Adhoc reporting database you created in the previous step. The name of the database is *adhocreporting*. -2. Open ...\Learning Modules\Operational Analytics\Adhoc Reporting\ *Initialize-AdhocReportingDB.sql* in SSMS. -3. Review the SQL script and note the following: - - Elastic Query uses a database-scoped credential to access each of the tenant databases. This credential needs to be available in all the databases and should normally be granted the minimum rights required to enable these ad hoc queries. - - ![create credential](./media/saas-multitenantdb-adhoc-reporting/create-credential.png) - - By using the catalog database as the external data source, queries are distributed to all databases registered in the catalog when the query is run. Because server names are different for each deployment, this initialization script gets the location of the catalog database by retrieving the current server (@@servername) where the script is executed. - - ![create external data source](./media/saas-multitenantdb-adhoc-reporting/create-external-data-source.png) - - The external tables that reference tenant tables are defined with **DISTRIBUTION = SHARDED(VenueId)**. This routes a query for a particular *VenueId* to the appropriate database and improves performance for many scenarios as shown in the next section. - - ![create external tables](./media/saas-multitenantdb-adhoc-reporting/external-tables.png) - - The local table *VenueTypes* is created and populated. 
This reference data table is common in all tenant databases, so it can be represented here as a local table and populated with the common data. For some queries, this may reduce the amount of data moved between the tenant databases and the *adhocreporting* database. - - ![create table](./media/saas-multitenantdb-adhoc-reporting/create-table.png) - - If you include reference tables in this manner, be sure to update the table schema and data whenever you update the tenant databases. - -4. Press **F5** to run the script and initialize the *adhocreporting* database. - -Now you can run distributed queries, and gather insights across all tenants! - -## Run ad hoc distributed queries - -Now that the *adhocreporting* database is set up, go ahead and run some distributed queries. Include the execution plan for a better understanding of where the query processing is happening. - -When inspecting the execution plan, hover over the plan icons for details. - -1. In *SSMS*, open ...\\Learning Modules\\Operational Analytics\\Adhoc Reporting\\*Demo-AdhocReportingQueries.sql*. -2. Ensure you are connected to the **adhocreporting** database. -3. Select the **Query** menu and click **Include Actual Execution Plan** -4. Highlight the *Which venues are currently registered?* query, and press **F5**. - - The query returns the entire venue list, illustrating how quick and easy it is to query across all tenants and return data from each tenant. - - Inspect the plan and see that the entire cost is the remote query because we're simply going to each tenant database and selecting the venue information. - - ![SELECT * FROM dbo.Venues](./media/saas-multitenantdb-adhoc-reporting/query1-plan.png) - -5. Select the next query, and press **F5**. - - This query joins data from the tenant databases and the local *VenueTypes* table (local, as it's a table in the *adhocreporting* database). 
- - Inspect the plan and see that the majority of cost is the remote query because we query each tenant's venue info (dbo.Venues), and then do a quick local join with the local *VenueTypes* table to display the friendly name. - - ![Join on remote and local data](./media/saas-multitenantdb-adhoc-reporting/query2-plan.png) - -6. Now select the *On which day were the most tickets sold?* query, and press **F5**. - - This query does a bit more complex joining and aggregation. What's important to note is that most of the processing is done remotely, and once again, we bring back only the rows we need, returning just a single row for each venue's aggregate ticket sale count per day. - - ![query](./media/saas-multitenantdb-adhoc-reporting/query3-plan.png) - - -## Next steps - -In this tutorial you learned how to: - -> [!div class="checklist"] -> -> * Run distributed queries across all tenant databases -> * Deploy an ad hoc reporting database and add schema to it to run distributed queries. - -Now try the [Tenant Analytics tutorial](saas-multitenantdb-tenant-analytics.md) to explore extracting data to a separate analytics database for more complex analytics processing. - -## Additional resources - - - -* [Elastic Query](elastic-query-overview.md) \ No newline at end of file diff --git a/articles/azure-sql/database/saas-multitenantdb-get-started-deploy.md b/articles/azure-sql/database/saas-multitenantdb-get-started-deploy.md deleted file mode 100644 index db9f05d026359..0000000000000 --- a/articles/azure-sql/database/saas-multitenantdb-get-started-deploy.md +++ /dev/null @@ -1,286 +0,0 @@ ---- -title: Deploy a sharded multi-tenant database SaaS app -description: "Deploy and explore the sharded Wingtip Tickets SaaS multi-tenant database application, that demonstrates SaaS patterns by using Azure SQL Database." 
-services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 10/16/2018 ---- -# Deploy and explore a sharded multi-tenant application -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this tutorial, you deploy and explore a sample multi-tenant SaaS application that is named Wingtip Tickets. The Wingtip Tickets app is designed to showcase features of Azure SQL Database that simplify the implementation of SaaS scenarios. - -This implementation of the Wingtip Tickets app uses a sharded multi-tenant database pattern. The sharding is by tenant identifier. Tenant data is distributed to a particular database according to the tenant identifier values. - -This database pattern allows you to store one or more tenants in each shard or database. You can optimize for lowest cost by having each database be shared by multiple tenants. Or you can optimize for isolation by having each database store only one tenant. Your optimization choice can be made independently for each specific tenant. Your choice can be made when the tenant is first stored, or you can change your mind later. The application is designed to work well either way. - -## App deploys quickly - -The app runs in the Azure cloud and uses Azure SQL Database. The deployment section that follows provides the blue **Deploy to Azure** button. When the button is pressed, the app is fully deployed to your Azure subscription within five minutes. You have full access to work with the individual application components. - -The application is deployed with data for three sample tenants. The tenants are stored together in one multi-tenant database. - -Anyone can download the C# and PowerShell source code for Wingtip Tickets from [its GitHub repository][link-github-wingtip-multitenantdb-55g]. 
- -## Learn in this tutorial - -> [!div class="checklist"] -> - How to deploy the Wingtip Tickets SaaS application. -> - Where to get the application source code, and management scripts. -> - About the servers and databases that make up the app. -> - How tenants are mapped to their data with the *catalog*. -> - How to provision a new tenant. -> - How to monitor tenant activity in the app. - -A series of related tutorials is available that build upon this initial deployment. The tutorials explore a range of SaaS design and management patterns. When you work through the tutorials, you are encouraged to step through the provided scripts to see how the different SaaS patterns are implemented. - -## Prerequisites - -To complete this tutorial, make sure the following prerequisites are completed: - -- The latest Azure PowerShell is installed. For details, see [Getting started with Azure PowerShell][link-azure-get-started-powershell-41q]. - -## Deploy the Wingtip Tickets app - -### Plan the names - -In the steps of this section, you provide a *user* value that is used to ensure resource names are globally unique, and a name for the *resource group* which contains all the resources created by a deployment of the app. For a person named *Ann Finley*, we suggest: -- *User:* **af1** *(Their initials, plus a digit. Use a different value (e.g. af2) if you deploy the app a second time.)* -- *Resource group:* **wingtip-mt-af1** *(wingtip-mt indicates this is the sharded multi-tenant app. Appending the user name af1 correlates the resource group name with the names of the resources it contains.)* - -Choose your names now, and write them down. - -### Steps - -1. Click the following blue **Deploy to Azure** button. - - It opens the Azure portal with the Wingtip Tickets SaaS deployment template. - - [![Button for Deploy to Azure.][image-deploy-to-azure-blue-48d]][link-aka-ms-deploywtp-mtapp-52k] - -1. Enter the required parameter values for the deployment. 
- - > [!IMPORTANT] - > For this demonstration, do not use any pre-existing resource groups, servers, or pools. Instead, choose **Create a new resource group**. Delete this resource group when you are finished with the application to stop related billing. - > Do not use this application, or any resources it creates, for production. Some aspects of authentication, and the server firewall settings, are intentionally insecure in the app to facilitate the demonstration. - - - For **Resource group** - Select **Create new**, and then provide a **Name** for the resource group (case sensitive). - - Select a **Location** from the drop-down list. - - For **User** - We recommend that you choose a short **User** value. - -1. **Deploy the application**. - - - Click to agree to the terms and conditions. - - Click **Purchase**. - -1. Monitor deployment status by clicking **Notifications**, which is the bell icon to the right of the search box. Deploying the Wingtip app takes approximately five minutes. - - ![deployment succeeded](./media/saas-multitenantdb-get-started-deploy/succeeded.png) - -## Download and unblock the management scripts - -While the application is deploying, download the application source code and management scripts. - -> [!NOTE] -> Executable contents (scripts, DLLs) may be blocked by Windows when zip files are downloaded from an external source and extracted. When extracting the scripts from a zip file, use the following steps to unblock the .zip file before extracting. By unblocking the .zip file, you ensure the scripts are allowed to run. - -1. Browse to [the WingtipTicketsSaaS-MultiTenantDb GitHub repo](https://github.com/Microsoft/WingtipTicketsSaaS-MultiTenantDb). -2. Click **Clone or download**. -3. Click **Download ZIP** and save the file. -4. Right-click the **WingtipTicketsSaaS-MultiTenantDb-master.zip** file and select **Properties**. -5. On the **General** tab, select **Unblock**, and click **Apply**. -6. Click **OK**. -7. Extract the files. 
- -The scripts are located in the *..\\WingtipTicketsSaaS-MultiTenantDb-master\\Learning Modules\\* folder. - -## Update the configuration file for this deployment - -Before running any scripts, set the *resource group* and *user* values in **UserConfig.psm1**. Set these variables to the same values you set during deployment. - -1. Open ...\\Learning Modules\\*UserConfig.psm1* in the *PowerShell ISE*. -2. Update *ResourceGroupName* and *Name* with the specific values for your deployment (on lines 10 and 11 only). -3. Save the changes. - -The values set in this file are used by all the scripts, so it is important they are accurate. If you redeploy the app, you must choose different values for User and Resource Group. Then update the UserConfig.psm1 file again with the new values. - -## Run the application - -In the Wingtip app, the tenants are venues. A venue can be a concert hall, a sports club, or any other location that hosts events. The venues register in Wingtip as customers, and a tenant identifier is generated for each venue. Each venue lists its upcoming events in Wingtip, so the public can buy tickets to the events. - -Each venue gets a personalized web app to list their events and sell tickets. Each web app is independent and isolated from other tenants. Internally in Azure SQL Database, the data for each tenant is stored in a sharded multi-tenant database, by default. All data is tagged with the tenant identifier. - -A central **Events Hub** webpage provides a list of links to the tenants in your particular deployment. Use the following steps to experience the **Events Hub** webpage and an individual web app: - -1. Open the **Events Hub** in your web browser: - - http://events.wingtip-mt.<user>.trafficmanager.net   *(Replace <user> with your deployment's user value.)* - - ![events hub](./media/saas-multitenantdb-get-started-deploy/events-hub.png) - -2. Click **Fabrikam Jazz Club** in the **Events Hub**. 
- - ![Events](./media/saas-multitenantdb-get-started-deploy/fabrikam.png) - -### Azure Traffic Manager - -To control the distribution of incoming requests, the Wingtip app uses [Azure Traffic Manager](../../traffic-manager/traffic-manager-overview.md). The events page for each tenant includes the tenant name in its URL. Each URL also includes your specific User value. Each URL obeys the shown format by using the following steps: - -- http://events.wingtip-mt.<user>.trafficmanager.net/*fabrikamjazzclub* - -1. The events app parses the tenant name from the URL. The tenant name is *fabrikamjazzclub* in the preceding example URL. -2. The app then hashes the tenant name to create a key to access a catalog using [shard map management](elastic-scale-shard-map-management.md). -3. The app finds the key in the catalog, and obtains the corresponding location of the tenant's database. -4. The app uses the location info to find and access the one database that contains all the data for the tenant. - -### Events Hub - -1. The **Events Hub** lists all the tenants that are registered in the catalog, and their venues. -2. The **Events Hub** uses extended metadata in the catalog to retrieve the tenant's name associated with each mapping to construct the URLs. - -In a production environment, you typically create a CNAME DNS record to [point a company internet domain](../../traffic-manager/traffic-manager-point-internet-domain.md) to the traffic manager profile. - -## Start generating load on the tenant databases - -Now that the app is deployed, let's put it to work! The *Demo-LoadGenerator* PowerShell script starts a workload running for each tenant. The real-world load on many SaaS apps is typically sporadic and unpredictable. To simulate this type of load, the generator produces a load distributed across all tenants. The load includes randomized bursts on each tenant occurring at randomized intervals. 
It takes several minutes for the load pattern to emerge, so it's best to let the generator run for at least three or four minutes before monitoring the load. - -1. In the *PowerShell ISE*, open the ...\\Learning Modules\\Utilities\\*Demo-LoadGenerator.ps1* script. -2. Press **F5** to run the script and start the load generator (leave the default parameter values for now). - -The *Demo-LoadGenerator.ps1* script opens another PowerShell session where the load generator runs. The load generator runs in this session as a foreground task that invokes background load-generation jobs, one for each tenant. - -After the foreground task starts, it remains in a job-invoking state. The task starts additional background jobs for any new tenants that are subsequently provisioned. - -Closing the PowerShell session stops all jobs. - -You might want to restart the load generator session to use different parameter values. If so, close the PowerShell generation session, and then rerun the *Demo-LoadGenerator.ps1*. - -## Provision a new tenant into the sharded database - -The initial deployment includes three sample tenants in the *Tenants1* database. Let's create another tenant and observe its effects on the deployed application. In this step, you press one key to create a new tenant: - -1. Open ...\\Learning Modules\\Provision and Catalog\\*Demo-ProvisionTenants.ps1* in the *PowerShell ISE*. -2. Press **F5** (not **F8**) to run the script (leave the default values for now). - - > [!NOTE] - > You must run the PowerShell scripts only by pressing the **F5** key, not by pressing **F8** to run a selected part of the script. The problem with **F8** is that the *$PSScriptRoot* variable is not evaluated. This variable is needed by many scripts to navigate folders, invoke other scripts, or import modules. - -The new Red Maple Racing tenant is added to the *Tenants1* database and registered in the catalog. 
The new tenant's ticket-selling **Events** site opens in your browser: - -![New tenant](./media/saas-multitenantdb-get-started-deploy/red-maple-racing.png) - -Refresh the **Events Hub**, and the new tenant now appears in the list. - -## Provision a new tenant in its own database - -The sharded multi-tenant model allows you to choose whether to provision a new tenant into a database that contains other tenants, or into a database of its own. A tenant isolated in its own database enjoys the following benefits: - -- The performance of the tenant's database can be managed without the need to compromise with the needs of other tenants. -- If necessary, the database can be restored to an earlier point in time, because no other tenants would be affected. - -You might choose to put free-trial customers, or economy customers, into multi-tenant databases. You could put each premium tenant into its own dedicated database. If you create lots of databases that contain only one tenant, you can manage them all collectively in an elastic pool to optimize resource costs. - -Next, we provision another tenant, this time in its own database: - -1. In ...\\Learning Modules\\Provision and Catalog\\*Demo-ProvisionTenants.ps1*, modify *$TenantName* to **Salix Salsa**, *$VenueType* to **dance** and *$Scenario* to **2**. - -2. Press **F5** to run the script again. - - This **F5** press provisions the new tenant in a separate database. The database and the tenant are registered in the catalog. Then the browser opens to the Events page of the tenant. - - ![Salix Salsa events page](./media/saas-multitenantdb-get-started-deploy/salix-salsa.png) - - - Scroll to the bottom of the page. There in the banner you see the database name in which the tenant data is stored. - -3. Refresh the **Events Hub** and the two new tenants now appear in the list. - -## Explore the servers and tenant databases - -Now we look at some of the resources that were deployed: - -1. 
In the [Azure portal](https://portal.azure.com), browse to the list of resource groups. Open the resource group you created when you deployed the application. - - ![resource group](./media/saas-multitenantdb-get-started-deploy/resource-group.png) - -2. Click **catalog-mt<user>** server. The catalog server contains two databases named *tenantcatalog* and *basetenantdb*. The *basetenantdb* database is an empty template database. It is copied to create a new tenant database, whether used for many tenants or just one tenant. - - ![catalog server](./media/saas-multitenantdb-get-started-deploy/catalog-server.png) - -3. Go back to the resource group and select the *tenants1-mt* server that holds the tenant databases. - - The tenants1 database is a multi-tenant database in which the original three tenants, plus the first tenant you added, are stored. It is configured as a 50 DTU Standard database. - - The **salixsalsa** database holds the Salix Salsa dance venue as its only tenant. It is configured as a Standard edition database with 50 DTUs by default. - - ![tenants server](./media/saas-multitenantdb-get-started-deploy/tenants-server.png) - -## Monitor the performance of the database - -If the load generator has been running for several minutes, enough telemetry is available to look at the database monitoring capabilities built into the Azure portal. - -1. Browse to the **tenants1-mt<user>** server, and click **tenants1** to view resource utilization for the database that has four tenants in it. Each tenant is subject to a sporadic heavy load from the load generator: - - ![monitor tenants1](./media/saas-multitenantdb-get-started-deploy/monitor-tenants1.png) - - The DTU utilization chart nicely illustrates how a multi-tenant database can support an unpredictable workload across many tenants. In this case, the load generator is applying a sporadic load of roughly 30 DTUs to each tenant. This load equates to 60% utilization of a 50 DTU database. 
Peaks that exceed 60% are the result of load being applied to more than one tenant at the same time. - -2. Browse to the **tenants1-mt<user>** server, and click the **salixsalsa** database. You can see the resource utilization on this database that contains only one tenant. - - ![salixsalsa database](./media/saas-multitenantdb-get-started-deploy/monitor-salix.png) - -The load generator is applying a similar load to each tenant, regardless of which database each tenant is in. With only one tenant in the **salixsalsa** database, you can see that the database could sustain a much higher load than the database with several tenants. - -### Resource allocations vary by workload - -Sometimes a multi-tenant database requires more resources for good performance than does a single-tenant database, but not always. The optimal allocation of resources depends on the particular workload characteristics for the tenants in your system. - -The workloads generated by the load generator script are for illustration purposes only. - -## Additional resources - -- To learn about multi-tenant SaaS applications, see [Design patterns for multi-tenant SaaS applications](saas-tenancy-app-design-patterns.md). - -- To learn about elastic pools, see: - - - [Elastic pools help you manage and scale multiple databases in Azure SQL Database](elastic-pool-overview.md) - - [Scaling out with Azure SQL Database](elastic-scale-introduction.md) - -## Next steps - -In this tutorial you learned: - -> [!div class="checklist"] -> - How to deploy the Wingtip Tickets SaaS Multi-tenant Database application. -> - About the servers, and databases that make up the app. -> - Tenants are mapped to their data with the *catalog*. -> - How to provision new tenants, into a multi-tenant database and single-tenant database. -> - How to view pool utilization to monitor tenant activity. -> - How to delete sample resources to stop related billing. 
- -Now try the [Provision and catalog tutorial](saas-multitenantdb-provision-and-catalog.md). - - - - -[link-aka-ms-deploywtp-mtapp-52k]: https://aka.ms/deploywtp-mtapp - - -[link-azure-get-started-powershell-41q]: /powershell/azure/get-started-azureps - -[link-github-wingtip-multitenantdb-55g]: https://github.com/Microsoft/WingtipTicketsSaaS-MultiTenantDB/ - - - - - -[image-deploy-to-azure-blue-48d]: media/saas-multitenantdb-get-started-deploy/deploy.png "Button for deploying to Azure." \ No newline at end of file diff --git a/articles/azure-sql/database/saas-multitenantdb-performance-monitoring.md b/articles/azure-sql/database/saas-multitenantdb-performance-monitoring.md deleted file mode 100644 index b92b29b184a41..0000000000000 --- a/articles/azure-sql/database/saas-multitenantdb-performance-monitoring.md +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Monitor performance of a sharded multi-tenant database -description: "Monitor and manage performance of sharded multi-tenant Azure SQL Database in a multi-tenant SaaS app" -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: seo-lt-2019, sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 01/25/2019 ---- -# Monitor and manage performance of sharded multi-tenant Azure SQL Database in a multi-tenant SaaS app -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this tutorial, several key performance management scenarios used in SaaS applications are explored. Using a load generator to simulate activity across sharded multi-tenant databases, the built-in monitoring and alerting features of Azure SQL Database are demonstrated. - -The Wingtip Tickets SaaS Multi-tenant Database app uses a sharded multi-tenant data model, where venue (tenant) data is distributed by tenant ID across potentially multiple databases. Like many SaaS applications, the anticipated tenant workload pattern is unpredictable and sporadic. 
In other words, ticket sales may occur at any time. To take advantage of this typical database usage pattern, databases can be scaled up and down to optimize the cost of a solution. With this type of pattern, it's important to monitor database resource usage to ensure that loads are reasonably balanced across potentially multiple databases. You also need to ensure that individual databases have adequate resources and are not hitting their [DTU](purchasing-models.md#dtu-purchasing-model) limits. This tutorial explores ways to monitor and manage databases, and how to take corrective action in response to variations in workload. - -In this tutorial you learn how to: - -> [!div class="checklist"] -> -> * Simulate usage on a sharded multi-tenant database by running a provided load generator -> * Monitor the database as it responds to the increase in load -> * Scale up the database in response to the increased database load -> * Provision a tenant into a single-tenant database - -To complete this tutorial, make sure the following prerequisites are completed: - -* The Wingtip Tickets SaaS Multi-tenant Database app is deployed. To deploy in less than five minutes, see [Deploy and explore the Wingtip Tickets SaaS Multi-tenant Database application](./saas-multitenantdb-get-started-deploy.md) -* Azure PowerShell is installed. For details, see [Getting started with Azure PowerShell](/powershell/azure/get-started-azureps) - -## Introduction to SaaS performance management patterns - -Managing database performance consists of compiling and analyzing performance data, and then reacting to this data by adjusting parameters to maintain an acceptable response time for your application. - -### Performance management strategies - -* To avoid having to manually monitor performance, it’s most effective to **set alerts that trigger when databases stray out of normal ranges**. 
-* To respond to short-term fluctuations in the compute size of a database, the **DTU level can be scaled up or down**. If this fluctuation occurs on a regular or predictable basis, **scaling the database can be scheduled to occur automatically**. For example, scale down when you know your workload is light, maybe overnight, or during weekends. -* To respond to longer-term fluctuations, or changes in the tenants, **individual tenants can be moved into other databases**. -* To respond to short-term increases in *individual* tenant load, **individual tenants can be taken out of a database and assigned an individual compute size**. Once the load is reduced, the tenant can then be returned to the multi-tenant database. When this is known in advance, tenants can be moved preemptively to ensure the database always has the resources it needs, and to avoid impact on other tenants in the multi-tenant database. If this requirement is predictable, such as a venue experiencing a rush of ticket sales for a popular event, then this management behavior can be integrated into the application. - -The [Azure portal](https://portal.azure.com) provides built-in monitoring and alerting on most resources. For SQL Database, monitoring and alerting is available on databases. This built-in monitoring and alerting is resource-specific, so it's convenient to use for small numbers of resources, but is not convenient when working with many resources. - -For high-volume scenarios, where you're working with many resources, [Azure Monitor logs](https://azure.microsoft.com/services/log-analytics/) can be used. This is a separate Azure service that provides analytics over emitted logs gathered in a Log Analytics workspace. Azure Monitor logs can collect telemetry from many services and be used to query and set alerts. 
- -## Get the Wingtip Tickets SaaS Multi-tenant Database application source code and scripts - -The Wingtip Tickets SaaS Multi-tenant Database scripts and application source code are available in the [WingtipTicketsSaaS-MultitenantDB](https://github.com/microsoft/WingtipTicketsSaaS-MultiTenantDB) GitHub repo. Check out the [general guidance](saas-tenancy-wingtip-app-guidance-tips.md) for steps to download and unblock the Wingtip Tickets SaaS scripts. - -## Provision additional tenants - -For a good understanding of how performance monitoring and management works at scale, this tutorial requires you to have multiple tenants in a sharded multi-tenant database. - -If you have already provisioned a batch of tenants in a prior tutorial, skip to the [Simulate usage on all tenant databases](#simulate-usage-on-all-tenant-databases) section. - -1. In the **PowerShell ISE**, open …\\Learning Modules\\Performance Monitoring and Management\\*Demo-PerformanceMonitoringAndManagement.ps1*. Keep this script open as you'll run several scenarios during this tutorial. -1. Set **$DemoScenario** = **1**, _Provision a batch of tenants_ -1. Press **F5** to run the script. - -The script deploys 17 tenants into the multi-tenant database in a few minutes. - -The *New-TenantBatch* script creates new tenants with unique tenant keys within the sharded multi-tenant database and initializes them with the tenant name and venue type. This is consistent with the way the app provisions a new tenant. - -## Simulate usage on all tenant databases - -The *Demo-PerformanceMonitoringAndManagement.ps1* script is provided that simulates a workload running against the multi-tenant database. 
The load is generated using one of the available load scenarios: - -| Demo | Scenario | -|:--|:--| -| 2 | Generate normal intensity load (approximately 30 DTU) | -| 3 | Generate load with longer bursts per tenant| -| 4 | Generate load with higher DTU bursts per tenant (approximately 70 DTU)| -| 5 | Generate a high intensity (approximately 90 DTU) on a single tenant plus a normal intensity load on all other tenants | - -The load generator applies a *synthetic* CPU-only load to every tenant database. The generator starts a job for each tenant database, which calls a stored procedure periodically that generates the load. The load levels (in DTUs), duration, and intervals are varied across all databases, simulating unpredictable tenant activity. - -1. In the **PowerShell ISE**, open …\\Learning Modules\\Performance Monitoring and Management\\*Demo-PerformanceMonitoringAndManagement.ps1*. Keep this script open as you'll run several scenarios during this tutorial. -1. Set **$DemoScenario** = **2**, _Generate normal intensity load_ -1. Press **F5** to apply a load to all your tenants. - -Wingtip Tickets SaaS Multi-tenant Database is a SaaS app, and the real-world load on a SaaS app is typically sporadic and unpredictable. To simulate this, the load generator produces a randomized load distributed across all tenants. Several minutes are needed for the load pattern to emerge, so run the load generator for 3-5 minutes before attempting to monitor the load in the following sections. - -> [!IMPORTANT] -> The load generator is running as a series of jobs in a new PowerShell window. If you close the session, the load generator stops. The load generator remains in a *job-invoking* state where it generates load on any new tenants that are provisioned after the generator is started. Use *Ctrl-C* to stop invoking new jobs and exit the script. The load generator will continue to run, but only on existing tenants. 
- -## Monitor resource usage using the Azure portal - -To monitor the resource usage that results from the load being applied, open the portal to the multi-tenant database, **tenants1**, containing the tenants: - -1. Open the [Azure portal](https://portal.azure.com) and browse to the server *tenants1-mt-<USER>*. -1. Scroll down and locate databases and click **tenants1**. This sharded multi-tenant database contains all the tenants created so far. - -![database chart](./media/saas-multitenantdb-performance-monitoring/multitenantdb.png) - -Observe the **DTU** chart. - -## Set performance alerts on the database - -Set an alert on the database that triggers on \>75% utilization as follows: - -1. Open the *tenants1* database (on the *tenants1-mt-<USER>* server) in the [Azure portal](https://portal.azure.com). -1. Click **Alert Rules**, and then click **+ Add alert**: - - ![add alert](./media/saas-multitenantdb-performance-monitoring/add-alert.png) - -1. Provide a name, such as **High DTU**, -1. Set the following values: - * **Metric = DTU percentage** - * **Condition = greater than** - * **Threshold = 75**. - * **Period = Over the last 30 minutes** -1. Add an email address to the *Additional administrator email(s)* box and click **OK**. - - ![set alert](./media/saas-multitenantdb-performance-monitoring/set-alert.png) - -## Scale up a busy database - -If the load level increases on a database to the point that it maxes out the database and reaches 100% DTU usage, then database performance is affected, potentially slowing query response times. - -**Short term**, consider scaling up the database to provide additional resources, or removing tenants from the multi-tenant database (moving them out of the multi-tenant database to a stand-alone database). - -**Longer term**, consider optimizing queries or index usage to improve database performance. 
Depending on the application's sensitivity to performance issues it’s best practice to scale a database up before it reaches 100% DTU usage. Use an alert to warn you in advance. - -You can simulate a busy database by increasing the load produced by the generator. Causing the tenants to burst more frequently, and for longer, increases the load on the multi-tenant database without changing the requirements of the individual tenants. Scaling up the database is easily done in the portal or from PowerShell. This exercise uses the portal. - -1. Set *$DemoScenario* = **3**, _Generate load with longer and more frequent bursts per database_ to increase the intensity of the aggregate load on the database without changing the peak load required by each tenant. -1. Press **F5** to apply a load to all your tenant databases. -1. Go to the **tenants1** database in the Azure portal. - -Monitor the increased database DTU usage on the upper chart. It takes a few minutes for the new higher load to kick in, but you should quickly see the database start to hit max utilization, and as the load steadies into the new pattern, it rapidly overloads the database. - -1. To scale up the database, click **Pricing tier (scale DTUs)** in the settings blade. -1. Adjust the **DTU** setting to **100**. -1. Click **Apply** to submit the request to scale the database. - -Go back to **tenants1** > **Overview** to view the monitoring charts. Monitor the effect of providing the database with more resources (although with few tenants and a randomized load it’s not always easy to see conclusively until you run for some time). While you are looking at the charts bear in mind that 100% on the upper chart now represents 100 DTUs, while on the lower chart 100% is still 50 DTUs. - -Databases remain online and fully available throughout the process. Application code should always be written to retry dropped connections, and so will reconnect to the database. 
- -## Provision a new tenant in its own database - -The sharded multi-tenant model allows you to choose whether to provision a new tenant in a multi-tenant database alongside other tenants, or to provision the tenant in a database of its own. By provisioning a tenant in its own database, it benefits from the isolation inherent in the separate database, allowing you to manage the performance of that tenant independently of others, restore that tenant independently of others, etc. For example, you might choose to put free-trial or regular customers in a multi-tenant database, and premium customers in individual databases. If isolated single-tenant databases are created, they can still be managed collectively in an elastic pool to optimize resource costs. - -If you already provisioned a new tenant in its own database, skip the next few steps. - -1. In the **PowerShell ISE**, open …\\Learning Modules\\ProvisionTenants\\*Demo-ProvisionTenants.ps1*. -1. Modify **$TenantName = "Salix Salsa"** and **$VenueType = "dance"** -1. Set **$Scenario** = **2**, _Provision a tenant in a new single-tenant database_ -1. Press **F5** to run the script. - -The script will provision this tenant in a separate database, register the database and the tenant with the catalog, and then open the tenant’s Events page in the browser. Refresh the Events Hub page and you will see "Salix Salsa" has been added as a venue. - -## Manage performance of an individual database - -If a single tenant within a multi-tenant database experiences a sustained high load, it may tend to dominate the database resources and impact other tenants in the same database. If the activity is likely to continue for some time, the tenant can be temporarily moved out of the database and into its own single-tenant database. This allows the tenant to have the extra resources it needs, and fully isolates it from the other tenants. 
- -This exercise simulates the effect of Salix Salsa experiencing a high load when tickets go on sale for a popular event. - -1. Open the …\\*Demo-PerformanceMonitoringAndManagement.ps1* script. -1. Set **$DemoScenario = 5**, _Generate a normal load plus a high load on a single tenant (approximately 90 DTU)._ -1. Set **$SingleTenantName = Salix Salsa** -1. Execute the script using **F5**. - -Go to portal and navigate to **salixsalsa** > **Overview** to view the monitoring charts. - -## Other performance management patterns - -**Tenant self-service scaling** - -Because scaling is a task easily called via the management API, you can easily build the ability to scale tenant databases into your tenant-facing application, and offer it as a feature of your SaaS service. For example, let tenants self-administer scaling up and down, perhaps linked directly to their billing! - -**Scaling a database up and down on a schedule to match usage patterns** - -Where aggregate tenant usage follows predictable usage patterns, you can use Azure Automation to scale a database up and down on a schedule. For example, scale a database down after 6pm and up again before 6am on weekdays when you know there is a drop in resource requirements. 
- -## Next steps - -In this tutorial you learn how to: - -> [!div class="checklist"] -> * Simulate usage on a sharded multi-tenant database by running a provided load generator -> * Monitor the database as it responds to the increase in load -> * Scale up the database in response to the increased database load -> * Provision a tenant into a single-tenant database - -## Additional resources - - -* [Azure automation](../../automation/automation-intro.md) \ No newline at end of file diff --git a/articles/azure-sql/database/saas-multitenantdb-provision-and-catalog.md b/articles/azure-sql/database/saas-multitenantdb-provision-and-catalog.md deleted file mode 100644 index 272937cf4277a..0000000000000 --- a/articles/azure-sql/database/saas-multitenantdb-provision-and-catalog.md +++ /dev/null @@ -1,274 +0,0 @@ ---- -title: Provision in SaaS multi-tenant -description: "Learn how to provision and catalog new tenants in an Azure SQL Database multi-tenant SaaS app" -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 09/24/2018 ---- -# Provision and catalog new tenants in a SaaS application using a sharded multi-tenant Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article covers the provisioning and cataloging of new tenants, in a *multi-tenant sharded database* model or pattern. - -This article has two major parts: - -- [Conceptual discussion](#goto_2_conceptual) of the provisioning and cataloging of new tenants. - -- [Tutorial](#goto_1_tutorial) that highlights the PowerShell script code that accomplishes the provisioning and cataloging. - - The tutorial uses the Wingtip Tickets SaaS application, adapted to the multi-tenant sharded database pattern. - - - -## Database pattern - -This section, plus a few more that follow, discuss the concepts of the multi-tenant sharded database pattern. 
- -In this multi-tenant sharded model, the table schemas inside each database include a tenant key in the primary key of tables that store tenant data. The tenant key enables each individual database to store 0, 1, or many tenants. The use of sharded databases makes it easy for the application system to support a very large number of tenants. All the data for any one tenant is stored in one database. The large number of tenants are distributed across the many sharded databases. A catalog database stores the mapping of each tenant to its database. - -#### Isolation versus lower cost - -A tenant that has a database all to itself enjoys the benefits of isolation. The tenant can have the database restored to an earlier date without being restricted by the impact on other tenants. Database performance can be tuned to optimize for the one tenant, again without having to compromise with other tenants. The problem is that isolation costs more than it costs to share a database with other tenants. - -When a new tenant is provisioned, it can share a database with other tenants, or it can be placed into its own new database. Later you can change your mind and move the database to the other situation. - -Databases with multiple tenants and single tenants are mixed in the same SaaS application, to optimize cost or isolation for each tenant. - - ![Sharded multi-tenant database app with tenant catalog](./media/saas-multitenantdb-provision-and-catalog/MultiTenantCatalog.png) - -## Tenant catalog pattern - -When you have two or more databases that each contain at least one tenant, the application must have a way to discover which database stores the tenant of current interest. A catalog database stores this mapping. - -#### Tenant key - -For each tenant, the Wingtip application can derive a unique key, which is the tenant key. The app extracts the tenant name from the webpage URL. The app hashes the name to obtain the key. The app uses the key to access the catalog. 
The catalog cross-references information about the database in which the tenant is stored. The app uses the database info to connect. Other tenant key schemes can also be used. - -Using a catalog allows the name or location of a tenant database to be changed after provisioning without disrupting the application. In a multi-tenant database model, the catalog accommodates moving a tenant between databases. - -#### Tenant metadata beyond location - -The catalog can also indicate whether a tenant is offline for maintenance or other actions. And the catalog can be extended to store additional tenant or database metadata, such as the following items: -- The service tier or edition of a database. -- The version of the database schema. -- The tenant name and its SLA (service level agreement). -- Information to enable application management, customer support, or devops processes. - -The catalog can also be used to enable cross-tenant reporting, schema management, and data extract for analytics purposes. - -### Elastic Database Client Library - -In Wingtip, the catalog is implemented in the *tenantcatalog* database. The *tenantcatalog* is created using the Shard Management features of the [Elastic Database Client Library (EDCL)](elastic-database-client-library.md). The library enables an application to create, manage, and use a *shard map* that is stored in a database. A shard map cross-references the tenant key with its shard, meaning its sharded database. - -During tenant provisioning, EDCL functions can be used from applications or PowerShell scripts to create the entries in the shard map. Later the EDCL functions can be used to connect to the correct database. The EDCL caches connection information to minimize the traffic on the catalog database and speed up the process of connecting. - -> [!IMPORTANT] -> Do *not* edit the data in the catalog database through direct access! Direct updates are not supported due to the high risk of data corruption. 
Instead, edit the mapping data by using EDCL APIs only. - -## Tenant provisioning pattern - -#### Checklist - -When you want to provision a new tenant into an existing shared database, you must ask the following questions of the shared database: -- Does it have enough space left for the new tenant? -- Does it have tables with the necessary reference data for the new tenant, or can the data be added? -- Does it have the appropriate variation of the base schema for the new tenant? -- Is it in the appropriate geographic location close to the new tenant? -- Is it at the right service tier for the new tenant? - -When you want the new tenant to be isolated in its own database, you can create it to meet the specifications for the tenant. - -After the provisioning is complete, you must register the tenant in the catalog. Finally, the tenant mapping can be added to reference the appropriate shard. - -#### Template database - -Provision the database by executing SQL scripts, deploying a bacpac, or copying a template database. The Wingtip apps copy a template database to create new tenant databases. - -Like any application, Wingtip will evolve over time. At times, Wingtip will require changes to the database. Changes may include the following items: -- New or changed schema. -- New or changed reference data. -- Routine database maintenance tasks to ensure optimal app performance. - -With a SaaS application, these changes need to be deployed in a coordinated manner across a potentially massive fleet of tenant databases. For these changes to be in future tenant databases, they need to be incorporated into the provisioning process. This challenge is explored further in the [schema management tutorial](saas-tenancy-schema-management.md). - -#### Scripts - -The tenant provisioning scripts in this tutorial support both of the following scenarios: -- Provisioning a tenant into an existing database shared with other tenants. -- Provisioning a tenant into its own database. 
- -Tenant data is then initialized and registered in the catalog shard map. In the sample app, databases that contain multiple tenants are given a generic name, such as *tenants1* or *tenants2*. Databases that contain a single tenant are given the tenant's name. The specific naming conventions used in the sample are not a critical part of the pattern, as the use of a catalog allows any name to be assigned to the database. - - - -## Tutorial begins - -In this tutorial, you learn how to: - -> [!div class="checklist"] -> * Provision a tenant into a multi-tenant database -> * Provision a tenant into a single-tenant database -> * Provision a batch of tenants into both multi-tenant and single-tenant databases -> * Register a database and tenant mapping in a catalog - -#### Prerequisites - -To complete this tutorial, make sure the following prerequisites are completed: - -- Azure PowerShell is installed. For details, see [Getting started with Azure PowerShell](/powershell/azure/get-started-azureps) - -- The Wingtip Tickets SaaS Multi-tenant Database app is deployed. To deploy in less than five minutes, see [Deploy and explore the Wingtip Tickets SaaS Multi-tenant Database application](./saas-multitenantdb-get-started-deploy.md) - -- Get the Wingtip scripts and source code: - - The Wingtip Tickets SaaS Multi-tenant Database scripts and application source code are available in the [WingtipTicketsSaaS-MultitenantDB](https://github.com/microsoft/WingtipTicketsSaaS-MultiTenantDB) GitHub repo. - - See the [general guidance](saas-tenancy-wingtip-app-guidance-tips.md) for steps to download and unblock the Wingtip scripts. - -## Provision a tenant into a database *shared* with other tenants - -In this section, you see a list of the major actions for provisioning that are taken by the PowerShell scripts. Then you use the PowerShell ISE debugger to step through the scripts to see the actions in code. 
- -#### Major actions of provisioning - -The following are key elements of the provisioning workflow you step through: - -- **Calculate the new tenant key**: A hash function is used to create the tenant key from the tenant name. -- **Check if the tenant key already exists**: The catalog is checked to ensure the key has not already been registered. -- **Initialize tenant in the default tenant database**: The tenant database is updated to add the new tenant information. -- **Register tenant in the catalog**: The mapping between the new tenant key and the existing tenants1 database is added to the catalog. -- **Add the tenant's name to a catalog extension table**: The venue name is added to the Tenants table in the catalog. This addition shows how the Catalog database can be extended to support additional application-specific data. -- **Open Events page for the new tenant**: The *Bushwillow Blues* events page is opened in the browser. - - ![Screenshot that shows the Events page for a new tenant.](./media/saas-multitenantdb-provision-and-catalog/bushwillow.png) - -#### Debugger steps - -To understand how the Wingtip app implements new tenant provisioning in a shared database, add a breakpoint and step through the workflow: - -1. In the *PowerShell ISE*, open ...\\Learning Modules\\ProvisionTenants\\*Demo-ProvisionTenants.ps1* and set the following parameters: - - **$TenantName** = **Bushwillow Blues**, the name of a new venue. - - **$VenueType** = **blues**, one of the pre-defined venue types: blues, classicalmusic, dance, jazz, judo, motorracing, multipurpose, opera, rockmusic, soccer (lowercase, no spaces). - - **$DemoScenario** = **1**, to provision a tenant in a shared database with other tenants. - -2. Add a breakpoint by putting your cursor anywhere on line 38, the line that says: *New-Tenant `*, and then press **F9**. - - ![Screenshot that highlights the line that includes New Tenant.](./media/saas-multitenantdb-provision-and-catalog/breakpoint.png) - -3. 
Run the script by pressing **F5**. - -4. After script execution stops at the breakpoint, press **F11** to step into the code. - - ![Screenshot shows the Windows PowerShell ISE with the Debug menu open and Step Into selected.](./media/saas-multitenantdb-provision-and-catalog/debug.png) - -5. Trace the script's execution using the **Debug** menu options, **F10** and **F11**, to step over or into called functions. - -For more information about debugging PowerShell scripts, see [Tips on working with and debugging PowerShell scripts](/powershell/scripting/components/ise/how-to-debug-scripts-in-windows-powershell-ise). - -## Provision a tenant in its *own* database - -#### Major actions of provisioning - -The following are key elements of the workflow you step through while tracing the script: - -- **Calculate the new tenant key**: A hash function is used to create the tenant key from the tenant name. -- **Check if the tenant key already exists**: The catalog is checked to ensure the key has not already been registered. -- **Create a new tenant database**: The database is created by copying the *basetenantdb* database using a Resource Manager template. The new database name is based on the tenant's name. -- **Add database to catalog**: The new tenant database is registered as a shard in the catalog. -- **Initialize tenant in the default tenant database**: The tenant database is updated to add the new tenant information. -- **Register tenant in the catalog**: The mapping between the new tenant key and the *sequoiasoccer* database is added to the catalog. -- **Tenant name is added to the catalog**: The venue name is added to the Tenants extension table in the catalog. -- **Open Events page for the new tenant**: The *Sequoia Soccer* Events page is opened in the browser. - - ![events](./media/saas-multitenantdb-provision-and-catalog/sequoiasoccer.png) - -#### Debugger steps - -Now walk through the script process when creating a tenant in its own database: - -1. 
Still in ...\\Learning Modules\\ProvisionTenants\\*Demo-ProvisionTenants.ps1* set the following parameters: - - **$TenantName** = **Sequoia Soccer**, the name of a new venue. - - **$VenueType** = **soccer**, one of the pre-defined venue types: blues, classicalmusic, dance, jazz, judo, motorracing, multipurpose, opera, rockmusic, soccer (lower case, no spaces). - - **$DemoScenario** = **2**, to provision a tenant into its own database. - -2. Add a new breakpoint by putting your cursor anywhere on line 57, the line that says: *& $PSScriptRoot\New-TenantAndDatabase `*, and press **F9**. - - ![break point](./media/saas-multitenantdb-provision-and-catalog/breakpoint2.png) - -3. Run the script by pressing **F5**. - -4. After the script execution stops at the breakpoint, press **F11** to step into the code. Use **F10** and **F11** to step over and step into functions to trace the execution. - -## Provision a batch of tenants - -This exercise provisions a batch of 17 tenants. It’s recommended you provision this batch of tenants before starting other Wingtip Tickets tutorials so there are more databases to work with. - -1. In the *PowerShell ISE*, open ...\\Learning Modules\\ProvisionTenants\\*Demo-ProvisionTenants.ps1* and change the *$DemoScenario* parameter to 4: - - **$DemoScenario** = **4**, to provision a batch of tenants into a shared database. - -2. Press **F5** and run the script. - -### Verify the deployed set of tenants - -At this stage, you have a mix of tenants deployed into a shared database and tenants deployed into their own databases. The Azure portal can be used to inspect the databases created. In the [Azure portal](https://portal.azure.com), open the **tenants1-mt-\** server by browsing to the list of SQL servers. 
The **SQL databases** list should include the shared **tenants1** database and the databases for the tenants that are in their own database: - - ![database list](./media/saas-multitenantdb-provision-and-catalog/Databases.png) - -While the Azure portal shows the tenant databases, it doesn't let you see the tenants *inside* the shared database. The full list of tenants can be seen in the **Events Hub** webpage of Wingtip, and by browsing the catalog. - -#### Using Wingtip Tickets events hub page - -Open the Events Hub page in the browser (http://events.wingtip-mt.\.trafficmanager.net) - -#### Using catalog database - -The full list of tenants and the corresponding database for each is available in the catalog. A SQL view is provided that joins the tenant name to the database name. The view nicely demonstrates the value of extending the metadata that is stored in the catalog. -- The SQL view is available in the tenantcatalog database. -- The tenant name is stored in the Tenants table. -- The database name is stored in the Shard Management tables. - -1. In SQL Server Management Studio (SSMS), connect to the tenants server at **catalog-mt.\.database.windows.net**, with Login = **developer**, and Password = **P\@ssword1** - - ![SSMS connection dialog](./media/saas-multitenantdb-provision-and-catalog/SSMSConnection.png) - -2. In the SSMS Object Explorer, browse to the views in the *tenantcatalog* database. - -3. Right click on the view *TenantsExtended* and choose **Select Top 1000 Rows**. Note the mapping between tenant name and database for the different tenants. - - ![ExtendedTenants view in SSMS](./media/saas-multitenantdb-provision-and-catalog/extendedtenantsview.png) - -## Other provisioning patterns - -This section discusses other interesting provisioning patterns. - -#### Pre-provisioning databases in elastic pools - -The pre-provisioning pattern exploits the fact that when using elastic pools, billing is for the pool not the databases. 
Thus databases can be added to an elastic pool before they are needed without incurring extra cost. This pre-provisioning significantly reduces the time taken to provision a tenant into a database. The number of databases pre-provisioned can be adjusted as needed to keep a buffer suitable for the anticipated provisioning rate. - -#### Auto-provisioning - -In the auto-provisioning pattern, a dedicated provisioning service is used to provision servers, pools, and databases automatically as needed. This automation includes the pre-provisioning of databases in elastic pools. And if databases are decommissioned and deleted, the gaps this creates in elastic pools can be filled by the provisioning service as desired. - -This type of automated service could be simple or complex. For example, the automation could handle provisioning across multiple geographies, and could set up geo-replication for disaster recovery. With the auto-provisioning pattern, a client application or script would submit a provisioning request to a queue to be processed by a provisioning service. The script would then poll to detect completion. If pre-provisioning is used, requests would be handled quickly, while a background service would manage the provisioning of a replacement database. - -## Additional resources - - -- [Elastic database client library](elastic-database-client-library.md) -- [How to Debug Scripts in Windows PowerShell ISE](/powershell/scripting/components/ise/how-to-debug-scripts-in-windows-powershell-ise) - - -## Next steps - -In this tutorial you learned how to: - -> [!div class="checklist"] -> * Provision a single new tenant into a shared multi-tenant database and its own database -> * Provision a batch of additional tenants -> * Step through the details of provisioning tenants, and registering them into the catalog - -Try the [Performance monitoring tutorial](./saas-multitenantdb-performance-monitoring.md). 
\ No newline at end of file diff --git a/articles/azure-sql/database/saas-multitenantdb-schema-management.md b/articles/azure-sql/database/saas-multitenantdb-schema-management.md deleted file mode 100644 index 40754aa98f2e8..0000000000000 --- a/articles/azure-sql/database/saas-multitenantdb-schema-management.md +++ /dev/null @@ -1,169 +0,0 @@ ---- -title: Manage schema in a multi-tenant app -description: "Manage Schema for multiple tenants in a multi-tenant application that uses Azure SQL Database" -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 12/18/2018 ---- -# Manage schema in a SaaS application that uses sharded multi-tenant databases -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This tutorial examines the challenges in maintaining a fleet of databases in a Software as a Service (SaaS) application. Solutions are demonstrated for fanning out schema changes across the fleet of databases. - -Like any application, the Wingtip Tickets SaaS app will evolve over time, and will require changes to the database. Changes may impact schema or reference data, or apply database maintenance tasks. With a SaaS application using a database per tenant pattern, changes must be coordinated across a potentially massive fleet of tenant databases. In addition, you must incorporate these changes into the database provisioning process to ensure they are included in new databases as they are created. - -#### Two scenarios - -This tutorial explores the following two scenarios: -- Deploy reference data updates for all tenants. -- Rebuild an index on the table that contains the reference data. - -The [Elastic Jobs](./elastic-jobs-overview.md) feature of Azure SQL Database is used to execute these operations across tenant databases. The jobs also operate on the 'template' tenant database. 
In the Wingtip Tickets sample app, this template database is copied to provision a new tenant database. - -In this tutorial you learn how to: - -> [!div class="checklist"] -> * Create a job agent. -> * Execute a T-SQL query on multiple tenant databases. -> * Update reference data in all tenant databases. -> * Create an index on a table in all tenant databases. - -## Prerequisites - -- The Wingtip Tickets multi-tenant database app must already be deployed: - - For instructions, see the first tutorial, which introduces the Wingtip Tickets SaaS multi-tenant database app:
    [Deploy and explore a sharded multi-tenant application that uses Azure SQL Database](./saas-multitenantdb-get-started-deploy.md). - - The deploy process runs for less than five minutes. - - You must have the *sharded multi-tenant* version of Wingtip installed. The versions for *Standalone* and *Database per tenant* do not support this tutorial. - -- The latest version of SQL Server Management Studio (SSMS) must be installed. [Download and Install SSMS](/sql/ssms/download-sql-server-management-studio-ssms). - -- Azure PowerShell must be installed. For details, see [Getting started with Azure PowerShell](/powershell/azure/get-started-azureps). - -> [!NOTE] -> This tutorial uses features of the Azure SQL Database service that are in a limited preview ([Elastic Database jobs](elastic-database-client-library.md)). If you wish to do this tutorial, provide your subscription ID to *SaaSFeedback\@microsoft.com* with subject=Elastic Jobs Preview. After you receive confirmation that your subscription has been enabled, [download and install the latest pre-release jobs cmdlets](https://github.com/jaredmoo/azure-powershell/releases). This preview is limited, so contact *SaaSFeedback\@microsoft.com* for related questions or support. - -## Introduction to SaaS schema management patterns - -The sharded multi-tenant database model used in this sample enables a tenants database to contain one or more tenants. This sample explores the potential to use a mix of many-tenant and one-tenant databases, enabling a *hybrid* tenant management model. Managing changes to these databases can be complicated. [Elastic Jobs](./elastic-jobs-overview.md) facilitates administration and management of large numbers of databases. Jobs enable you to securely and reliably run Transact-SQL scripts as tasks, against a group of tenant databases. The tasks are independent of user interaction or input. 
This method can be used to deploy changes to schema or to common reference data, across all tenants in an application. Elastic Jobs can also be used to maintain a golden template copy of the database. The template is used to create new tenants, always ensuring the latest schema and reference data are in use. - -![screen](./media/saas-multitenantdb-schema-management/schema-management.png) - -## Elastic Jobs limited preview - -There is a new version of Elastic Jobs that is now an integrated feature of Azure SQL Database. This new version of Elastic Jobs is currently in limited preview. The limited preview currently supports using PowerShell to create a job agent, and T-SQL to create and manage jobs. -> [!NOTE] -> This tutorial uses features of the SQL Database service that are in a limited preview (Elastic Database jobs). If you wish to do this tutorial, provide your subscription ID to SaaSFeedback@microsoft.com with subject=Elastic Jobs Preview. After you receive confirmation that your subscription has been enabled, download and install the latest pre-release jobs cmdlets. This preview is limited, so contact SaaSFeedback@microsoft.com for related questions or support. - -## Get the Wingtip Tickets SaaS Multi-tenant Database application source code and scripts - -The Wingtip Tickets SaaS Multi-tenant Database scripts and application source code are available in the [WingtipTicketsSaaS-MultitenantDB](https://github.com/microsoft/WingtipTicketsSaaS-MultiTenantDB) repository on GitHub. See the [general guidance](saas-tenancy-wingtip-app-guidance-tips.md) for steps to download and unblock the Wingtip Tickets SaaS scripts. - -## Create a job agent database and new job agent - -This tutorial requires that you use PowerShell to create the job agent database and job agent. Like the MSDB database used by SQL Agent, a job agent uses a database in Azure SQL Database to store job definitions, job status, and history. 
After the job agent is created, you can create and monitor jobs immediately. - -1. In **PowerShell ISE**, open *...\\Learning Modules\\Schema Management\\Demo-SchemaManagement.ps1*. -2. Press **F5** to run the script. - -The *Demo-SchemaManagement.ps1* script calls the *Deploy-SchemaManagement.ps1* script to create a database named _jobagent_ on the catalog server. The script then creates the job agent, passing the _jobagent_ database as a parameter. - -## Create a job to deploy new reference data to all tenants - -#### Prepare - -Each tenant's database includes a set of venue types in the **VenueTypes** table. Each venue type defines the kind of events that can be hosted at a venue. These venue types correspond to the background images you see in the tenant events app. In this exercise, you deploy an update to all databases to add two additional venue types: *Motorcycle Racing* and *Swimming Club*. - -First, review the venue types included in each tenant database. Connect to one of the tenant databases in SQL Server Management Studio (SSMS) and inspect the VenueTypes table. You can also query this table in the Query editor in the Azure portal, accessed from the database page. - -1. Open SSMS and connect to the tenant server: *tenants1-dpt-<user>.database.windows.net* -1. To confirm that *Motorcycle Racing* and *Swimming Club* **are not** currently included, browse to the *contosoconcerthall* database on the *tenants1-dpt-<user>* server and query the *VenueTypes* table. - - - -#### Steps - -Now you create a job to update the **VenueTypes** table in each tenants database, by adding the two new venue types. - -To create a new job, you use the set of jobs system stored procedures that were created in the _jobagent_ database. The stored procedures were created when the job agent was created. - -1. In SSMS, connect to the tenant server: tenants1-mt-<user>.database.windows.net - -2. Browse to the *tenants1* database. - -3. 
Query the *VenueTypes* table to confirm that *Motorcycle Racing* and *Swimming Club* are not yet in the results list. - -4. Connect to the catalog server, which is *catalog-mt-<user>.database.windows.net*. - -5. Connect to the _jobagent_ database in the catalog server. - -6. In SSMS, open the file *...\\Learning Modules\\Schema Management\\DeployReferenceData.sql*. - -7. Modify the statement: set @User = <user> and substitute the User value used when you deployed the Wingtip Tickets SaaS Multi-tenant Database application. - -8. Press **F5** to run the script. - -#### Observe - -Observe the following items in the *DeployReferenceData.sql* script: - -- **sp\_add\_target\_group** creates the target group name *DemoServerGroup*, and adds target members to the group. - -- **sp\_add\_target\_group\_member** adds the following items: - - A *server* target member type. - - This is the *tenants1-mt-<user>* server that contains the tenants databases. - - Including the server includes the tenant databases that exist at the time the job executes. - - A *database* target member type for the template database (*basetenantdb*) that resides on *catalog-mt-<user>* server, - - A *database* target member type to include the *adhocreporting* database that is used in a later tutorial. - -- **sp\_add\_job** creates a job called *Reference Data Deployment*. - -- **sp\_add\_jobstep** creates the job step containing T-SQL command text to update the reference table, VenueTypes. - -- The remaining views in the script display the existence of the objects and monitor job execution. Use these queries to review the status value in the **lifecycle** column to determine when the job has finished. The job updates the tenants database, and updates the two additional databases that contain the reference table. - -In SSMS, browse to the tenant database on the *tenants1-mt-<user>* server. Query the *VenueTypes* table to confirm that *Motorcycle Racing* and *Swimming Club* are now added to the table. 
The total count of venue types should have increased by two. - -## Create a job to manage the reference table index - -This exercise creates a job to rebuild the index on the reference table primary key on all the tenant databases. An index rebuild is a typical database management operation that an administrator might run after loading a large amount of data, to improve performance. - -1. In SSMS, connect to _jobagent_ database in *catalog-mt-<User>.database.windows.net* server. - -2. In SSMS, open *...\\Learning Modules\\Schema Management\\OnlineReindex.sql*. - -3. Press **F5** to run the script. - -#### Observe - -Observe the following items in the *OnlineReindex.sql* script: - -* **sp\_add\_job** creates a new job called *Online Reindex PK\_\_VenueTyp\_\_265E44FD7FD4C885*. - -* **sp\_add\_jobstep** creates the job step containing T-SQL command text to update the index. - -* The remaining views in the script monitor job execution. Use these queries to review the status value in the **lifecycle** column to determine when the job has successfully finished on all target group members. - -## Additional resources - - -* [Managing scaled-out cloud databases](./elastic-jobs-overview.md) - -## Next steps - -In this tutorial you learned how to: - -> [!div class="checklist"] -> * Create a job agent to run T-SQL jobs across multiple databases -> * Update reference data in all tenant databases -> * Create an index on a table in all tenant databases - -Next, try the [Ad hoc reporting tutorial](./saas-multitenantdb-adhoc-reporting.md) to explore running distributed queries across tenant databases. 
\ No newline at end of file diff --git a/articles/azure-sql/database/saas-multitenantdb-tenant-analytics.md b/articles/azure-sql/database/saas-multitenantdb-tenant-analytics.md deleted file mode 100644 index 6f0b976bc145f..0000000000000 --- a/articles/azure-sql/database/saas-multitenantdb-tenant-analytics.md +++ /dev/null @@ -1,240 +0,0 @@ ---- -title: Run analytics queries -description: "Cross-tenant analytics queries using data extracted from multiple Azure SQL Database databases in a multi-tenant app." -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 09/19/2018 ---- -# Cross-tenant analytics using extracted data - multi-tenant app -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this tutorial, you walk through a complete analytics scenario for a multitenant implementation. The scenario demonstrates how analytics can enable businesses to make smart decisions. Using data extracted from sharded database, you use analytics to gain insights into tenant behavior, including their use of the sample Wingtip Tickets SaaS application. This scenario involves three steps: - -1. **Extract data** from each tenant database into an analytics store. -2. **Optimize the extracted data** for analytics processing. -3. Use **Business Intelligence** tools to draw out useful insights, which can guide decision making. - -In this tutorial you learn how to: - -> [!div class="checklist"] -> - Create the tenant analytics store to extract the data into. -> - Use elastic jobs to extract data from each tenant database into the analytics store. -> - Optimize the extracted data (reorganize into a star-schema). -> - Query the analytics database. -> - Use Power BI for data visualization to highlight trends in tenant data and make recommendation for improvements. 
- -![Diagram shows an overview of the architecture used for this article.](./media/saas-multitenantdb-tenant-analytics/architectureOverview.png) - -## Offline tenant analytics pattern - -SaaS applications you develop have access to a vast amount of tenant data stored in the cloud. The data provides a rich source of insights about the operation and usage of your application, and about the behavior of the tenants. These insights can guide feature development, usability improvements, and other investments in the app and platform. - -Accessing the data for all tenants is simple when all the data is in just one multi-tenant database. But the access is more complex when distributed at scale across thousands of databases. One way to tame the complexity is to extract the data to an analytics database or a data warehouse. You then query the data warehouse to gather insights from the tickets data of all tenants. - -This tutorial presents a complete analytics scenario for this sample SaaS application. First, elastic jobs are used to schedule the extraction of data from each tenant database. The data is sent to an analytics store. The analytics store could either be an SQL Database or an Azure Synapse Analytics. For large-scale data extraction, [Azure Data Factory](../../data-factory/introduction.md) is recommended. - -Next, the aggregated data is shredded into a set of [star-schema](https://www.wikipedia.org/wiki/Star_schema) tables. The tables consist of a central fact table plus related dimension tables: - -- The central fact table in the star-schema contains ticket data. -- The dimension tables contain data about venues, events, customers, and purchase dates. - -Together the central and dimension tables enable efficient analytical processing. 
The star-schema used in this tutorial is displayed in the following image: - -![Database diagram shows four database objects connected to a central database object.](./media/saas-multitenantdb-tenant-analytics/StarSchema.png) - -Finally, the star-schema tables are queried. The query results are displayed visually to highlight insights into tenant behavior and their use of the application. With this star-schema, you can run queries that help discover items like the following: - -- Who is buying tickets and from which venue. -- Hidden patterns and trends in the following areas: - - The sales of tickets. - - The relative popularity of each venue. - -Understanding how consistently each tenant is using the service provides an opportunity to create service plans to cater to their needs. This tutorial provides basic examples of insights that can be gleaned from tenant data. - -## Setup - -### Prerequisites - -To complete this tutorial, make sure the following prerequisites are met: - -- The Wingtip Tickets SaaS Multi-tenant Database application is deployed. To deploy in less than five minutes, see [Deploy and explore the Wingtip Tickets SaaS Multi-tenant Database application](./saas-multitenantdb-get-started-deploy.md) -- The Wingtip SaaS scripts and application [source code](https://github.com/Microsoft/WingtipTicketsSaaS-MultiTenantDB) are downloaded from GitHub. Be sure to *unblock the zip file* before extracting its contents. Check out the [general guidance](saas-tenancy-wingtip-app-guidance-tips.md) for steps to download and unblock the Wingtip Tickets SaaS scripts. -- Power BI Desktop is installed. [Download Power BI Desktop](https://powerbi.microsoft.com/downloads/) -- The batch of additional tenants has been provisioned, see the [**Provision tenants tutorial**](./saas-multitenantdb-provision-and-catalog.md). -- A job agent and job agent database have been created. 
See the appropriate steps in the [**Schema management tutorial**](./saas-multitenantdb-schema-management.md#create-a-job-agent-database-and-new-job-agent). - -### Create data for the demo - -In this tutorial, analysis is performed on ticket sales data. In the current step, you generate ticket data for all the tenants. Later this data is extracted for analysis. *Ensure you have provisioned the batch of tenants as described earlier, so that you have a meaningful amount of data*. A sufficiently large amount of data can expose a range of different ticket purchasing patterns. - -1. In **PowerShell ISE**, open *…\Learning Modules\Operational Analytics\Tenant Analytics\Demo-TenantAnalytics.ps1*, and set the following value: - - **$DemoScenario** = **1** Purchase tickets for events at all venues -2. Press **F5** to run the script and create ticket purchasing history for every event in each venue. The script runs for several minutes to generate tens of thousands of tickets. - -### Deploy the analytics store -Often there are numerous transactional sharded databases that together hold all tenant data. You must aggregate the tenant data from the sharded database into one analytics store. The aggregation enables efficient query of the data. In this tutorial, an Azure SQL Database database is used to store the aggregated data. - -In the following steps, you deploy the analytics store, which is called **tenantanalytics**. You also deploy predefined tables that are populated later in the tutorial: -1. In PowerShell ISE, open *…\Learning Modules\Operational Analytics\Tenant Analytics\Demo-TenantAnalytics.ps1* -2. Set the $DemoScenario variable in the script to match your choice of analytics store. For learning purposes, using the database without columnstore is recommended. - - To use SQL Database without columnstore, set **$DemoScenario** = **2** - - To use SQL Database with columnstore, set **$DemoScenario** = **3** -3. 
Press **F5** to run the demo script (that calls the *Deploy-TenantAnalytics\.ps1* script) which creates the tenant analytics store. - -Now that you have deployed the application and filled it with interesting tenant data, use [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms) to connect **tenants1-mt-\** and **catalog-mt-\** servers using Login = *developer*, Password = *P\@ssword1*. - -![architectureOverView](./media/saas-multitenantdb-tenant-analytics/ssmsSignIn.png) - -In the Object Explorer, perform the following steps: - -1. Expand the *tenants1-mt-\* server. -2. Expand the Databases node, and see *tenants1* database containing multiple tenants. -3. Expand the *catalog-mt-\* server. -4. Verify that you see the analytics store and the jobaccount database. - -See the following database items in the SSMS Object Explorer by expanding the analytics store node: - -- Tables **TicketsRawData** and **EventsRawData** hold raw extracted data from the tenant databases. -- The star-schema tables are **fact_Tickets**, **dim_Customers**, **dim_Venues**, **dim_Events**, and **dim_Dates**. -- The **sp_ShredRawExtractedData** stored procedure is used to populate the star-schema tables from the raw data tables. - -![Screenshot shows the S S M S Object Explorer for the analytics store node, including Tables, Views, and nodes.](./media/saas-multitenantdb-tenant-analytics/tenantAnalytics.png) - -## Data extraction - -### Create target groups - -Before proceeding, ensure you have deployed the job account and jobaccount database. In the next set of steps, Elastic Jobs is used to extract data from the sharded tenants database, and to store the data in the analytics store. Then the second job shreds the data and stores it into tables in the star-schema. These two jobs run against two different target groups, namely **TenantGroup** and **AnalyticsGroup**. The extract job runs against the TenantGroup, which contains all the tenant databases. 
The shredding job runs against the AnalyticsGroup, which contains just the analytics store. Create the target groups by using the following steps: - -1. In SSMS, connect to the **jobaccount** database in catalog-mt-\. -2. In SSMS, open *…\Learning Modules\Operational Analytics\Tenant Analytics\ TargetGroups.sql* -3. Modify the @User variable at the top of the script, replacing `` with the user value used when you deployed the Wingtip Tickets SaaS Multi-tenant Database application. -4. Press **F5** to run the script that creates the two target groups. - -### Extract raw data from all tenants - -Transactions might occur more frequently for *ticket and customer* data than for *event and venue* data. Therefore, consider extracting ticket and customer data separately and more frequently than you extract event and venue data. In this section, you define and schedule two separate jobs: - -- Extract ticket and customer data. -- Extract event and venue data. - -Each job extracts its data, and posts it into the analytics store. There a separate job shreds the extracted data into the analytics star-schema. - -1. In SSMS, connect to the **jobaccount** database in catalog-mt-\ server. -2. In SSMS, open *...\Learning Modules\Operational Analytics\Tenant Analytics\ExtractTickets.sql*. -3. Modify @User at the top of the script, and replace `` with the user name used when you deployed the Wingtip Tickets SaaS Multi-tenant Database application. -4. Press **F5** to run the script that creates and runs the job that extracts tickets and customers data from each tenant database. The job saves the data into the analytics store. -5. Query the TicketsRawData table in the tenantanalytics database, to ensure that the table is populated with tickets information from all tenants. 
- -![Screenshot shows the ExtractTickets database with the TicketsRawData d b o selected in Object Explorer.](./media/saas-multitenantdb-tenant-analytics/ticketExtracts.png) - -Repeat the preceding steps, except this time replace **\ExtractTickets.sql** with **\ExtractVenuesEvents.sql** in step 2. - -Successfully running the job populates the EventsRawData table in the analytics store with new events and venues information from all tenants. - -## Data reorganization - -### Shred extracted data to populate star-schema tables - -The next step is to shred the extracted raw data into a set of tables that are optimized for analytics queries. A star-schema is used. A central fact table holds individual ticket sales records. Dimension tables are populated with data about venues, events, customers, and purchase dates. - -In this section of the tutorial, you define and run a job that merges the extracted raw data with the data in the star-schema tables. After the merge job is finished, the raw data is deleted, leaving the tables ready to be populated by the next tenant data extract job. - -1. In SSMS, connect to the **jobaccount** database in catalog-mt-\. -2. In SSMS, open *…\Learning Modules\Operational Analytics\Tenant Analytics\ShredRawExtractedData.sql*. -3. Press **F5** to run the script to define a job that calls the sp_ShredRawExtractedData stored procedure in the analytics store. -4. Allow enough time for the job to run successfully. - - Check the **Lifecycle** column of jobs.jobs_execution table for the status of job. Ensure that the job **Succeeded** before proceeding. A successful run displays data similar to the following chart: - -![Screenshot shows the successful result of running the sp_ShredRawExtractedData procedure.](./media/saas-multitenantdb-tenant-analytics/shreddingJob.PNG) - -## Data exploration - -### Visualize tenant data - -The data in the star-schema table provides all the ticket sales data needed for your analysis. 
To make it easier to see trends in large data sets, you need to visualize it graphically. In this section, you learn how to use **Power BI** to manipulate and visualize the tenant data you have extracted and organized. - -Use the following steps to connect to Power BI, and to import the views you created earlier: - -1. Launch Power BI desktop. -2. From the Home ribbon, select **Get Data**, and select **More…** from the menu. -3. In the **Get Data** window, select Azure SQL Database. -4. In the database login window, enter your server name (catalog-mt-\<User\>.database.windows.net). Select **Import** for **Data Connectivity Mode**, and then click OK. - - ![Screenshot shows SQL Server database dialog box where you can enter the Server and Database.](./media/saas-multitenantdb-tenant-analytics/powerBISignIn.PNG) - -5. Select **Database** in the left pane, then enter user name = *developer*, and enter password = *P\@ssword1*. Click **Connect**. - - ![Screenshot shows the SQL Server database dialog where you can enter a User name and Password.](./media/saas-multitenantdb-tenant-analytics/databaseSignIn.PNG) - -6. In the **Navigator** pane, under the analytics database, select the star-schema tables: fact_Tickets, dim_Events, dim_Venues, dim_Customers and dim_Dates. Then select **Load**. - -Congratulations! You have successfully loaded the data into Power BI. Now you can start exploring interesting visualizations to help gain insights into your tenants. Next you walk through how analytics can enable you to provide data-driven recommendations to the Wingtip Tickets business team. The recommendations can help to optimize the business model and customer experience. - -You start by analyzing ticket sales data to see the variation in usage across the venues. Select the following options in Power BI to plot a bar chart of the total number of tickets sold by each venue. Due to random variation in the ticket generator, your results may be different. 
- -![Screenshot shows a Power B I visualization and controls for the data visualization on the right side.](./media/saas-multitenantdb-tenant-analytics/TotalTicketsByVenues.PNG) - -The preceding plot confirms that the number of tickets sold by each venue varies. Venues that sell more tickets are using your service more heavily than venues that sell fewer tickets. There may be an opportunity here to tailor resource allocation according to different tenant needs. - -You can further analyze the data to see how ticket sales vary over time. Select the following options in Power BI to plot the total number of tickets sold each day for a period of 60 days. - -![Screenshot shows Power B I visualization titled Ticket Sale Distribution versus Sale Day.](./media/saas-multitenantdb-tenant-analytics/SaleVersusDate.PNG) - -The preceding chart displays that ticket sales spike for some venues. These spikes reinforce the idea that some venues might be consuming system resources disproportionately. So far there is no obvious pattern in when the spikes occur. - -Next you want to further investigate the significance of these peak sale days. When do these peaks occur after tickets go on sale? To plot tickets sold per day, select the following options in Power BI. - -![SaleDayDistribution](./media/saas-multitenantdb-tenant-analytics/SaleDistributionPerDay.PNG) - -The preceding plot shows that some venues sell a lot of tickets on the first day of sale. As soon as tickets go on sale at these venues, there seems to be a mad rush. This burst of activity by a few venues might impact the service for other tenants. - -You can drill into the data again to see if this mad rush is true for all events hosted by these venues. In previous plots, you observed that Contoso Concert Hall sells a lot of tickets, and that Contoso also has a spike in ticket sales on certain days. 
Play around with Power BI options to plot cumulative ticket sales for Contoso Concert Hall, focusing on sale trends for each of its events. Do all events follow the same sale pattern? - -![ContosoSales](./media/saas-multitenantdb-tenant-analytics/EventSaleTrends.PNG) - -The preceding plot for Contoso Concert Hall shows that the mad rush does not happen for all events. Play around with the filter options to see sale trends for other venues. - -The insights into ticket selling patterns might lead Wingtip Tickets to optimize their business model. Instead of charging all tenants equally, perhaps Wingtip should introduce service tiers with different compute sizes. Larger venues that need to sell more tickets per day could be offered a higher tier with a higher service level agreement (SLA). Those venues could have their databases placed in pool with higher per-database resource limits. Each service tier could have an hourly sales allocation, with additional fees charged for exceeding the allocation. Larger venues that have periodic bursts of sales would benefit from the higher tiers, and Wingtip Tickets can monetize their service more efficiently. - -Meanwhile, some Wingtip Tickets customers complain that they struggle to sell enough tickets to justify the service cost. Perhaps in these insights there is an opportunity to boost ticket sales for under performing venues. Higher sales would increase the perceived value of the service. Right click fact_Tickets and select **New measure**. Enter the following expression for the new measure called **AverageTicketsSold**: - -``` -AverageTicketsSold = DIVIDE(DIVIDE(COUNTROWS(fact_Tickets),DISTINCT(dim_Venues[VenueCapacity]))*100, COUNTROWS(dim_Events)) -``` - -Select the following visualization options to plot the percentage tickets sold by each venue to determine their relative success. 
- -![analyticsViews](./media/saas-multitenantdb-tenant-analytics/AvgTicketsByVenues.PNG) - -The preceding plot shows that even though most venues sell more than 80% of their tickets, some are struggling to fill more than half the seats. Play around with the **Values** well to select the maximum or minimum percentage of tickets sold for each venue. - -Earlier you deepened your analysis to discover that ticket sales tend to follow predictable patterns. This discovery might let Wingtip Tickets help underperforming venues boost ticket sales by recommending dynamic pricing. This discovery could reveal an opportunity to employ machine learning techniques to predict ticket sales for each event. Predictions could also be made for the impact on revenue of offering discounts on ticket sales. Power BI Embedded could be integrated into an event management application. The integration could help visualize predicted sales and the effect of different discounts. The application could help devise an optimum discount to be applied directly from the analytics display. - -You have observed trends in tenant data from the Wingtip Tickets SaaS Multi-tenant Database application. You can contemplate other ways the app can inform business decisions for SaaS application vendors. Vendors can better cater to the needs of their tenants. Hopefully this tutorial has equipped you with tools necessary to perform analytics on tenant data to empower your businesses to make data-driven decisions. - -## Next steps - -In this tutorial, you learned how to: - -> [!div class="checklist"] -> - Deploy a tenant analytics database with pre-defined star schema tables -> - Use elastic jobs to extract data from all the tenant databases -> - Merge the extracted data into tables in a star-schema designed for analytics -> - Query an analytics database -> - Use Power BI for data visualization to observe trends in tenant data - -Congratulations! 
- -## Additional resources - -Additional [tutorials that build upon the Wingtip SaaS application](./saas-dbpertenant-wingtip-app-overview.md#sql-database-wingtip-saas-tutorials). -- [Elastic Jobs](./elastic-jobs-overview.md). -- [Cross-tenant analytics using extracted data - single-tenant app](saas-tenancy-tenant-analytics.md) \ No newline at end of file diff --git a/articles/azure-sql/database/saas-standaloneapp-get-started-deploy.md b/articles/azure-sql/database/saas-standaloneapp-get-started-deploy.md deleted file mode 100644 index 4e396b1af8909..0000000000000 --- a/articles/azure-sql/database/saas-standaloneapp-get-started-deploy.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: Single-tenant SaaS tutorial -description: "Deploy and explore a standalone single-tenant SaaS application that uses Azure SQL Database." -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 11/07/2018 ---- -# Deploy and explore a standalone single-tenant application that uses Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this tutorial, you deploy and explore the Wingtip Tickets SaaS sample application developed using the standalone application, or app-per-tenant, pattern. The application is designed to showcase features of Azure SQL Database that simplify enabling multi-tenant SaaS scenarios. - -The standalone application or app-per-tenant pattern deploys an application instance for each tenant. Each application is configured for a specific tenant and deployed in a separate Azure resource group. Multiple instances of the application are provisioned to provide a multi-tenant solution. This pattern is best suited to smaller numbers of tenants where tenant isolation is a top priority. 
Azure has partner programs that allow resources to be deployed into a tenant’s subscription and managed by a service provider on the tenant’s behalf. - -In this tutorial, you'll deploy three standalone applications for three tenants into your Azure subscription. You have full access to explore and work with the individual application components. - -The application source code and management scripts are available in the [WingtipTicketsSaaS-StandaloneApp](https://github.com/Microsoft/WingtipTicketsSaaS-StandaloneApp) GitHub repo. The application was created using Visual Studio 2015, and doesn't successfully open and compile in Visual Studio 2019 without updating. - - -In this tutorial you learn: - -> [!div class="checklist"] -> * How to deploy the Wingtip Tickets SaaS Standalone Application. -> * Where to get the application source code, and management scripts. -> * About the servers and databases that make up the app. - -Additional tutorials will be released. They'll allow you to explore a range of management scenarios based on this application pattern. - -## Deploy the Wingtip Tickets SaaS Standalone Application - -Deploy the app for the three provided tenants: - -1. Click each blue **Deploy to Azure** button to open the deployment template in the [Azure portal](https://portal.azure.com). Each template requires two parameter values; a name for a new resource group, and a user name that distinguishes this deployment from other deployments of the app. The next step provides details for setting these values. 
- - **Contoso Concert Hall** - [![Image showing a button labeled "Deploy to Azure".](media/saas-standaloneapp-get-started-deploy/deploy.png)](https://aka.ms/deploywingtipsa-contoso) - - **Dogwood Dojo** - [![Image showing a button labeled "Deploy to Azure".](media/saas-standaloneapp-get-started-deploy/deploy.png)](https://aka.ms/deploywingtipsa-dogwood) - - **Fabrikam Jazz Club** - [![Image showing a button labeled "Deploy to Azure".](media/saas-standaloneapp-get-started-deploy/deploy.png)](https://aka.ms/deploywingtipsa-fabrikam) - -2. Enter required parameter values for each deployment. - - > [!IMPORTANT] - > Some authentication and server firewalls are intentionally unsecured for demonstration purposes. **Create a new resource group** for each application deployment. Do not use an existing resource group. Do not use this application, or any resources it creates, for production. Delete all the resource groups when you are finished with the applications to stop related billing. - - It's best to use only lowercase letters, numbers, and hyphens in your resource names. - * For **Resource group**, select Create new, and then provide a lowercase Name for the resource group. **wingtip-sa-\<venue\>-\<user\>** is the recommended pattern. For \<venue\>, replace the venue name with no spaces. For \<user\>, replace the user value from below. With this pattern, resource group names might be *wingtip-sa-contosoconcerthall-af1*, *wingtip-sa-dogwooddojo-af1*, *wingtip-sa-fabrikamjazzclub-af1*. - * Select a **Location** from the drop-down list. - - * For **User** - We recommend a short user value, such as your initials plus a digit: for example, *af1*. - - -3. **Deploy the application**. - - * Click to agree to the terms and conditions. - * Click **Purchase**. - -4. Monitor the status of all three deployments by clicking **Notifications** (the bell icon to the right of the search box). Deploying the apps takes around five minutes. - - -## Run the applications - -The app showcases venues that host events. 
The venues are the tenants of the application. Each venue gets a personalized web site to list their events and sell tickets. Venue types include concert halls, jazz clubs, and sports clubs. In the sample, the type of venue determines the background photograph shown on the venue's web site. In the standalone app model, each venue has a separate application instance with its own standalone Azure SQL Database. - -1. Open the events page for each of the three tenants in separate browser tabs: - - - http://events.contosoconcerthall.<user>.trafficmanager.net - - http://events.dogwooddojo.<user>.trafficmanager.net - - http://events.fabrikamjazzclub.<user>.trafficmanager.net - - (In each URL, replace <user> with your deployment's user value.) - - ![Events](./media/saas-standaloneapp-get-started-deploy/fabrikam.png) - -To control the distribution of incoming requests, the app uses [*Azure Traffic Manager*](../../traffic-manager/traffic-manager-overview.md). Each tenant-specific app instance includes the tenant name as part of the domain name in the URL. All the tenant URLs include your specific **User** value. The URLs follow the following format: -- http://events.<venuename>.<user>.trafficmanager.net - -Each tenant's database **Location** is included in the app settings of the corresponding deployed app. - -In a production environment, typically you create a CNAME DNS record to [*point a company internet domain*](../../traffic-manager/traffic-manager-point-internet-domain.md) to the URL of the traffic manager profile. - - -## Explore the servers and tenant databases - -Let’s look at some of the resources that were deployed: - -1. In the [Azure portal](https://portal.azure.com), browse to the list of resource groups. -2. You should see the three tenant resource groups. -3. Open the **wingtip-sa-fabrikam-<user>** resource group, which contains the resources for the Fabrikam Jazz Club deployment. 
The **fabrikamjazzclub-<user>** server contains the **fabrikamjazzclub** database. - -Each tenant database is a 50 DTU *standalone* database. - -## Additional resources - - - -- To learn about multi-tenant SaaS applications, see [Design patterns for multi-tenant SaaS applications](saas-tenancy-app-design-patterns.md). - - -## Delete resource groups to stop billing ## - -When you have finished using the sample, delete all the resource groups you created to stop the associated billing. - -## Next steps - -In this tutorial you learned: - -> [!div class="checklist"] -> * How to deploy the Wingtip Tickets SaaS Standalone Application. -> * About the servers and databases that make up the app. -> * How to delete sample resources to stop related billing. - -Next, try the [Provision and Catalog](saas-standaloneapp-provision-and-catalog.md) tutorial in which you'll explore the use of a catalog of tenants that enables a range of cross-tenant scenarios such as schema management and tenant analytics. diff --git a/articles/azure-sql/database/saas-standaloneapp-provision-and-catalog.md b/articles/azure-sql/database/saas-standaloneapp-provision-and-catalog.md deleted file mode 100644 index 9006c740df99f..0000000000000 --- a/articles/azure-sql/database/saas-standaloneapp-provision-and-catalog.md +++ /dev/null @@ -1,153 +0,0 @@ ---- -title: Multi-tenant SaaS tutorial -description: "Provision and catalog new tenants using the standalone application pattern" -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 09/24/2018 ---- -# Provision and catalog new tenants using the application per tenant SaaS pattern -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article covers the provisioning and cataloging of new tenants using the standalone app per tenant SaaS pattern. 
-This article has two major parts: -* Conceptual discussion of provisioning and cataloging new tenants -* A tutorial that highlights sample PowerShell code that accomplishes the provisioning and cataloging - * The tutorial uses the Wingtip Tickets sample SaaS application, adapted to the standalone app per tenant pattern. - -## Standalone application per tenant pattern - -The standalone app per tenant pattern is one of several patterns for multi-tenant SaaS applications. In this pattern, a standalone app is provisioned for each tenant. The application comprises application level components and an Azure SQL Database. Each tenant app can be deployed in the vendor’s subscription. Alternatively, Azure offers a [managed applications program](../../azure-resource-manager/managed-applications/overview.md) in which an app can be deployed in a tenant’s subscription and managed by the vendor on the tenant’s behalf. - - ![app-per-tenant pattern](./media/saas-standaloneapp-provision-and-catalog/standalone-app-pattern.png) - -When deploying an application for a tenant, the app and database are provisioned in a new resource group created for the tenant. Using separate resource groups isolates each tenant's application resources and allows them to be managed independently. Within each resource group, each application instance is configured to access its corresponding database directly. This connection model contrasts with other patterns that use a catalog to broker connections between the app and the database. And as there is no resource sharing, each tenant database must be provisioned with sufficient resources to handle its peak load. This pattern tends to be used for SaaS applications with fewer tenants, where there is a strong emphasis on tenant isolation and less emphasis on resource costs. 
- -## Using a tenant catalog with the application per tenant pattern - -While each tenant’s app and database are fully isolated, various management and analytics scenarios may operate across tenants. For example, applying a schema change for a new release of the application requires changes to the schema of each tenant database. Reporting and analytics scenarios may also require access to all the tenant databases regardless of where they are deployed. - - ![Diagram that shows how to use a tenant catalog with the application per tenant pattern.](./media/saas-standaloneapp-provision-and-catalog/standalone-app-pattern-with-catalog.png) - -The tenant catalog holds a mapping between a tenant identifier and a tenant database, allowing an identifier to be resolved to a server and database name. In the Wingtip SaaS app, the tenant identifier is computed as a hash of the tenant name, although other schemes could be used. While standalone applications don't need the catalog to manage connections, the catalog can be used to scope other actions to a set of tenant databases. For example, Elastic Query can use the catalog to determine the set of databases across which queries are distributed for cross-tenant reporting. - -## Elastic Database Client Library - -In the Wingtip sample application, the catalog is implemented by the shard management features of the [Elastic Database Client Library](elastic-database-client-library.md) (EDCL). The library enables an application to create, manage, and use a shard map that is stored in a database. In the Wingtip Tickets sample, the catalog is stored in the *tenant catalog* database. The shard maps a tenant key to the shard (database) in which that tenant’s data is stored. EDCL functions manage a *global shard map* stored in tables in the *tenant catalog* database and a *local shard map* stored in each shard. - -EDCL functions can be called from applications or PowerShell scripts to create and manage the entries in the shard map. 
Other EDCL functions can be used to retrieve the set of shards or connect to the correct database for given tenant key. - -> [!IMPORTANT] -> Do not edit the data in the catalog database or the local shard map in the tenant databases directly. Direct updates are not supported due to the high risk of data corruption. Instead, edit the mapping data by using EDCL APIs only. - -## Tenant provisioning - -Each tenant requires a new Azure resource group, which must be created before resources can be provisioned within it. Once the resource group exists, an Azure Resource Management template can be used to deploy the application components and the database, and then configure the database connection. To initialize the database schema, the template can import a bacpac file. Alternatively, the database can be created as a copy of a ‘template’ database. The database is then further updated with initial venue data and registered in the catalog. - -## Tutorial - -In this tutorial you learn how to: - -* Provision a catalog -* Register the sample tenant databases that you deployed earlier in the catalog -* Provision an additional tenant and register it in the catalog - -An Azure Resource Manager template is used to deploy and configure the application, create the tenant database, and then import a bacpac file to initialize it. The import request may be queued for several minutes before it is actioned. - -At the end of this tutorial, you have a set of standalone tenant applications, with each database registered in the catalog. - -## Prerequisites - -To complete this tutorial, make sure the following prerequisites are completed: - -* Azure PowerShell is installed. For details, see [Getting started with Azure PowerShell](/powershell/azure/get-started-azureps) -* The three sample tenant apps are deployed. To deploy these apps in less than five minutes, see [Deploy and explore the Wingtip Tickets SaaS Standalone Application pattern](./saas-standaloneapp-get-started-deploy.md). 
- -## Provision the catalog - -In this task, you learn how to provision the catalog used to register all the tenant databases. You will: - -* **Provision the catalog database** using an Azure resource management template. The database is initialized by importing a bacpac file. -* **Register the sample tenant apps** that you deployed earlier. Each tenant is registered using a key constructed from a hash of the tenant name. The tenant name is also stored in an extension table in the catalog. - -1. In PowerShell ISE, open *...\Learning Modules\UserConfig.psm* and update the **\** value to the value you used when deploying the three sample applications. **Save the file**. -1. In PowerShell ISE, open *...\Learning Modules\ProvisionTenants\Demo-ProvisionAndCatalog.ps1* and set **$Scenario = 1**. Deploy the tenant catalog and register the pre-defined tenants. - -1. Add a breakpoint by putting your cursor anywhere on the line that says, `& $PSScriptRoot\New-Catalog.ps1`, and then press **F9**. - - ![setting a breakpoint for tracing](./media/saas-standaloneapp-provision-and-catalog/breakpoint.png) - -1. Run the script by pressing **F5**. -1. After script execution stops at the breakpoint, press **F11** to step into the New-Catalog.ps1 script. -1. Trace the script's execution using the Debug menu options, F10 and F11, to step over or into called functions. - * For more information about debugging PowerShell scripts, see [Tips on working with and debugging PowerShell scripts](/powershell/scripting/components/ise/how-to-debug-scripts-in-windows-powershell-ise). - -Once the script completes, the catalog will exist and all the sample tenants will be registered. - -Now look at the resources you created. - -1. Open the [Azure portal](https://portal.azure.com/) and browse the resource groups. Open the **wingtip-sa-catalog-\** resource group and note the catalog server and database. -1. Open the database in the portal and select *Data explorer* from the left-hand menu. 
Click the Login command and then enter the Password = **P\@ssword1**. - - -1. Explore the schema of the *tenantcatalog* database. - * The objects in the `__ShardManagement` schema are all provided by the Elastic Database Client Library. - * The `Tenants` table and `TenantsExtended` view are extensions added in the sample that demonstrate how you can extend the catalog to provide additional value. -1. Run the query, `SELECT * FROM dbo.TenantsExtended`. - - ![data explorer](./media/saas-standaloneapp-provision-and-catalog/data-explorer-tenantsextended.png) - - As an alternative to using the Data Explorer you can connect to the database from SQL Server Management Studio. To do this, connect to the server wingtip- - - - Note that you should not edit data directly in the catalog - always use the shard management APIs. - -## Provision a new tenant application - -In this task, you learn how to provision a single tenant application. You will: - -* **Create a new resource group** for the tenant. -* **Provision the application and database** into the new resource group using an Azure resource management template. This action includes initializing the database with common schema and reference data by importing a bacpac file. -* **Initialize the database with basic tenant information**. This action includes specifying the venue type, which determines the photograph used as the background on its events web site. -* **Register the database in the catalog database**. - -1. In PowerShell ISE, open *...\Learning Modules\ProvisionTenants\Demo-ProvisionAndCatalog.ps1* and set **$Scenario = 2**. Deploy the tenant catalog and register the pre-defined tenants - -1. Add a breakpoint in the script by putting your cursor anywhere on line 49 that says, `& $PSScriptRoot\New-TenantApp.ps1`, and then press **F9**. -1. Run the script by pressing **F5**. -1. After script execution stops at the breakpoint, press **F11** to step into the New-Catalog.ps1 script. -1. 
Trace the script's execution using the Debug menu options, F10 and F11, to step over or into called functions. - -After the tenant has been provisioned, the new tenant's events website is opened. - - ![red maple racing](./media/saas-standaloneapp-provision-and-catalog/redmapleracing.png) - -You can then inspect the new resources created in the Azure portal. - - ![red maple racing resources](./media/saas-standaloneapp-provision-and-catalog/redmapleracing-resources.png) - - -## To stop billing, delete resource groups - -When you have finished exploring the sample, delete all the resource groups you created to stop the associated billing. - -## Additional resources - -- To learn more about multi-tenant SaaS database applications, see [Design patterns for multi-tenant SaaS applications](saas-tenancy-app-design-patterns.md). - -## Next steps - -In this tutorial you learned: - -> [!div class="checklist"] -> * How to deploy the Wingtip Tickets SaaS Standalone Application. -> * About the servers and databases that make up the app. -> * How to delete sample resources to stop related billing. - -You can explore how the catalog is used to support various cross-tenant scenarios using the database-per-tenant version of the [Wingtip Tickets SaaS application](./saas-dbpertenant-wingtip-app-overview.md). \ No newline at end of file diff --git a/articles/azure-sql/database/saas-tenancy-app-design-patterns.md b/articles/azure-sql/database/saas-tenancy-app-design-patterns.md deleted file mode 100644 index b95c956bff79e..0000000000000 --- a/articles/azure-sql/database/saas-tenancy-app-design-patterns.md +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: Multi-tenant SaaS patterns -description: "Learn about the requirements and common data architecture patterns of multi-tenant software as a service (SaaS) database applications that run in the Azure cloud environment." 
-services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.topic: conceptual -author: scoriani -ms.author: scoriani -ms.reviewer: kendralittle, mathoma -ms.date: 01/25/2019 -ms.custom: seoapril2019, sqldbrb=1 ---- -# Multi-tenant SaaS database tenancy patterns -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article describes the various tenancy models available for a multi-tenant SaaS application. - -When designing a multi-tenant SaaS application, you must carefully choose the tenancy model that best fits the needs of your application. A tenancy model determines how each tenant's data is mapped to storage. Your choice of tenancy model impacts application design and management. Switching to a different model later is sometimes costly. - -## A. SaaS concepts and terminology - -In the Software as a Service (SaaS) model, your company does not sell *licenses* to your software. Instead, each customer makes rent payments to your company, making each customer a *tenant* of your company. - -In return for paying rent, each tenant receives access to your SaaS application components, and has its data stored in the SaaS system. - -The term *tenancy model* refers to how tenants' stored data is organized: - -- *Single-tenancy:*  Each database stores data from only one tenant. -- *Multi-tenancy:*  Each database stores data from multiple separate tenants (with mechanisms to protect data privacy). -- Hybrid tenancy models are also available. - -## B. How to choose the appropriate tenancy model - -In general, the tenancy model does not impact the function of an application, but it likely impacts other aspects of the overall solution. The following criteria are used to assess each of the models: - -- **Scalability:** - - Number of tenants. - - Storage per-tenant. - - Storage in aggregate. - - Workload. - -- **Tenant isolation:**  Data isolation and performance (whether one tenant's workload impacts others). 
- -- **Per-tenant cost:**  Database costs. - -- **Development complexity:** - - Changes to schema. - - Changes to queries (required by the pattern). - -- **Operational complexity:** - - Monitoring and managing performance. - - Schema management. - - Restoring a tenant. - - Disaster recovery. - -- **Customizability:**  Ease of supporting schema customizations that are either tenant-specific or tenant class-specific. - -The tenancy discussion is focused on the *data* layer. But consider for a moment the *application* layer. The application layer is treated as a monolithic entity. If you divide the application into many small components, your choice of tenancy model might change. You could treat some components differently than others regarding both tenancy and the storage technology or platform used. - -## C. Standalone single-tenant app with single-tenant database - -#### Application level isolation - -In this model, the whole application is installed repeatedly, once for each tenant. Each instance of the app is a standalone instance, so it never interacts with any other standalone instance. Each instance of the app has only one tenant, and therefore needs only one database. The tenant has the database all to itself. - -![Design of standalone app with exactly one single-tenant database.][image-standalone-app-st-db-111a] - -Each app instance is installed in a separate Azure resource group. The resource group can belong to a subscription that is owned by either the software vendor or the tenant. In either case, the vendor can manage the software for the tenant. Each application instance is configured to connect to its corresponding database. - -Each tenant database is deployed as a single database. This model provides the greatest database isolation. But the isolation requires that sufficient resources be allocated to each database to handle its peak loads. 
Here it matters that elastic pools cannot be used for databases deployed in different resource groups or to different subscriptions. This limitation makes this standalone single-tenant app model the most expensive solution from an overall database cost perspective. - -#### Vendor management - -The vendor can access all the databases in all the standalone app instances, even if the app instances are installed in different tenant subscriptions. The access is achieved via SQL connections. This cross-instance access can enable the vendor to centralize schema management and cross-database query for reporting or analytics purposes. If this kind of centralized management is desired, a catalog must be deployed that maps tenant identifiers to database URIs. Azure SQL Database provides a sharding library that is used together to provide a catalog. The sharding library is formally named the [Elastic Database Client Library][docu-elastic-db-client-library-536r]. - -## D. Multi-tenant app with database-per-tenant - -This next pattern uses a multi-tenant application with many databases, all being single-tenant databases. A new database is provisioned for each new tenant. The application tier is scaled *up* vertically by adding more resources per node. Or the app is scaled *out* horizontally by adding more nodes. The scaling is based on workload, and is independent of the number or scale of the individual databases. - -![Design of multi-tenant app with database-per-tenant.][image-mt-app-db-per-tenant-132d] - -#### Customize for a tenant - -Like the standalone app pattern, the use of single-tenant databases gives strong tenant isolation. In any app whose model specifies only single-tenant databases, the schema for any one given database can be customized and optimized for its tenant. This customization does not affect other tenants in the app. Perhaps a tenant might need data beyond the basic data fields that all tenants need. Further, the extra data field might need an index. 
- -With database-per-tenant, customizing the schema for one or more individual tenants is straightforward to achieve. The application vendor must design procedures to carefully manage schema customizations at scale. - -#### Elastic pools - -When databases are deployed in the same resource group, they can be grouped into elastic pools. The pools provide a cost-effective way of sharing resources across many databases. This pool option is cheaper than requiring each database to be large enough to accommodate the usage peaks that it experiences. Even though pooled databases share access to resources they can still achieve a high degree of performance isolation. - -![Design of multi-tenant app with database-per-tenant, using elastic pool.][image-mt-app-db-per-tenant-pool-153p] - -Azure SQL Database provides the tools necessary to configure, monitor, and manage the sharing. Both pool-level and database-level performance metrics are available in the Azure portal, and through Azure Monitor logs. The metrics can give great insights into both aggregate and tenant-specific performance. Individual databases can be moved between pools to provide reserved resources to a specific tenant. These tools enable you to ensure good performance in a cost effective manner. - -#### Operations scale for database-per-tenant - -Azure SQL Database has many management features designed to manage large numbers of databases at scale, such as well over 100,000 databases. These features make the database-per-tenant pattern plausible. - -For example, suppose a system has a 1000-tenant database as its only database. The database might have 20 indexes. If the system converts to having 1000 single-tenant databases, the quantity of indexes rises to 20,000. In Azure SQL Database as part of [Automatic tuning][docu-sql-db-automatic-tuning-771a], the automatic indexing features are enabled by default. Automatic indexing manages for you all 20,000 indexes and their ongoing create and drop optimizations. 
These automated actions occur within an individual database, and they are not coordinated or restricted by similar actions in other databases. Automatic indexing treats indexes differently in a busy database than in a less busy database. This type of index management customization would be impractical at the database-per-tenant scale if this huge management task had to be done manually. - -Other management features that scale well include the following: - -- Built-in backups. -- High availability. -- On-disk encryption. -- Performance telemetry. - -#### Automation - -The management operations can be scripted and offered through a [devops](/azure/devops/user-guide/what-is-azure-devops) model. The operations can even be automated and exposed in the application. - -For example, you could automate the recovery of a single tenant to an earlier point in time. The recovery only needs to restore the one single-tenant database that stores the tenant. This restore has no impact on other tenants, which confirms that management operations are at the finely granular level of each individual tenant. - -## E. Multi-tenant app with multi-tenant databases - -Another available pattern is to store many tenants in a multi-tenant database. The application instance can have any number of multi-tenant databases. The schema of a multi-tenant database must have one or more tenant identifier columns so that the data from any given tenant can be selectively retrieved. Further, the schema might require a few tables or columns that are used by only a subset of tenants. However, static code and reference data is stored only once and is shared by all tenants. - -#### Tenant isolation is sacrificed - -*Data:*  A multi-tenant database necessarily sacrifices tenant isolation. The data of multiple tenants is stored together in one database. During development, ensure that queries never expose data from more than one tenant. 
SQL Database supports [row-level security][docu-sql-svr-db-row-level-security-947w], which can enforce that data returned from a query be scoped to a single tenant. - -*Processing:*  A multi-tenant database shares compute and storage resources across all its tenants. The database as a whole can be monitored to ensure it is performing acceptably. However, the Azure system has no built-in way to monitor or manage the use of these resources by an individual tenant. Therefore, the multi-tenant database carries an increased risk of encountering noisy neighbors, where the workload of one overactive tenant impacts the performance experience of other tenants in the same database. Additional application-level monitoring could monitor tenant-level performance. - -#### Lower cost - -In general, multi-tenant databases have the lowest per-tenant cost. Resource costs for a single database are lower than for an equivalently sized elastic pool. In addition, for scenarios where tenants need only limited storage, potentially millions of tenants could be stored in a single database. No elastic pool can contain millions of databases. However, a solution containing 1000 databases per pool, with 1000 pools, could reach the scale of millions at the risk of becoming unwieldy to manage. - -Two variations of a multi-tenant database model are discussed in what follows, with the sharded multi-tenant model being the most flexible and scalable. - -## F. Multi-tenant app with a single multi-tenant database - -The simplest multi-tenant database pattern uses a single database to host data for all tenants. As more tenants are added, the database is scaled up with more storage and compute resources. This scale up might be all that is needed, although there is always an ultimate scale limit. However, long before that limit is reached the database becomes unwieldy to manage. - -Management operations that are focused on individual tenants are more complex to implement in a multi-tenant database. 
And at scale these operations might become unacceptably slow. One example is a point-in-time restore of the data for just one tenant. - -## G. Multi-tenant app with sharded multi-tenant databases - -Most SaaS applications access the data of only one tenant at a time. This access pattern allows tenant data to be distributed across multiple databases or shards, where all the data for any one tenant is contained in one shard. Combined with a multi-tenant database pattern, a sharded model allows almost limitless scale. - -![Design of multi-tenant app with sharded multi-tenant databases.][image-mt-app-sharded-mt-db-174s] - -#### Manage shards - -Sharding adds complexity both to the design and operational management. A catalog is required in which to maintain the mapping between tenants and databases. In addition, management procedures are required to manage the shards and the tenant population. For example, procedures must be designed to add and remove shards, and to move tenant data between shards. One way to scale is by adding a new shard and populating it with new tenants. At other times you might split a densely populated shard into two less-densely populated shards. After several tenants have been moved or discontinued, you might merge sparsely populated shards together. The merge would result in more cost-efficient resource utilization. Tenants might also be moved between shards to balance workloads. - -SQL Database provides a split/merge tool that works in conjunction with the sharding library and the catalog database. The provided app can split and merge shards, and it can move tenant data between shards. The app also maintains the catalog during these operations, marking affected tenants as offline prior to moving them. After the move, the app updates the catalog again with the new mapping, and marking the tenant as back online. 
- -#### Smaller databases more easily managed - -By distributing tenants across multiple databases, the sharded multi-tenant solution results in smaller databases that are more easily managed. For example, restoring a specific tenant to a prior point in time now involves restoring a single smaller database from a backup, rather than a larger database that contains all tenants. The database size, and number of tenants per database, can be chosen to balance the workload and the management efforts. - -#### Tenant identifier in the schema - -Depending on the sharding approach used, additional constraints may be imposed on the database schema. The SQL Database split/merge application requires that the schema includes the sharding key, which typically is the tenant identifier. The tenant identifier is the leading element in the primary key of all sharded tables. The tenant identifier enables the split/merge application to quickly locate and move data associated with a specific tenant. - -#### Elastic pool for shards - -Sharded multi-tenant databases can be placed in elastic pools. In general, having many single-tenant databases in a pool is as cost efficient as having many tenants in a few multi-tenant databases. Multi-tenant databases are advantageous when there are a large number of relatively inactive tenants. - -## H. Hybrid sharded multi-tenant database model - -In the hybrid model, all databases have the tenant identifier in their schema. The databases are all capable of storing more than one tenant, and the databases can be sharded. So in the schema sense, they are all multi-tenant databases. Yet in practice some of these databases contain only one tenant. Regardless, the quantity of tenants stored in a given database has no effect on the database schema. - -#### Move tenants around - -At any time, you can move a particular tenant to its own multi-tenant database. 
And at any time, you can change your mind and move the tenant back to a database that contains multiple tenants. You can also assign a tenant to new single-tenant database when you provision the new database. - -The hybrid model shines when there are large differences between the resource needs of identifiable groups of tenants. For example, suppose that tenants participating in a free trial are not guaranteed the same high level of performance that subscribing tenants are. The policy might be for tenants in the free trial phase to be stored in a multi-tenant database that is shared among all the free trial tenants. When a free trial tenant subscribes to the basic service tier, the tenant can be moved to another multi-tenant database that might have fewer tenants. A subscriber that pays for the premium service tier could be moved to its own new single-tenant database. - -#### Pools - -In this hybrid model, the single-tenant databases for subscriber tenants can be placed in resource pools to reduce database costs per tenant. This is also done in the database-per-tenant model. - -## I. Tenancy models compared - -The following table summarizes the differences between the main tenancy models. - -| Measurement | Standalone app | Database-per-tenant | Sharded multi-tenant | -| :---------- | :------------- | :------------------ | :------------------- | -| Scale | Medium
    1-100s | Very high
    1-100,000s | Unlimited
    1-1,000,000s | -| Tenant isolation | Very high | High | Low; except for any single tenant (that is alone in an MT db). | -| Database cost per tenant | High; is sized for peaks. | Low; pools used. | Lowest, for small tenants in MT DBs. | -| Performance monitoring and management | Per-tenant only | Aggregate + per-tenant | Aggregate; although is per-tenant only for singles. | -| Development complexity | Low | Low | Medium; due to sharding. | -| Operational complexity | Low-High. Individually simple, complex at scale. | Low-Medium. Patterns address complexity at scale. | Low-High. Individual tenant management is complex. | - - -## Next steps - -- [Deploy and explore a multi-tenant Wingtip application that uses the database-per-tenant SaaS model - Azure SQL Database][docu-sql-db-saas-tutorial-deploy-wingtip-db-per-tenant-496y] - -- [Welcome to the Wingtip Tickets sample SaaS Azure SQL Database tenancy app][docu-saas-tenancy-welcome-wingtip-tickets-app-384w] - - - - -[http-visual-studio-devops-485m]: https://www.visualstudio.com/devops/ - -[docu-sql-svr-db-row-level-security-947w]: /sql/relational-databases/security/row-level-security - -[docu-elastic-db-client-library-536r]:elastic-database-client-library.md -[docu-sql-db-saas-tutorial-deploy-wingtip-db-per-tenant-496y]: saas-dbpertenant-get-started-deploy.md -[docu-sql-db-automatic-tuning-771a]:automatic-tuning-overview.md -[docu-saas-tenancy-welcome-wingtip-tickets-app-384w]: saas-tenancy-welcome-wingtip-tickets-app.md - - - - -[image-standalone-app-st-db-111a]: media/saas-tenancy-app-design-patterns/saas-standalone-app-single-tenant-database-11.png "Design of standalone app with exactly one single-tenant database." - -[image-mt-app-db-per-tenant-132d]: media/saas-tenancy-app-design-patterns/saas-multi-tenant-app-database-per-tenant-13.png "Design of multi-tenant app with database-per-tenant." 
- -[image-mt-app-db-per-tenant-pool-153p]: media/saas-tenancy-app-design-patterns/saas-multi-tenant-app-database-per-tenant-pool-15.png "Design of multi-tenant app with database-per-tenant, using elastic pool." - -[image-mt-app-sharded-mt-db-174s]: media/saas-tenancy-app-design-patterns/saas-multi-tenant-app-sharded-multi-tenant-databases-17.png "Design of multi-tenant app with sharded multi-tenant databases." \ No newline at end of file diff --git a/articles/azure-sql/database/saas-tenancy-cross-tenant-reporting.md b/articles/azure-sql/database/saas-tenancy-cross-tenant-reporting.md deleted file mode 100644 index f76a5fe8bd558..0000000000000 --- a/articles/azure-sql/database/saas-tenancy-cross-tenant-reporting.md +++ /dev/null @@ -1,189 +0,0 @@ ---- -title: Reporting queries across multiple databases -description: "Cross-tenant reporting using distributed queries." -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle - -ms.reviewers: -ms.date: 01/25/2019 ---- -# Cross-tenant reporting using distributed queries -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this tutorial, you run distributed queries across the entire set of tenant databases for reporting. These queries can extract insights buried in the day-to-day operational data of the Wingtip Tickets SaaS tenants. To do this, you deploy an additional reporting database to the catalog server and use Elastic Query to enable distributed queries. - - -In this tutorial you learn: - -> [!div class="checklist"] -> -> * How to deploy a reporting database -> * How to run distributed queries across all tenant databases -> * How global views in each database can enable efficient querying across tenants - - -To complete this tutorial, make sure the following prerequisites are completed: - - -* The Wingtip Tickets SaaS Database Per Tenant app is deployed. 
To deploy in less than five minutes, see [Deploy and explore the Wingtip Tickets SaaS Database Per Tenant application](./saas-dbpertenant-get-started-deploy.md) -* Azure PowerShell is installed. For details, see [Getting started with Azure PowerShell](/powershell/azure/get-started-azureps) -* SQL Server Management Studio (SSMS) is installed. To download and install SSMS, see [Download SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms). - - -## Cross-tenant reporting pattern - -![cross-tenant distributed query pattern](./media/saas-tenancy-cross-tenant-reporting/cross-tenant-distributed-query.png) - -One opportunity with SaaS applications is to use the vast amount of tenant data stored in the cloud to gain insights into the operation and usage of your application. These insights can guide feature development, usability improvements, and other investments in your apps and services. - -Accessing this data in a single multi-tenant database is easy, but not so easy when distributed at scale across potentially thousands of databases. One approach is to use [Elastic Query](elastic-query-overview.md), which enables querying across a distributed set of databases with common schema. These databases can be distributed across different resource groups and subscriptions, but need to share a common login. Elastic Query uses a single *head* database in which external tables are defined that mirror tables or views in the distributed (tenant) databases. Queries submitted to this head database are compiled to produce a distributed query plan, with portions of the query pushed down to the tenant databases as needed. Elastic Query uses the shard map in the catalog database to determine the location of all tenant databases. Setup and query of the head database are straightforward using standard [Transact-SQL](/sql/t-sql/language-reference), and support querying from tools like Power BI and Excel. 
- -By distributing queries across the tenant databases, Elastic Query provides immediate insight into live production data. As Elastic Query pulls data from potentially many databases, query latency can be higher than equivalent queries submitted to a single multi-tenant database. Design queries to minimize the data that is returned to the head database. Elastic Query is often best suited for querying small amounts of real-time data, as opposed to building frequently used or complex analytics queries or reports. If queries don't perform well, look at the [execution plan](/sql/relational-databases/performance/display-an-actual-execution-plan) to see what part of the query is pushed down to the remote database and how much data is being returned. Queries that require complex aggregation or analytical processing may be better handled by extracting tenant data into a database or data warehouse optimized for analytics queries. This pattern is explained in the [tenant analytics tutorial](saas-tenancy-tenant-analytics.md). - -## Get the Wingtip Tickets SaaS Database Per Tenant application scripts - -The Wingtip Tickets SaaS Multi-tenant Database scripts and application source code are available in the [WingtipTicketsSaaS-DbPerTenant](https://github.com/Microsoft/WingtipTicketsSaaS-DbPerTenant) GitHub repo. Check out the [general guidance](saas-tenancy-wingtip-app-guidance-tips.md) for steps to download and unblock the Wingtip Tickets SaaS scripts. - -## Create ticket sales data - -To run queries against a more interesting data set, create ticket sales data by running the ticket-generator. - -1. In the *PowerShell ISE*, open the ...\\Learning Modules\\Operational Analytics\\Adhoc Reporting\\*Demo-AdhocReporting.ps1* script and set the following value: - * **$DemoScenario** = 1, **Purchase tickets for events at all venues**. -2. Press **F5** to run the script and generate ticket sales. While the script is running, continue the steps in this tutorial. 
The ticket data is queried in the *Run ad hoc distributed queries* section, so wait for the ticket generator to complete. - -## Explore the global views - -In the Wingtip Tickets SaaS Database Per Tenant application, each tenant is given a database. Thus, the data contained in the database tables is scoped to the perspective of a single tenant. However, when querying across all databases, it's important that Elastic Query can treat the data as if it is part of a single logical database sharded by tenant. - -To simulate this pattern, a set of 'global' views are added to the tenant database that project a tenant ID into each of the tables that are queried globally. For example, the *VenueEvents* view adds a computed *VenueId* to the columns projected from the *Events* table. Similarly, the *VenueTicketPurchases* and *VenueTickets* views add a computed *VenueId* column projected from their respective tables. These views are used by Elastic Query to parallelize queries and push them down to the appropriate remote tenant database when a *VenueId* column is present. This dramatically reduces the amount of data that is returned and results in a substantial increase in performance for many queries. These global views have been pre-created in all tenant databases. - -1. Open SSMS and [connect to the tenants1-<USER> server](saas-tenancy-wingtip-app-guidance-tips.md#explore-database-schema-and-execute-sql-queries-using-ssms). -1. Expand **Databases**, right-click _contosoconcerthall_, and select **New Query**. -1. Run the following queries to explore the difference between the single-tenant tables and the global views: - - ```T-SQL - -- The base Venue table, that has no VenueId associated. - SELECT * FROM Venue - - -- Notice the plural name 'Venues'. This view projects a VenueId column. - SELECT * FROM Venues - - -- The base Events table, which has no VenueId column. - SELECT * FROM Events - - -- This view projects the VenueId retrieved from the Venues table. 
- SELECT * FROM VenueEvents - ``` - -In these views, the *VenueId* is computed as a hash of the Venue name, but any approach could be used to introduce a unique value. This approach is similar to the way the tenant key is computed for use in the catalog. - -To examine the definition of the *Venues* view: - -1. In **Object Explorer**, expand **contosoconcerthall** > **Views**: - - ![Screenshot shows the contents of the Views node, including four types of Venue d b o.](./media/saas-tenancy-cross-tenant-reporting/views.png) - -2. Right-click **dbo.Venues**. -3. Select **Script View as** > **CREATE To** > **New Query Editor Window** - -Script any of the other *Venue* views to see how they add the *VenueId*. - -## Deploy the database used for distributed queries - -This exercise deploys the _adhocreporting_ database. This is the head database that contains the schema used for querying across all tenant databases. The database is deployed to the existing catalog server, which is the server used for all management-related databases in the sample app. - -1. In *PowerShell ISE*, open ...\\Learning Modules\\Operational Analytics\\Adhoc Reporting\\*Demo-AdhocReporting.ps1*. - -1. Set **$DemoScenario = 2**, _Deploy Ad hoc reporting database_. - -1. Press **F5** to run the script and create the *adhocreporting* database. - -In the next section, you add schema to the database so it can be used to run distributed queries. - -## Configure the 'head' database for running distributed queries - -This exercise adds schema (the external data source and external table definitions) to the _adhocreporting_ database to enable querying across all tenant databases. - -1. Open SQL Server Management Studio, and connect to the Adhoc Reporting database you created in the previous step. The name of the database is *adhocreporting*. -2. Open ...\Learning Modules\Operational Analytics\Adhoc Reporting\ _Initialize-AdhocReportingDB.sql_ in SSMS. -3. 
Review the SQL script and note: - - Elastic Query uses a database-scoped credential to access each of the tenant databases. This credential needs to be available in all the databases and should normally be granted the minimum rights required to enable these queries. - - ![create credential](./media/saas-tenancy-cross-tenant-reporting/create-credential.png) - - With the catalog database as the external data source, queries are distributed to all databases registered in the catalog at the time the query runs. As server names are different for each deployment, this script gets the location of the catalog database from the current server (@@servername) where the script is executed. - - ![create external data source](./media/saas-tenancy-cross-tenant-reporting/create-external-data-source.png) - - The external tables that reference the global views described in the previous section, and defined with **DISTRIBUTION = SHARDED(VenueId)**. Because each *VenueId* maps to an individual database, this improves performance for many scenarios as shown in the next section. - - ![create external tables](./media/saas-tenancy-cross-tenant-reporting/external-tables.png) - - The local table _VenueTypes_ that is created and populated. This reference data table is common in all tenant databases, so it can be represented here as a local table and populated with the common data. For some queries, having this table defined in the head database can reduce the amount of data that needs to be moved to the head database. - - ![create table](./media/saas-tenancy-cross-tenant-reporting/create-table.png) - - If you include reference tables in this manner, be sure to update the table schema and data whenever you update the tenant databases. - -4. Press **F5** to run the script and initialize the *adhocreporting* database. - -Now you can run distributed queries, and gather insights across all tenants! 
- -## Run distributed queries - -Now that the *adhocreporting* database is set up, go ahead and run some distributed queries. Include the execution plan for a better understanding of where the query processing is happening. - -When inspecting the execution plan, hover over the plan icons for details. - -Important to note, is that setting **DISTRIBUTION = SHARDED(VenueId)** when the external data source is defined improves performance for many scenarios. As each *VenueId* maps to an individual database, filtering is easily done remotely, returning only the data needed. - -1. Open ...\\Learning Modules\\Operational Analytics\\Adhoc Reporting\\*Demo-AdhocReportingQueries.sql* in SSMS. -2. Ensure you are connected to the **adhocreporting** database. -3. Select the **Query** menu and click **Include Actual Execution Plan** -4. Highlight the *Which venues are currently registered?* query, and press **F5**. - - The query returns the entire venue list, illustrating how quick, and easy it is to query across all tenants and return data from each tenant. - - Inspect the plan and see that the entire cost is in the remote query. Each tenant database executes the query remotely and returns its venue information to the head database. - - ![SELECT * FROM dbo.Venues](./media/saas-tenancy-cross-tenant-reporting/query1-plan.png) - -5. Select the next query, and press **F5**. - - This query joins data from the tenant databases and the local *VenueTypes* table (local, as it's a table in the *adhocreporting* database). - - Inspect the plan and see that the majority of cost is the remote query. Each tenant database returns its venue info and performs a local join with the local *VenueTypes* table to display the friendly name. - - ![Join on remote and local data](./media/saas-tenancy-cross-tenant-reporting/query2-plan.png) - -6. Now select the *On which day were the most tickets sold?* query, and press **F5**. - - This query does a bit more complex joining and aggregation. 
Most of the processing occurs remotely. Only single rows, containing each venue's daily ticket sale count per day, are returned to the head database. - - ![query](./media/saas-tenancy-cross-tenant-reporting/query3-plan.png) - - -## Next steps - -In this tutorial you learned how to: - -> [!div class="checklist"] -> -> * Run distributed queries across all tenant databases -> * Deploy a reporting database and define the schema required to run distributed queries. - - -Now try the [Tenant Analytics tutorial](saas-tenancy-tenant-analytics.md) to explore extracting data to a separate analytics database for more complex analytics processing. - -## Additional resources - -* Additional [tutorials that build upon the Wingtip Tickets SaaS Database Per Tenant application](./saas-dbpertenant-wingtip-app-overview.md#sql-database-wingtip-saas-tutorials) -* [Elastic Query](elastic-query-overview.md) \ No newline at end of file diff --git a/articles/azure-sql/database/saas-tenancy-elastic-tools-multi-tenant-row-level-security.md b/articles/azure-sql/database/saas-tenancy-elastic-tools-multi-tenant-row-level-security.md deleted file mode 100644 index b08fef8f4b27f..0000000000000 --- a/articles/azure-sql/database/saas-tenancy-elastic-tools-multi-tenant-row-level-security.md +++ /dev/null @@ -1,360 +0,0 @@ ---- -title: Multi-tenant apps with RLS and elastic database tools -description: Use elastic database tools with row-level security to build an application with a highly scalable data tier. 
-ms.service: sql-database -ms.subservice: scenario -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: VanMSFT -ms.author: vanto -ms.reviewer: kendralittle, mathoma -ms.date: 12/18/2018 ---- -# Multi-tenant applications with elastic database tools and row-level security -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -[Elastic database tools](elastic-scale-get-started.md) and [row-level security (RLS)][rls] cooperate to enable scaling the data tier of a multi-tenant application with Azure SQL Database. Together these technologies help you build an application that has a highly scalable data tier. The data tier supports multi-tenant shards, and uses **ADO.NET SqlClient** or **Entity Framework**. For more information, see [Design Patterns for Multi-tenant SaaS Applications with Azure SQL Database](./saas-tenancy-app-design-patterns.md). - -- **Elastic database tools** enable developers to scale out the data tier with standard sharding practices, by using .NET libraries and Azure service templates. Managing shards by using the [Elastic Database Client Library][s-d-elastic-database-client-library] helps automate and streamline many of the infrastructural tasks typically associated with sharding. -- **Row-level security** enables developers to safely store data for multiple tenants in the same database. RLS security policies filter out rows that do not belong to the tenant executing a query. Centralizing the filter logic inside the database simplifies maintenance and reduces the risk of a security error. The alternative of relying on all client code to enforce security is risky. - -By using these features together, an application can store data for multiple tenants in the same shard database. It costs less per tenant when the tenants share a database. Yet the same application can also offer its premium tenants the option of paying for their own dedicated single-tenant shard. 
One benefit of single-tenant isolation is firmer performance guarantees. In a single-tenant database, there is no other tenant competing for resources. - -The goal is to use the elastic database client library [data-dependent routing](elastic-scale-data-dependent-routing.md) APIs to automatically connect each given tenant to the correct shard database. Only one shard contains particular TenantId value for the given tenant. The TenantId is the *sharding key*. After the connection is established, an RLS security policy within the database ensures that the given tenant can access only those data rows that contain its TenantId. - -> [!NOTE] -> The tenant identifier might consist of more than one column. For convenience is this discussion, we informally assume a single-column TenantId. - -![Blogging app architecture][1] - -## Download the sample project - -### Prerequisites - -- Use Visual Studio (2012 or higher) -- Create three databases in Azure SQL Database -- Download sample project: [Elastic DB Tools for Azure SQL - Multi-Tenant Shards](https://go.microsoft.com/?linkid=9888163) - - Fill in the information for your databases at the beginning of **Program.cs** - -This project extends the one described in [Elastic DB Tools for Azure SQL - Entity Framework Integration](elastic-scale-use-entity-framework-applications-visual-studio.md) by adding support for multi-tenant shard databases. The project builds a simple console application for creating blogs and posts. The project includes four tenants, plus two multi-tenant shard databases. This configuration is illustrated in the preceding diagram. - -Build and run the application. This run bootstraps the elastic database tools' shard map manager, and performs the following tests: - -1. Using Entity Framework and LINQ, create a new blog and then display all blogs for each tenant -2. Using ADO.NET SqlClient, display all blogs for a tenant -3. 
Try to insert a blog for the wrong tenant to verify that an error is thrown - -Notice that because RLS has not yet been enabled in the shard databases, each of these tests reveals a problem: tenants are able to see blogs that do not belong to them, and the application is not prevented from inserting a blog for the wrong tenant. The remainder of this article describes how to resolve these problems by enforcing tenant isolation with RLS. There are two steps: - -1. **Application tier**: Modify the application code to always set the current TenantId in the SESSION\_CONTEXT after opening a connection. The sample project already sets the TenantId this way. -2. **Data tier**: Create an RLS security policy in each shard database to filter rows based on the TenantId stored in SESSION\_CONTEXT. Create a policy for each of your shard databases, otherwise rows in multi-tenant shards are not be filtered. - -## 1. Application tier: Set TenantId in the SESSION\_CONTEXT - -First you connect to a shard database by using the data-dependent routing APIs of the elastic database client library. The application still must tell the database which TenantId is using the connection. The TenantId tells the RLS security policy which rows must be filtered out as belonging to other tenants. Store the current TenantId in the [SESSION\_CONTEXT](/sql/t-sql/functions/session-context-transact-sql) of the connection. - -An alternative to SESSION\_CONTEXT is to use [CONTEXT\_INFO](/sql/t-sql/functions/context-info-transact-sql). But SESSION\_CONTEXT is a better option. SESSION\_CONTEXT is easier to use, it returns NULL by default, and it supports key-value pairs. - -### Entity Framework - -For applications using Entity Framework, the easiest approach is to set the SESSION\_CONTEXT within the ElasticScaleContext override described in [Data-dependent routing using EF DbContext](elastic-scale-use-entity-framework-applications-visual-studio.md#data-dependent-routing-using-ef-dbcontext). 
Create and execute a SqlCommand that sets TenantId in the SESSION\_CONTEXT to the shardingKey specified for the connection. Then return the connection brokered through data-dependent routing. This way, you only need to write code once to set the SESSION\_CONTEXT. - -```csharp -// ElasticScaleContext.cs -// Constructor for data-dependent routing. -// This call opens a validated connection that is routed to the -// proper shard by the shard map manager. -// Note that the base class constructor call fails for an open connection -// if migrations need to be done and SQL credentials are used. -// This is the reason for the separation of constructors. -// ... -public ElasticScaleContext(ShardMap shardMap, T shardingKey, string connectionStr) - : base( - OpenDDRConnection(shardMap, shardingKey, connectionStr), - true) // contextOwnsConnection -{ -} - -public static SqlConnection OpenDDRConnection( - ShardMap shardMap, - T shardingKey, - string connectionStr) -{ - // No initialization. - Database.SetInitializer>(null); - - // Ask shard map to broker a validated connection for the given key. - SqlConnection conn = null; - try - { - conn = shardMap.OpenConnectionForKey( - shardingKey, - connectionStr, - ConnectionOptions.Validate); - - // Set TenantId in SESSION_CONTEXT to shardingKey - // to enable Row-Level Security filtering. - SqlCommand cmd = conn.CreateCommand(); - cmd.CommandText = - @"exec sp_set_session_context - @key=N'TenantId', @value=@shardingKey"; - cmd.Parameters.AddWithValue("@shardingKey", shardingKey); - cmd.ExecuteNonQuery(); - - return conn; - } - catch (Exception) - { - if (conn != null) - { - conn.Dispose(); - } - throw; - } -} -// ... 
-``` - -Now the SESSION\_CONTEXT is automatically set with the specified TenantId whenever ElasticScaleContext is invoked: - -```csharp -// Program.cs -SqlDatabaseUtils.SqlRetryPolicy.ExecuteAction(() => -{ - using (var db = new ElasticScaleContext( - sharding.ShardMap, tenantId, connStrBldr.ConnectionString)) - { - var query = from b in db.Blogs - orderby b.Name - select b; - - Console.WriteLine("All blogs for TenantId {0}:", tenantId); - foreach (var item in query) - { - Console.WriteLine(item.Name); - } - } -}); -``` - -### ADO.NET SqlClient - -For applications using ADO.NET SqlClient, create a wrapper function around method ShardMap.OpenConnectionForKey. Have the wrapper automatically set TenantId in the SESSION\_CONTEXT to the current TenantId before returning a connection. To ensure that SESSION\_CONTEXT is always set, you should only open connections using this wrapper function. - -```csharp -// Program.cs -// Wrapper function for ShardMap.OpenConnectionForKey() that -// automatically sets SESSION_CONTEXT with the correct -// tenantId before returning a connection. -// As a best practice, you should only open connections using this method -// to ensure that SESSION_CONTEXT is always set before executing a query. -// ... -public static SqlConnection OpenConnectionForTenant( - ShardMap shardMap, int tenantId, string connectionStr) -{ - SqlConnection conn = null; - try - { - // Ask shard map to broker a validated connection for the given key. - conn = shardMap.OpenConnectionForKey( - tenantId, connectionStr, ConnectionOptions.Validate); - - // Set TenantId in SESSION_CONTEXT to shardingKey - // to enable Row-Level Security filtering. - SqlCommand cmd = conn.CreateCommand(); - cmd.CommandText = - @"exec sp_set_session_context - @key=N'TenantId', @value=@shardingKey"; - cmd.Parameters.AddWithValue("@shardingKey", tenantId); - cmd.ExecuteNonQuery(); - - return conn; - } - catch (Exception) - { - if (conn != null) - { - conn.Dispose(); - } - throw; - } -} - -// ... 
- -// Example query via ADO.NET SqlClient. -// If row-level security is enabled, only Tenant 4's blogs are listed. -SqlDatabaseUtils.SqlRetryPolicy.ExecuteAction(() => -{ - using (SqlConnection conn = OpenConnectionForTenant( - sharding.ShardMap, tenantId4, connStrBldr.ConnectionString)) - { - SqlCommand cmd = conn.CreateCommand(); - cmd.CommandText = @"SELECT * FROM Blogs"; - - Console.WriteLine(@"-- -All blogs for TenantId {0} (using ADO.NET SqlClient):", tenantId4); - - SqlDataReader reader = cmd.ExecuteReader(); - while (reader.Read()) - { - Console.WriteLine("{0}", reader["Name"]); - } - } -}); - -``` - -## 2. Data tier: Create row-level security policy - -### Create a security policy to filter the rows each tenant can access - -Now that the application is setting SESSION\_CONTEXT with the current TenantId before querying, an RLS security policy can filter queries and exclude rows that have a different TenantId. - -RLS is implemented in Transact-SQL. A user-defined function defines the access logic, and a security policy binds this function to any number of tables. For this project: - -1. The function verifies that the application is connected to the database, and that the TenantId stored in the SESSION\_CONTEXT matches the TenantId of a given row. - - The application is connected, rather than some other SQL user. - -2. A FILTER predicate allows rows that meet the TenantId filter to pass through for SELECT, UPDATE, and DELETE queries. - - A BLOCK predicate prevents rows that fail the filter from being INSERTed or UPDATEd. - - If SESSION\_CONTEXT has not been set, the function returns NULL, and no rows are visible or able to be inserted. - -To enable RLS on all shards, execute the following T-SQL by using either Visual Studio (SSDT), SSMS, or the PowerShell script included in the project. Or if you are using [Elastic Database Jobs](./elastic-jobs-overview.md), you can automate execution of this T-SQL on all shards. 
- -```sql -CREATE SCHEMA rls; -- Separate schema to organize RLS objects. -GO - -CREATE FUNCTION rls.fn_tenantAccessPredicate(@TenantId int) - RETURNS TABLE - WITH SCHEMABINDING -AS - RETURN SELECT 1 AS fn_accessResult - -- Use the user in your application's connection string. - -- Here we use 'dbo' only for demo purposes! - WHERE DATABASE_PRINCIPAL_ID() = DATABASE_PRINCIPAL_ID('dbo') - AND CAST(SESSION_CONTEXT(N'TenantId') AS int) = @TenantId; -GO - -CREATE SECURITY POLICY rls.tenantAccessPolicy - ADD FILTER PREDICATE rls.fn_tenantAccessPredicate(TenantId) ON dbo.Blogs, - ADD BLOCK PREDICATE rls.fn_tenantAccessPredicate(TenantId) ON dbo.Blogs, - ADD FILTER PREDICATE rls.fn_tenantAccessPredicate(TenantId) ON dbo.Posts, - ADD BLOCK PREDICATE rls.fn_tenantAccessPredicate(TenantId) ON dbo.Posts; -GO -``` - -> [!TIP] -> In a complex project you might need to add the predicate on hundreds of tables, which could be tedious. There is a helper stored procedure that automatically generates a security policy, and adds a predicate on all tables in a schema. For more information, see the blog post at [Apply Row-Level Security to all tables - helper script (blog)](https://techcommunity.microsoft.com/t5/sql-server/apply-row-level-security-to-all-tables-helper-script/ba-p/384360). - -Now if you run the sample application again, tenants see only rows that belong to them. In addition, the application cannot insert rows that belong to tenants other than the one currently connected to the shard database. Also, the app cannot update the TenantId in any rows it can see. If the app attempts to do either, a DbUpdateException is raised. - -If you add a new table later, ALTER the security policy to add FILTER and BLOCK predicates on the new table. 
- -```sql -ALTER SECURITY POLICY rls.tenantAccessPolicy - ADD FILTER PREDICATE rls.fn_tenantAccessPredicate(TenantId) ON dbo.MyNewTable, - ADD BLOCK PREDICATE rls.fn_tenantAccessPredicate(TenantId) ON dbo.MyNewTable; -GO -``` - -### Add default constraints to automatically populate TenantId for INSERTs - -You can put a default constraint on each table to automatically populate the TenantId with the value currently stored in SESSION\_CONTEXT when inserting rows. An example follows. - -```sql --- Create default constraints to auto-populate TenantId with the --- value of SESSION_CONTEXT for inserts. -ALTER TABLE Blogs - ADD CONSTRAINT df_TenantId_Blogs - DEFAULT CAST(SESSION_CONTEXT(N'TenantId') AS int) FOR TenantId; -GO - -ALTER TABLE Posts - ADD CONSTRAINT df_TenantId_Posts - DEFAULT CAST(SESSION_CONTEXT(N'TenantId') AS int) FOR TenantId; -GO -``` - -Now the application does not need to specify a TenantId when inserting rows: - -```csharp -SqlDatabaseUtils.SqlRetryPolicy.ExecuteAction(() => -{ - using (var db = new ElasticScaleContext( - sharding.ShardMap, tenantId, connStrBldr.ConnectionString)) - { - // The default constraint sets TenantId automatically! - var blog = new Blog { Name = name }; - db.Blogs.Add(blog); - db.SaveChanges(); - } -}); -``` - -> [!NOTE] -> If you use default constraints for an Entity Framework project, it is recommended that you *NOT* include the TenantId column in your EF data model. This recommendation is because Entity Framework queries automatically supply default values that override the default constraints created in T-SQL that use SESSION\_CONTEXT. -> To use default constraints in the sample project, for instance, you should remove TenantId from DataClasses.cs (and run Add-Migration in the Package Manager Console) and use T-SQL to ensure that the field only exists in the database tables. This way, EF does automatically supply incorrect default values when inserting data. 
- -### (Optional) Enable a *superuser* to access all rows - -Some applications may want to create a *superuser* who can access all rows. A superuser could enable reporting across all tenants on all shards. Or a superuser could perform split-merge operations on shards that involve moving tenant rows between databases. - -To enable a superuser, create a new SQL user (`superuser` in this example) in each shard database. Then alter the security policy with a new predicate function that allows this user to access all rows. Such a function is given next. - -```sql --- New predicate function that adds superuser logic. -CREATE FUNCTION rls.fn_tenantAccessPredicateWithSuperUser(@TenantId int) - RETURNS TABLE - WITH SCHEMABINDING -AS - RETURN SELECT 1 AS fn_accessResult - WHERE - ( - DATABASE_PRINCIPAL_ID() = DATABASE_PRINCIPAL_ID('dbo') -- Replace 'dbo'. - AND CAST(SESSION_CONTEXT(N'TenantId') AS int) = @TenantId - ) - OR - ( - DATABASE_PRINCIPAL_ID() = DATABASE_PRINCIPAL_ID('superuser') - ); -GO - --- Atomically swap in the new predicate function on each table. -ALTER SECURITY POLICY rls.tenantAccessPolicy - ALTER FILTER PREDICATE rls.fn_tenantAccessPredicateWithSuperUser(TenantId) ON dbo.Blogs, - ALTER BLOCK PREDICATE rls.fn_tenantAccessPredicateWithSuperUser(TenantId) ON dbo.Blogs, - ALTER FILTER PREDICATE rls.fn_tenantAccessPredicateWithSuperUser(TenantId) ON dbo.Posts, - ALTER BLOCK PREDICATE rls.fn_tenantAccessPredicateWithSuperUser(TenantId) ON dbo.Posts; -GO -``` - - -### Maintenance - -- **Adding new shards**: Execute the T-SQL script to enable RLS on any new shards, otherwise queries on these shards are not be filtered. -- **Adding new tables**: Add a FILTER and BLOCK predicate to the security policy on all shards whenever a new table is created. Otherwise queries on the new table are not be filtered. 
This addition can be automated by using a DDL trigger, as described in [Apply Row-Level Security automatically to newly created tables (blog)](https://techcommunity.microsoft.com/t5/SQL-Server/Apply-Row-Level-Security-automatically-to-newly-created-tables/ba-p/384393). - -## Summary - -Elastic database tools and row-level security can be used together to scale out an application's data tier with support for both multi-tenant and single-tenant shards. Multi-tenant shards can be used to store data more efficiently. This efficiency is pronounced where a large number of tenants have only a few rows of data. Single-tenant shards can support premium tenants which have stricter performance and isolation requirements. For more information, see [Row-Level Security reference][rls]. - -## Additional resources - -- [What is an Azure elastic pool?](elastic-pool-overview.md) -- [Scaling out with Azure SQL Database](elastic-scale-introduction.md) -- [Design Patterns for Multi-tenant SaaS Applications with Azure SQL Database](./saas-tenancy-app-design-patterns.md) -- [Authentication in multitenant apps, using Azure AD and OpenID Connect](/azure/architecture/multitenant-identity/authenticate) -- [Tailspin Surveys application](/azure/architecture/multitenant-identity/tailspin) - -## Questions and Feature Requests - -For questions, contact us on the [Microsoft Q&A question page for SQL Database](/answers/topics/azure-sql-database.html). And add any feature requests to the [SQL Database feedback forum](https://feedback.azure.com/d365community/forum/04fe6ee0-3b25-ec11-b6e6-000d3a4f0da0). 
- - -[1]: ./media/saas-tenancy-elastic-tools-multi-tenant-row-level-security/blogging-app.png - -[rls]: /sql/relational-databases/security/row-level-security -[s-d-elastic-database-client-library]:elastic-database-client-library.md \ No newline at end of file diff --git a/articles/azure-sql/database/saas-tenancy-schema-management.md b/articles/azure-sql/database/saas-tenancy-schema-management.md deleted file mode 100644 index 02abb06d43238..0000000000000 --- a/articles/azure-sql/database/saas-tenancy-schema-management.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: Manage schema in a single-tenant app -description: "Manage Schema for multiple tenants in a single-tenant app that uses Azure SQL Database" -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 09/19/2018 ---- -# Manage schema in a SaaS application using the database-per-tenant pattern with Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -As a database application evolves, changes inevitably need to be made to the database schema or reference data. Database maintenance tasks are also needed periodically. Managing an application that uses the database per tenant pattern requires that you apply these changes or maintenance tasks across a fleet of tenant databases. - -This tutorial explores two scenarios - deploying reference data updates for all tenants, and rebuilding an index on the table containing the reference data. The [Elastic jobs](./elastic-jobs-overview.md) feature is used to execute these actions on all tenant databases, and on the template database used to create new tenant databases. 
- -In this tutorial you learn how to: - -> [!div class="checklist"] -> -> * Create a job agent -> * Cause T-SQL jobs to be run on all tenant databases -> * Update reference data in all tenant databases -> * Create an index on a table in all tenant databases - - -To complete this tutorial, make sure the following prerequisites are met: - -* The Wingtip Tickets SaaS Database Per Tenant app is deployed. To deploy in less than five minutes, see [Deploy and explore the Wingtip Tickets SaaS database per tenant application](./saas-dbpertenant-get-started-deploy.md) -* Azure PowerShell is installed. For details, see [Getting started with Azure PowerShell](/powershell/azure/get-started-azureps) -* The latest version of SQL Server Management Studio (SSMS) is installed. [Download and Install SSMS](/sql/ssms/download-sql-server-management-studio-ssms) - - -## Introduction to SaaS schema management patterns - -The database per tenant pattern isolates tenant data effectively, but increases the number of databases to manage and maintain. [Elastic Jobs](./elastic-jobs-overview.md) facilitates administration and management of multiple databases. Jobs enable you to securely and reliably, run tasks (T-SQL scripts) against a group of databases. Jobs can deploy schema and common reference data changes across all tenant databases in an application. Elastic Jobs can also be used to maintain a *template* database used to create new tenants, ensuring it always has the latest schema and reference data. - -![screen](./media/saas-tenancy-schema-management/schema-management-dpt.png) - - -## Elastic Jobs public preview - -There's a new version of Elastic Jobs that is now an integrated feature of Azure SQL Database. This new version of Elastic Jobs is currently in public preview. This public preview currently supports using PowerShell to create a job agent, and T-SQL to create and manage jobs. -See article on [Elastic Database Jobs](./elastic-jobs-overview.md) for more information. 
- -## Get the Wingtip Tickets SaaS database per tenant application scripts - -The application source code and management scripts are available in the [WingtipTicketsSaaS-DbPerTenant](https://github.com/Microsoft/WingtipTicketsSaaS-DbPerTenant) GitHub repo. Check out the [general guidance](saas-tenancy-wingtip-app-guidance-tips.md) for steps to download and unblock the Wingtip Tickets SaaS scripts. - -## Create a job agent database and new job agent - -This tutorial requires you use PowerShell to create a job agent and its backing job agent database. The job agent database holds job definitions, job status, and history. Once the job agent and its database are created, you can create and monitor jobs immediately. - -1. **In PowerShell ISE**, open …\\Learning Modules\\Schema Management\\*Demo-SchemaManagement.ps1*. -1. Press **F5** to run the script. - -The *Demo-SchemaManagement.ps1* script calls the *Deploy-SchemaManagement.ps1* script to create a database named *osagent* on the catalog server. It then creates the job agent, using the database as a parameter. - -## Create a job to deploy new reference data to all tenants - -In the Wingtip Tickets app, each tenant database includes a set of supported venue types. Each venue is of a specific venue type, which defines the kind of events that can be hosted, and determines the background image used in the app. For the application to support new kinds of events, this reference data must be updated and new venue types added. In this exercise, you deploy an update to all the tenant databases to add two additional venue types: *Motorcycle Racing* and *Swimming Club*. - -First, review the venue types included in each tenant database. Connect to one of the tenant databases in SQL Server Management Studio (SSMS) and inspect the VenueTypes table. You can also query this table in the Query editor in the Azure portal, accessed from the database page. - -1. 
Open SSMS and connect to the tenant server: *tenants1-dpt-<user>.database.windows.net* -1. To confirm that *Motorcycle Racing* and *Swimming Club* **are not** currently included, browse to the _contosoconcerthall_ database on the *tenants1-dpt-<user>* server and query the *VenueTypes* table. - -Now let’s create a job to update the *VenueTypes* table in all the tenant databases to add the new venue types. - -To create a new job, you use a set of jobs system stored procedures created in the _jobagent_ database when the job agent was created. - -1. In SSMS, connect to the catalog server: *catalog-dpt-<user>.database.windows.net* server -1. In SSMS, open the file …\\Learning Modules\\Schema Management\\DeployReferenceData.sql -1. Modify the statement: SET @wtpUser = <user> and substitute the User value used when you deployed the Wingtip Tickets SaaS Database Per Tenant app -1. Ensure you are connected to the _jobagent_ database and press **F5** to run the script - -Observe the following elements in the *DeployReferenceData.sql* script: -* **sp\_add\_target\_group** creates the target group name DemoServerGroup. -* **sp\_add\_target\_group\_member** is used to define the set of target databases. First the _tenants1-dpt-<user>_ server is added. Adding the server as a target causes the databases in that server at the time of job execution to be included in the job. Then the _basetenantdb_ database and the *adhocreporting* database (used in a later tutorial) are added as targets. -* **sp\_add\_job** creates a job named _Reference Data Deployment_. -* **sp\_add\_jobstep** creates the job step containing T-SQL command text to update the reference table, VenueTypes. -* The remaining views in the script display the existence of the objects and monitor job execution. Use these queries to review the status value in the **lifecycle** column to determine when the job has finished on all the target databases. 
- -Once the script has completed, you can verify the reference data has been updated. In SSMS, browse to the *contosoconcerthall* database on the *tenants1-dpt-<user>* server and query the *VenueTypes* table. Check that *Motorcycle Racing* and *Swimming Club* **are** now present. - - -## Create a job to manage the reference table index - -This exercise uses a job to rebuild the index on the reference table primary key. This is a typical database maintenance operation that might be done after loading large amounts of data. - -Create a job using the same jobs 'system' stored procedures. - -1. Open SSMS and connect to the _catalog-dpt-<user>.database.windows.net_ server -1. Open the file _…\\Learning Modules\\Schema Management\\OnlineReindex.sql_ -1. Right click, select Connection, and connect to the _catalog-dpt-<user>.database.windows.net_ server, if not already connected -1. Ensure you are connected to the _jobagent_ database and press **F5** to run the script - -Observe the following elements in the _OnlineReindex.sql_ script: -* **sp\_add\_job** creates a new job called “Online Reindex PK\_\_VenueTyp\_\_265E44FD7FD4C885” -* **sp\_add\_jobstep** creates the job step containing T-SQL command text to update the index -* The remaining views in the script monitor job execution. Use these queries to review the status value in the **lifecycle** column to determine when the job has successfully finished on all target group members. - - - -## Next steps - -In this tutorial you learned how to: - -> [!div class="checklist"] -> -> * Create a job agent to run across T-SQL jobs multiple databases -> * Update reference data in all tenant databases -> * Create an index on a table in all tenant databases - -Next, try the [Ad hoc reporting tutorial](./saas-tenancy-cross-tenant-reporting.md) to explore running distributed queries across tenant databases. 
- - -## Additional resources - -* [Additional tutorials that build upon the Wingtip Tickets SaaS Database Per Tenant application deployment](./saas-dbpertenant-wingtip-app-overview.md#sql-database-wingtip-saas-tutorials) -* [Managing scaled-out cloud databases](./elastic-jobs-overview.md) \ No newline at end of file diff --git a/articles/azure-sql/database/saas-tenancy-tenant-analytics-adf.md b/articles/azure-sql/database/saas-tenancy-tenant-analytics-adf.md deleted file mode 100644 index 48e8cc95f3cda..0000000000000 --- a/articles/azure-sql/database/saas-tenancy-tenant-analytics-adf.md +++ /dev/null @@ -1,274 +0,0 @@ ---- -title: Run analytics queries against tenant databases -description: "Cross-tenant analytics queries using data extracted from Azure SQL Database, Azure Synapse Analytics, Azure Data Factory, or Power BI." -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: seo-lt-2019, sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 12/18/2018 ---- -# Explore SaaS analytics with Azure SQL Database, Azure Synapse Analytics, Data Factory, and Power BI -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this tutorial, you walk through an end-to-end analytics scenario. The scenario demonstrates how analytics over tenant data can empower software vendors to make smart decisions. Using data extracted from each tenant database, you use analytics to gain insights into tenant behavior, including their use of the sample Wingtip Tickets SaaS application. This scenario involves three steps: - -1. **Extract data** from each tenant database into an analytics store, in this case, a dedicated SQL pool. -2. **Optimize the extracted data** for analytics processing. -3. Use **Business Intelligence** tools to draw out useful insights, which can guide decision making. 
- -In this tutorial you learn how to: - -> [!div class="checklist"] -> -> - Create the tenant analytics store for loading. -> - Use Azure Data Factory (ADF) to extract data from each tenant database into the analytics data warehouse. -> - Optimize the extracted data (reorganize into a star-schema). -> - Query the analytics data warehouse. -> - Use Power BI for data visualization to highlight trends in tenant data and make recommendation for improvements. - -![architectureOverView](./media/saas-tenancy-tenant-analytics-adf/adf_overview.png) - -## Analytics over extracted tenant data - -SaaS applications hold a potentially vast amount of tenant data in the cloud. This data can provide a rich source of insights about the operation and usage of your application, and the behavior of your tenants. These insights can guide feature development, usability improvements, and other investments in the apps and platform. - -Accessing the data for all tenants is simple when all the data is in just one multi-tenant database. But access is more complex when distributed at scale across thousands of databases. One way to tame the complexity is to extract the data to an analytics database or a data warehouse for query. - -This tutorial presents an end-to-end analytics scenario for the Wingtip Tickets application. First, [Azure Data Factory (ADF)](../../data-factory/introduction.md) is used as the orchestration tool to extract tickets sales and related data from each tenant database. This data is loaded into staging tables in an analytics store. The analytics store could either be a SQL Database or a dedicated SQL pool. This tutorial uses [Azure Synapse Analytics](../../synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-what-is.md) as the analytics store. - -Next, the extracted data is transformed and loaded into a set of [star-schema](https://www.wikipedia.org/wiki/Star_schema) tables. 
The tables consist of a central fact table plus related dimension tables: - -- The central fact table in the star-schema contains ticket data. -- The dimension tables contain data about venues, events, customers, and purchase dates. - -Together the central and dimension tables enable efficient analytical processing. The star-schema used in this tutorial is displayed in the following image: - -![Diagram that shows the star schema that is used in this tutorial.](./media/saas-tenancy-tenant-analytics-adf/starschematables.JPG) - -Finally, the star-schema tables are queried. Query results are displayed visually using Power BI to highlight insights into tenant behavior and their use of the application. With this star-schema, you run queries that expose: - -- Who is buying tickets and from which venue. -- Patterns and trends in the sale of tickets. -- The relative popularity of each venue. - -This tutorial provides basic examples of insights that can be gleaned from the Wingtip Tickets data. Understanding how each venue uses the service might cause the Wingtip Tickets vendor to think about different service plans targeted at more or less active venues, for example. - -## Setup - -### Prerequisites - -To complete this tutorial, make sure the following prerequisites are met: - -- The Wingtip Tickets SaaS Database Per Tenant application is deployed. To deploy in less than five minutes, see [Deploy and explore the Wingtip SaaS application](./saas-dbpertenant-get-started-deploy.md). -- The Wingtip Tickets SaaS Database Per Tenant scripts and application [source code](https://github.com/Microsoft/WingtipTicketsSaaS-DbPerTenant/) are downloaded from GitHub. See download instructions. Be sure to *unblock the zip file* before extracting its contents. -- Power BI Desktop is installed. [Download Power BI Desktop](https://powerbi.microsoft.com/downloads/). 
-- The batch of additional tenants has been provisioned, see the [**Provision tenants tutorial**](./saas-dbpertenant-provision-and-catalog.md). - -### Create data for the demo - -This tutorial explores analytics over ticket sales data. In this step, you generate ticket data for all the tenants. In a later step, this data is extracted for analysis. _Ensure you provisioned the batch of tenants_ (as described earlier) so that you have enough data to expose a range of different ticket purchasing patterns. - -1. In PowerShell ISE, open *…\Learning Modules\Operational Analytics\Tenant Analytics DW\Demo-TenantAnalyticsDW.ps1*, and set the following value: - - **$DemoScenario** = **1** Purchase tickets for events at all venues -2. Press **F5** to run the script and create ticket purchasing history for all the venues. With 20 tenants, the script generates tens of thousands of tickets and may take 10 minutes or more. - -### Deploy Azure Synapse Analytics, Data Factory, and Blob Storage - -In the Wingtip Tickets app, the tenants' transactional data is distributed over many databases. Azure Data Factory (ADF) is used to orchestrate the Extract, Load, and Transform (ELT) of this data into the data warehouse. To load data into Azure Synapse Analytics most efficiently, ADF extracts data into intermediate blob files and then uses [PolyBase](../../synapse-analytics/sql-data-warehouse/design-elt-data-loading.md) to load the data into the data warehouse. - -In this step, you deploy the additional resources used in the tutorial: a dedicated SQL pool called _tenantanalytics_, an Azure Data Factory called _dbtodwload-\_, and an Azure storage account called _wingtipstaging\_. The storage account is used to temporarily hold extracted data files as blobs before they are loaded into the data warehouse. This step also deploys the data warehouse schema and defines the ADF pipelines that orchestrate the ELT process. - -1. 
In PowerShell ISE, open *…\Learning Modules\Operational Analytics\Tenant Analytics DW\Demo-TenantAnalyticsDW.ps1* and set: - - **$DemoScenario** = **2** Deploy tenant analytics data warehouse, blob storage, and data factory -1. Press **F5** to run the demo script and deploy the Azure resources. - -Now review the Azure resources you deployed: - -#### Tenant databases and analytics store - -Use [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms) to connect to **tenants1-dpt-<user>** and **catalog-dpt-<user>** servers. Replace <user> with the value used when you deployed the app. Use Login = *developer* and Password = *P\@ssword1*. See the [introductory tutorial](./saas-dbpertenant-wingtip-app-overview.md) for more guidance. - -![Connect to SQL Database from SSMS](./media/saas-tenancy-tenant-analytics-adf/ssmsSignIn.JPG) - -In the Object Explorer: - -1. Expand the *tenants1-dpt-<user>* server. -1. Expand the Databases node, and see the list of tenant databases. -1. Expand the *catalog-dpt-<user>* server. -1. Verify that you see the analytics store containing the following objects: - 1. Tables **raw_Tickets**, **raw_Customers**, **raw_Events** and **raw_Venues** hold raw extracted data from the tenant databases. - 1. The star-schema tables are **fact_Tickets**, **dim_Customers**, **dim_Venues**, **dim_Events**, and **dim_Dates**. - 1. The stored procedure, **sp_transformExtractedData** is used to transform the data and load it into the star-schema tables. - -![Screenshot shows Object Explorer with Tables expanded to show various database objects.](./media/saas-tenancy-tenant-analytics-adf/DWtables.JPG) - -#### Blob storage - -1. In the [Azure portal](https://portal.azure.com), navigate to the resource group that you used for deploying the application. Verify that a storage account called **wingtipstaging\** has been added. - - ![DWtables](./media/saas-tenancy-tenant-analytics-adf/adf-staging-storage.PNG) - -1. 
Click the **wingtipstaging\<user\>** storage account to explore the objects present.
1. Click the **Blobs** tile.
1. Click the container **configfile**.
1. Verify that **configfile** contains a JSON file called **TableConfig.json**. This file contains the source and destination table names, column names, and tracker column name.

#### Azure Data Factory (ADF)

In the [Azure portal](https://portal.azure.com) in the resource group, verify that an Azure Data Factory called _dbtodwload-\<user\>_ has been added.

   ![adf_portal](./media/saas-tenancy-tenant-analytics-adf/adf-data-factory-portal.png)

This section explores the data factory created.
Follow the steps below to launch the data factory:

1. In the portal, click the data factory called **dbtodwload-\<user\>**.
2. Click the **Author & Monitor** tile to launch the Data Factory designer in a separate tab.

## Extract, Load, and Transform data

Azure Data Factory is used for orchestrating extraction, loading, and transformation of data. In this tutorial, you extract data from four different SQL views from each of the tenant databases: **rawTickets**, **rawCustomers**, **rawEvents**, and **rawVenues**. These views include venue ID, so you can discriminate data from each venue in the data warehouse. The data is loaded into corresponding staging tables in the data warehouse: **raw_Tickets**, **raw_Customers**, **raw_Events**, and **raw_Venues**. A stored procedure then transforms the raw data and populates the star-schema tables: **fact_Tickets**, **dim_Customers**, **dim_Venues**, **dim_Events**, and **dim_Dates**.

In the previous section, you deployed and initialized the necessary Azure resources, including the data factory. The deployed data factory includes the pipelines, datasets, linked services, etc., required to extract, load, and transform the tenant data. Let's explore these objects further and then trigger the pipeline to move data from tenant databases to the data warehouse.
### Data factory pipeline overview

This section explores the objects created in the data factory. The following figure describes the overall workflow of the ADF pipeline used in this tutorial. If you prefer to explore the pipeline later and see the results first, skip to the next section **Trigger the pipeline run**.

![adf_overview](./media/saas-tenancy-tenant-analytics-adf/adf-data-factory.PNG)

In the overview page, switch to the **Author** tab on the left panel and observe that there are three [pipelines](../../data-factory/concepts-pipelines-activities.md) and three [datasets](../../data-factory/concepts-datasets-linked-services.md) created.
![adf_author](./media/saas-tenancy-tenant-analytics-adf/adf_author_tab.JPG)

The three nested pipelines are: SQLDBToDW, DBCopy, and TableCopy.

**Pipeline 1 - SQLDBToDW** looks up the names of the tenant databases stored in the Catalog database (table name: [__ShardManagement].[ShardsGlobal]) and, for each tenant database, executes the **DBCopy** pipeline. Upon completion, the provided **sp_TransformExtractedData** stored procedure is executed. This stored procedure transforms the loaded data in the staging tables and populates the star-schema tables.

**Pipeline 2 - DBCopy** looks up the names of the source tables and columns from a configuration file stored in blob storage. The **TableCopy** pipeline is then run for each of the four tables: TicketFacts, CustomerFacts, EventFacts, and VenueFacts. The **[Foreach](../../data-factory/control-flow-for-each-activity.md)** activity executes in parallel for all 20 databases. ADF allows a maximum of 20 loop iterations to be run in parallel. Consider creating multiple pipelines for more databases.

**Pipeline 3 - TableCopy** uses row version numbers in SQL Database (_rowversion_) to identify rows that have been changed or updated. This activity looks up the start and the end row version for extracting rows from the source tables.
The **CopyTracker** table stored in each tenant database tracks the last row extracted from each source table in each run. New or changed rows are copied to the corresponding staging tables in the data warehouse: **raw_Tickets**, **raw_Customers**, **raw_Venues**, and **raw_Events**. Finally the last row version is saved in the **CopyTracker** table to be used as the initial row version for the next extraction. - -There are also three parameterized linked services that link the data factory to the source SQL Databases, the target dedicated SQL pool, and the intermediate Blob storage. In the **Author** tab, click on **Connections** to explore the linked services, as shown in the following image: - -![adf_linkedservices](./media/saas-tenancy-tenant-analytics-adf/linkedservices.JPG) - -Corresponding to the three linked services, there are three datasets that refer to the data you use in the pipeline activities as inputs or outputs. Explore each of the datasets to observe connections and parameters used. _AzureBlob_ points to the configuration file containing source and target tables and columns, as well as the tracker column in each source. - -### Data warehouse pattern overview - -Azure Synapse is used as the analytics store to perform aggregation on the tenant data. In this sample, PolyBase is used to load data into the data warehouse. Raw data is loaded into staging tables that have an identity column to keep track of rows that have been transformed into the star-schema tables. The following image shows the loading pattern: -![Diagram shows the loading pattern of database tables.](./media/saas-tenancy-tenant-analytics-adf/loadingpattern.JPG) - -Slowly Changing Dimension (SCD) type 1 dimension tables are used in this example. Each dimension has a surrogate key defined using an identity column. As a best practice, the date dimension table is pre-populated to save time. For the other dimension tables, a CREATE TABLE AS SELECT... 
(CTAS) statement is used to create a temporary table containing the existing modified and non-modified rows, along with the surrogate keys. This is done with IDENTITY_INSERT=ON. New rows are then inserted into the table with IDENTITY_INSERT=OFF. For easy roll-back, the existing dimension table is renamed and the temporary table is renamed to become the new dimension table. Before each run, the old dimension table is deleted. - -Dimension tables are loaded before the fact table. This sequencing ensures that for each arriving fact, all referenced dimensions already exist. As the facts are loaded, the business key for each corresponding dimension is matched and the corresponding surrogate keys are added to each fact. - -The final step of the transform deletes the staging data ready for the next execution of the pipeline. - -### Trigger the pipeline run - -Follow the steps below to run the complete extract, load, and transform pipeline for all the tenant databases: - -1. In the **Author** tab of the ADF user interface, select **SQLDBToDW** pipeline from the left pane. -1. Click **Trigger** and from the pulled down menu click **Trigger Now**. This action runs the pipeline immediately. In a production scenario, you would define a timetable for running the pipeline to refresh the data on a schedule. - ![Screenshot shows Factory Resources for a pipeline named S Q L D B To D W with the Trigger option expanded and Trigger Now selected.](./media/saas-tenancy-tenant-analytics-adf/adf_trigger.JPG) -1. On **Pipeline Run** page, click **Finish**. - -### Monitor the pipeline run - -1. In the ADF user interface, switch to the **Monitor** tab from the menu on the left. -1. Click **Refresh** until SQLDBToDW pipeline's status is **Succeeded**. - ![Screenshot shows the S Q L D B To D W pipeline with a status of Succeeded.](./media/saas-tenancy-tenant-analytics-adf/adf_monitoring.JPG) -1. 
Connect to the data warehouse with SSMS and query the star-schema tables to verify that data was loaded in these tables. - -Once the pipeline has completed, the fact table holds ticket sales data for all venues and the dimension tables are populated with the corresponding venues, events, and customers. - -## Data Exploration - -### Visualize tenant data - -The data in the star-schema provides all the ticket sales data needed for your analysis. Visualizing data graphically makes it easier to see trends in large data sets. In this section, you use **Power BI** to manipulate and visualize the tenant data in the data warehouse. - -Use the following steps to connect to Power BI, and to import the views you created earlier: - -1. Launch Power BI desktop. -2. From the Home ribbon, select **Get Data**, and select **More…** from the menu. -3. In the **Get Data** window, select **Azure SQL Database**. -4. In the database login window, enter your server name (**catalog-dpt-<User>.database.windows.net**). Select **Import** for **Data Connectivity Mode**, and then click **OK**. - - ![sign-in-to-power-bi](./media/saas-tenancy-tenant-analytics-adf/powerBISignIn.PNG) - -5. Select **Database** in the left pane, then enter user name = *developer*, and enter password = *P\@ssword1*. Click **Connect**. - - ![database-sign-in](./media/saas-tenancy-tenant-analytics-adf/databaseSignIn.PNG) - -6. In the **Navigator** pane, under the analytics database, select the star-schema tables: **fact_Tickets**, **dim_Events**, **dim_Venues**, **dim_Customers** and **dim_Dates**. Then select **Load**. - -Congratulations! You successfully loaded the data into Power BI. Now explore interesting visualizations to gain insights into your tenants. Let's walk through how analytics can provide some data-driven recommendations to the Wingtip Tickets business team. The recommendations can help to optimize the business model and customer experience. 
- -Start by analyzing ticket sales data to see the variation in usage across the venues. Select the options shown in Power BI to plot a bar chart of the total number of tickets sold by each venue. (Due to random variation in the ticket generator, your results may be different.) - -![TotalTicketsByVenues](./media/saas-tenancy-tenant-analytics-adf/TotalTicketsByVenues-DW.PNG) - -The preceding plot confirms that the number of tickets sold by each venue varies. Venues that sell more tickets are using your service more heavily than venues that sell fewer tickets. There may be an opportunity here to tailor resource allocation according to different tenant needs. - -You can further analyze the data to see how ticket sales vary over time. Select the options shown in the following image in Power BI to plot the total number of tickets sold each day for a period of 60 days. - -![SaleVersusDate](./media/saas-tenancy-tenant-analytics-adf/SaleVersusDate-DW.PNG) - -The preceding chart shows that ticket sales spike for some venues. These spikes reinforce the idea that some venues might be consuming system resources disproportionately. So far there is no obvious pattern in when the spikes occur. - -Next let's investigate the significance of these peak sale days. When do these peaks occur after tickets go on sale? To plot tickets sold per day, select the options shown in the following image in Power BI. - -![SaleDayDistribution](./media/saas-tenancy-tenant-analytics-adf/SaleDistributionPerDay-DW.PNG) - -This plot shows that some venues sell large numbers of tickets on the first day of sale. As soon as tickets go on sale at these venues, there seems to be a mad rush. This burst of activity by a few venues might impact the service for other tenants. - -You can drill into the data again to see if this mad rush is true for all events hosted by these venues. 
In previous plots, you saw that Contoso Concert Hall sells many tickets, and that Contoso also has a spike in ticket sales on certain days. Play around with Power BI options to plot cumulative ticket sales for Contoso Concert Hall, focusing on sale trends for each of its events. Do all events follow the same sale pattern? Try to produce a plot like the one below. - -![ContosoSales](./media/saas-tenancy-tenant-analytics-adf/EventSaleTrends.PNG) - -This plot of cumulative ticket sales over time for Contoso Concert Hall for each event shows that the mad rush does not happen for all events. Play around with the filter options to explore sale trends for other venues. - -The insights into ticket selling patterns might lead Wingtip Tickets to optimize their business model. Instead of charging all tenants equally, perhaps Wingtip should introduce service tiers with different compute sizes. Larger venues that need to sell more tickets per day could be offered a higher tier with a higher service level agreement (SLA). Those venues could have their databases placed in pool with higher per-database resource limits. Each service tier could have an hourly sales allocation, with additional fees charged for exceeding the allocation. Larger venues that have periodic bursts of sales would benefit from the higher tiers, and Wingtip Tickets can monetize their service more efficiently. - -Meanwhile, some Wingtip Tickets customers complain that they struggle to sell enough tickets to justify the service cost. Perhaps in these insights there is an opportunity to boost ticket sales for underperforming venues. Higher sales would increase the perceived value of the service. Right click fact_Tickets and select **New measure**. 
Enter the following expression for the new measure called **AverageTicketsSold**: - -```DAX -AverageTicketsSold = DIVIDE(DIVIDE(COUNTROWS(fact_Tickets),DISTINCT(dim_Venues[VenueCapacity]))*100, COUNTROWS(dim_Events)) -``` - -Select the following visualization options to plot the percentage tickets sold by each venue to determine their relative success. - -![AvgTicketsByVenues](./media/saas-tenancy-tenant-analytics-adf/AvgTicketsByVenues-DW.PNG) - -The plot above shows that even though most venues sell more than 80% of their tickets, some are struggling to fill more than half their seats. Play around with the Values Well to select maximum or minimum percentage of tickets sold for each venue. - -## Embedding analytics in your apps - -This tutorial has focused on cross-tenant analytics used to improve the software vendor's understanding of their tenants. Analytics can also provide insights to the _tenants_, to help them manage their business more effectively themselves. - -In the Wingtip Tickets example, you earlier discovered that ticket sales tend to follow predictable patterns. This insight might be used to help underperforming venues boost ticket sales. Perhaps there is an opportunity to employ machine learning techniques to predict ticket sales for events. The effects of price changes could also be modeled, to allow the impact of offering discounts to be predicted. Power BI Embedded could be integrated into an event management application to visualize predicted sales, including the impact of discounts on total seats sold and revenue on low-selling events. With Power BI Embedded, you can even integrate actually applying the discount to the ticket prices, right in the visualization experience. - -## Next steps - -In this tutorial, you learned how to: - -> [!div class="checklist"] -> -> - Create the tenant analytics store for loading. -> - Use Azure Data Factory (ADF) to extract data from each tenant database into the analytics data warehouse. 
-> - Optimize the extracted data (reorganize into a star-schema). -> - Query the analytics data warehouse. -> - Use Power BI for data visualization to highlight trends in tenant data and make recommendation for improvements. - -Congratulations! - -## Additional resources - -- Additional [tutorials that build upon the Wingtip SaaS application](./saas-dbpertenant-wingtip-app-overview.md#sql-database-wingtip-saas-tutorials). \ No newline at end of file diff --git a/articles/azure-sql/database/saas-tenancy-tenant-analytics.md b/articles/azure-sql/database/saas-tenancy-tenant-analytics.md deleted file mode 100644 index 98de51969231d..0000000000000 --- a/articles/azure-sql/database/saas-tenancy-tenant-analytics.md +++ /dev/null @@ -1,239 +0,0 @@ ---- -title: Cross-tenant analytics using extracted data -description: "Cross-tenant analytics queries using data extracted from multiple Azure SQL databases in a single tenant app." -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 12/18/2018 ---- -# Cross-tenant analytics using extracted data - single-tenant app -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this tutorial, you walk through a complete analytics scenario for a single tenant implementation. The scenario demonstrates how analytics can enable businesses to make smart decisions. Using data extracted from each tenant database, you use analytics to gain insights into tenant behavior, including their use of the sample Wingtip Tickets SaaS application. This scenario involves three steps: - -1. **Extract** data from each tenant database and **Load** into an analytics store. -2. **Transform the extracted data** for analytics processing. -3. Use **business intelligence** tools to draw out useful insights, which can guide decision making. 
- -In this tutorial you learn how to: - -> [!div class="checklist"] -> - Create the tenant analytics store to extract the data into. -> - Use elastic jobs to extract data from each tenant database into the analytics store. -> - Optimize the extracted data (reorganize into a star-schema). -> - Query the analytics database. -> - Use Power BI for data visualization to highlight trends in tenant data and make recommendation for improvements. - -![Diagram shows an overview of the architecture used for this article.](./media/saas-tenancy-tenant-analytics/architectureOverview.png) - -## Offline tenant analytics pattern - -Multi-tenant SaaS applications typically have a vast amount of tenant data stored in the cloud. This data provides a rich source of insights about the operation and usage of your application, and the behavior of your tenants. These insights can guide feature development, usability improvements, and other investments in the app and platform. - -Accessing data for all tenants is simple when all the data is in just one multi-tenant database. But the access is more complex when distributed at scale across potentially thousands of databases. One way to tame the complexity and to minimize the impact of analytics queries on transactional data is to extract data into a purpose designed analytics database or data warehouse. - -This tutorial presents a complete analytics scenario for Wingtip Tickets SaaS application. First, *Elastic Jobs* is used to extract data from each tenant database and load it into staging tables in an analytics store. The analytics store could either be an SQL Database or a dedicated SQL pool. For large-scale data extraction, [Azure Data Factory](../../data-factory/introduction.md) is recommended. - -Next, the aggregated data is transformed into a set of [star-schema](https://www.wikipedia.org/wiki/Star_schema) tables. The tables consist of a central fact table plus related dimension tables. 
For Wingtip Tickets: - -- The central fact table in the star-schema contains ticket data. -- The dimension tables describe venues, events, customers, and purchase dates. - -Together the central fact and dimension tables enable efficient analytical processing. The star-schema used in this tutorial is shown in the following image: - -![architectureOverView](./media/saas-tenancy-tenant-analytics/StarSchema.png) - -Finally, the analytics store is queried using **Power BI** to highlight insights into tenant behavior and their use of the Wingtip Tickets application. You run queries that: - -- Show the relative popularity of each venue -- Highlight patterns in ticket sales for different events -- Show the relative success of different venues in selling out their event - -Understanding how each tenant is using the service is used to explore options for monetizing the service and improving the service to help tenants be more successful. This tutorial provides basic examples of the kinds of insights that can be gleaned from tenant data. - -## Setup - -### Prerequisites - -To complete this tutorial, make sure the following prerequisites are met: - -- The Wingtip Tickets SaaS Database Per Tenant application is deployed. To deploy in less than five minutes, see [Deploy and explore the Wingtip SaaS application](./saas-dbpertenant-get-started-deploy.md) -- The Wingtip Tickets SaaS Database Per Tenant scripts and application [source code](https://github.com/Microsoft/WingtipTicketsSaaS-DbPerTenant/) are downloaded from GitHub. See download instructions. Be sure to *unblock the zip file* before extracting its contents. Check out the [general guidance](saas-tenancy-wingtip-app-guidance-tips.md) for steps to download and unblock the Wingtip Tickets SaaS scripts. -- Power BI Desktop is installed. 
[Download Power BI Desktop](https://powerbi.microsoft.com/downloads/) -- The batch of additional tenants has been provisioned, see the [**Provision tenants tutorial**](./saas-dbpertenant-provision-and-catalog.md). -- A job account and job account database have been created. See the appropriate steps in the [**Schema management tutorial**](./saas-tenancy-schema-management.md#create-a-job-agent-database-and-new-job-agent). - -### Create data for the demo - -In this tutorial, analysis is performed on ticket sales data. In the current step, you generate ticket data for all the tenants. Later this data is extracted for analysis. *Ensure you have provisioned the batch of tenants as described earlier, so that you have a meaningful amount of data*. A sufficiently large amount of data can expose a range of different ticket purchasing patterns. - -1. In PowerShell ISE, open *…\Learning Modules\Operational Analytics\Tenant Analytics\Demo-TenantAnalytics.ps1*, and set the following value: - - **$DemoScenario** = **1** Purchase tickets for events at all venues -2. Press **F5** to run the script and create ticket purchasing history for every event in each venue. The script runs for several minutes to generate tens of thousands of tickets. - -### Deploy the analytics store -Often there are numerous transactional databases that together hold all tenant data. You must aggregate the tenant data from the many transactional databases into one analytics store. The aggregation enables efficient query of the data. In this tutorial, an Azure SQL Database is used to store the aggregated data. - -In the following steps, you deploy the analytics store, which is called **tenantanalytics**. You also deploy predefined tables that are populated later in the tutorial: -1. In PowerShell ISE, open *…\Learning Modules\Operational Analytics\Tenant Analytics\Demo-TenantAnalytics.ps1* -2. 
Set the $DemoScenario variable in the script to match your choice of analytics store: - - To use SQL Database without column store, set **$DemoScenario** = **2** - - To use SQL Database with column store, set **$DemoScenario** = **3** -3. Press **F5** to run the demo script (that calls the *Deploy-TenantAnalytics\.ps1* script) which creates the tenant analytics store. - -Now that you have deployed the application and filled it with interesting tenant data, use [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms) to connect **tenants1-dpt-<User>** and **catalog-dpt-<User>** servers using Login = *developer*, Password = *P\@ssword1*. See the [introductory tutorial](./saas-dbpertenant-wingtip-app-overview.md) for more guidance. - -![Screenshot that shows the information needed to connect to SQL Server.](./media/saas-tenancy-tenant-analytics/ssmsSignIn.png) - -In the Object Explorer, perform the following steps: - -1. Expand the *tenants1-dpt-<User>* server. -2. Expand the Databases node, and see the list of tenant databases. -3. Expand the *catalog-dpt-<User>* server. -4. Verify that you see the analytics store and the jobaccount database. - -See the following database items in the SSMS Object Explorer by expanding the analytics store node: - -- Tables **TicketsRawData** and **EventsRawData** hold raw extracted data from the tenant databases. -- The star-schema tables are **fact_Tickets**, **dim_Customers**, **dim_Venues**, **dim_Events**, and **dim_Dates**. -- The stored procedure is used to populate the star-schema tables from the raw data tables. - -![Screenshot of the database items shown in the SSMS Object Explorer.](./media/saas-tenancy-tenant-analytics/tenantAnalytics.png) - -## Data extraction - -### Create target groups - -Before proceeding, ensure you have deployed the job account and jobaccount database. 
In the next set of steps, Elastic Jobs is used to extract data from each tenant database, and to store the data in the analytics store. Then the second job shreds the data and stores it into tables in the star-schema. These two jobs run against two different target groups, namely **TenantGroup** and **AnalyticsGroup**. The extract job runs against the TenantGroup, which contains all the tenant databases. The shredding job runs against the AnalyticsGroup, which contains just the analytics store. Create the target groups by using the following steps:

1. In SSMS, connect to the **jobaccount** database in catalog-dpt-<User>.
2. In SSMS, open *…\Learning Modules\Operational Analytics\Tenant Analytics\ TargetGroups.sql*
3. Modify the @User variable at the top of the script, replacing `<User>` with the user value used when you deployed the Wingtip SaaS app.
4. Press **F5** to run the script that creates the two target groups.

### Extract raw data from all tenants

Extensive data modifications might occur more frequently for *ticket and customer* data than for *event and venue* data. Therefore, consider extracting ticket and customer data separately and more frequently than you extract event and venue data. In this section, you define and schedule two separate jobs:

- Extract ticket and customer data.
- Extract event and venue data.

Each job extracts its data, and posts it into the analytics store. There a separate job shreds the extracted data into the analytics star-schema.

1. In SSMS, connect to the **jobaccount** database in catalog-dpt-<User> server.
2. In SSMS, open *...\Learning Modules\Operational Analytics\Tenant Analytics\ExtractTickets.sql*.
3. Modify @User at the top of the script, and replace `<User>` with the user name used when you deployed the Wingtip SaaS app.
4. Press F5 to run the script that creates and runs the job that extracts tickets and customers data from each tenant database. The job saves the data into the analytics store.
5.
Query the TicketsRawData table in the tenantanalytics database, to ensure that the table is populated with tickets information from all tenants. - -![Screenshot shows the ExtractTickets database with the TicketsRawData d b o selected in Object Explorer.](./media/saas-tenancy-tenant-analytics/ticketExtracts.png) - -Repeat the preceding steps, except this time replace **\ExtractTickets.sql** with **\ExtractVenuesEvents.sql** in step 2. - -Successfully running the job populates the EventsRawData table in the analytics store with new events and venues information from all tenants. - -## Data reorganization - -### Shred extracted data to populate star-schema tables - -The next step is to shred the extracted raw data into a set of tables that are optimized for analytics queries. A star-schema is used. A central fact table holds individual ticket sales records. Other tables are populated with related data about venues, events, and customers. And there are time dimension tables. - -In this section of the tutorial, you define and run a job that merges the extracted raw data with the data in the star-schema tables. After the merge job is finished, the raw data is deleted, leaving the tables ready to be populated by the next tenant data extract job. - -1. In SSMS, connect to the **jobaccount** database in catalog-dpt-<User>. -2. In SSMS, open *…\Learning Modules\Operational Analytics\Tenant Analytics\ShredRawExtractedData.sql*. -3. Press **F5** to run the script to define a job that calls the sp_ShredRawExtractedData stored procedure in the analytics store. -4. Allow enough time for the job to run successfully. - - Check the **Lifecycle** column of jobs.jobs_execution table for the status of job. Ensure that the job **Succeeded** before proceeding. 
A successful run displays data similar to the following chart: - -![shredding](./media/saas-tenancy-tenant-analytics/shreddingJob.PNG) - -## Data exploration - -### Visualize tenant data - -The data in the star-schema table provides all the ticket sales data needed for your analysis. To make it easier to see trends in large data sets, you need to visualize it graphically. In this section, you learn how to use **Power BI** to manipulate and visualize the tenant data you have extracted and organized. - -Use the following steps to connect to Power BI, and to import the views you created earlier: - -1. Launch Power BI desktop. -2. From the Home ribbon, select **Get Data**, and select **More…** from the menu. -3. In the **Get Data** window, select Azure SQL Database. -4. In the database login window, enter your server name (catalog-dpt-<User>.database.windows.net). Select **Import** for **Data Connectivity Mode**, and then click OK. - - ![signinpowerbi](./media/saas-tenancy-tenant-analytics/powerBISignIn.PNG) - -5. Select **Database** in the left pane, then enter user name = *developer*, and enter password = *P\@ssword1*. Click **Connect**. - - ![Screenshot shows the SQL Server database dialog where you can enter a User name and Password.](./media/saas-tenancy-tenant-analytics/databaseSignIn.PNG) - -6. In the **Navigator** pane, under the analytics database, select the star-schema tables: fact_Tickets, dim_Events, dim_Venues, dim_Customers and dim_Dates. Then select **Load**. - -Congratulations! You have successfully loaded the data into Power BI. Now you can start exploring interesting visualizations to help gain insights into your tenants. Next you walk through how analytics can enable you to provide data-driven recommendations to the Wingtip Tickets business team. The recommendations can help to optimize the business model and customer experience. - -You start by analyzing ticket sales data to see the variation in usage across the venues. 
Select the following options in Power BI to plot a bar chart of the total number of tickets sold by each venue. Due to random variation in the ticket generator, your results may be different. - -![Screenshot shows a Power B I visualization and controls for the data visualization on the right side.](./media/saas-tenancy-tenant-analytics/TotalTicketsByVenues.PNG) - -The preceding plot confirms that the number of tickets sold by each venue varies. Venues that sell more tickets are using your service more heavily than venues that sell fewer tickets. There may be an opportunity here to tailor resource allocation according to different tenant needs. - -You can further analyze the data to see how ticket sales vary over time. Select the following options in Power BI to plot the total number of tickets sold each day for a period of 60 days. - -![Screenshot shows Power B I visualization titled Ticket Sale Distribution versus Sale Day.](./media/saas-tenancy-tenant-analytics/SaleVersusDate.PNG) - -The preceding chart displays that ticket sales spike for some venues. These spikes reinforce the idea that some venues might be consuming system resources disproportionately. So far there is no obvious pattern in when the spikes occur. - -Next you want to further investigate the significance of these peak sale days. When do these peaks occur after tickets go on sale? To plot tickets sold per day, select the following options in Power BI. - -![SaleDayDistribution](./media/saas-tenancy-tenant-analytics/SaleDistributionPerDay.PNG) - -The preceding plot shows that some venues sell a lot of tickets on the first day of sale. As soon as tickets go on sale at these venues, there seems to be a mad rush. This burst of activity by a few venues might impact the service for other tenants. - -You can drill into the data again to see if this mad rush is true for all events hosted by these venues. 
In previous plots, you observed that Contoso Concert Hall sells a lot of tickets, and that Contoso also has a spike in ticket sales on certain days. Play around with Power BI options to plot cumulative ticket sales for Contoso Concert Hall, focusing on sale trends for each of its events. Do all events follow the same sale pattern? - -![ContosoSales](./media/saas-tenancy-tenant-analytics/EventSaleTrends.PNG) - -The preceding plot for Contoso Concert Hall shows that the mad rush does not happen for all events. Play around with the filter options to see sale trends for other venues. - -The insights into ticket selling patterns might lead Wingtip Tickets to optimize their business model. Instead of charging all tenants equally, perhaps Wingtip should introduce service tiers with different compute sizes. Larger venues that need to sell more tickets per day could be offered a higher tier with a higher service level agreement (SLA). Those venues could have their databases placed in a pool with higher per-database resource limits. Each service tier could have an hourly sales allocation, with additional fees charged for exceeding the allocation. Larger venues that have periodic bursts of sales would benefit from the higher tiers, and Wingtip Tickets can monetize their service more efficiently. - -Meanwhile, some Wingtip Tickets customers complain that they struggle to sell enough tickets to justify the service cost. Perhaps in these insights there is an opportunity to boost ticket sales for underperforming venues. Higher sales would increase the perceived value of the service. Right-click fact_Tickets and select **New measure**. Enter the following expression for the new measure called **AverageTicketsSold**: - -``` -AverageTicketsSold = AVERAGEX( SUMMARIZE( TableName, TableName[Venue Name] ), CALCULATE( SUM(TableName[Tickets Sold] ) ) ) -``` - -Select the following visualization options to plot the percentage tickets sold by each venue to determine their relative success.
- -![Screenshot shows Power B I visualization titled Average Tickets Sold By Each Venue.](./media/saas-tenancy-tenant-analytics/AvgTicketsByVenues.PNG) - -The preceding plot shows that even though most venues sell more than 80% of their tickets, some are struggling to fill more than half the seats. Play around with the Values Well to select maximum or minimum percentage of tickets sold for each venue. - -Earlier you deepened your analysis to discover that ticket sales tend to follow predictable patterns. This discovery might let Wingtip Tickets help underperforming venues boost ticket sales by recommending dynamic pricing. This discovery could reveal an opportunity to employ machine learning techniques to predict ticket sales for each event. Predictions could also be made for the impact on revenue of offering discounts on ticket sales. Power BI Embedded could be integrated into an event management application. The integration could help visualize predicted sales and the effect of different discounts. The application could help devise an optimum discount to be applied directly from the analytics display. - -You have observed trends in tenant data from the WingTip application. You can contemplate other ways the app can inform business decisions for SaaS application vendors. Vendors can better cater to the needs of their tenants. Hopefully this tutorial has equipped you with tools necessary to perform analytics on tenant data to empower your businesses to make data-driven decisions. - -## Next steps - -In this tutorial, you learned how to: - -> [!div class="checklist"] -> - Deploy a tenant analytics database with pre-defined star schema tables -> - Use elastic jobs to extract data from all the tenant databases -> - Merge the extracted data into tables in a star-schema designed for analytics -> - Query an analytics database -> - Use Power BI for data visualization to observe trends in tenant data - -Congratulations!
- -## Additional resources - -- Additional [tutorials that build upon the Wingtip SaaS application](./saas-dbpertenant-wingtip-app-overview.md#sql-database-wingtip-saas-tutorials). -- [Elastic Jobs](./elastic-jobs-overview.md). -- [Cross-tenant analytics using extracted data - multi-tenant app](./saas-multitenantdb-tenant-analytics.md) diff --git a/articles/azure-sql/database/saas-tenancy-video-index-wingtip-brk3120-20171011.md b/articles/azure-sql/database/saas-tenancy-video-index-wingtip-brk3120-20171011.md deleted file mode 100644 index dde5b2aac2af5..0000000000000 --- a/articles/azure-sql/database/saas-tenancy-video-index-wingtip-brk3120-20171011.md +++ /dev/null @@ -1,446 +0,0 @@ ---- -title: SaaS SQL app video -description: "This article indexes various time points in our 81 minutes video about SaaS DB tenancy app design, from the Ignite conference held October 11, 2017. You can skip ahead to the part that interests you. At least 3 patterns are described. Azure features that simplify development and management are described." -services: sql-database -ms.service: sql-database -ms.subservice: scenario -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.custom: sqldbrb=1 -ms.date: 12/18/2018 -ms.topic: conceptual ---- -# Video indexed and annotated for multi-tenant SaaS app using Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article is an annotated index into the time locations of an 81 minute video about SaaS tenancy models or patterns. This article enables you to skip backward or forward in the video to which portion interests you. The video explains the major design options for a multi-tenant database application on Azure SQL Database. The video includes demos, walkthroughs of management code, and at times more detail informed by experience than might be in our written documentation. 
- -The video amplifies information in our written documentation, found at: -- *Conceptual:* [Multi-tenant SaaS database tenancy patterns][saas-concept-design-patterns-563e] -- *Tutorials:* [The Wingtip Tickets SaaS application][saas-how-welcome-wingtip-app-679t] - -The video and the articles describe the many phases of creating a multi-tenant application on Azure SQL Database in the cloud. Special features of Azure SQL Database make it easier to develop and implement multi-tenant apps that are both easier to manage and reliably performant. - -We routinely update our written documentation. The video is not edited or updated, so eventually more of its detail may become outdated. - - - -## Sequence of 38 time-indexed screenshots - -This section indexes the time location for 38 discussions throughout the 81 minute video. Each time index is annotated with a screenshot from the video, and sometimes with additional information. - -Each time index is in the format of *h:mm:ss*. For instance, the second indexed time location, labeled **Session objectives**, starts at the approximate time location of **0:03:11**. - - -### Compact links to video indexed time locations - -The following titles are links to their corresponding annotated sections later in this article: - -- [1. **(Start)** Welcome slide, 0:00:03](#anchor-image-wtip-min00001) -- [2. Session objectives, 0:03:11](#anchor-image-wtip-min00311) -- [3. Agenda, 0:04:17](#anchor-image-wtip-min00417) -- [4. Multi-tenant web app, 0:05:05](#anchor-image-wtip-min00505) -- [5. App web form in action, 0:05:55](#anchor-image-wtip-min00555) -- [6. Per-tenant cost (scale, isolation, recovery), 0:09:31](#anchor-image-wtip-min00931) -- [7. Database models for multi-tenant: pros and cons, 0:11:59](#anchor-image-wtip-min01159) -- [8. Hybrid model blends benefits of MT/ST, 0:13:01](#anchor-image-wtip-min01301) -- [9. Single-tenant vs multi-tenant: pros and cons, 0:16:44](#anchor-image-wtip-min01644) -- [10. 
Pools are cost-effective for unpredictable workloads, 0:19:36](#anchor-image-wtip-min01936) -- [11. Demo of database-per-tenant and hybrid ST/MT, 0:20:08](#anchor-image-wtip-min02008) -- [12. Live app form showing Dojo, 0:20:29](#anchor-image-wtip-min02029) -- [13. MYOB and not a DBA in sight, 0:28:54](#anchor-image-wtip-min02854) -- [14. MYOB elastic pool usage example, 0:29:40](#anchor-image-wtip-min02940) -- [15. Learning from MYOB and other ISVs, 0:31:36](#anchor-image-wtip-min03136) -- [16. Patterns compose into E2E SaaS scenario, 0:43:15](#anchor-image-wtip-min04315) -- [17. Canonical hybrid multi-tenant SaaS app, 0:47:33](#anchor-image-wtip-min04733) -- [18. Wingtip SaaS sample app, 0:48:10](#anchor-image-wtip-min04810) -- [19. Scenarios and patterns explored in the tutorials, 0:49:10](#anchor-image-wtip-min04910) -- [20. Demo of tutorials and GitHub repository, 0:50:18](#anchor-image-wtip-min05018) -- [21. GitHub repo Microsoft/WingtipSaaS, 0:50:38](#anchor-image-wtip-min05038) -- [22. Exploring the patterns, 0:56:20](#anchor-image-wtip-min05620) -- [23. Provisioning tenants and onboarding, 0:57:44](#anchor-image-wtip-min05744) -- [24. Provisioning tenants and application connection, 0:58:58](#anchor-image-wtip-min05858) -- [25. Demo of management scripts provisioning a single tenant, 0:59:43](#anchor-image-wtip-min05943) -- [26. PowerShell to provision and catalog, 1:00:02](#anchor-image-wtip-min10002) -- [27. T-SQL SELECT * FROM TenantsExtended, 1:03:30](#anchor-image-wtip-min10330) -- [28. Managing unpredictable tenant workloads, 1:04:36](#anchor-image-wtip-min10436) -- [29. Elastic pool monitoring, 1:06:39](#anchor-image-wtip-min10639) -- [30. Load generation and performance monitoring, 1:09:42](#anchor-image-wtip-min10942) -- [31. Schema management at scale, 1:10:33](#anchor-image-wtip-min11033) -- [32. Distributed query across tenant databases, 1:12:21](#anchor-image-wtip-min11221) -- [33. 
Demo of ticket generation, 1:12:32](#anchor-image-wtip-min11232) -- [34. SSMS adhoc analytics, 1:12:46](#anchor-image-wtip-min11246) -- [35. Extract tenant data into Azure Synapse Analytics, 1:16:32](#anchor-image-wtip-min11632) -- [36. Graph of daily sale distribution, 1:16:48](#anchor-image-wtip-min11648) -- [37. Wrap up and call to action, 1:19:52](#anchor-image-wtip-min11952) -- [38. Resources for more information, 1:20:42](#anchor-image-wtip-min12042) - - -  - -### Annotated index time locations in the video - -Clicking any screenshot image takes you to the exact time location in the video. - - -  - -#### 1. *(Start)* Welcome slide, 0:00:01 - -*Learning from MYOB: Design patterns for SaaS applications on Azure SQL Database - BRK3120* - -[![Welcome slide][image-wtip-min00003-brk3120-whole-welcome]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=1) - -- Title: Learning from MYOB: Design patterns for SaaS applications on Azure SQL Database -- Bill.Gibson@microsoft.com -- Principal Program Manager, Azure SQL Database -- Microsoft Ignite session BRK3120, Orlando, FL USA, October/11 2017 - - -  - -#### 2. Session objectives, 0:01:53 -[![Session objectives][image-wtip-min00311-session]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=113) - -- Alternative models for multi-tenant apps, with pros and cons. -- SaaS patterns to reduce development, management, and resource costs. -- A sample app + scripts. -- PaaS features + SaaS patterns make SQL Database a highly scalable, cost-efficient data platform for multi-tenant SaaS. - - -  - -#### 3. Agenda, 0:04:09 -[![Agenda][image-wtip-min00417-agenda]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=249) - - -  - -#### 4. Multi-tenant web app, 0:05:00 -[![Wingtip SaaS app: Multi-tenant web app][image-wtip-min00505-web-app]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=300) - - -  - -#### 5. 
App web form in action, 0:05:39 -[![App web form in action][image-wtip-min00555-app-web-form]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=339) - - -  - -#### 6. Per-tenant cost (scale, isolation, recovery), 0:06:58 -[![Per-tenant cost, scale, isolation, recovery][image-wtip-min00931-per-tenant-cost]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=418) - - -  - -#### 7. Database models for multi-tenant: pros and cons, 0:09:52 -[![Database models for multi-tenant: pros and cons][image-wtip-min01159-db-models-pros-cons]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=592) - - -  - -#### 8. Hybrid model blends benefits of MT/ST, 0:12:29 -[![Hybrid model blends benefits of MT/ST][image-wtip-min01301-hybrid]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=749) - - -  - -#### 9. Single-tenant vs multi-tenant: pros and cons, 0:13:11 -[![Single-tenant vs multi-tenant: pros and cons][image-wtip-min01644-st-vs-mt]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=791) - - -  - -#### 10. Pools are cost-effective for unpredictable workloads, 0:17:49 -[![Pools are cost-effective for unpredictable workloads][image-wtip-min01936-pools-cost]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=1069) - - -  - -#### 11. Demo of database-per-tenant and hybrid ST/MT, 0:19:59 -[![Demo of database-per-tenant and hybrid ST/MT][image-wtip-min02008-demo-st-hybrid]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=1199) - - -  - -#### 12. Live app form showing Dojo, 0:20:10 -[![Live app form showing Dojo][image-wtip-min02029-live-app-form-dojo]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=1210) - -  - -#### 13. MYOB and not a DBA in sight, 0:25:06 -[![MYOB and not a DBA in sight][image-wtip-min02854-myob-no-dba]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=1506) - - -  - -#### 14. MYOB elastic pool usage example, 0:29:30 -[![MYOB elastic pool usage example][image-wtip-min02940-myob-elastic]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=1770) - - -  - -#### 15. 
Learning from MYOB and other ISVs, 0:31:25 -[![Learning from MYOB and other ISVs][image-wtip-min03136-learning-isvs]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=1885) - - -  - -#### 16. Patterns compose into E2E SaaS scenario, 0:31:42 -[![Patterns compose into E2E SaaS scenario][image-wtip-min04315-patterns-compose]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=1902) - - -  - -#### 17. Canonical hybrid multi-tenant SaaS app, 0:46:04 -[![Canonical hybrid multi-tenant SaaS app][image-wtip-min04733-canonical-hybrid]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=2764) - - -  - -#### 18. Wingtip SaaS sample app, 0:48:01 -[![Wingtip SaaS sample app][image-wtip-min04810-wingtip-saas-app]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=2881) - - -  - -#### 19. Scenarios and patterns explored in the tutorials, 0:49:00 -[![Scenarios and patterns explored in the tutorials][image-wtip-min04910-scenarios-tutorials]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=2940) - - -  - -#### 20. Demo of tutorials and GitHub repository, 0:50:12 -[![Demo tutorials and GitHub repo][image-wtip-min05018-demo-tutorials-github]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=3012) - - -  - -#### 21. GitHub repo Microsoft/WingtipSaaS, 0:50:32 -[![GitHub repo Microsoft/WingtipSaaS][image-wtip-min05038-github-wingtipsaas]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=3032) - - -  - -#### 22. Exploring the patterns, 0:56:15 -[![Exploring the patterns][image-wtip-min05620-exploring-patterns]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=3375) - - -  - -#### 23. Provisioning tenants and onboarding, 0:56:19 -[![Provisioning tenants and onboarding][image-wtip-min05744-provisioning-tenants-onboarding-1]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=3379) - - -  - -#### 24. 
Provisioning tenants and application connection, 0:57:52 -[![Provisioning tenants and application connection][image-wtip-min05858-provisioning-tenants-app-connection-2]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=3472) - - -  - -#### 25. Demo of management scripts provisioning a single tenant, 0:59:36 -[![Demo of management scripts provisioning a single tenant][image-wtip-min05943-demo-management-scripts-st]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=3576) - - -  - -#### 26. PowerShell to provision and catalog, 0:59:56 -[![PowerShell to provision and catalog][image-wtip-min10002-powershell-provision-catalog]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=3596) - - -  - -#### 27. T-SQL SELECT * FROM TenantsExtended, 1:03:25 -[![T-SQL SELECT * FROM TenantsExtended][image-wtip-min10330-sql-select-tenantsextended]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=3805) - - -  - -#### 28. Managing unpredictable tenant workloads, 1:03:34 -[![Managing unpredictable tenant workloads][image-wtip-min10436-managing-unpredictable-workloads]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=3814) - - -  - -#### 29. Elastic pool monitoring, 1:06:32 -[![Elastic pool monitoring][image-wtip-min10639-elastic-pool-monitoring]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=3992) - - -  - -#### 30. Load generation and performance monitoring, 1:09:37 -[![Load generation and performance monitoring][image-wtip-min10942-load-generation]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=4117) - - -  - -#### 31. Schema management at scale, 1:09:40 -[![Schema management at scale][image-wtip-min11033-schema-management-scale]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=34120) - - -  - -#### 32. Distributed query across tenant databases, 1:11:18 -[![Distributed query across tenant databases][image-wtip-min11221-distributed-query]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=4278) - - -  - -#### 33. 
Demo of ticket generation, 1:12:28 -[![Demo of ticket generation][image-wtip-min11232-demo-ticket-generation]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=4348) - - -  - -#### 34. SSMS adhoc analytics, 1:12:35 -[![SSMS adhoc analytics][image-wtip-min11246-ssms-adhoc-analytics]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=4355) - - -  - -#### 35. Extract tenant data into Azure Synapse Analytics, 1:15:46 -[![Extract tenant data into Azure Synapse Analytics][image-wtip-min11632-extract-tenant-data-sql-dw]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=4546) - - -  - -#### 36. Graph of daily sale distribution, 1:16:38 -[![Graph of daily sale distribution][image-wtip-min11648-graph-daily-sale-distribution]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=4598) - - -  - -#### 37. Wrap up and call to action, 1:17:43 -[![Wrap up and call to action][image-wtip-min11952-wrap-up-call-action]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=4663) - - -  - -#### 38. Resources for more information, 1:20:35 -[![Resources for more information][image-wtip-min12042-resources-more-info]](https://www.youtube.com/watch?v=jjNmcKBVjrc&t=4835) - -- [Blog post, May 22, 2017][resource-blog-saas-patterns-app-dev-sql-db-768h] - -- *Conceptual:* [Multi-tenant SaaS database tenancy patterns][saas-concept-design-patterns-563e] - -- *Tutorials:* [The Wingtip Tickets SaaS application][saas-how-welcome-wingtip-app-679t] - -- GitHub repositories for flavors of the Wingtip Tickets SaaS tenancy application: - - [GitHub repo for - Standalone application model][github-wingtip-standaloneapp]. - - [GitHub repo for - DB Per Tenant model][github-wingtip-dbpertenant]. - - [GitHub repo for - Multi-Tenant DB model][github-wingtip-multitenantdb]. 
- - - - - -## Next steps - -- [First tutorial article][saas-how-welcome-wingtip-app-679t] - - - - - - -[image-wtip-min00003-brk3120-whole-welcome]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00003-brk3120-welcome-myob-design-saas-app-sql-db.png "Welcome slide" - -[image-wtip-min00311-session]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00311-session-objectives-takeaway.png "Session objectives." - -[image-wtip-min00417-agenda]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00417-agenda-app-management-models-patterns.png "Agenda." - -[image-wtip-min00505-web-app]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00505-wingtip-saas-app-mt-web.png "Wingtip SaaS app: Multi-tenant web app" - -[image-wtip-min00555-app-web-form]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00555-app-form-contoso-concert-hall-night-opera.png "App web form in action" - -[image-wtip-min00931-per-tenant-cost]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min00931-saas-data-management-concerns.png "Per-tenant cost, scale, isolation, recovery" - -[image-wtip-min01159-db-models-pros-cons]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min01159-db-models-multi-tenant-saas-apps.png "Database models for multi-tenant: pros and cons" - -[image-wtip-min01301-hybrid]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min01301-hybrib-model-blends-benefits-mt-st.png "Hybrid model blends benefits of MT/ST" - -[image-wtip-min01644-st-vs-mt]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min01644-st-mt-pros-cons.png "Single-tenant vs multi-tenant: pros and cons" - -[image-wtip-min01936-pools-cost]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min01936-pools-cost-effective-unpredictable-workloads.png "Pools are cost-effective 
for unpredictable workloads" - -[image-wtip-min02008-demo-st-hybrid]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min02008-demo-st-hybrid.png "Demo of database-per-tenant and hybrid ST/MT" - -[image-wtip-min02029-live-app-form-dojo]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min02029-app-form-dogwwod-dojo.png "Live app form showing Dojo" - -[image-wtip-min02854-myob-no-dba]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min02854-myob-no-dba.png "MYOB and not a DBA in sight" - -[image-wtip-min02940-myob-elastic]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min02940-myob-elastic-pool-usage-example.png "MYOB elastic pool usage example" - -[image-wtip-min03136-learning-isvs]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min03136-myob-isv-saas-patterns-design-scale.png "Learning from MYOB and other ISVs" - -[image-wtip-min04315-patterns-compose]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min04315-patterns-compose-into-e2e-saas-scenario-st-mt.png "Patterns compose into E2E SaaS scenario" - -[image-wtip-min04733-canonical-hybrid]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min04733-canonical-hybrid-mt-saas-app.png "Canonical hybrid multi-tenant SaaS app" - -[image-wtip-min04810-wingtip-saas-app]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min04810-saas-sample-app-descr-of-modules-links.png "Wingtip SaaS sample app" - -[image-wtip-min04910-scenarios-tutorials]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min04910-scenarios-patterns-explored-tutorials.png "Scenarios and patterns explored in the tutorials" - -[image-wtip-min05018-demo-tutorials-github]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05018-demo-saas-tutorials-github-repo.png "Demo of tutorials and GitHub repo" - 
-[image-wtip-min05038-github-wingtipsaas]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05038-github-repo-wingtipsaas.png "GitHub repo Microsoft/WingtipSaaS" - -[image-wtip-min05620-exploring-patterns]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05620-exploring-patterns-tutorials.png "Exploring the patterns" - -[image-wtip-min05744-provisioning-tenants-onboarding-1]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05744-provisioning-tenants-connecting-run-time-1.png "Provisioning tenants and onboarding" - -[image-wtip-min05858-provisioning-tenants-app-connection-2]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05858-provisioning-tenants-connecting-run-time-2.png "Provisioning tenants and application connection" - -[image-wtip-min05943-demo-management-scripts-st]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min05943-demo-management-scripts-provisioning-st.png "Demo of management scripts provisioning a single tenant" - -[image-wtip-min10002-powershell-provision-catalog]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10002-powershell-code.png "PowerShell to provision and catalog" - -[image-wtip-min10330-sql-select-tenantsextended]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10330-ssms-tenantcatalog.png "T-SQL SELECT * FROM TenantsExtended" - -[image-wtip-min10436-managing-unpredictable-workloads]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10436-managing-unpredictable-tenant-workloads.png "Managing unpredictable tenant workloads" - -[image-wtip-min10639-elastic-pool-monitoring]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10639-elastic-pool-monitoring.png "Elastic pool monitoring" - -[image-wtip-min10942-load-generation]: 
media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min10942-schema-management-scale.png "Load generation and performance monitoring" - -[image-wtip-min11033-schema-management-scale]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11033-schema-manage-1000s-dbs-one.png "Schema management at scale" - -[image-wtip-min11221-distributed-query]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11221-distributed-query-all-tenants-asif-single-db.png "Distributed query across tenant databases" - -[image-wtip-min11232-demo-ticket-generation]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11232-demo-ticket-generation-distributed-query.png "Demo of ticket generation" - -[image-wtip-min11246-ssms-adhoc-analytics]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11246-tsql-adhoc-analystics-db-elastic-query.png "SSMS adhoc analytics" - -[image-wtip-min11632-extract-tenant-data-sql-dw]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11632-extract-tenant-data-analytics-db-dw.png "Extract tenant data into Azure Synapse Analytics" - -[image-wtip-min11648-graph-daily-sale-distribution]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11648-graph-daily-sale-contoso-concert-hall.png "Graph of daily sale distribution" - -[image-wtip-min11952-wrap-up-call-action]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min11952-wrap-call-action-saasfeedback.png "Wrap up and call to action" - -[image-wtip-min12042-resources-more-info]: media/saas-tenancy-video-index-wingtip-brk3120-20171011/wingtip-20171011-min12042-resources-blog-github-tutorials-get-started.png "Resources for more information" - - - - - - -[saas-concept-design-patterns-563e]: saas-tenancy-app-design-patterns.md - -[saas-how-welcome-wingtip-app-679t]: saas-tenancy-welcome-wingtip-tickets-app.md - - 
-[video-on-youtube-com-478y]: https://www.youtube.com/watch?v=jjNmcKBVjrc&t=1 - -[resource-blog-saas-patterns-app-dev-sql-db-768h]: https://azure.microsoft.com/blog/saas-patterns-accelerate-saas-application-development-on-sql-database/ - - -[github-wingtip-standaloneapp]: https://github.com/Microsoft/WingtipTicketsSaaS-StandaloneApp/ - -[github-wingtip-dbpertenant]: https://github.com/Microsoft/WingtipTicketsSaaS-DbPerTenant/ - -[github-wingtip-multitenantdb]: https://github.com/Microsoft/WingtipTicketsSaaS-MultiTenantDB/ - diff --git a/articles/azure-sql/database/saas-tenancy-welcome-wingtip-tickets-app.md b/articles/azure-sql/database/saas-tenancy-welcome-wingtip-tickets-app.md deleted file mode 100644 index cfcefa939909c..0000000000000 --- a/articles/azure-sql/database/saas-tenancy-welcome-wingtip-tickets-app.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Welcome to Wingtips app -description: "Learn about database tenancy models, and about the sample Wingtips SaaS application, for Azure SQL Database in the cloud environment." -keywords: "sql database tutorial" -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 01/25/2019 ---- -# The Wingtip Tickets SaaS application -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -The same *Wingtip Tickets* SaaS application is implemented in each of three samples. The app is a simple event listing and ticketing SaaS app targeting small venues - theaters, clubs, etc. Each venue is a tenant of the app, and has its own data: venue details, lists of events, customers, ticket orders, etc. The app, together with the management scripts and tutorials, showcases an end-to-end SaaS scenario. This includes provisioning tenants, monitoring and managing performance, schema management, and cross-tenant reporting and analytics. 
- -## Three SaaS application and tenancy patterns - -Three versions of the app are available; each explores a different database tenancy pattern on Azure SQL Database. The first uses a standalone application per tenant with its own database. The second uses a multi-tenant app with a database per tenant. The third sample uses a multi-tenant app with sharded multi-tenant databases. - -![Three tenancy patterns][image-three-tenancy-patterns] - - Each sample includes the application code, plus management scripts and tutorials that explore a range of design and management patterns. Each sample deploys in less than five minutes. All three can be deployed side-by-side so you can compare the differences in design and management. - -## Standalone application per tenant pattern - -The standalone app per tenant pattern uses a single tenant application with a database for each tenant. Each tenant’s app, including its database, is deployed into a separate Azure resource group. The resource group can be deployed in the service provider’s subscription or the tenant’s subscription and managed by the provider on the tenant’s behalf. The standalone app per tenant pattern provides the greatest tenant isolation, but is typically the most expensive as there's no opportunity to share resources between multiple tenants. This pattern is well suited to applications that might be more complex and which are deployed to smaller numbers of tenants. With standalone deployments, the app can be customized for each tenant more easily than in other patterns. - -Check out the [tutorials][docs-tutorials-for-wingtip-sa] and code on GitHub [.../Microsoft/WingtipTicketsSaaS-StandaloneApp][github-code-for-wingtip-sa]. - -## Database per tenant pattern - -The database per tenant pattern is effective for service providers that are concerned with tenant isolation and want to run a centralized service that allows cost-efficient use of shared resources.
A database is created for each venue, or tenant, and all the databases are centrally managed. Databases can be hosted in elastic pools to provide cost-efficient and easy performance management, which leverages the unpredictable workload patterns of the tenants. A catalog database holds the mapping between tenants and their databases. This mapping is managed using the shard map management features of the [Elastic Database Client Library](elastic-database-client-library.md), which provides efficient connection management to the application. - -Check out the [tutorials][docs-tutorials-for-wingtip-dpt] and code on GitHub [.../Microsoft/WingtipTicketsSaaS-DbPerTenant][github-code-for-wingtip-dpt]. - -## Sharded multi-tenant database pattern - -Multi-tenant databases are effective for service providers looking for lower cost per tenant and okay with reduced tenant isolation. This pattern allows packing large numbers of tenants into an individual database, driving the cost-per-tenant down. Near infinite scale is possible by sharding the tenants across multiple databases. A catalog database maps tenants to databases. - -This pattern also allows a *hybrid* model in which you can optimize for cost with multiple tenants in a database, or optimize for isolation with a single tenant in their own database. The choice can be made on a tenant-by-tenant basis, either when the tenant is provisioned or later, with no impact on the application. This model can be used effectively when groups of tenants need to be treated differently. For example, low-cost tenants can be assigned to shared databases, while premium tenants can be assigned to their own databases. - -Check out the [tutorials][docs-tutorials-for-wingtip-mt] and code on GitHub [.../Microsoft/WingtipTicketsSaaS-MultiTenantDb][github-code-for-wingtip-mt]. 
- -## Next steps - -#### Conceptual descriptions - -- A more detailed explanation of the application tenancy patterns is available at [Multi-tenant SaaS database tenancy patterns][saas-tenancy-app-design-patterns-md] - -#### Tutorials and code - -- Standalone app per tenant: - - [Tutorials for standalone app][docs-tutorials-for-wingtip-sa]. - - [Code for standalone app, on GitHub][github-code-for-wingtip-sa]. - -- Database per tenant: - - [Tutorials for database per tenant][docs-tutorials-for-wingtip-dpt]. - - [Code for database per tenant, on GitHub][github-code-for-wingtip-dpt]. - -- Sharded multi-tenant: - - [Tutorials for sharded multi-tenant][docs-tutorials-for-wingtip-mt]. - - [Code for sharded multi-tenant, on GitHub][github-code-for-wingtip-mt]. - - - - - -[image-three-tenancy-patterns]: media/saas-tenancy-welcome-wingtip-tickets-app/three-tenancy-patterns.png "Three tenancy patterns." - - - -[saas-tenancy-app-design-patterns-md]: saas-tenancy-app-design-patterns.md - - - -[docs-tutorials-for-wingtip-sa]: ./saas-standaloneapp-get-started-deploy.md -[github-code-for-wingtip-sa]: https://github.com/Microsoft/WingtipTicketsSaaS-StandaloneApp - -[docs-tutorials-for-wingtip-dpt]: ./saas-dbpertenant-wingtip-app-overview.md -[github-code-for-wingtip-dpt]: https://github.com/Microsoft/WingtipTicketsSaaS-DbPerTenant - -[docs-tutorials-for-wingtip-mt]: ./saas-multitenantdb-get-started-deploy.md -[github-code-for-wingtip-mt]: https://github.com/Microsoft/WingtipTicketsSaaS-MultiTenantDb \ No newline at end of file diff --git a/articles/azure-sql/database/saas-tenancy-wingtip-app-guidance-tips.md b/articles/azure-sql/database/saas-tenancy-wingtip-app-guidance-tips.md deleted file mode 100644 index 8b5c6ac4d4e17..0000000000000 --- a/articles/azure-sql/database/saas-tenancy-wingtip-app-guidance-tips.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: Multi-tenant app example - Wingtip SaaS -description: "Provides steps and guidance for installing and running the sample 
multi-tenant application that uses Azure SQL Database, the Wingtip Tickets SaaS example." -services: sql-database -ms.service: sql-database -ms.subservice: scenario -ms.custom: seo-lt-2019, sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 12/18/2018 ---- -# General guidance for working with Wingtip Tickets sample SaaS apps -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article contains general guidance for running the Wingtip Tickets sample SaaS applications that use Azure SQL Database. - -## Download and unblock the Wingtip Tickets SaaS scripts - -Executable contents (scripts, dlls) may be blocked by Windows when zip files are downloaded from an external source and extracted. When extracting the scripts from a zip file, **follow the steps below to unblock the .zip file before extracting**. This ensures the scripts are allowed to run. - -1. Browse to the Wingtip Tickets SaaS GitHub repo for the database tenancy pattern you wish to explore: - - [WingtipTicketsSaaS-StandaloneApp](https://github.com/Microsoft/WingtipTicketsSaaS-StandaloneApp) - - [WingtipTicketsSaaS-DbPerTenant](https://github.com/Microsoft/WingtipTicketsSaaS-DbPerTenant) - - [WingtipTicketsSaaS-MultiTenantDb](https://github.com/Microsoft/WingtipTicketsSaaS-MultiTenantDb) -2. Click **Clone or download**. -3. Click **Download zip** and save the file. -4. Right-click the zip file, and select **Properties**. The zip file name will correspond to the repo name. (ex. _WingtipTicketsSaaS-DbPerTenant-master.zip_) -5. On the **General** tab, select **Unblock**. -6. Click **OK**. -7. Extract the files. - -Scripts are located in the *..\\Learning Modules* folder. - - -## Working with the Wingtip Tickets PowerShell scripts - -To get the most out of the sample you need to dive into the provided scripts. Use breakpoints and step through the scripts as they execute and examine how the different SaaS patterns are implemented. 
To easily step through the provided scripts and modules for the best understanding, we recommend using the [PowerShell ISE](/powershell/scripting/components/ise/introducing-the-windows-powershell-ise). - -### Update the configuration file for your deployment - -Edit the **UserConfig.psm1** file with the resource group and user value that you set during deployment: - -1. Open the *PowerShell ISE* and load ...\\Learning Modules\\*UserConfig.psm1* -2. Update *ResourceGroupName* and *Name* with the specific values for your deployment (on lines 10 and 11 only). -3. Save the changes! - -Setting these values here simply keeps you from having to update these deployment-specific values in every script. - -### Execute the scripts by pressing F5 - -Several scripts use *$PSScriptRoot* to navigate folders, and *$PSScriptRoot* is only evaluated when scripts are executed by pressing **F5**.  Highlighting and running a selection (**F8**) can result in errors, so press **F5** when running scripts. - -### Step through the scripts to examine the implementation - -The best way to understand the scripts is by stepping through them to see what they do. Check out the included **Demo-** scripts that present an easy to follow high-level workflow. The **Demo-** scripts show the steps required to accomplish each task, so set breakpoints and drill deeper into the individual calls to see implementation details for the different SaaS patterns. - -Tips for exploring and stepping through PowerShell scripts: - -- Open **Demo-** scripts in the PowerShell ISE. -- Execute or continue with **F5** (using **F8** is not advised because *$PSScriptRoot* is not evaluated when running selections of a script). -- Place breakpoints by clicking or selecting a line and pressing **F9**. -- Step over a function or script call using **F10**. -- Step into a function or script call using **F11**. -- Step out of the current function or script call using **Shift + F11**. 
- - -## Explore database schema and execute SQL queries using SSMS - -Use [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms) to connect and browse the application servers and databases. - -The deployment initially has tenants and catalog servers to connect to. The naming of the servers depends on the database tenancy pattern (see below for specifics). - - - **Standalone application:** servers for each tenant (ex. *contosoconcerthall-<User>* server) and *catalog-sa-<User>* - - **Database per tenant:** *tenants1-dpt-<User>* and *catalog-dpt-<User>* servers - - **Multi-tenant database:** *tenants1-mt-<User>* and *catalog-mt-<User>* servers - -To ensure a successful demo connection, all servers have a [firewall rule](firewall-configure.md) allowing all IPs through. - - -1. Open *SSMS* and connect to the tenants. The server name depends on the database tenancy pattern you've selected (see below for specifics): - - **Standalone application:** servers of individual tenants (ex. *contosoconcerthall-<User>.database.windows.net*) - - **Database per tenant:** *tenants1-dpt-<User>.database.windows.net* - - **Multi-tenant database:** *tenants1-mt-<User>.database.windows.net* -2. Click **Connect** > **Database Engine...**: - - ![catalog server](./media/saas-tenancy-wingtip-app-guidance-tips/connect.png) - -3. Demo credentials are: Login = *developer*, Password = *P\@ssword1* - - The image below demonstrates the login for the *Database per tenant* pattern. - ![connection](./media/saas-tenancy-wingtip-app-guidance-tips/tenants1-connect.png) - - - -4. 
Repeat steps 2-3 and connect to the catalog server (see below for specific server names based on the database tenancy pattern selected) - - **Standalone application:** *catalog-sa-<User>.database.windows.net* - - **Database per tenant:** *catalog-dpt-<User>.database.windows.net* - - **Multi-tenant database:** *catalog-mt-<User>.database.windows.net* - - -After successfully connecting you should see all servers. Your list of databases might be different, depending on the tenants you have provisioned. - -The image below demonstrates the log in for the *Database per tenant* pattern. - -![object explorer](./media/saas-tenancy-wingtip-app-guidance-tips/object-explorer.png) - - - -## Next steps -- [Deploy the Wingtip Tickets SaaS Standalone Application](./saas-standaloneapp-get-started-deploy.md) -- [Deploy the Wingtip Tickets SaaS Database per Tenant application](./saas-dbpertenant-get-started-deploy.md) -- [Deploy the Wingtip Tickets SaaS Multi-tenant Database application](./saas-multitenantdb-get-started-deploy.md) \ No newline at end of file diff --git a/articles/azure-sql/database/scale-resources.md b/articles/azure-sql/database/scale-resources.md deleted file mode 100644 index 88bfb8584d75c..0000000000000 --- a/articles/azure-sql/database/scale-resources.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Scale resources -description: This article explains how to scale your database in Azure SQL Database and SQL Managed Instance by adding or removing allocated resources. 
-services: sql-database -ms.service: sql-db-mi -ms.subservice: performance -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: dimitri-furman -ms.author: dfurman -ms.reviewer: kendralittle, mathoma, urmilano, wiassaf -ms.date: 06/25/2019 ---- - -# Dynamically scale database resources with minimal downtime -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Azure SQL Database and SQL Managed Instance enable you to dynamically add more resources to your database with minimal [downtime](https://azure.microsoft.com/support/legal/sla/azure-sql-database); however, there is a switch over period where connectivity is lost to the database for a short amount of time, which can be mitigated using retry logic. - -## Overview - -When demand for your app grows from a handful of devices and customers to millions, Azure SQL Database and SQL Managed Instance scale on the fly with minimal downtime. Scalability is one of the most important characteristics of platform as a service (PaaS) that enables you to dynamically add more resources to your service when needed. Azure SQL Database enables you to easily change resources (CPU power, memory, IO throughput, and storage) allocated to your databases. - -You can mitigate performance issues due to increased usage of your application that cannot be fixed using indexing or query rewrite methods. Adding more resources enables you to quickly react when your database hits the current resource limits and needs more power to handle the incoming workload. Azure SQL Database also enables you to scale-down the resources when they are not needed to lower the cost. - -You don't need to worry about purchasing hardware and changing underlying infrastructure. Scaling a database can be easily done via the Azure portal using a slider. 
- -![Scale database performance](./media/scale-resources/scale-performance.svg) - -Azure SQL Database offers the [DTU-based purchasing model](service-tiers-dtu.md) and the [vCore-based purchasing model](service-tiers-vcore.md), while Azure SQL Managed Instance offers just the [vCore-based purchasing model](service-tiers-vcore.md). - -- The [DTU-based purchasing model](service-tiers-dtu.md) offers a blend of compute, memory, and I/O resources in three service tiers to support lightweight to heavyweight database workloads: Basic, Standard, and Premium. Performance levels within each tier provide a different mix of these resources, to which you can add additional storage resources. -- The [vCore-based purchasing model](service-tiers-vcore.md) lets you choose the number of vCores, the amount or memory, and the amount and speed of storage. This purchasing model offers three service tiers: General Purpose, Business Critical, and Hyperscale. - -The service tier, compute tier, and resource limits for a database, elastic pool, or managed instance can be changed at any time. For example, you can build your first app on a single database using the serverless compute tier and then change its service tier manually or programmatically at any time, to the provisioned compute tier, to meet the needs of your solution. - -> [!NOTE] -> Notable exceptions where you cannot change the service tier of a database are: -> - Databases using features which are [only available](features-comparison.md#features-of-sql-database-and-sql-managed-instance) in the Business Critical / Premium service tiers, cannot be changed to use the General Purpose / Standard service tier. -> - Databases originally created in the Hyperscale service tier cannot be migrated to other service tiers. If you migrate an existing database in Azure SQL Database to the Hyperscale service tier, you can reverse migrate to the General Purpose service tier within 45 days of the original migration to Hyperscale. 
If you wish to migrate the database to another service tier, such as Business Critical, first reverse migrate to the General Purpose service tier, then perform a further migration. Learn more in [How to reverse migrate from Hyperscale](manage-hyperscale-database.md#reverse-migrate-from-hyperscale). - -You can adjust the resources allocated to your database by changing service objective, or scaling, to meet workload demands. This also enables you to only pay for the resources that you need, when you need them. Please refer to the [note](#impact-of-scale-up-or-scale-down-operations) on the potential impact that a scale operation might have on an application. - -> [!NOTE] -> Dynamic scalability is different from autoscale. Autoscale is when a service scales automatically based on criteria, whereas dynamic scalability allows for manual scaling with a minimal downtime. Single databases in Azure SQL Database can be scaled manually, or in the case of the [Serverless tier](serverless-tier-overview.md), set to automatically scale the compute resources. [Elastic pools](elastic-pool-overview.md), which allow databases to share resources in a pool, can currently only be scaled manually. - -Azure SQL Database offers the ability to dynamically scale your databases: - -- With a [single database](single-database-scale.md), you can use either [DTU](resource-limits-dtu-single-databases.md) or [vCore](resource-limits-vcore-single-databases.md) models to define maximum amount of resources that will be assigned to each database. -- [Elastic pools](elastic-pool-scale.md) enable you to define maximum resource limit per group of databases in the pool. 
- -Azure SQL Managed Instance allows you to scale as well: - -- [SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md) uses [vCores](../managed-instance/sql-managed-instance-paas-overview.md#vcore-based-purchasing-model) mode and enables you to define maximum CPU cores and maximum of storage allocated to your instance. All databases within the managed instance will share the resources allocated to the instance. - -## Impact of scale up or scale down operations - -Initiating a scale up, or scale down action, in any of the flavors mentioned above, restarts the database engine process, and moves it to a different virtual machine if needed. Moving the database engine process to a new virtual machine is an **online process** during which you can continue using your existing Azure SQL Database service. Once the target database engine is ready to process queries, open connections to the current database engine will be [terminated](single-database-scale.md#impact), and uncommitted transactions will be rolled back. New connections will be made to the target database engine. - -> [!NOTE] -> It is not recommended to scale your managed instance if a long-running transaction, such as data import, data processing jobs, index rebuild, etc., is running, or if you have any active connection on the instance. To prevent the scaling from taking longer time to complete than usual, you should scale the instance upon the completion of all long-running operations. - -> [!NOTE] -> You can expect a short connection break when the scale up/scale down process is finished. If you have implemented [Retry logic for standard transient errors](troubleshoot-common-connectivity-issues.md#retry-logic-for-transient-errors), you will not notice the failover. - -## Alternative scale methods - -Scaling resources is the easiest and the most effective way to improve performance of your database without changing either the database or application code. 
In some cases, even the highest service tiers, compute sizes, and performance optimizations might not handle your workload in a successful and cost-effective way. In that case you have these additional options to scale your database: - -- [Read scale-out](read-scale-out.md) is an available feature where you are getting one read-only replica of your data where you can execute demanding read-only queries such as reports. A read-only replica will handle your read-only workload without affecting resource usage on your primary database. -- [Database sharding](elastic-scale-introduction.md) is a set of techniques that enables you to split your data into several databases and scale them independently. - -## Next steps - -- For information about improving database performance by changing database code, see [Find and apply performance recommendations](database-advisor-find-recommendations-portal.md). -- For information about letting built-in database intelligence optimize your database, see [Automatic tuning](automatic-tuning-overview.md). -- For information about read scale-out in Azure SQL Database, see how to [use read-only replicas to load balance read-only query workloads](read-scale-out.md). -- For information about a Database sharding, see [Scaling out with Azure SQL Database](elastic-scale-introduction.md). -- For an example of using scripts to monitor and scale a single database, see [Use PowerShell to monitor and scale a single SQL Database](scripts/monitor-and-scale-database-powershell.md). 
diff --git a/articles/azure-sql/database/scripts/add-database-to-failover-group-cli.md b/articles/azure-sql/database/scripts/add-database-to-failover-group-cli.md deleted file mode 100644 index 754ce837cb07a..0000000000000 --- a/articles/azure-sql/database/scripts/add-database-to-failover-group-cli.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: "Azure CLI example: Add a database to a failover group" -description: Use this Azure CLI example script to create a database in Azure SQL Database, add it to an auto-failover group, and test failover. -services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: sqldbrb=1, devx-track-azurecli -ms.devlang: azurecli -ms.topic: sample -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 01/26/2022 ---- - -# Add a database to a failover group using the Azure CLI - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure CLI script example creates a database in Azure SQL Database, creates a failover group, adds the database to it, and tests failover. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../../includes/azure-cli-prepare-your-environment.md)] - -## Sample script - -[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../../includes/cli-launch-cloud-shell-sign-in.md)] - -### Run the script - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-single-db-to-failover-group-az-cli.sh" id="FullScript"::: - -## Clean up resources - -[!INCLUDE [cli-clean-up-resources.md](../../../../includes/cli-clean-up-resources.md)] - -```azurecli -az group delete --name $resourceGroup -``` - -## Sample reference - -This script uses the following commands. Each command in the table links to command-specific documentation. - -| Command | Description | -|---|---| -| [az sql db](/cli/azure/sql/db) | Database commands. 
| -| [az sql failover-group](/cli/azure/sql/failover-group) | Failover group commands. | - -## Next steps - -For more information on Azure CLI, see [Azure CLI documentation](/cli/azure). - -Additional SQL Database CLI script samples can be found in the [Azure SQL Database documentation](../az-cli-script-samples-content-guide.md). diff --git a/articles/azure-sql/database/scripts/add-database-to-failover-group-powershell.md b/articles/azure-sql/database/scripts/add-database-to-failover-group-powershell.md deleted file mode 100644 index 9649255afdf54..0000000000000 --- a/articles/azure-sql/database/scripts/add-database-to-failover-group-powershell.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: "PowerShell: Add a database to an auto-failover group" -description: Use an Azure PowerShell example script to create a database in Azure SQL Database, add it to an auto-failover group, and test failover. -services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: PowerShell -ms.topic: sample -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 07/16/2019 ---- - -# Use PowerShell to add a database to a failover group - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This PowerShell script example creates a single database in Azure SQL Database, creates a failover group, adds the database to it, and tests failover. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use PowerShell locally, this tutorial requires Az PowerShell 1.4.0 or later. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). 
If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. - -## Sample scripts - -[!code-powershell-interactive[main](../../../../powershell_scripts/sql-database/failover-groups/add-single-db-to-failover-group-az-ps.ps1 "Add a database to a failover group")] - -## Clean up deployment - -Use the following command to remove the resource group and all resources associated with it. - -```powershell -Remove-AzResourceGroup -ResourceGroupName $resourceGroupName -``` - -## Script explanation - -This script uses the following commands. Each command in the table links to command specific documentation. - -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates a resource group in which all resources are stored. | -| [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) | Creates a server. | -| [New-AzSqlServerFirewallRule](/powershell/module/az.sql/new-azsqlserverfirewallrule) | Creates a server-level firewall rule for a server. | -| [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase) | Creates a new database. | -| [New-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/new-azsqldatabasefailovergroup) | Creates a new failover group. | -| [Get-AzSqlDatabase](/powershell/module/az.sql/get-azsqldatabase) | Gets one or more databases. | -| [Add-AzSqlDatabaseToFailoverGroup](/powershell/module/az.sql/add-azsqldatabasetofailovergroup) | Adds one or more databases to a failover group. | -| [Get-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/get-azsqldatabasefailovergroup) | Gets or lists failover groups. | -| [Switch-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/switch-azsqldatabasefailovergroup)| Executes a failover of a failover group. 
| -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Removes a resource group | - -## Next steps - -For more information on Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional SQL Database PowerShell script samples can be found in the [Azure SQL Database PowerShell scripts](../powershell-script-content-guide.md). diff --git a/articles/azure-sql/database/scripts/add-elastic-pool-to-failover-group-cli.md b/articles/azure-sql/database/scripts/add-elastic-pool-to-failover-group-cli.md deleted file mode 100644 index 8c225d6371fb8..0000000000000 --- a/articles/azure-sql/database/scripts/add-elastic-pool-to-failover-group-cli.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: "Azure CLI example: Failover group - Azure SQL Database elastic pool" -description: Use this Azure CLI example script to create an Azure SQL Database elastic pool, add it to a failover group, and test failover. -services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: -ms.devlang: azurecli -ms.topic: sample -author: rothja -ms.author: jroth -ms.reviewer: mathoma -ms.date: 01/26/2022 ---- - -# Add an Azure SQL Database elastic pool to a failover group using the Azure CLI - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure CLI script example creates a single database, adds it to an elastic pool, creates a failover group, and tests failover. 
- -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../../includes/azure-cli-prepare-your-environment.md)] - -## Sample script - -[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../../includes/cli-launch-cloud-shell-sign-in.md)] - -### Run the script - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/failover-groups/add-elastic-pool-to-failover-group-az-cli.sh" id="FullScript"::: - -## Clean up resources - -[!INCLUDE [cli-clean-up-resources.md](../../../../includes/cli-clean-up-resources.md)] - -```azurecli -az group delete --name $resourceGroup -``` - -## Sample reference - -This script uses the following commands. Each command in the table links to command specific documentation. - -| Command | Description | -|---|---| -| [az sql elastic-pool](/cli/azure/sql/elastic-pool) | Elastic pool commands. | -| [az sql failover-group](/cli/azure/sql/failover-group) | Failover group commands. | - -## Next steps - -For more information on Azure CLI, see [Azure CLI documentation](/cli/azure/overview). - -Additional SQL Database CLI script samples can be found in the [Azure SQL Database documentation](../az-cli-script-samples-content-guide.md). diff --git a/articles/azure-sql/database/scripts/add-elastic-pool-to-failover-group-powershell.md b/articles/azure-sql/database/scripts/add-elastic-pool-to-failover-group-powershell.md deleted file mode 100644 index 019b8161aa006..0000000000000 --- a/articles/azure-sql/database/scripts/add-elastic-pool-to-failover-group-powershell.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: "PowerShell: Add an elastic pool to an auto-failover group" -description: Azure PowerShell example script to create an Azure SQL Database elastic pool, add it to an auto-failover group, and test failover. 
-services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: PowerShell -ms.topic: sample -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 07/16/2019 ---- -# Use PowerShell to add an elastic pool to a failover group -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure PowerShell script example creates a database in Azure SQL Database, adds it to an elastic pool, creates a failover group, and tests failover. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use PowerShell locally, this tutorial requires Az PowerShell 1.4.0 or later. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. - -## Sample scripts - -[!code-powershell-interactive[main](../../../../powershell_scripts/sql-database/failover-groups/add-elastic-pool-to-failover-group-az-ps.ps1 "Add elastic pool to a failover group")] - -## Clean up deployment - -Use the following command to remove the resource group and all resources associated with it. - -```powershell -Remove-AzResourceGroup -ResourceGroupName $resourceGroupName -``` - -## Script explanation - -This script uses the following commands. Each command in the table links to command-specific documentation. - -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates a resource group in which all resources are stored. | -| [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) | Creates a server that hosts databases and elastic pools. 
| -| [New-AzSqlServerFirewallRule](/powershell/module/az.sql/new-azsqlserverfirewallrule) | Creates a server-level firewall rule for a server. | -| [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase) | Creates a new database. | -| [New-AzSqlElasticPool](/powershell/module/az.sql/new-azsqlelasticpool) | Creates an elastic database pool.| -| [Set-AzSqlDatabase](/powershell/module/az.sql/set-azsqldatabase) | Sets properties for a database, or moves an existing database into an elastic pool. | -| [New-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/new-azsqldatabasefailovergroup) | Creates a new failover group. | -| [Get-AzSqlDatabase](/powershell/module/az.sql/get-azsqldatabase) | Gets one or more databases. | -| [Add-AzSqlDatabaseToFailoverGroup](/powershell/module/az.sql/add-azsqldatabasetofailovergroup) | Adds one or more databases to a failover group. | -| [Get-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/get-azsqldatabasefailovergroup) | Gets or lists database failover groups. | -| [Switch-AzSqlDatabaseFailoverGroup](/powershell/module/az.sql/switch-azsqldatabasefailovergroup)| Executes a failover of a database failover group. | -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Removes a resource group | - -## Next steps - -For more information on the Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional SQL Database PowerShell script samples can be found in the [Azure SQL Database PowerShell scripts](../powershell-script-content-guide.md). 
diff --git a/articles/azure-sql/database/scripts/auditing-threat-detection-cli.md b/articles/azure-sql/database/scripts/auditing-threat-detection-cli.md deleted file mode 100644 index b6f56de4aa51f..0000000000000 --- a/articles/azure-sql/database/scripts/auditing-threat-detection-cli.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: "Azure CLI example: Auditing and Advanced Threat Protection in Azure SQL Database" -description: Use this Azure CLI example script to configure auditing and Advanced Threat Protection in an Azure SQL Database -services: sql-database -ms.service: sql-database -ms.subservice: security -ms.custom: security, devx-track-azurecli -ms.devlang: azurecli -ms.topic: sample -author: DavidTrigano -ms.author: datrigan -ms.reviewer: vanto -ms.date: 01/26/2022 ---- - -# Configure SQL Database auditing and Advanced Threat Protection using the Azure CLI - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure CLI script example configures SQL Database auditing and Advanced Threat Protection. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../../includes/azure-cli-prepare-your-environment.md)] - -## Sample script - -[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../../includes/cli-launch-cloud-shell-sign-in.md)] - -### Run the script - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/database-auditing-and-threat-detection/database-auditing-and-threat-detection.sh" id="FullScript"::: - -## Clean up resources - -[!INCLUDE [cli-clean-up-resources.md](../../../../includes/cli-clean-up-resources.md)] - -```azurecli -az group delete --name $resourceGroup -``` - -## Sample reference - -This script uses the following commands. Each command in the table links to command specific documentation. 
- -| Command | Description | -|---|---| -| [az sql db audit-policy](/cli/azure/sql/db/audit-policy) | Sets the auditing policy for a database. | -| [az sql db threat-policy](/cli/azure/sql/db/threat-policy) | Sets an Advanced Threat Protection policy on a database. | - -## Next steps - -For more information on Azure CLI, see [Azure CLI documentation](/cli/azure). - -Additional SQL Database CLI script samples can be found in the [Azure SQL Database documentation](../az-cli-script-samples-content-guide.md). diff --git a/articles/azure-sql/database/scripts/auditing-threat-detection-powershell-configure.md b/articles/azure-sql/database/scripts/auditing-threat-detection-powershell-configure.md deleted file mode 100644 index e3043b5bab55f..0000000000000 --- a/articles/azure-sql/database/scripts/auditing-threat-detection-powershell-configure.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: PowerShell example of auditing and Advanced Threat Protection - Azure SQL Database -description: Azure PowerShell example script to configure auditing and Advanced Threat Protection in an Azure SQL Database -services: sql-database -ms.service: sql-database -ms.subservice: security -ms.custom: security, sqldbrb=1, devx-track-azurepowershell -ms.devlang: PowerShell -ms.topic: sample -author: DavidTrigano -ms.author: datrigan -ms.reviewer: kendralittle, mathoma, vanto -ms.date: 04/28/2020 ---- -# Use PowerShell to configure SQL Database auditing and Advanced Threat Protection -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This PowerShell script example configures Azure SQL Database auditing and Advanced Threat Protection. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use PowerShell locally, this tutorial requires Az PowerShell 1.4.0 or later. 
If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. - -## Sample script - -[!code-powershell-interactive[main](../../../../powershell_scripts/sql-database/database-auditing-and-threat-detection/database-auditing-and-threat-detection.ps1?highlight=15-16 "Configure auditing and threat detection")] - -## Clean up deployment - -Use the following command to remove the resource group and all resources associated with it. - -```powershell -Remove-AzResourceGroup -ResourceGroupName $resourcegroupname -``` - -## Script explanation - -This script uses the following commands. Each command in the table links to command-specific documentation. - -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates a resource group in which all resources are stored. | -| [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) | Creates a server. | -| [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase) | Creates a database or elastic pool. | -| [New-AzStorageAccount](/powershell/module/az.storage/new-azstorageaccount) | Creates a storage account. | -| [Set-AzSqlDatabaseAuditing](/powershell/module/az.sql/set-azsqldatabaseaudit) | Sets the auditing policy for a database. | -| Set-AzSqlDatabaseThreatDetectionPolicy | Sets an Advanced Threat Protection policy on a database. | -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Deletes a resource group including all nested resources. | - - -## Next steps - -For more information on Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional SQL Database PowerShell script samples can be found in the [Azure SQL Database PowerShell scripts](../powershell-script-content-guide.md). 
diff --git a/articles/azure-sql/database/scripts/backup-database-cli.md b/articles/azure-sql/database/scripts/backup-database-cli.md deleted file mode 100644 index 7f92be164d7ac..0000000000000 --- a/articles/azure-sql/database/scripts/backup-database-cli.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: "Azure CLI example: Backup a database in Azure SQL Database" -description: Use this Azure CLI example script to backup an Azure SQL single database to an Azure storage container -services: sql-database -ms.service: sql-database -ms.subservice: backup-restore -ms.custom: devx-track-azurecli -ms.devlang: azurecli -ms.topic: sample -author: SudhirRaparla -ms.author: nvraparl -ms.reviewer: mathoma -ms.date: 01/26/2022 ---- - -# Backup an Azure SQL single database to an Azure storage container using the Azure CLI - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure CLI example backs up a database in SQL Database to an Azure storage container. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../../includes/azure-cli-prepare-your-environment.md)] - -## Sample script - -[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../../includes/cli-launch-cloud-shell-sign-in.md)] - -### Run the script - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/backup-database/backup-database.sh" id="FullScript"::: - -## Clean up resources - -[!INCLUDE [cli-clean-up-resources.md](../../../../includes/cli-clean-up-resources.md)] - -```azurecli -az group delete --name $resourceGroup -``` - -## Sample reference - -This script uses the following commands. Each command in the table links to command specific documentation. - -| Command | Notes | -|---|---| -| [az sql server](/cli/azure/sql/server) | Server commands. | -| [az sql db](/cli/azure/sql/db) | Database commands. 
| - -## Next steps - -For more information on Azure CLI, see [Azure CLI documentation](/cli/azure). - -Additional SQL Database CLI script samples can be found in the [Azure SQL Database documentation](../az-cli-script-samples-content-guide.md). diff --git a/articles/azure-sql/database/scripts/copy-database-to-new-server-cli.md b/articles/azure-sql/database/scripts/copy-database-to-new-server-cli.md deleted file mode 100644 index 042ab1044290f..0000000000000 --- a/articles/azure-sql/database/scripts/copy-database-to-new-server-cli.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: "Azure CLI example: Copy database in Azure SQL Database to new server" -description: Use this Azure CLI example script to copy a database in Azure SQL Database to a new server -services: sql-database -ms.service: sql-database -ms.subservice: data-movement -ms.custom: devx-track-azurecli -ms.devlang: azurecli -ms.topic: sample -author: rothja -ms.author: jroth -ms.reviewer: mathoma -ms.date: 01/26/2022 ---- - -# Copy a database in Azure SQL Database to a new server using the Azure CLI - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure CLI script example creates a copy of an existing database in a new server. 
- -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../../includes/azure-cli-prepare-your-environment.md)] - -## Sample script - -[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../../includes/cli-launch-cloud-shell-sign-in.md)] - -### Run the script - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/copy-database-to-new-server/copy-database-to-new-server.sh" id="FullScript"::: - -## Clean up resources - -[!INCLUDE [cli-clean-up-resources.md](../../../../includes/cli-clean-up-resources.md)] - -```azurecli -az group delete --name $targetResourceGroup -az group delete --name $resourceGroup -``` - -## Sample reference - -This script uses the following commands. Each command in the table links to command specific documentation. - -| Command | Description | -|---|---| -| [az sql db copy](/cli/azure/sql/db#az-sql-db-copy) | Creates a copy of a database that uses the snapshot at the current time. | - -## Next steps - -For more information on Azure CLI, see [Azure CLI documentation](/cli/azure). - -Additional SQL Database CLI script samples can be found in the [Azure SQL Database documentation](../az-cli-script-samples-content-guide.md). 
diff --git a/articles/azure-sql/database/scripts/copy-database-to-new-server-powershell.md b/articles/azure-sql/database/scripts/copy-database-to-new-server-powershell.md deleted file mode 100644 index 43abc90858ed9..0000000000000 --- a/articles/azure-sql/database/scripts/copy-database-to-new-server-powershell.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "PowerShell: Copy a database to new server" -description: Azure PowerShell example script to copy a database to a new server -services: sql-database -ms.service: sql-database -ms.subservice: data-movement -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: PowerShell -ms.topic: sample -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, mathoma -ms.date: 03/12/2019 ---- -# Use PowerShell to copy a database to a new server -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure PowerShell script example creates a copy of an existing database in Azure SQL Database in a new server. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use PowerShell locally, this tutorial requires Az PowerShell 1.4.0 or later. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. - -## Copy a database to a new server - -[!code-powershell-interactive[main](../../../../powershell_scripts/sql-database/copy-database-to-new-server/copy-database-to-new-server.ps1?highlight=20-23 "Copy database to new server")] - -## Clean up deployment - -Use the following command to remove the resource group and all resources associated with it. 
- -```powershell -Remove-AzResourceGroup -ResourceGroupName $sourceresourcegroupname -Remove-AzResourceGroup -ResourceGroupName $targetresourcegroupname -``` - -## Script explanation - -This script uses the following commands. Each command in the table links to command specific documentation. - -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates a resource group in which all resources are stored. | -| [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) | Creates a server that hosts databases and elastic pools. | -| [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase) | Creates a database or elastic pool. | -| [New-AzSqlDatabaseCopy](/powershell/module/az.sql/new-azsqldatabasecopy) | Creates a copy of a database that uses the snapshot at the current time. | -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Deletes a resource group including all nested resources. | - - -## Next steps - -For more information on Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional SQL Database PowerShell script samples can be found in the [Azure SQL Database PowerShell scripts](../powershell-script-content-guide.md). diff --git a/articles/azure-sql/database/scripts/create-and-configure-database-cli.md b/articles/azure-sql/database/scripts/create-and-configure-database-cli.md deleted file mode 100644 index 6b070b9497d89..0000000000000 --- a/articles/azure-sql/database/scripts/create-and-configure-database-cli.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "Azure CLI example: Create a single database" -description: Use this Azure CLI example script to create a single database. 
-services: sql-database -ms.service: sql-database -ms.subservice: deployment-configuration -ms.custom: sqldbrb=1, devx-track-azurecli -ms.devlang: azurecli -ms.topic: sample -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, mathoma -ms.date: 01/26/2022 ---- - -# Create a single database and configure a firewall rule using the Azure CLI - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure CLI script example creates a single database in Azure SQL Database and configures a server-level firewall rule. After the script has been successfully run, the database can be accessed from all Azure services and the allowed IP address range. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../../includes/azure-cli-prepare-your-environment.md)] - -## Sample script - -[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../../includes/cli-launch-cloud-shell-sign-in.md)] - -### Run the script - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/create-and-configure-database/create-and-configure-database.sh" id="FullScript"::: - -## Clean up resources - -[!INCLUDE [cli-clean-up-resources.md](../../../../includes/cli-clean-up-resources.md)] - -```azurecli -az group delete --name $resourceGroup -``` - -## Sample reference - -This script uses the following commands. Each command in the table links to command-specific documentation. - -| Command | Description | -|---|---| -| [az sql server](/cli/azure/sql/server#az-sql-server-create) | Server commands | -| [az sql server firewall](/cli/azure/sql/server/firewall-rule#az-sql-server-firewall-rule-create) | Server firewall commands. | -| [az sql db](/cli/azure/sql/db#az-sql-db-create) | Database commands. | - -## Next steps - -For more information on Azure CLI, see [Azure CLI documentation](/cli/azure). 
- -Additional SQL Database CLI script samples can be found in the [Azure SQL Database documentation](../az-cli-script-samples-content-guide.md). diff --git a/articles/azure-sql/database/scripts/create-and-configure-database-powershell.md b/articles/azure-sql/database/scripts/create-and-configure-database-powershell.md deleted file mode 100644 index 1fd6431d31c97..0000000000000 --- a/articles/azure-sql/database/scripts/create-and-configure-database-powershell.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "PowerShell: Create a single database" -description: Use an Azure PowerShell example script to create a single database in Azure SQL Database. -services: sql-database -ms.service: sql-database -ms.subservice: deployment-configuration -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: PowerShell -ms.topic: sample -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, mathoma -ms.date: 03/12/2019 ---- -# Use PowerShell to create a single database and configure a server-level firewall rule - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure PowerShell script example creates a single database in Azure SQL Database and configures a server-level firewall rule. After the script has been successfully run, the database can be accessed from all Azure services and the allowed IP address range. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use PowerShell locally, this tutorial requires Az PowerShell 1.4.0 or later. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. 
- -## Sample script - -[!code-powershell-interactive[main](../../../../powershell_scripts/sql-database/create-and-configure-database/create-and-configure-database.ps1?highlight=15-16 "Create SQL Database")] - -## Clean up deployment - -Use the following command to remove the resource group and all resources associated with it. - -```powershell -Remove-AzResourceGroup -ResourceGroupName $resourcegroupname -``` - -## Script explanation - -This script uses the following commands. Each command in the table links to command-specific documentation. - -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates a resource group in which all resources are stored. | -| [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) | Creates a server that hosts databases and elastic pools. | -| [New-AzSqlServerFirewallRule](/powershell/module/az.sql/new-azsqlserverfirewallrule) | Creates a server-level firewall rule for a server. | -| [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase) | Creates a database in a server. | -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Deletes a resource group including all nested resources. | - - -## Next steps - -For more information on Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional SQL Database PowerShell script samples can be found in the [Azure SQL Database PowerShell scripts](../powershell-script-content-guide.md). 
diff --git a/articles/azure-sql/database/scripts/import-from-bacpac-cli.md b/articles/azure-sql/database/scripts/import-from-bacpac-cli.md deleted file mode 100644 index 917a78f4a2c80..0000000000000 --- a/articles/azure-sql/database/scripts/import-from-bacpac-cli.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: "Azure CLI example: Import BACPAC file to database in Azure SQL Database" -description: Use this Azure CLI example script to import a BACPAC file into a database in Azure SQL Database -services: sql-database -ms.service: sql-database -ms.subservice: backup-restore -ms.custom: load & move data, devx-track-azurecli -ms.devlang: azurecli -ms.topic: sample -author: SudhirRaparla -ms.author: nvraparl -ms.reviewer: mathoma -ms.date: 01/26/2022 ---- - -# Import a BACPAC file into a database in SQL Database using the Azure CLI - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure CLI script example imports a database from a *.bacpac* file into a database in SQL Database. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../../includes/azure-cli-prepare-your-environment.md)] - -## Sample script - -[!INCLUDE [cli-run-local-sign-in.md](../../../../includes/cli-run-local-sign-in.md)] - -### Run the script - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/import-from-bacpac/import-from-bacpac.sh" id="FullScript"::: - -## Clean up resources - -[!INCLUDE [cli-clean-up-resources.md](../../../../includes/cli-clean-up-resources.md)] - -```azurecli -az group delete --name $resourceGroup -``` - -## Sample reference - -This script uses the following commands. Each command in the table links to command specific documentation. - -| Command | Description | -|---|---| -| [az sql server](/cli/azure/sql/server) | Server commands. | -| [az sql db import](/cli/azure/sql/db#az-sql-db-import) | Database import command. 
| - -## Next steps - -For more information on Azure CLI, see [Azure CLI documentation](/cli/azure). - -Additional SQL Database CLI script samples can be found in the [Azure SQL Database documentation](../az-cli-script-samples-content-guide.md). diff --git a/articles/azure-sql/database/scripts/import-from-bacpac-powershell.md b/articles/azure-sql/database/scripts/import-from-bacpac-powershell.md deleted file mode 100644 index 42f6a71d8fdec..0000000000000 --- a/articles/azure-sql/database/scripts/import-from-bacpac-powershell.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: "PowerShell: Import a BACPAC file to a new database in Azure SQL Database" -description: Azure PowerShell example script to import a BACPAC file into a database in SQL Database -services: sql-database -ms.service: sql-database -ms.subservice: backup-restore -ms.custom: load & move data, sqldbrb=1, devx-track-azurepowershell -ms.devlang: PowerShell -ms.topic: sample -author: SudhirRaparla -ms.author: nvraparl -ms.reviewer: kendralittle, mathoma -ms.date: 05/24/2019 ---- -# Use PowerShell to import a BACPAC file into a database in SQL Database -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure PowerShell script example imports a database from a BACPAC file into a new database in SQL Database. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use PowerShell locally, this tutorial requires Az PowerShell 1.4.0 or later. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. 
- -## Sample script - -[!code-powershell-interactive[main](../../../../powershell_scripts/sql-database/import-from-bacpac/import-from-bacpac.ps1?highlight=20-21 "Create SQL Database")] - -## Clean up deployment - -Use the following command to remove the resource group and all resources associated with it. - -```powershell -Remove-AzResourceGroup -ResourceGroupName $resourcegroupname -``` - -## Script explanation - -This script uses the following commands. Each command in the table links to command-specific documentation. - -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates a resource group in which all resources are stored. | -| [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) | Creates a server that hosts databases and elastic pools. | -| [New-AzSqlServerFirewallRule](/powershell/module/az.sql/new-azsqlserverfirewallrule) | Creates a server-level firewall rule for a server. | -| [New-AzSqlDatabaseImport](/powershell/module/az.sql/new-azsqldatabaseimport) | Imports a BACPAC file and create a new database on the server. | -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Deletes a resource group including all nested resources. | - -## Next steps - -For more information on Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional SQL Database PowerShell script samples can be found in the [Azure SQL Database PowerShell scripts](../powershell-script-content-guide.md). 
diff --git a/articles/azure-sql/database/scripts/monitor-and-scale-database-cli.md b/articles/azure-sql/database/scripts/monitor-and-scale-database-cli.md deleted file mode 100644 index 132328f705f33..0000000000000 --- a/articles/azure-sql/database/scripts/monitor-and-scale-database-cli.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: "Azure CLI example: Monitor and scale a single database in Azure SQL Database" -description: Use an Azure CLI example script to monitor and scale a single database in Azure SQL Database. -services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.custom: sqldbrb=1, devx-track-azurecli -ms.devlang: azurecli -ms.topic: sample -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, mathoma -ms.date: 01/26/2022 ---- - -# Monitor and scale a single database in Azure SQL Database using the Azure CLI - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure CLI script example scales a single database in Azure SQL Database to a different compute size after querying the size information of the database. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../../includes/azure-cli-prepare-your-environment.md)] - -## Sample script - -[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../../includes/cli-launch-cloud-shell-sign-in.md)] - -### Run the script - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/monitor-and-scale-database/monitor-and-scale-database.sh" id="FullScript"::: - -> [!TIP] -> Use [az sql db op list](/cli/azure/sql/db/op?#az-sql-db-op-list) to get a list of operations performed on the database, and use [az sql db op cancel](/cli/azure/sql/db/op#az-sql-db-op-cancel) to cancel an update operation on the database. 
- -## Clean up resources - -[!INCLUDE [cli-clean-up-resources.md](../../../../includes/cli-clean-up-resources.md)] - -```azurecli -az group delete --name $resourceGroup -``` - -## Sample reference - -This script uses the following commands. Each command in the table links to command-specific documentation. - -| Script | Description | -|---|---| -| [az sql server](/cli/azure/sql/server) | Server commands. | -| [az sql db show-usage](/cli/azure/sql#az-sql-show-usage) | Shows the size usage information for a database. | - -## Next steps - -For more information on the Azure CLI, see [Azure CLI documentation](/cli/azure). - -Additional CLI script samples can be found in [Azure CLI sample scripts](../az-cli-script-samples-content-guide.md). diff --git a/articles/azure-sql/database/scripts/monitor-and-scale-database-powershell.md b/articles/azure-sql/database/scripts/monitor-and-scale-database-powershell.md deleted file mode 100644 index 962545657e314..0000000000000 --- a/articles/azure-sql/database/scripts/monitor-and-scale-database-powershell.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: Use PowerShell to monitor and scale a single database in Azure SQL Database -description: Use an Azure PowerShell example script to monitor and scale a single database in Azure SQL Database. -services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: PowerShell -ms.topic: sample -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, mathoma -ms.date: 03/12/2019 ---- - -# Use PowerShell to monitor and scale a single database in Azure SQL Database - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This PowerShell script example monitors the performance metrics of a single database, scales it to a higher compute size, and creates an alert rule on one of the performance metrics. 
- -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use PowerShell locally, this tutorial requires Az PowerShell 1.4.0 or later. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. - -## Sample script - -[!code-powershell-interactive[main](../../../../powershell_scripts/sql-database/monitor-and-scale-database/monitor-and-scale-database.ps1?highlight=15-16 "Monitor and scale single database")] - -> [!NOTE] -> For a full list of metrics, see [metrics supported](../../../azure-monitor/essentials/metrics-supported.md#microsoftsqlserversdatabases). -> [!TIP] -> Use [Get-AzSqlDatabaseActivity](/powershell/module/az.sql/get-azsqldatabaseactivity) to get the status of database operations and use [Stop-AzSqlDatabaseActivity](/powershell/module/az.sql/stop-azsqldatabaseactivity) to cancel a database update operation. - -## Clean up deployment - -Use the following command to remove the resource group and all resources associated with it. - -```powershell -Remove-AzResourceGroup -ResourceGroupName $resourcegroupname -``` - -## Script explanation - -This script uses the following commands. Each command in the table links to command-specific documentation. - -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates a resource group in which all resources are stored. | -| [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) | Creates a server that hosts a single database or elastic pool. 
| -| [Get-AzMetric](/powershell/module/az.monitor/get-azmetric) | Shows the size usage information for the database.| -| [Set-AzSqlDatabase](/powershell/module/az.sql/set-azsqldatabase) | Updates database properties or moves the database into, out of, or between elastic pools. | -| [Add-AzMetricAlertRule](/powershell/module/az.monitor/add-azmetricalertrule) | Sets an alert rule to automatically monitor metrics in the future. | -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Deletes a resource group including all nested resources. | - - -## Next steps - -For more information on Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional PowerShell script samples can be found in [Azure PowerShell scripts](../powershell-script-content-guide.md). diff --git a/articles/azure-sql/database/scripts/monitor-and-scale-pool-powershell.md b/articles/azure-sql/database/scripts/monitor-and-scale-pool-powershell.md deleted file mode 100644 index 758f2d53c2e6c..0000000000000 --- a/articles/azure-sql/database/scripts/monitor-and-scale-pool-powershell.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: PowerShell example-monitor-scale-elastic pool-Azure SQL Database -description: Azure PowerShell example script to monitor and scale an elastic pool in Azure SQL Database -services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: PowerShell -ms.topic: sample -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, mathoma -ms.date: 03/12/2019 ---- -# Use PowerShell to monitor and scale an elastic pool in Azure SQL Database -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This PowerShell script example monitors the performance metrics of an elastic pool, scales it to a higher compute size, and creates an alert rule on one of the performance metrics. 
- -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use PowerShell locally, this tutorial requires Az PowerShell 1.4.0 or later. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. - -## Sample script - -[!code-powershell-interactive[main](../../../../powershell_scripts/sql-database/monitor-and-scale-pool/monitor-and-scale-pool.ps1?highlight=17-18 "Monitor and scale an elastic pool in SQL Database")] - -## Clean up deployment - -Use the following command to remove the resource group and all resources associated with it. - -```powershell -Remove-AzResourceGroup -ResourceGroupName $resourcegroupname -``` - -## Script explanation - -This script uses the following commands. Each command in the table links to command-specific documentation. - -| Command | Notes | -|---|---| - [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates a resource group in which all resources are stored. | -| [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) | Creates a server that hosts databases or elastic pools. | -| [New-AzSqlElasticPool](/powershell/module/az.sql/new-azsqlelasticpool) | Creates an elastic pool. | -| [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase) | Creates a database in a server. | -| [Get-AzMetric](/powershell/module/az.monitor/get-azmetric) | Shows the size usage information for the database.| -| [Add-AzMetricAlertRule](/powershell/module/az.monitor/add-azmetricalertrule) | Adds or updates a metric-based alert rule. | -| [Set-AzSqlElasticPool](/powershell/module/az.sql/set-azsqlelasticpool) | Updates elastic pool properties. 
| -| [Add-AzMetricAlertRule](/powershell/module/az.monitor/add-azmetricalertrule) | Sets an alert rule to automatically monitor metrics in the future. | -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Deletes a resource group including all nested resources. | - - -## Next steps - -For more information on Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional PowerShell script samples can be found in [Azure PowerShell scripts](../powershell-script-content-guide.md). diff --git a/articles/azure-sql/database/scripts/move-database-between-elastic-pools-cli.md b/articles/azure-sql/database/scripts/move-database-between-elastic-pools-cli.md deleted file mode 100644 index a82e3e1c53c32..0000000000000 --- a/articles/azure-sql/database/scripts/move-database-between-elastic-pools-cli.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "Azure CLI example: Move a database between elastic pools" -description: Use this Azure CLI example script to create two elastic pools and move a database in SQL Database from one elastic pool to another. -services: sql-database -ms.service: sql-database -ms.subservice: elastic-pools -ms.custom: sqldbrb=1, devx-track-azurecli -ms.devlang: azurecli -ms.topic: sample -author: arvindshmicrosoft -ms.author: arvindsh -ms.reviewer: kendralittle, mathoma -ms.date: 01/26/2022 ---- - -# Move a database in SQL Database in a SQL elastic pool using the Azure CLI - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure CLI script example creates two elastic pools, moves a pooled database in SQL Database from one SQL elastic pool into another SQL elastic pool, and then moves the pooled database out of the SQL elastic pool to be a single database in SQL Database. 
- -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../../includes/azure-cli-prepare-your-environment.md)] - -## Sample script - -[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../../includes/cli-launch-cloud-shell-sign-in.md)] - -### Run the script - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/move-database-between-pools/move-database-between-pools.sh" id="FullScript"::: - -## Clean up resources - -[!INCLUDE [cli-clean-up-resources.md](../../../../includes/cli-clean-up-resources.md)] - -```azurecli -az group delete --name $resourceGroup -``` - -## Sample reference - -This script uses the following commands. Each command in the table links to command-specific documentation. - -| Command | Description | -|---|---| -| [az sql server](/cli/azure/sql/server) | Server commands. | -| [az sql elastic-pools](/cli/azure/sql/elastic-pool) | Elastic pool commands. | -| [az sql db](/cli/azure/sql/db) | Database commands. | - -## Next steps - -For more information on Azure CLI, see [Azure CLI documentation](/cli/azure). - -Additional SQL Database CLI script samples can be found in the [Azure SQL Database documentation](../az-cli-script-samples-content-guide.md). diff --git a/articles/azure-sql/database/scripts/move-database-between-elastic-pools-powershell.md b/articles/azure-sql/database/scripts/move-database-between-elastic-pools-powershell.md deleted file mode 100644 index 006798052e201..0000000000000 --- a/articles/azure-sql/database/scripts/move-database-between-elastic-pools-powershell.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: "PowerShell: Move a database between elastic pools" -description: Use an Azure PowerShell example script to move a database in SQL Database between two elastic pools. 
-services: sql-database -ms.service: sql-database -ms.subservice: elastic-pools -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: PowerShell -ms.topic: sample -author: arvindshmicrosoft -ms.author: arvindsh -ms.reviewer: kendralittle, mathoma -ms.date: 03/12/2019 ---- - -# Use PowerShell to create elastic pools and move a database between them - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This PowerShell script example creates two elastic pools, moves a pooled database in SQL Database from one SQL elastic pool into another SQL elastic pool, and then moves the pooled database out of the SQL elastic pool to be a single database in Azure SQL Database. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use PowerShell locally, this tutorial requires Az PowerShell 1.4.0 or later. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. - -## Sample script - -[!code-powershell-interactive[main](../../../../powershell_scripts/sql-database/move-database-between-pools-and-standalone/move-database-between-pools-and-standalone.ps1?highlight=18-19 "Move a database between pools")] - -## Clean up deployment - -Use the following command to remove the resource group and all resources associated with it. - -```powershell -Remove-AzResourceGroup -ResourceGroupName $resourcegroupname -``` - -## Script explanation - -This script uses the following commands. Each command in the table links to command-specific documentation. 
- -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates a resource group in which all resources are stored. | -| [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) | Creates a server that hosts databases and elastic pools. | -| [New-AzSqlElasticPool](/powershell/module/az.sql/new-azsqlelasticpool) | Creates an elastic pool. | -| [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase) | Creates a database in a server. | -| [Set-AzSqlDatabase](/powershell/module/az.sql/set-azsqldatabase) | Updates database properties or moves a database into, out of, or between elastic pools. | -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Deletes a resource group including all nested resources. | - - -## Next steps - -For more information on Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional SQL Database PowerShell script samples can be found in the [Azure SQL Database PowerShell scripts](../powershell-script-content-guide.md). diff --git a/articles/azure-sql/database/scripts/restore-database-cli.md b/articles/azure-sql/database/scripts/restore-database-cli.md deleted file mode 100644 index c18a0beefda9b..0000000000000 --- a/articles/azure-sql/database/scripts/restore-database-cli.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: "Azure CLI example: Restore a backup" -description: Use this Azure CLI example script to restore a database in Azure SQL Database to an earlier point in time from automatic backups. 
-services: sql-database -ms.service: sql-database -ms.subservice: backup-restore -ms.custom: devx-track-azurecli -ms.devlang: azurecli -ms.topic: sample -author: SudhirRaparla -ms.author: nvraparl -ms.reviewer: carlrab, kendralittle, mathoma -ms.date: 02/11/2022 ---- - -# Restore a single database in Azure SQL Database to an earlier point in time using the Azure CLI - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure CLI example restores a single database in Azure SQL Database to a specific point in time. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../../includes/azure-cli-prepare-your-environment.md)] - -## Sample script - -[!INCLUDE [cli-run-local-sign-in.md](../../../../includes/cli-run-local-sign-in.md)] - -### Run the script - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/restore-database/restore-database.sh" id="FullScript"::: - -## Clean up resources - -[!INCLUDE [cli-clean-up-resources.md](../../../../includes/cli-clean-up-resources.md)] - -```azurecli -az group delete --name $resourceGroup -``` - -## Sample reference - -This script uses the following commands. Each command in the table links to command specific documentation. - -| Command | Description | -|---|---| -| [az sql db restore](/cli/azure/sql/db#az-sql-db-restore) | Restore database command. | - -## Next steps - -For more information on Azure CLI, see [Azure CLI documentation](/cli/azure). - -Additional SQL Database CLI script samples can be found in the [Azure SQL Database documentation](/azure/azure-sql/database/az-cli-script-samples-content-guide). 
diff --git a/articles/azure-sql/database/scripts/restore-database-powershell.md b/articles/azure-sql/database/scripts/restore-database-powershell.md deleted file mode 100644 index b9ec85f12a504..0000000000000 --- a/articles/azure-sql/database/scripts/restore-database-powershell.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: "PowerShell: Restore an automatic backup of a database in SQL Database" -description: Use an Azure PowerShell example script to restore a database in SQL Database to an earlier point in time from automatic backups. -services: sql-database -ms.service: sql-database -ms.subservice: backup-restore -ms.custom: devx-track-azurepowershell -ms.devlang: PowerShell -ms.topic: sample -author: SudhirRaparla -ms.author: nvraparl -ms.reviewer: kendralittle, mathoma -ms.date: 03/27/2019 ---- - -# Use PowerShell to restore a database to an earlier point in time - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This PowerShell script example restores a database in SQL Database to a specific point in time. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use PowerShell locally, this tutorial requires Az PowerShell 1.4.0 or later. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. - -## Sample script - -[!code-powershell-interactive[main](../../../../powershell_scripts/sql-database/restore-database/restore-database.ps1?highlight=17-18 "Create SQL Database")] - -## Clean up deployment - -Use the following command to remove the resource group and all resources associated with it. 
- -```powershell -Remove-AzResourceGroup -ResourceGroupName $resourcegroupname -``` - -## Script explanation - -This script uses the following commands. Each command in the table links to command-specific documentation. - -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates a resource group in which all resources are stored. | -| [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) | Creates a server that hosts databases and elastic pools. | -| [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase) | Creates a database in a server. | -| [Get-AzSqlDatabaseGeoBackup](/powershell/module/az.sql/get-azsqldatabasegeobackup) | Gets a geo-redundant backup of a standalone or pooled database. | -| [Restore-AzSqlDatabase](/powershell/module/az.sql/restore-azsqldatabase) | Restores a database. | -| [Remove-AzSqlDatabase](/powershell/module/az.sql/remove-azsqldatabase) | Removes a database. | -| [Get-AzSqlDeletedDatabaseBackup](/powershell/module/az.sql/get-azsqldeleteddatabasebackup) | Gets a deleted database that you can restore. | -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Deletes a resource group including all nested resources. | - -## Next steps - -For more information on Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional SQL Database PowerShell script samples can be found in the [Azure SQL Database PowerShell scripts](../powershell-script-content-guide.md). diff --git a/articles/azure-sql/database/scripts/scale-pool-cli.md b/articles/azure-sql/database/scripts/scale-pool-cli.md deleted file mode 100644 index b1c950d23c246..0000000000000 --- a/articles/azure-sql/database/scripts/scale-pool-cli.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "Azure CLI example: Scale an elastic pool" -description: Use an Azure CLI example script to scale an elastic pool in Azure SQL Database. 
-services: sql-database -ms.service: sql-database -ms.subservice: elastic-pools -ms.custom: sqldbrb=1, devx-track-azurecli -ms.devlang: azurecli -ms.topic: sample -author: arvindshmicrosoft -ms.author: arvindsh -ms.reviewer: kendralittle, mathoma -ms.date: 01/26/2022 ---- - -# Scale an elastic pool in Azure SQL Database using the Azure CLI - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure CLI script example creates elastic pools in Azure SQL Database, moves pooled databases, and changes elastic pool compute sizes. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../../includes/azure-cli-prepare-your-environment.md)] - -## Sample script - -[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../../includes/cli-launch-cloud-shell-sign-in.md)] - -### Run the script - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/scale-pool/scale-pool.sh" id="FullScript" - -## Clean up resources - -[!INCLUDE [cli-clean-up-resources.md](../../../../includes/cli-clean-up-resources.md)] - -```azurecli -az group delete --name $resourceGroup -``` - -## Sample reference - -This script uses the following commands. Each command in the table links to command-specific documentation. - -| Command | Description | -|---|---| -| [az sql server](/cli/azure/sql/server) | Server commands. | -| [az sql db](/cli/azure/sql/db) | Database commands. | -| [az sql elastic-pools](/cli/azure/sql/elastic-pool) | Elastic pool commands. | - -## Next steps - -For more information on the Azure CLI, see [Azure CLI documentation](/cli/azure). - -Additional SQL Database CLI script samples can be found in the [Azure SQL Database documentation](../az-cli-script-samples-content-guide.md). 
diff --git a/articles/azure-sql/database/scripts/setup-geodr-and-failover-database-powershell.md b/articles/azure-sql/database/scripts/setup-geodr-and-failover-database-powershell.md deleted file mode 100644 index 48cc617fa79c0..0000000000000 --- a/articles/azure-sql/database/scripts/setup-geodr-and-failover-database-powershell.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: "PowerShell: Configure active geo-replication for Azure SQL Database" -description: Use an Azure PowerShell example script to set up active geo-replication for Azure SQL Database and fail it over. -services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: PowerShell -ms.topic: sample -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 03/12/2019 ---- - -# Use PowerShell to configure active geo-replication for a database in Azure SQL Database - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure PowerShell script example configures active geo-replication for a database in Azure SQL Database and fails it over to a secondary replica of the database. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use PowerShell locally, this tutorial requires Az PowerShell 1.4.0 or later. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. 
- -## Sample scripts - -[!code-powershell-interactive[main](../../../../powershell_scripts/sql-database/setup-geodr-and-failover/setup-geodr-and-failover-single-database.ps1?highlight=18-21 "Set up active geo-replication for single database")] - -## Clean up deployment - -Use the following command to remove the resource group and all resources associated with it. - -```powershell -Remove-AzResourceGroup -ResourceGroupName $primaryresourcegroupname -Remove-AzResourceGroup -ResourceGroupName $secondaryresourcegroupname -``` - -## Script explanation - -This script uses the following commands. Each command in the table links to command-specific documentation. - -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates a resource group in which all resources are stored. | -| [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) | Creates a server that hosts databases and elastic pools. | -| [New-AzSqlElasticPool](/powershell/module/az.sql/new-azsqlelasticpool) | Creates an elastic pool. | -| [Set-AzSqlDatabase](/powershell/module/az.sql/set-azsqldatabase) | Updates database properties or moves a database into, out of, or between elastic pools. | -| [New-AzSqlDatabaseSecondary](/powershell/module/az.sql/new-azsqldatabasesecondary)| Creates a secondary database for an existing database and starts data replication. | -| [Get-AzSqlDatabase](/powershell/module/az.sql/get-azsqldatabase)| Gets one or more databases. | -| [Set-AzSqlDatabaseSecondary](/powershell/module/az.sql/set-azsqldatabasesecondary)| Switches a secondary database to be primary to initiate failover.| -| [Get-AzSqlDatabaseReplicationLink](/powershell/module/az.sql/get-azsqldatabasereplicationlink) | Gets the geo-replication links between an Azure SQL Database and a resource group or logical SQL server. 
| -| [Remove-AzSqlDatabaseSecondary](/powershell/module/az.sql/remove-azsqldatabasesecondary) | Terminates data replication between a database and the specified secondary database. | -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Deletes a resource group including all nested resources. | - - -## Next steps - -For more information on Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional SQL Database PowerShell script samples can be found in the [Azure SQL Database PowerShell scripts](../powershell-script-content-guide.md). diff --git a/articles/azure-sql/database/scripts/setup-geodr-and-failover-elastic-pool-powershell.md b/articles/azure-sql/database/scripts/setup-geodr-and-failover-elastic-pool-powershell.md deleted file mode 100644 index ce3ccbf213368..0000000000000 --- a/articles/azure-sql/database/scripts/setup-geodr-and-failover-elastic-pool-powershell.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: "PowerShell: Configure elastic pool active geo-replication" -description: Azure PowerShell example script to set up active geo-replication for a pooled database in Azure SQL Database and fail it over. -services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: PowerShell -ms.topic: sample -author: emlisa -ms.author: emlisa -ms.reviewer: kendralittle, mathoma -ms.date: 03/12/2019 ---- -# Use PowerShell to configure active geo-replication for a pooled database in Azure SQL Database - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure PowerShell script example configures active geo-replication for a pooled database in Azure SQL Database and fails it over to the secondary replica of the database. 
- -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use PowerShell locally, this tutorial requires Az PowerShell 1.4.0 or later. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. - -## Sample scripts - -[!code-powershell-interactive[main](../../../../powershell_scripts/sql-database/setup-geodr-and-failover/setup-geodr-and-failover-elastic-pool.ps1?highlight=17-20 "Set up active geo-replication for elastic pool")] - -## Clean up deployment - -Use the following command to remove the resource group and all resources associated with it. - -```powershell -Remove-AzResourceGroup -ResourceGroupName $primaryresourcegroupname -Remove-AzResourceGroup -ResourceGroupName $secondaryresourcegroupname -``` - -## Script explanation - -This script uses the following commands. Each command in the table links to command-specific documentation. - -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates a resource group in which all resources are stored. | -| [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) | Creates a server that hosts databases and elastic pools. | -| [New-AzSqlElasticPool](/powershell/module/az.sql/new-azsqlelasticpool) | Creates an elastic pool. | -| [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase) | Creates a database in a server. | -| [Set-AzSqlDatabase](/powershell/module/az.sql/set-azsqldatabase) | Updates database properties or moves a database into, out of, or between elastic pools. 
| -| [New-AzSqlDatabaseSecondary](/powershell/module/az.sql/new-azsqldatabasesecondary)| Creates a secondary database for an existing database and starts data replication. | -| [Get-AzSqlDatabase](/powershell/module/az.sql/get-azsqldatabase)| Gets one or more databases. | -| [Set-AzSqlDatabaseSecondary](/powershell/module/az.sql/set-azsqldatabasesecondary)| Switches a secondary database to be primary in order to initiate failover.| -| [Get-AzSqlDatabaseReplicationLink](/powershell/module/az.sql/get-azsqldatabasereplicationlink) | Gets the geo-replication links between an Azure SQL Database and a resource group or logical SQL server. | -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Deletes a resource group including all nested resources. | - - -## Next steps - -For more information on Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional SQL Database PowerShell script samples can be found in the [Azure SQL Database PowerShell scripts](../powershell-script-content-guide.md). diff --git a/articles/azure-sql/database/scripts/setup-geodr-failover-database-cli.md b/articles/azure-sql/database/scripts/setup-geodr-failover-database-cli.md deleted file mode 100644 index ec4aaaecd873d..0000000000000 --- a/articles/azure-sql/database/scripts/setup-geodr-failover-database-cli.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: "Azure CLI example: Active geo-replication-single Azure SQL Database" -description: Use this Azure CLI example script to set up active geo-replication for a single database in Azure SQL Database and fail it over. 
-services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: -ms.devlang: azurecli -ms.topic: sample -author: rothja -ms.author: jroth -ms.reviewer: mathoma -ms.date: 01/26/2022 ---- - -# Configure active geo-replication for a single database in Azure SQL Database using the Azure CLI - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure CLI script example configures active geo-replication for a single database and fails it over to a secondary replica of the database. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../../includes/azure-cli-prepare-your-environment.md)] - -## Sample script - -[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../../includes/cli-launch-cloud-shell-sign-in.md)] - -### Run the script - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/setup-geodr-and-failover/setup-geodr-and-failover-single-database.sh" id="FullScript"::: - -## Clean up resources - -[!INCLUDE [cli-clean-up-resources.md](../../../../includes/cli-clean-up-resources.md)] - -```azurecli -az group delete --name $resourceGroup -``` - -## Sample reference - -This script uses the following commands. Each command in the table links to command specific documentation. - -| Command | Description | -|---|---| -| [az sql db replica](/cli/azure/sql/db/replica) | Database replica commands. | - -## Next steps - -For more information on Azure CLI, see [Azure CLI documentation](/cli/azure). - -Additional SQL Database CLI script samples can be found in the [Azure SQL Database documentation](/azure/azure-sql/database/az-cli-script-samples-content-guide). 
diff --git a/articles/azure-sql/database/scripts/setup-geodr-failover-group-cli.md b/articles/azure-sql/database/scripts/setup-geodr-failover-group-cli.md deleted file mode 100644 index 0c292b541491d..0000000000000 --- a/articles/azure-sql/database/scripts/setup-geodr-failover-group-cli.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "Azure CLI example: Configure a failover group for a group of databases in Azure SQL Database" -description: Use this Azure CLI example script to set up a failover group for a set of databases in Azure SQL Database and fail it over. -services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: -ms.devlang: azurecli -ms.topic: sample -author: rothja -ms.author: jroth -ms.reviewer: mathoma -ms.date: 01/26/2022 ---- - -# Configure a failover group for a group of databases in Azure SQL Database using the Azure CLI - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../../includes/azure-cli-prepare-your-environment.md)] - -## Sample script - -[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../../includes/cli-launch-cloud-shell-sign-in.md)] - -### Run the script - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/setup-geodr-and-failover/setup-geodr-and-failover-database-failover-group.sh" id="FullScript"::: - -## Clean up resources - -[!INCLUDE [cli-clean-up-resources.md](../../../../includes/cli-clean-up-resources.md)] - -```azurecli -az group delete --name $failoverResourceGroup -y -az group delete --name $resourceGroup -``` - -## Sample reference - -This script uses the following commands. Each command in the table links to command specific documentation. - -| Command | Description | -|---|---| -| [az sql failover-group create](/cli/azure/sql/failover-group#az-sql-failover-group-create) | Creates a failover group. 
| -| [az sql failover-group set-primary](/cli/azure/sql/failover-group#az-sql-failover-group-set-primary) | Set the primary of the failover group by failing over all databases from the current primary server | -| [az sql failover-group show](/cli/azure/sql/failover-group) | Gets a failover group | -| [az sql failover-group delete](/cli/azure/sql/failover-group) | Deletes a failover group | - -## Next steps - -For more information on Azure CLI, see [Azure CLI documentation](/cli/azure). - -Additional SQL Database CLI script samples can be found in the [Azure SQL Database documentation](/azure/azure-sql/database/az-cli-script-samples-content-guide). diff --git a/articles/azure-sql/database/scripts/setup-geodr-failover-pool-cli.md b/articles/azure-sql/database/scripts/setup-geodr-failover-pool-cli.md deleted file mode 100644 index edc60a6b060aa..0000000000000 --- a/articles/azure-sql/database/scripts/setup-geodr-failover-pool-cli.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "Azure CLI example: Configure active geo-replication for an elastic pool" -description: Use this Azure CLI example script to set up active geo-replication for a pooled database in Azure SQL Database and fail it over. -services: sql-database -ms.service: sql-database -ms.subservice: high-availability -ms.custom: sqldbrb=1, devx-track-azurecli -ms.devlang: azurecli -ms.topic: sample -author: rothja -ms.author: jroth -ms.reviewer: mathoma -ms.date: 01/26/2022 ---- - -# Configure active geo-replication for a pooled database in Azure SQL Database using the Azure CLI - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure CLI script example configures active geo-replication for a pooled database in Azure SQL Database and fails it over to the secondary replica of the database. 
- -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../../includes/azure-cli-prepare-your-environment.md)] - -## Sample script - -[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../../includes/cli-launch-cloud-shell-sign-in.md)] - -### Run the script - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/setup-geodr-and-failover/setup-geodr-and-failover-elastic-pool.sh" id="FullScript"::: - -## Clean up resources - -[!INCLUDE [cli-clean-up-resources.md](../../../../includes/cli-clean-up-resources.md)] - -```azurecli -az group delete --name $resourceGroup -az group delete --name $secondaryResourceGroup -``` - -## Sample reference - -This script uses the following commands. Each command in the table links to command specific documentation. - -| Command | Description | -|---|---| -| [az sql elastic-pool](/cli/azure/sql/elastic-pool) | Elastic pool commands | -| [az sql db replica](/cli/azure/sql/db/replica) | Database replication commands. | - -## Next steps - -For more information on Azure CLI, see [Azure CLI documentation](/cli/azure). - -Additional SQL Database CLI script samples can be found in the [Azure SQL Database documentation](/azure/azure-sql/database/az-cli-script-samples-content-guide). diff --git a/articles/azure-sql/database/scripts/sql-data-sync-sync-data-between-azure-onprem.md b/articles/azure-sql/database/scripts/sql-data-sync-sync-data-between-azure-onprem.md deleted file mode 100644 index 39d73cb9f1b4f..0000000000000 --- a/articles/azure-sql/database/scripts/sql-data-sync-sync-data-between-azure-onprem.md +++ /dev/null @@ -1,326 +0,0 @@ ---- -title: "PowerShell: Sync data between SQL Database and SQL Server" -description: Use an Azure PowerShell example script to sync data between Azure SQL Database and SQL Server. 
-services: sql-database -ms.service: sql-database -ms.subservice: sql-data-sync -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: PowerShell -ms.topic: sample -author: rothja -ms.author: jroth -ms.reviewer: kendralittle, mathoma -ms.date: 03/12/2019 ---- - -# Use PowerShell to sync data between SQL Database and SQL Server - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure PowerShell example configures Data Sync to sync data between Azure SQL Database and SQL Server. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use PowerShell locally, this tutorial requires Az PowerShell 1.4.0 or later. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. - -For an overview of SQL Data Sync, see [Sync data across multiple cloud and on-premises databases with SQL Data Sync im Azure](../sql-data-sync-data-sql-server-sql-database.md). - -> [!IMPORTANT] -> SQL Data Sync does not support Azure SQL Managed Instance at this time. - -## Prerequisites - -- Create a database in Azure SQL Database from an AdventureWorksLT sample database as a hub database. -- Create a database in Azure SQL Database in the same region as sync database. -- Create a database in a SQL Server instance as a member database. -- Update the parameter placeholders before running the example. 
- -## Example - -```powershell-interactive -using namespace Microsoft.Azure.Commands.Sql.DataSync.Model -using namespace System.Collections.Generic - -# hub database info -$subscriptionId = "" -$resourceGroupName = "" -$serverName = "" -$databaseName = "" - -# sync database info -$syncDatabaseResourceGroupName = "" -$syncDatabaseServerName = "" -$syncDatabaseName = "" - -# sync group info -$syncGroupName = "" -$conflictResolutionPolicy = "HubWin" # can be HubWin or MemberWin -$intervalInSeconds = 300 # sync interval in seconds (must be no less than 300) - -# member database info -$syncMemberName = "" -$memberServerName = "" -$memberDatabaseName = "" -$memberDatabaseType = "SqlServerDatabase" # can be AzureSqlDatabase or SqlServerDatabase -$syncDirection = "Bidirectional" # can be Bidirectional, Onewaymembertohub, Onewayhubtomember - -# sync agent info -$syncAgentName = "" -$syncAgentResourceGroupName = "" -$syncAgentServerName = "" - -# temp file to save the sync schema -$tempFile = $env:TEMP+"\syncSchema.json" - -# list of included columns and tables in quoted name -$includedColumnsAndTables = "[SalesLT].[Address].[AddressID]", - "[SalesLT].[Address].[AddressLine2]", - "[SalesLT].[Address].[rowguid]", - "[SalesLT].[Address].[PostalCode]", - "[SalesLT].[ProductDescription]" -$metadataList = [System.Collections.ArrayList]::new($includedColumnsAndTables) - -Connect-AzAccount -Select-AzSubscription -SubscriptionId $subscriptionId - -# use if it's safe to show password in script, otherwise use PromptForCredential -# $user = "username" -# $password = ConvertTo-SecureString -String "password" -AsPlainText -Force -# $credential = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $user, $password - -$credential = $Host.ui.PromptForCredential("Need credential", - "Please enter your user name and password for server "+$serverName+".database.windows.net", - "", - "") - -# create a new sync agent -Write-Host "Creating new Sync Agent..." 
-New-AzSqlSyncAgent -ResourceGroupName $resourceGroupName -ServerName $serverName -SyncDatabaseName $syncDatabaseName -SyncAgentName $syncAgentName - -# generate agent key -Write-Host "Generating Agent Key..." -$agentKey = New-AzSqlSyncAgentKey -ResourceGroupName $resourceGroupName -ServerName $serverName -SyncAgentName $syncAgentName -Write-Host "Use your agent key to configure the sync agent. Do this before proceeding." -$agentkey - -# DO THE FOLLOWING BEFORE THE NEXT STEP -# Install the on-premises sync agent on your machine and register the sync agent using the agent key generated above to bring the sync agent online. -# Add the SQL Server database information including server name, database name, user name, password on the configuration tool within the sync agent. - -# create a new sync group -Write-Host "Creating Sync Group "$syncGroupName"..." -New-AzSqlSyncGroup -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName -Name $syncGroupName ` - -SyncDatabaseName $syncDatabaseName -SyncDatabaseServerName $syncDatabaseServerName -SyncDatabaseResourceGroupName $syncDatabaseResourceGroupName ` - -ConflictResolutionPolicy $conflictResolutionPolicy -DatabaseCredential $credential - -# use if it's safe to show password in script, otherwise use PromptForCredential -#$user = "username" -#$password = ConvertTo-SecureString -String "password" -AsPlainText -Force -#$credential = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $user, $password - -$credential = $Host.ui.PromptForCredential("Need credential", - "Please enter your user name and password for server "+$memberServerName, - "", - "") - -# get information from sync agent and confirm your SQL Server instance was configured (note the database ID to use for the sqlServerDatabaseID in the next step) -$syncAgentInfo = Get-AzSqlSyncAgentLinkedDatabase -ResourceGroupName $resourceGroupName -ServerName $serverName -SyncAgentName $syncAgentName - -# add a new 
sync member -Write-Host "Adding member"$syncMemberName" to the sync group..." - -New-AzSqlSyncMember -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName ` - -SyncGroupName $syncGroupName -Name $syncMemberName -MemberDatabaseType $memberDatabaseType -SyncAgentResourceGroupName $syncAgentResourceGroupName ` - -SyncAgentServerName $syncAgentServerName -SyncAgentName $syncAgentName -SyncDirection $syncDirection -SqlServerDatabaseID $syncAgentInfo.DatabaseId - -# refresh database schema from hub database, specify the -SyncMemberName parameter if you want to refresh schema from the member database -Write-Host "Refreshing database schema from hub database..." -$startTime = Get-Date -Update-AzSqlSyncSchema -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName -SyncGroupName $syncGroupName - -# waiting for successful refresh -$startTime = $startTime.ToUniversalTime() -$timer=0 -$timeout=90 - -# check the log and see if refresh has gone through -Write-Host "Check for successful refresh..." -$isSucceeded = $false -while ($isSucceeded -eq $false) { - Start-Sleep -s 10 - $timer=$timer+10 - $details = Get-AzSqlSyncSchema -SyncGroupName $syncGroupName -ServerName $serverName -DatabaseName $databaseName -ResourceGroupName $resourceGroupName - if ($details.LastUpdateTime -gt $startTime) { - Write-Host "Refresh was successful" - $isSucceeded = $true - } - if ($timer -eq $timeout) { - Write-Host "Refresh timed out" - break; - } -} - -# get the database schema -Write-Host "Adding tables and columns to the sync schema..." 
-$databaseSchema = Get-AzSqlSyncSchema -ResourceGroupName $ResourceGroupName -ServerName $ServerName ` - -DatabaseName $DatabaseName -SyncGroupName $SyncGroupName ` - -$databaseSchema | ConvertTo-Json -depth 5 -Compress | Out-File "C:\Users\OnPremiseServer\AppData\Local\Temp\syncSchema.json" - -$newSchema = [AzureSqlSyncGroupSchemaModel]::new() -$newSchema.Tables = [List[AzureSqlSyncGroupSchemaTableModel]]::new(); - -# add columns and tables to the sync schema -foreach ($tableSchema in $databaseSchema.Tables) { - $newTableSchema = [AzureSqlSyncGroupSchemaTableModel]::new() - $newTableSchema.QuotedName = $tableSchema.QuotedName - $newTableSchema.Columns = [List[AzureSqlSyncGroupSchemaColumnModel]]::new(); - $addAllColumns = $false - if ($MetadataList.Contains($tableSchema.QuotedName)) { - if ($tableSchema.HasError) { - $fullTableName = $tableSchema.QuotedName - Write-Host "Can't add table $fullTableName to the sync schema" -foregroundcolor "Red" - Write-Host $tableSchema.ErrorId -foregroundcolor "Red" - continue; - } - else { - $addAllColumns = $true - } - } - foreach($columnSchema in $tableSchema.Columns) { - $fullColumnName = $tableSchema.QuotedName + "." 
+ $columnSchema.QuotedName - if ($addAllColumns -or $MetadataList.Contains($fullColumnName)) { - if ((-not $addAllColumns) -and $tableSchema.HasError) { - Write-Host "Can't add column $fullColumnName to the sync schema" -foregroundcolor "Red" - Write-Host $tableSchema.ErrorId -foregroundcolor "Red" - } - elseif ((-not $addAllColumns) -and $columnSchema.HasError) { - Write-Host "Can't add column $fullColumnName to the sync schema" -foregroundcolor "Red" - Write-Host $columnSchema.ErrorId -foregroundcolor "Red" - } - else { - Write-Host "Adding"$fullColumnName" to the sync schema" - $newColumnSchema = [AzureSqlSyncGroupSchemaColumnModel]::new() - $newColumnSchema.QuotedName = $columnSchema.QuotedName - $newColumnSchema.DataSize = $columnSchema.DataSize - $newColumnSchema.DataType = $columnSchema.DataType - $newTableSchema.Columns.Add($newColumnSchema) - } - } - } - if ($newTableSchema.Columns.Count -gt 0) { - $newSchema.Tables.Add($newTableSchema) - } -} - -# convert sync schema to JSON format -$schemaString = $newSchema | ConvertTo-Json -depth 5 -Compress - -# workaround a powershell bug -$schemaString = $schemaString.Replace('"Tables"', '"tables"').Replace('"Columns"', '"columns"').Replace('"QuotedName"', '"quotedName"').Replace('"MasterSyncMemberName"','"masterSyncMemberName"') - -# save the sync schema to a temp file -$schemaString | Out-File $tempFile - -# update sync schema -Write-Host "Updating the sync schema..." -Update-AzSqlSyncGroup -ResourceGroupName $resourceGroupName -ServerName $serverName ` - -DatabaseName $databaseName -Name $syncGroupName -Schema $tempFile - -$syncLogStartTime = Get-Date - -# trigger sync manually -Write-Host "Trigger sync manually..." -Start-AzSqlSyncGroupSync -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName -SyncGroupName $syncGroupName - -# check the sync log and wait until the first sync succeeded -Write-Host "Check the sync log..." 
-$isSucceeded = $false -for ($i = 0; ($i -lt 300) -and (-not $isSucceeded); $i = $i + 10) { - Start-Sleep -s 10 - $syncLogEndTime = Get-Date - $syncLogList = Get-AzSqlSyncGroupLog -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName ` - -SyncGroupName $syncGroupName -StartTime $syncLogStartTime.ToUniversalTime() -EndTime $syncLogEndTime.ToUniversalTime() - - if ($synclogList.Length -gt 0) { - foreach ($syncLog in $syncLogList) { - if ($syncLog.Details.Contains("Sync completed successfully")) { - Write-Host $syncLog.TimeStamp : $syncLog.Details - $isSucceeded = $true - } - } - } -} - -if ($isSucceeded) { - # enable scheduled sync - Write-Host "Enable the scheduled sync with 300 seconds interval..." - Update-AzSqlSyncGroup -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName ` - -Name $syncGroupName -IntervalInSeconds $intervalInSeconds -} -else { - # output all log if sync doesn't succeed in 300 seconds - $syncLogEndTime = Get-Date - $syncLogList = Get-AzSqlSyncGroupLog -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName ` - -SyncGroupName $syncGroupName -StartTime $syncLogStartTime.ToUniversalTime() -EndTime $syncLogEndTime.ToUniversalTime() - - if ($synclogList.Length -gt 0) { - foreach ($syncLog in $syncLogList) { - Write-Host $syncLog.TimeStamp : $syncLog.Details - } - } -} -``` - -## Clean up deployment - -After you run the sample script, you can run the following command to remove the resource group and all resources associated with it. - -```powershell -Remove-AzResourceGroup -ResourceGroupName $resourceGroupName -Remove-AzResourceGroup -ResourceGroupName $syncDatabaseResourceGroupName -``` - -## Script explanation - -This script uses the following commands. Each command in the table links to command-specific documentation. 
- -| Command | Notes | -|---|---| -| [New-AzSqlSyncAgent](/powershell/module/az.sql/New-azSqlSyncAgent) | Creates a new Sync Agent. | -| [New-AzSqlSyncAgentKey](/powershell/module/az.sql/New-azSqlSyncAgentKey) | Generates the agent key associated with the Sync Agent. | -| [Get-AzSqlSyncAgentLinkedDatabase](/powershell/module/az.sql/Get-azSqlSyncAgentLinkedDatabase) | Get all the information for the Sync Agent. | -| [New-AzSqlSyncMember](/powershell/module/az.sql/New-azSqlSyncMember) | Add a new member to the Sync Group. | -| [Update-AzSqlSyncSchema](/powershell/module/az.sql/Update-azSqlSyncSchema) | Refreshes the database schema information. | -| [Get-AzSqlSyncSchema](/powershell/module/az.sql/Get-azSqlSyncSchema) | Get the database schema information. | -| [Update-AzSqlSyncGroup](/powershell/module/az.sql/Update-azSqlSyncGroup) | Updates the Sync Group. | -| [Start-AzSqlSyncGroupSync](/powershell/module/az.sql/Start-azSqlSyncGroupSync) | Triggers a sync. | -| [Get-AzSqlSyncGroupLog](/powershell/module/az.sql/Get-azSqlSyncGroupLog) | Checks the Sync Log. | - - -## Next steps - -For more information about Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional SQL Database PowerShell script samples can be found in [Azure SQL Database PowerShell scripts](../powershell-script-content-guide.md). 
- -For more information about SQL Data Sync, see: - -- Overview - [Sync data across multiple cloud and on-premises databases with Azure SQL Data Sync](../sql-data-sync-data-sql-server-sql-database.md) -- Set up Data Sync - - Use the Azure portal - [Tutorial: Set up SQL Data Sync to sync data between a database in Azure SQL Database and a SQL Server on-premises database](../sql-data-sync-sql-server-configure.md) - - Use PowerShell - [Use PowerShell to sync between multiple databases in Azure SQL Database](sql-data-sync-sync-data-between-sql-databases.md) -- Data Sync Agent - [Data Sync Agent for SQL Data Sync in Azure](../sql-data-sync-agent-overview.md) -- Best practices - [Best practices for SQL Data Sync in Azure](../sql-data-sync-best-practices.md) -- Monitor - [Monitor SQL Data Sync with Azure Monitor logs](../monitor-tune-overview.md) -- Troubleshoot - [Troubleshoot issues with SQL Data Sync in Azure](../sql-data-sync-troubleshoot.md) -- Update the sync schema - - Use Transact-SQL - [Automate the replication of schema changes in SQL Data Sync in Azure](../sql-data-sync-update-sync-schema.md) - - Use PowerShell - [Use PowerShell to update the sync schema in an existing sync group](update-sync-schema-in-sync-group.md) - -For more information about Azure SQL Database, see: - -- [SQL Database overview](../sql-database-paas-overview.md) -- [Database Lifecycle Management](/previous-versions/sql/sql-server-guides/jj907294(v=sql.110)) diff --git a/articles/azure-sql/database/scripts/sql-data-sync-sync-data-between-sql-databases-rest-api.md b/articles/azure-sql/database/scripts/sql-data-sync-sync-data-between-sql-databases-rest-api.md deleted file mode 100644 index 202bb979995f7..0000000000000 --- a/articles/azure-sql/database/scripts/sql-data-sync-sync-data-between-sql-databases-rest-api.md +++ /dev/null @@ -1,324 +0,0 @@ ---- -title: "REST API: Sync between multiple databases" -description: Use a REST API example script to sync between multiple databases. 
-services: sql-database -ms.service: sql-database -ms.subservice: sql-data-sync -ms.custom: sqldbrb=1 -ms.devlang: rest-api -ms.topic: sample -author: rothja -ms.author: jroth -ms.reviewer: kendralittle, mathoma -ms.date: 03/12/2019 ---- - -# Use REST API to sync data between multiple databases - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This REST API example configures SQL Data Sync to sync data between multiple databases. - -For an overview of SQL Data Sync, see [Sync data across multiple cloud and on-premises databases with SQL Data Sync in Azure](../sql-data-sync-data-sql-server-sql-database.md). - -> [!IMPORTANT] -> SQL Data Sync does not support Azure SQL Managed Instance at this time. - -## Create sync group - -Use the [create or update](/rest/api/sql/syncgroups/createorupdate) template to create a sync group. - -When creating a sync group, do not pass in the sync schema (table\column) and do not pass in masterSyncMemberName, because at this time sync group does not have table\column information yet. 
- -Sample request for creating a sync group: - -```http -PUT https://management.azure.com/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-65440/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328/syncGroups/syncgroupcrud-3187?api-version=2015-05-01-preview -``` - -```json -{ - "properties": { - "interval": -1, - "lastSyncTime": "0001-01-01T08:00:00Z", - "conflictResolutionPolicy": "HubWin", - "syncDatabaseId": "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-3521/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328", - "hubDatabaseUserName": "hubUser" - } -} -``` - -Sample response for creating a sync group: - -Status code: 200 -```json -{ - "properties": { - "interval": -1, - "lastSyncTime": "0001-01-01T08:00:00Z", - "conflictResolutionPolicy": "HubWin", - "syncDatabaseId": "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-3521/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328", - "hubDatabaseUserName": "hubUser", - "syncState": "NotReady" - }, - "id": "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-3521/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328/syncGroups/syncgroupcrud-3187", - "name": "syncgroupcrud-3187", - "type": "Microsoft.Sql/servers/databases/syncGroups" -} -``` - -Status code: 201 -```json -{ - "properties": { - "interval": -1, - "lastSyncTime": "0001-01-01T08:00:00Z", - "conflictResolutionPolicy": "HubWin", - "syncDatabaseId": "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-3521/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328", - "hubDatabaseUserName": "hubUser", - "syncState": "NotReady" - }, - "id": 
"/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-3521/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328/syncGroups/syncgroupcrud-3187", - "name": "syncgroupcrud-3187", - "type": "Microsoft.Sql/servers/databases/syncGroups" -} -``` - -## Create sync member - -Use the [create or update](/rest/api/sql/syncmembers/createorupdate) template to create a sync member. - -Sample request for creating a sync member: - -```http -PUT https://management.azure.com/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-65440/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328/syncGroups/syncgroupcrud-3187/syncMembers/syncgroupcrud-4879?api-version=2015-05-01-preview -``` - -```json -{ - "properties": { - "databaseType": "AzureSqlDatabase", - "serverName": "syncgroupcrud-3379.database.windows.net", - "databaseName": "syncgroupcrud-7421", - "userName": "myUser", - "syncDirection": "Bidirectional", - "syncState": "UnProvisioned" - } -} -``` -Sample response for creating a sync member: - -Status code:200 -```json -{ - "properties": { - "databaseType": "AzureSqlDatabase", - "serverName": "syncgroupcrud-3379.database.windows.net", - "databaseName": "syncgroupcrud-7421", - "userName": "myUser", - "syncDirection": "Bidirectional", - "syncState": "UnProvisioned" - }, - "id": "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-65440/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328/syncGroups/syncgroupcrud-3187/syncMembers/syncgroupcrud-4879", - "name": "syncgroupcrud-4879", - "type": "Microsoft.Sql/servers/databases/syncGroups/syncMembers" -} -``` - -Status code:201 -```json -{ - "properties": { - "databaseType": "AzureSqlDatabase", - "serverName": "syncgroupcrud-3379.database.windows.net", - "databaseName": "syncgroupcrud-7421", - "userName": "myUser", - "syncDirection": "Bidirectional", - "syncState": 
"UnProvisioned" - }, - "id": "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-65440/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328/syncGroups/syncgroupcrud-3187/syncMembers/syncgroupcrud-4879", - "name": "syncgroupcrud-4879", - "type": "Microsoft.Sql/servers/databases/syncGroups/syncMembers" -} -``` - -## Refresh schema - -Once your sync group is created successfully, refresh schema using the following templates. - -Use the [refresh hub schema](/rest/api/sql/syncgroups/refreshhubschema) template to refresh the schema for the hub database. - -Sample request for refreshing a hub database schema: - -```http -POST https://management.azure.com/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-65440/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328/syncGroups/syncgroupcrud-3187/refreshHubSchema?api-version=2015-05-01-preview -``` - -Sample response for refreshing a hub database schema: - -Status code: 200 - -Status code: 202 - -Use the [list hub schemas](/rest/api/sql/syncgroups/listhubschemas) template to list the hub database schema. - -Use the [refresh member schema](/rest/api/sql/syncmembers/refreshmemberschema) template to refresh the member database schema. - -Use the [list member schema](/rest/api/sql/syncmembers/listmemberschemas) template to list member database schema. - -Only proceed to the next step once your schema refreshes successfully. - -## Update sync group - -Use the [create or update](/rest/api/sql/syncgroups/createorupdate) template to update your sync group. - -Update sync group by specifying the sync schema. Include your schema and masterSyncMemberName, which is the name that holds the schema you want to use. 
- -Sample request for updating sync group: - -```http -PUT https://management.azure.com/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-65440/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328/syncGroups/syncgroupcrud-3187?api-version=2015-05-01-preview -``` - -```json -{ - "properties": { - "interval": -1, - "lastSyncTime": "0001-01-01T08:00:00Z", - "conflictResolutionPolicy": "HubWin", - "syncDatabaseId": "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-3521/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328", - "hubDatabaseUserName": "hubUser" - } -} -``` - -Sample response for updating sync group: - -```json -{ - "properties": { - "interval": -1, - "lastSyncTime": "0001-01-01T08:00:00Z", - "conflictResolutionPolicy": "HubWin", - "syncDatabaseId": "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-3521/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328", - "hubDatabaseUserName": "hubUser", - "syncState": "NotReady" - }, - "id": "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-3521/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328/syncGroups/syncgroupcrud-3187", - "name": "syncgroupcrud-3187", - "type": "Microsoft.Sql/servers/databases/syncGroups" -} -``` - -```json -{ - "properties": { - "interval": -1, - "lastSyncTime": "0001-01-01T08:00:00Z", - "conflictResolutionPolicy": "HubWin", - "syncDatabaseId": "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-3521/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328", - "hubDatabaseUserName": "hubUser", - "syncState": "NotReady" - }, - "id": 
"/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-3521/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328/syncGroups/syncgroupcrud-3187", - "name": "syncgroupcrud-3187", - "type": "Microsoft.Sql/servers/databases/syncGroups" -} -``` -## Update sync member - -Use the [create or update](/rest/api/sql/syncmembers/createorupdate) template to update your sync member. - -Sample request for updating a sync member: - -```http -PUT https://management.azure.com/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-65440/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328/syncGroups/syncgroupcrud-3187/syncMembers/syncgroupcrud-4879?api-version=2015-05-01-preview -``` - -```json -{ - "properties": { - "databaseType": "AzureSqlDatabase", - "serverName": "syncgroupcrud-3379.database.windows.net", - "databaseName": "syncgroupcrud-7421", - "userName": "myUser", - "syncDirection": "Bidirectional", - "syncState": "UnProvisioned" - } -} -``` - -Sample response for updating a sync member: - -Status code: 200 -```json -{ - "properties": { - "databaseType": "AzureSqlDatabase", - "serverName": "syncgroupcrud-3379.database.windows.net", - "databaseName": "syncgroupcrud-7421", - "userName": "myUser", - "syncDirection": "Bidirectional", - "syncState": "UnProvisioned" - }, - "id": "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-65440/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328/syncGroups/syncgroupcrud-3187/syncMembers/syncgroupcrud-4879", - "name": "syncgroupcrud-4879", - "type": "Microsoft.Sql/servers/databases/syncGroups/syncMembers" -} -``` - -Status code: 201 -```json -{ - "properties": { - "databaseType": "AzureSqlDatabase", - "serverName": "syncgroupcrud-3379.database.windows.net", - "databaseName": "syncgroupcrud-7421", - "userName": "myUser", - "syncDirection": "Bidirectional", - "syncState": 
"UnProvisioned" - }, - "id": "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-65440/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328/syncGroups/syncgroupcrud-3187/syncMembers/syncgroupcrud-4879", - "name": "syncgroupcrud-4879", - "type": "Microsoft.Sql/servers/databases/syncGroups/syncMembers" -} -``` - -## Trigger sync - -Use the [trigger sync](/rest/api/sql/syncgroups/triggersync) template to trigger a sync operation. - -Sample request for triggering sync operation: - -```http -POST https://management.azure.com/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/syncgroupcrud-65440/providers/Microsoft.Sql/servers/syncgroupcrud-8475/databases/syncgroupcrud-4328/syncGroups/syncgroupcrud-3187/triggerSync?api-version=2015-05-01-preview -``` - -Sample response for triggering sync operation: - -Status code: 200 - -## Next steps - -For more information about Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional SQL Database PowerShell script samples can be found in [Azure SQL Database PowerShell scripts](../powershell-script-content-guide.md). 
- -For more information about SQL Data Sync, see: - -- Overview - [Sync data across multiple cloud and on-premises databases with SQL Data Sync in Azure](../sql-data-sync-data-sql-server-sql-database.md) -- Set up Data Sync - - Use the Azure portal - [Tutorial: Set up SQL Data Sync to sync data between Azure SQL Database and SQL Server](../sql-data-sync-sql-server-configure.md) - - Use PowerShell - [Use PowerShell to sync data between a database in Azure SQL Database and SQL Server](sql-data-sync-sync-data-between-azure-onprem.md) -- Data Sync Agent - [Data Sync Agent for SQL Data Sync in Azure](../sql-data-sync-agent-overview.md) -- Best practices - [Best practices for SQL Data Sync in Azure](../sql-data-sync-best-practices.md) -- Monitor - [Monitor SQL Data Sync with Azure Monitor logs](../monitor-tune-overview.md) -- Troubleshoot - [Troubleshoot issues with SQL Data Sync in Azure](../sql-data-sync-troubleshoot.md) -- Update the sync schema - - Use Transact-SQL - [Automate the replication of schema changes in SQL Data Sync in Azure](../sql-data-sync-update-sync-schema.md) - - Use PowerShell - [Use PowerShell to update the sync schema in an existing sync group](update-sync-schema-in-sync-group.md) - -For more information about SQL Database, see: - -- [SQL Database overview](../sql-database-paas-overview.md) -- [Database Lifecycle Management](/previous-versions/sql/sql-server-guides/jj907294(v=sql.110)) diff --git a/articles/azure-sql/database/scripts/sql-data-sync-sync-data-between-sql-databases.md b/articles/azure-sql/database/scripts/sql-data-sync-sync-data-between-sql-databases.md deleted file mode 100644 index f7927865e7a99..0000000000000 --- a/articles/azure-sql/database/scripts/sql-data-sync-sync-data-between-sql-databases.md +++ /dev/null @@ -1,331 +0,0 @@ ---- -title: "PowerShell: Sync between multiple databases in Azure SQL Database" -description: Use an Azure PowerShell example script to sync between multiple databases in Azure SQL Database. 
-services: sql-database -ms.service: sql-database -ms.subservice: sql-data-sync -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: PowerShell -ms.topic: sample -author: rothja -ms.author: jroth -ms.reviewer: kendralittle, mathoma -ms.date: 03/12/2019 ---- - -# Use PowerShell to sync data between multiple databases in Azure SQL Database - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure PowerShell example configures SQL Data Sync to sync data between multiple databases in Azure SQL Database. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use PowerShell locally, this tutorial requires Az PowerShell 1.4.0 or later. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. - -For an overview of SQL Data Sync, see [Sync data across multiple cloud and on-premises databases with SQL Data Sync in Azure](../sql-data-sync-data-sql-server-sql-database.md). - -> [!IMPORTANT] -> SQL Data Sync does not support Azure SQL Managed Instance at this time. - -## Prerequisites - -- Create a database in Azure SQL Database from an AdventureWorksLT sample database as a hub database. -- Create a database in Azure SQL Database in the same region as the sync database. -- Update the parameter placeholders before running the example. 
- -## Example - -```powershell-interactive -using namespace Microsoft.Azure.Commands.Sql.DataSync.Model -using namespace System.Collections.Generic - -# hub database info -$subscriptionId = "" -$resourceGroupName = "" -$serverName = "" -$databaseName = "" - -# sync database info -$syncDatabaseResourceGroupName = "" -$syncDatabaseServerName = "" -$syncDatabaseName = "" - -# sync group info -$syncGroupName = "" -$conflictResolutionPolicy = "HubWin" # can be HubWin or MemberWin -$intervalInSeconds = 300 # sync interval in seconds (must be no less than 300) - -# member database info -$syncMemberName = "" -$memberServerName = "" -$memberDatabaseName = "" -$memberDatabaseType = "SqlServerDatabase" # can be AzureSqlDatabase or SqlServerDatabase -$syncDirection = "Bidirectional" # can be Bidirectional, Onewaymembertohub, Onewayhubtomember - -# sync agent info -$syncAgentName = "" -$syncAgentResourceGroupName = "" -$syncAgentServerName = "" - -$syncMemberResourceId = "/subscriptions/$subscriptionId/resourceGroups/$resourceGroupName/providers/Microsoft.Sql/servers/$serverName/databases/$syncMemberDBName" - -# temp file to save the sync schema -$tempFile = $env:TEMP+"\syncSchema.json" - -# list of included columns and tables in quoted name -$includedColumnsAndTables = "[SalesLT].[Address].[AddressID]", - "[SalesLT].[Address].[AddressLine2]", - "[SalesLT].[Address].[rowguid]", - "[SalesLT].[Address].[PostalCode]", - "[SalesLT].[ProductDescription]" -$metadataList = [System.Collections.ArrayList]::new($includedColumnsAndTables) - -Connect-AzAccount -Select-AzSubscription -SubscriptionId $subscriptionId - -# use if it's safe to show password in script, otherwise use PromptForCredential -# $user = "username" -# $password = ConvertTo-SecureString -String "password" -AsPlainText -Force -# $credential = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $user, $password - -$credential = $Host.ui.PromptForCredential("Need credential", - "Please enter your 
user name and password for server "+$serverName+".database.windows.net", - "", - "") - -# create a new sync group (if you use private link, make sure to manually approve it) -Write-Host "Creating Sync Group "$syncGroupName"..." -New-AzSqlSyncGroup -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName -Name $syncGroupName ` - -SyncDatabaseName $syncDatabaseName -SyncDatabaseServerName $syncDatabaseServerName -SyncDatabaseResourceGroupName $syncDatabaseResourceGroupName ` - -ConflictResolutionPolicy $conflictResolutionPolicy -DatabaseCredential $credential -UsePrivateLinkConnection | Format-list - -# use if it's safe to show password in script, otherwise use PromptForCredential -# $user = "username" -# $password = ConvertTo-SecureString -String "password" -AsPlainText -Force -# $credential = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $user, $password - -$credential = $Host.ui.PromptForCredential("Need credential", - "Please enter your user name and password for server "+$serverName+".database.windows.net", - "", - "") - -# add a new sync member (if you use private link, make sure to manually approve it) -Write-Host "Adding member"$syncMemberName" to the sync group..." 
-New-AzSqlSyncMember -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName ` - -SyncGroupName $syncGroupName -Name $syncMemberName -MemberDatabaseType $memberDatabaseType -SyncAgentResourceGroupName $syncAgentResourceGroupName ` - -SyncAgentServerName $syncAgentServerName -SyncAgentName $syncAgentName -SyncDirection $syncDirection -SqlServerDatabaseID $syncAgentInfo.DatabaseId ` - -SyncMemberAzureDatabaseResourceId $syncMemberResourceId -UsePrivateLinkConnection | Format-list - -# update existing sync member to use private link connection -Update-AzSqlSyncMember ` - -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName -SyncGroupName $syncGroupName -Name $syncMemberName ` - -MemberDatabaseCredential $memberDatabaseCredential -SyncMemberAzureDatabaseResourceId $syncMemberResourceId -UsePrivateLinkConnection $true - -# update existing sync group and remove private link connection -Update-AzSqlSyncGroup ` - -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName -Name $syncGroupName -UsePrivateLinkConnection $false - -# run the following Get-AzSqlSyncGroup/ Get-AzSqlSyncMember commands to confirm that a private link has been setup for Data Sync, if you decide to use private link. -# Get-AzSqlSyncMember returns information about one or more Azure SQL Database Sync Members. Specify the name of a sync member to see information for only that sync member. -Get-AzSqlSyncMember ` - -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName -SyncGroupName $syncGroupName -Name $syncMemberName ` | Format-List -# Get-AzSqlSyncGroup returns information about one or more Azure SQL Database Sync Groups. Specify the name of a sync group to see information for only that sync group. 
-Get-AzSqlSyncGroup ` - -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName ` | Format-List - -# approve private endpoint connection, if you decide to use private link -Approve-AzPrivateEndpointConnection ` - -Name myPrivateEndpointConnection -ResourceGroupName myResourceGroup -ServiceName myPrivateLinkService - -# refresh database schema from hub database, specify the -SyncMemberName parameter if you want to refresh schema from the member database -Write-Host "Refreshing database schema from hub database..." -$startTime = Get-Date -Update-AzSqlSyncSchema -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName -SyncGroupName $syncGroupName - -# waiting for successful refresh -$startTime = $startTime.ToUniversalTime() -$timer=0 -$timeout=90 - -# check the log and see if refresh has gone through -Write-Host "Check for successful refresh..." -$isSucceeded = $false -while ($isSucceeded -eq $false) { - Start-Sleep -s 10 - $timer=$timer+10 - $details = Get-AzSqlSyncSchema -SyncGroupName $syncGroupName -ServerName $serverName -DatabaseName $databaseName -ResourceGroupName $resourceGroupName - if ($details.LastUpdateTime -gt $startTime) { - Write-Host "Refresh was successful" - $isSucceeded = $true - } - if ($timer -eq $timeout) { - Write-Host "Refresh timed out" - break; - } -} - -# get the database schema -Write-Host "Adding tables and columns to the sync schema..." 
-$databaseSchema = Get-AzSqlSyncSchema -ResourceGroupName $ResourceGroupName -ServerName $ServerName ` - -DatabaseName $DatabaseName -SyncGroupName $SyncGroupName ` - -$databaseSchema | ConvertTo-Json -depth 5 -Compress | Out-File "C:\Users\OnPremiseServer\AppData\Local\Temp\syncSchema.json" - -$newSchema = [AzureSqlSyncGroupSchemaModel]::new() -$newSchema.Tables = [List[AzureSqlSyncGroupSchemaTableModel]]::new(); - -# add columns and tables to the sync schema -foreach ($tableSchema in $databaseSchema.Tables) { - $newTableSchema = [AzureSqlSyncGroupSchemaTableModel]::new() - $newTableSchema.QuotedName = $tableSchema.QuotedName - $newTableSchema.Columns = [List[AzureSqlSyncGroupSchemaColumnModel]]::new(); - $addAllColumns = $false - if ($MetadataList.Contains($tableSchema.QuotedName)) { - if ($tableSchema.HasError) { - $fullTableName = $tableSchema.QuotedName - Write-Host "Can't add table $fullTableName to the sync schema" -foregroundcolor "Red" - Write-Host $tableSchema.ErrorId -foregroundcolor "Red" - continue; - } - else { - $addAllColumns = $true - } - } - foreach($columnSchema in $tableSchema.Columns) { - $fullColumnName = $tableSchema.QuotedName + "." 
+ $columnSchema.QuotedName - if ($addAllColumns -or $MetadataList.Contains($fullColumnName)) { - if ((-not $addAllColumns) -and $tableSchema.HasError) { - Write-Host "Can't add column $fullColumnName to the sync schema" -foregroundcolor "Red" - Write-Host $tableSchema.ErrorId -foregroundcolor "Red" - } - elseif ((-not $addAllColumns) -and $columnSchema.HasError) { - Write-Host "Can't add column $fullColumnName to the sync schema" -foregroundcolor "Red" - Write-Host $columnSchema.ErrorId -foregroundcolor "Red" - } - else { - Write-Host "Adding"$fullColumnName" to the sync schema" - $newColumnSchema = [AzureSqlSyncGroupSchemaColumnModel]::new() - $newColumnSchema.QuotedName = $columnSchema.QuotedName - $newColumnSchema.DataSize = $columnSchema.DataSize - $newColumnSchema.DataType = $columnSchema.DataType - $newTableSchema.Columns.Add($newColumnSchema) - } - } - } - if ($newTableSchema.Columns.Count -gt 0) { - $newSchema.Tables.Add($newTableSchema) - } -} - -# convert sync schema to JSON format -$schemaString = $newSchema | ConvertTo-Json -depth 5 -Compress - -# work around a PowerShell bug -$schemaString = $schemaString.Replace('"Tables"', '"tables"').Replace('"Columns"', '"columns"').Replace('"QuotedName"', '"quotedName"').Replace('"MasterSyncMemberName"','"masterSyncMemberName"') - -# save the sync schema to a temp file -$schemaString | Out-File $tempFile - -# update sync schema -Write-Host "Updating the sync schema..." -Update-AzSqlSyncGroup -ResourceGroupName $resourceGroupName -ServerName $serverName ` - -DatabaseName $databaseName -Name $syncGroupName -Schema $tempFile - -$syncLogStartTime = Get-Date - -# trigger sync manually -Write-Host "Trigger sync manually..." -Start-AzSqlSyncGroupSync -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName -SyncGroupName $syncGroupName - -# check the sync log and wait until the first sync succeeded -Write-Host "Check the sync log..." 
-$isSucceeded = $false -for ($i = 0; ($i -lt 300) -and (-not $isSucceeded); $i = $i + 10) { - Start-Sleep -s 10 - $syncLogEndTime = Get-Date - $syncLogList = Get-AzSqlSyncGroupLog -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName ` - -SyncGroupName $syncGroupName -StartTime $syncLogStartTime.ToUniversalTime() -EndTime $syncLogEndTime.ToUniversalTime() - - if ($synclogList.Length -gt 0) { - foreach ($syncLog in $syncLogList) { - if ($syncLog.Details.Contains("Sync completed successfully")) { - Write-Host $syncLog.TimeStamp : $syncLog.Details - $isSucceeded = $true - } - } - } -} - -if ($isSucceeded) { - # enable scheduled sync - Write-Host "Enable the scheduled sync with 300 seconds interval..." - Update-AzSqlSyncGroup -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName ` - -Name $syncGroupName -IntervalInSeconds $intervalInSeconds -} -else { - # output all log if sync doesn't succeed in 300 seconds - $syncLogEndTime = Get-Date - $syncLogList = Get-AzSqlSyncGroupLog -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName ` - -SyncGroupName $syncGroupName -StartTime $syncLogStartTime.ToUniversalTime() -EndTime $syncLogEndTime.ToUniversalTime() - - if ($synclogList.Length -gt 0) { - foreach ($syncLog in $syncLogList) { - Write-Host $syncLog.TimeStamp : $syncLog.Details - } - } -} -``` - -## Clean up deployment - -After you run the sample script, you can run the following command to remove the resource group and all resources associated with it. - -```powershell -Remove-AzResourceGroup -ResourceGroupName $ResourceGroupName -Remove-AzResourceGroup -ResourceGroupName $SyncDatabaseResourceGroupName -``` - -## Script explanation - -This script uses the following commands. Each command in the table links to command-specific documentation. 
- -| Command | Notes | -|---|---| -| [New-AzSqlSyncAgent](/powershell/module/az.sql/New-azSqlSyncAgent) | Creates a new Sync Agent. | -| [New-AzSqlSyncAgentKey](/powershell/module/az.sql/New-azSqlSyncAgentKey) | Generates the agent key associated with the Sync Agent. | -| [Get-AzSqlSyncAgentLinkedDatabase](/powershell/module/az.sql/Get-azSqlSyncAgentLinkedDatabase) | Get all the information for the Sync Agent. | -| [New-AzSqlSyncMember](/powershell/module/az.sql/New-azSqlSyncMember) | Add a new member to the sync group. | -| [Update-AzSqlSyncSchema](/powershell/module/az.sql/Update-azSqlSyncSchema) | Refreshes the database schema information. | -| [Get-AzSqlSyncSchema](/powershell/module/az.sql/Get-azSqlSyncSchema) | Get the database schema information. | -| [Update-AzSqlSyncGroup](/powershell/module/az.sql/Update-azSqlSyncGroup) | Updates the sync group. | -| [Start-AzSqlSyncGroupSync](/powershell/module/az.sql/Start-azSqlSyncGroupSync) | Triggers a sync. | -| [Get-AzSqlSyncGroupLog](/powershell/module/az.sql/Get-azSqlSyncGroupLog) | Checks the Sync Log. | - - -## Next steps - -For more information about Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional SQL Database PowerShell script samples can be found in [Azure SQL Database PowerShell scripts](../powershell-script-content-guide.md). 
- -For more information about SQL Data Sync, see: - -- Overview - [Sync data across multiple cloud and on-premises databases with SQL Data Sync in Azure](../sql-data-sync-data-sql-server-sql-database.md) -- Set up Data Sync - - Use the Azure portal - [Tutorial: Set up SQL Data Sync to sync data between Azure SQL Database and SQL Server](../sql-data-sync-sql-server-configure.md) - - Use PowerShell - [Use PowerShell to sync data between a database in Azure SQL Database and SQL Server](sql-data-sync-sync-data-between-azure-onprem.md) -- Data Sync Agent - [Data Sync Agent for SQL Data Sync in Azure](../sql-data-sync-agent-overview.md) -- Best practices - [Best practices for SQL Data Sync in Azure](../sql-data-sync-best-practices.md) -- Monitor - [Monitor SQL Data Sync with Azure Monitor logs](../monitor-tune-overview.md) -- Troubleshoot - [Troubleshoot issues with SQL Data Sync in Azure](../sql-data-sync-troubleshoot.md) -- Update the sync schema - - Use Transact-SQL - [Automate the replication of schema changes in SQL Data Sync in Azure](../sql-data-sync-update-sync-schema.md) - - Use PowerShell - [Use PowerShell to update the sync schema in an existing sync group](update-sync-schema-in-sync-group.md) - -For more information about SQL Database, see: - -- [SQL Database overview](../sql-database-paas-overview.md) -- [Database Lifecycle Management](/previous-versions/sql/sql-server-guides/jj907294(v=sql.110)) diff --git a/articles/azure-sql/database/scripts/update-sync-schema-in-sync-group.md b/articles/azure-sql/database/scripts/update-sync-schema-in-sync-group.md deleted file mode 100644 index 1ec744fdad001..0000000000000 --- a/articles/azure-sql/database/scripts/update-sync-schema-in-sync-group.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: "PowerShell: Update SQL Data Sync sync schema" -description: Azure PowerShell example script to update the sync schema for SQL Data Sync -services: sql-database -ms.service: sql-database -ms.subservice: sql-data-sync -ms.custom: 
sqldbrb=1 -ms.devlang: PowerShell -ms.topic: sample -author: rothja -ms.author: jroth -ms.reviewer: kendralittle, mathoma -ms.date: 03/12/2019 ---- -# Use PowerShell to update the sync schema in an existing sync group -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -This Azure PowerShell example updates the sync schema in an existing SQL Data Sync sync group. When you're syncing multiple tables, this script helps you to update the sync schema efficiently. This example demonstrates the use of the **UpdateSyncSchema** script, which is available on GitHub as [UpdateSyncSchema.ps1](https://github.com/Microsoft/sql-server-samples/tree/master/samples/features/sql-data-sync/UpdateSyncSchema.ps1). - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use PowerShell locally, this tutorial requires Az PowerShell 1.4.0 or later. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. - -For an overview of SQL Data Sync, see [Sync data across multiple cloud and on-premises databases with Azure SQL Data Sync](../sql-data-sync-data-sql-server-sql-database.md). - -> [!IMPORTANT] -> SQL Data Sync does not support Azure SQL Managed Instance at this time. - -## Examples - -### Add all tables to the sync schema - -The following example refreshes the database schema and adds all valid tables in the hub database to the sync schema. 
- -```powershell-interactive -UpdateSyncSchema.ps1 -SubscriptionId -ResourceGroupName -ServerName -DatabaseName ` - -SyncGroupName -RefreshDatabaseSchema $true -AddAllTables $true -``` - -### Add and remove tables and columns - -The following example adds `[dbo].[Table1]` and `[dbo].[Table2].[Column1]` to the sync schema and removes `[dbo].[Table3]`. - -```powershell-interactive -UpdateSyncSchema.ps1 -SubscriptionId -ResourceGroupName -ServerName -DatabaseName ` - -SyncGroupName -TablesAndColumnsToAdd "[dbo].[Table1],[dbo].[Table2].[Column1]" -TablesAndColumnsToRemove "[dbo].[Table3]" -``` - -## Script parameters - -The **UpdateSyncSchema** script has the following parameters: - -| Parameter | Notes | -|---|---| -| $subscriptionId | The subscription where the sync group is created. | -| $resourceGroupName | The resource group where the sync group is created.| -| $serverName | The server name of the hub database.| -| $databaseName | The hub database name. | -| $syncGroupName | The sync group name. | -| $memberName | Specify the member name if you want to load the database schema from the sync member instead of from the hub database. If you want to load the database schema from the hub, leave this parameter empty. | -| $timeoutInSeconds | Timeout when the script refreshes database schema. Default is 900 seconds. | -| $refreshDatabaseSchema | Specify whether the script needs to refresh the database schema. If your database schema changed from the previous configuration (for example, if you added a new table or anew column), you need to refresh the schema before you reconfigure it. Default is false. | -| $addAllTables | If this value is true, all valid tables and columns are added to the sync schema. The values of $TablesAndColumnsToAdd and $TablesAndColumnsToRemove are ignored. | -| $tablesAndColumnsToAdd | Specify tables or columns to be added to the sync schema. Each table or column name needs to be fully delimited with the schema name. 
For example: `[dbo].[Table1]`, `[dbo].[Table2].[Column1]`. Multiple table or column names can be specified and separated by a comma (,). | -| $tablesAndColumnsToRemove | Specify tables or columns to be removed from the sync schema. Each table or column name needs to be fully delimited with schema name. For example: `[dbo].[Table1]`, `[dbo].[Table2].[Column1]`. Multiple table or column names can be specified and separated by a comma (,). | - -## Script explanation - -The **UpdateSyncSchema** script uses the following commands. Each command in the table links to command-specific documentation. - -| Command | Notes | -|---|---| -| [Get-AzSqlSyncGroup](/powershell/module/az.sql/get-azsqlsyncgroup) | Returns information about a sync group. | -| [Update-AzSqlSyncGroup](/powershell/module/az.sql/update-azsqlsyncgroup) | Updates a sync group. | -| [Get-AzSqlSyncMember](/powershell/module/az.sql/get-azsqlsyncmember) | Returns information about a sync member. | -| [Get-AzSqlSyncSchema](/powershell/module/az.sql/get-azsqlsyncschema) | Returns information about a sync schema. | -| [Update-AzSqlSyncSchema](/powershell/module/az.sql/update-azsqlsyncschema) | Updates a sync schema. | - -## Next steps - -For more information about Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional SQL Database PowerShell script samples can be found in [Azure SQL Database PowerShell scripts](../powershell-script-content-guide.md). 
- -For more information about SQL Data Sync, see: - -- Overview - [Sync data between Azure SQL Database and SQL Server with SQL Data Sync in Azure](../sql-data-sync-data-sql-server-sql-database.md) -- Set up Data Sync - - Use the Azure portal - [Tutorial: Set up SQL Data Sync to sync data between Azure SQL Database and SQL Server](../sql-data-sync-sql-server-configure.md) - - Use PowerShell - - [Use PowerShell to sync data between multiple databases in Azure SQL Database](sql-data-sync-sync-data-between-sql-databases.md) - - [Use PowerShell to sync data between Azure SQL Database and SQL Server](sql-data-sync-sync-data-between-azure-onprem.md) -- Data Sync Agent - [Data Sync Agent for SQL Data Sync in Azure](../sql-data-sync-agent-overview.md) -- Best practices - [Best practices for SQL Data Sync in Azure](../sql-data-sync-best-practices.md) -- Monitor - [Monitor SQL Data Sync with Azure Monitor logs](../monitor-tune-overview.md) -- Troubleshoot - [Troubleshoot issues with SQL Data Sync in Azure](../sql-data-sync-troubleshoot.md) -- Update the sync schema - - Use Transact-SQL - [Automate the replication of schema changes in SQL Data Sync in Azure](../sql-data-sync-update-sync-schema.md) - -For more information about SQL Database, see: - -- [SQL Database overview](../sql-database-paas-overview.md) -- [Database Lifecycle Management](/previous-versions/sql/sql-server-guides/jj907294(v=sql.110)) \ No newline at end of file diff --git a/articles/azure-sql/database/scripts/vnet-service-endpoint-rule-powershell-create.md b/articles/azure-sql/database/scripts/vnet-service-endpoint-rule-powershell-create.md deleted file mode 100644 index 1fd27ff334340..0000000000000 --- a/articles/azure-sql/database/scripts/vnet-service-endpoint-rule-powershell-create.md +++ /dev/null @@ -1,461 +0,0 @@ ---- -title: PowerShell for VNet endpoints and rules for single and pooled databases -description: "Provides PowerShell scripts to create and manage Virtual Service endpoints for your Azure 
SQL Database and Azure Synapse." -services: sql-database -ms.service: sql-database -ms.subservice: deployment-configuration -ms.devlang: PowerShell -ms.topic: conceptual -author: rohitnayakmsft -ms.author: rohitna -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 04/17/2019 -ms.custom: sqldbrb=1, devx-track-azurepowershell -tags: azure-synapse ---- -# PowerShell: Create a Virtual Service endpoint and VNet rule for Azure SQL Database -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqldb.md)] - -*Virtual network rules* are one firewall security feature that controls whether the [logical SQL server](../logical-servers.md) for your [Azure SQL Database](../sql-database-paas-overview.md) databases, elastic pools, or databases in [Azure Synapse](../../../synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-what-is.md) accept communications that are sent from particular subnets in virtual networks. - -> [!IMPORTANT] -> This article applies to Azure SQL Database, including Azure Synapse (formerly SQL DW). For simplicity, the term Azure SQL Database in this article applies to databases belonging to either Azure SQL Database or Azure Synapse. This article does *not* apply to Azure SQL Managed Instance because it does not have a service endpoint associated with it. - -This article demonstrates a PowerShell script that takes the following actions: - -1. Creates a Microsoft Azure *Virtual Service endpoint* on your subnet. -2. Adds the endpoint to the firewall of your server, to create a *virtual network rule*. - -For more background, see [Virtual Service endpoints for Azure SQL Database][sql-db-vnet-service-endpoint-rule-overview-735r]. - -> [!TIP] -> If all you need is to assess or add the Virtual Service endpoint *type name* for Azure SQL Database to your subnet, you can skip ahead to our more [direct PowerShell script](#a-verify-subnet-is-endpoint-ps-100). 
- -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] - -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported by Azure SQL Database, but all future development is for the [`Az.Sql` Cmdlets](/powershell/module/az.sql). For the older module, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. - -## Major cmdlets - -This article emphasizes the [**New-AzSqlServerVirtualNetworkRule** cmdlet](/powershell/module/az.sql/new-azsqlservervirtualnetworkrule) that adds the subnet endpoint to the access control list (ACL) of your server, thereby creating a rule. - -The following list shows the sequence of other *major* cmdlets that you must run to prepare for your call to **New-AzSqlServerVirtualNetworkRule**. In this article, these calls occur in [script 3 "Virtual network rule"](#a-script-30): - -1. [New-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/new-azvirtualnetworksubnetconfig): Creates a subnet object. -2. [New-AzVirtualNetwork](/powershell/module/az.network/new-azvirtualnetwork): Creates your virtual network, giving it the subnet. -3. [Set-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/Set-azVirtualNetworkSubnetConfig): Assigns a Virtual Service endpoint to your subnet. -4. [Set-AzVirtualNetwork](/powershell/module/az.network/Set-azVirtualNetwork): Persists updates made to your virtual network. -5. [New-AzSqlServerVirtualNetworkRule](/powershell/module/az.sql/new-azsqlservervirtualnetworkrule): After your subnet is an endpoint, adds your subnet as a virtual network rule, into the ACL of your server. - - This cmdlet Offers the parameter **-IgnoreMissingVNetServiceEndpoint**, starting in Azure RM PowerShell Module version 5.1.1. - -## Prerequisites for running PowerShell - -- You can already log in to Azure, such as through the [Azure portal][http-azure-portal-link-ref-477t]. 
-- You can already run PowerShell scripts. - -> [!NOTE] -> Please ensure that service endpoints are turned on for the VNet/Subnet that you want to add to your Server otherwise creation of the VNet Firewall Rule will fail. - -## One script divided into four chunks - -Our demonstration PowerShell script is divided into a sequence of smaller scripts. The division eases learning and provides flexibility. The scripts must be run in their indicated sequence. If you do not have time now to run the scripts, our actual test output is displayed after script 4. - - - -### Script 1: Variables - -This first PowerShell script assigns values to variables. The subsequent scripts depend on these variables. - -> [!IMPORTANT] -> Before you run this script, you can edit the values, if you like. For example, if you already have a resource group, you might want to edit your resource group name as the assigned value. -> -> Your subscription name should be edited into the script. - -### PowerShell script 1 source code - -```powershell -######### Script 1 ######################################## -## LOG into to your Azure account. ## -## (Needed only one time per powershell.exe session.) ## -########################################################### - -$yesno = Read-Host 'Do you need to log into Azure (only one time per powershell.exe session)? [yes/no]' -if ('yes' -eq $yesno) { Connect-AzAccount } - -########################################################### -## Assignments to variables used by the later scripts. ## -########################################################### - -# You can edit these values, if necessary. 
-$SubscriptionName = 'yourSubscriptionName' -Select-AzSubscription -SubscriptionName $SubscriptionName - -$ResourceGroupName = 'RG-YourNameHere' -$Region = 'westcentralus' - -$VNetName = 'myVNet' -$SubnetName = 'mySubnet' -$VNetAddressPrefix = '10.1.0.0/16' -$SubnetAddressPrefix = '10.1.1.0/24' -$VNetRuleName = 'myFirstVNetRule-ForAcl' - -$SqlDbServerName = 'mysqldbserver-forvnet' -$SqlDbAdminLoginName = 'ServerAdmin' -$SqlDbAdminLoginPassword = 'ChangeYourAdminPassword1' - -$ServiceEndpointTypeName_SqlDb = 'Microsoft.Sql' # Official type name. - -Write-Host 'Completed script 1, the "Variables".' -``` - - - -### Script 2: Prerequisites - -This script prepares for the next script, where the endpoint action is. This script creates for you the following listed items, but only if they do not already exist. You can skip script 2 if you are sure these items already exist: - -- Azure resource group -- Logical SQL server - -### PowerShell script 2 source code - -```powershell -######### Script 2 ######################################## -## Ensure your Resource Group already exists. ## -########################################################### - -Write-Host "Check whether your Resource Group already exists." - -$gottenResourceGroup = $null -$gottenResourceGroup = Get-AzResourceGroup -Name $ResourceGroupName -ErrorAction SilentlyContinue - -if ($null -eq $gottenResourceGroup) { - Write-Host "Creating your missing Resource Group - $ResourceGroupName." - New-AzResourceGroup -Name $ResourceGroupName -Location $Region -} else { - Write-Host "Good, your Resource Group already exists - $ResourceGroupName." -} - -$gottenResourceGroup = $null - -########################################################### -## Ensure your server already exists. ## -########################################################### - -Write-Host "Check whether your server already exists." 
- -$sqlDbServer = $null -$azSqlParams = @{ - ResourceGroupName = $ResourceGroupName - ServerName = $SqlDbServerName - ErrorAction = 'SilentlyContinue' -} -$sqlDbServer = Get-AzSqlServer @azSqlParams - -if ($null -eq $sqlDbServer) { - Write-Host "Creating the missing server - $SqlDbServerName." - Write-Host "Gather the credentials necessary to next create a server." - - $sqlAdministratorCredentials = [pscredential]::new($SqlDbAdminLoginName,(ConvertTo-SecureString -String $SqlDbAdminLoginPassword -AsPlainText -Force)) - - if ($null -eq $sqlAdministratorCredentials) { - Write-Host "ERROR, unable to create SQL administrator credentials. Now ending." - return - } - - Write-Host "Create your server." - - $sqlSrvParams = @{ - ResourceGroupName = $ResourceGroupName - ServerName = $SqlDbServerName - Location = $Region - SqlAdministratorCredentials = $sqlAdministratorCredentials - } - New-AzSqlServer @sqlSrvParams -} else { - Write-Host "Good, your server already exists - $SqlDbServerName." -} - -$sqlAdministratorCredentials = $null -$sqlDbServer = $null - -Write-Host 'Completed script 2, the "Prerequisites".' -``` - - - -## Script 3: Create an endpoint and a rule - -This script creates a virtual network with a subnet. Then the script assigns the **Microsoft.Sql** endpoint type to your subnet. Finally the script adds your subnet to the access control list (ACL), thereby creating a rule. - -### PowerShell script 3 source code - -```powershell -######### Script 3 ######################################## -## Create your virtual network, and give it a subnet. ## -########################################################### - -Write-Host "Define a subnet '$SubnetName', to be given soon to a virtual network." 
- -$subnetParams = @{ - Name = $SubnetName - AddressPrefix = $SubnetAddressPrefix - ServiceEndpoint = $ServiceEndpointTypeName_SqlDb -} -$subnet = New-AzVirtualNetworkSubnetConfig @subnetParams - -Write-Host "Create a virtual network '$VNetName'.`nGive the subnet to the virtual network that we created." - -$vnetParams = @{ - Name = $VNetName - AddressPrefix = $VNetAddressPrefix - Subnet = $subnet - ResourceGroupName = $ResourceGroupName - Location = $Region -} -$vnet = New-AzVirtualNetwork @vnetParams - -########################################################### -## Create a Virtual Service endpoint on the subnet. ## -########################################################### - -Write-Host "Assign a Virtual Service endpoint 'Microsoft.Sql' to the subnet." - -$vnetSubParams = @{ - Name = $SubnetName - AddressPrefix = $SubnetAddressPrefix - VirtualNetwork = $vnet - ServiceEndpoint = $ServiceEndpointTypeName_SqlDb -} -$vnet = Set-AzVirtualNetworkSubnetConfig @vnetSubParams - -Write-Host "Persist the updates made to the virtual network > subnet." - -$vnet = Set-AzVirtualNetwork -VirtualNetwork $vnet - -$vnet.Subnets[0].ServiceEndpoints # Display the first endpoint. - -########################################################### -## Add the Virtual Service endpoint Id as a rule, ## -## into SQL Database ACLs. ## -########################################################### - -Write-Host "Get the subnet object." - -$vnet = Get-AzVirtualNetwork -ResourceGroupName $ResourceGroupName -Name $VNetName - -$subnet = Get-AzVirtualNetworkSubnetConfig -Name $SubnetName -VirtualNetwork $vnet - -Write-Host "Add the subnet .Id as a rule, into the ACLs for your server." - -$ruleParams = @{ - ResourceGroupName = $ResourceGroupName - ServerName = $SqlDbServerName - VirtualNetworkRuleName = $VNetRuleName - VirtualNetworkSubnetId = $subnet.Id -} -New-AzSqlServerVirtualNetworkRule @ruleParams - -Write-Host "Verify that the rule is in the SQL Database ACL." 
- -$rule2Params = @{ - ResourceGroupName = $ResourceGroupName - ServerName = $SqlDbServerName - VirtualNetworkRuleName = $VNetRuleName -} -Get-AzSqlServerVirtualNetworkRule @rule2Params - -Write-Host 'Completed script 3, the "Virtual-Network-Rule".' -``` - - - -## Script 4: Clean-up - -This final script deletes the resources that the previous scripts created for the demonstration. However, the script asks for confirmation before it deletes the following: - -- Logical SQL server -- Azure Resource Group - -You can run script 4 any time after script 1 completes. - -### PowerShell script 4 source code - -```powershell -######### Script 4 ######################################## -## Clean-up phase A: Unconditional deletes. ## -## ## -## 1. The test rule is deleted from SQL Database ACL. ## -## 2. The test endpoint is deleted from the subnet. ## -## 3. The test virtual network is deleted. ## -########################################################### - -Write-Host "Delete the rule from the SQL Database ACL." - -$removeParams = @{ - ResourceGroupName = $ResourceGroupName - ServerName = $SqlDbServerName - VirtualNetworkRuleName = $VNetRuleName - ErrorAction = 'SilentlyContinue' -} -Remove-AzSqlServerVirtualNetworkRule @removeParams - -Write-Host "Delete the endpoint from the subnet." - -$vnet = Get-AzVirtualNetwork -ResourceGroupName $ResourceGroupName -Name $VNetName - -Remove-AzVirtualNetworkSubnetConfig -Name $SubnetName -VirtualNetwork $vnet - -Write-Host "Delete the virtual network (thus also deletes the subnet)." - -$removeParams = @{ - Name = $VNetName - ResourceGroupName = $ResourceGroupName - ErrorAction = 'SilentlyContinue' -} -Remove-AzVirtualNetwork @removeParams - -########################################################### -## Clean-up phase B: Conditional deletes. ## -## ## -## These might have already existed, so user might ## -## want to keep. ## -## ## -## 1. Logical SQL server ## -## 2. 
Azure resource group ## -########################################################### - -$yesno = Read-Host 'CAUTION !: Do you want to DELETE your server AND your resource group? [yes/no]' -if ('yes' -eq $yesno) { - Write-Host "Remove the server." - - $removeParams = @{ - ServerName = $SqlDbServerName - ResourceGroupName = $ResourceGroupName - ErrorAction = 'SilentlyContinue' - } - Remove-AzSqlServer @removeParams - - Write-Host "Remove the Azure Resource Group." - - Remove-AzResourceGroup -Name $ResourceGroupName -ErrorAction SilentlyContinue -} else { - Write-Host "Skipped over the DELETE of SQL Database and resource group." -} - -Write-Host 'Completed script 4, the "Clean-Up".' -``` - - - - - -## Verify your subnet is an endpoint - -You might have a subnet that was already assigned the **Microsoft.Sql** type name, meaning it is already a Virtual Service endpoint. You could use the [Azure portal][http-azure-portal-link-ref-477t] to create a virtual network rule from the endpoint. - -Or, you might be unsure whether your subnet has the **Microsoft.Sql** type name. You can run the following PowerShell script to take these actions: - -1. Ascertain whether your subnet has the **Microsoft.Sql** type name. -2. Optionally, assign the type name if it is absent. - - The script asks you to *confirm*, before it applies the absent type name. - -### Phases of the script - -Here are the phases of the PowerShell script: - -1. LOG into to your Azure account, needed only once per PS session. Assign variables. -2. Search for your virtual network, and then for your subnet. -3. Is your subnet tagged as **Microsoft.Sql** endpoint server type? -4. Add a Virtual Service endpoint of type name **Microsoft.Sql**, on your subnet. - -> [!IMPORTANT] -> Before you run this script, you must edit the values assigned to the $-variables, near the top of the script. 
- -### Direct PowerShell source code - -This PowerShell script does not update anything, unless you respond yes if is asks you for confirmation. The script can add the type name **Microsoft.Sql** to your subnet. But the script tries the add only if your subnet lacks the type name. - -```powershell -### 1. LOG into to your Azure account, needed only once per PS session. Assign variables. -$yesno = Read-Host 'Do you need to log into Azure (only one time per powershell.exe session)? [yes/no]' -if ('yes' -eq $yesno) { Connect-AzAccount } - -# Assignments to variables used by the later scripts. -# You can EDIT these values, if necessary. - -$SubscriptionName = 'yourSubscriptionName' -Select-AzSubscription -SubscriptionName "$SubscriptionName" - -$ResourceGroupName = 'yourRGName' -$VNetName = 'yourVNetName' -$SubnetName = 'yourSubnetName' -$SubnetAddressPrefix = 'Obtain this value from the Azure portal.' # Looks roughly like: '10.0.0.0/24' - -$ServiceEndpointTypeName_SqlDb = 'Microsoft.Sql' # Do NOT edit. Is official value. - -### 2. Search for your virtual network, and then for your subnet. -# Search for the virtual network. -$vnet = $null -$vnet = Get-AzVirtualNetwork -ResourceGroupName $ResourceGroupName -Name $VNetName - -if ($vnet -eq $null) { - Write-Host "Caution: No virtual network found by the name '$VNetName'." - return -} - -$subnet = $null -for ($nn = 0; $nn -lt $vnet.Subnets.Count; $nn++) { - $subnet = $vnet.Subnets[$nn] - if ($subnet.Name -eq $SubnetName) { break } - $subnet = $null -} - -if ($null -eq $subnet) { - Write-Host "Caution: No subnet found by the name '$SubnetName'" - Return -} - -### 3. Is your subnet tagged as 'Microsoft.Sql' endpoint server type? 
-$endpointMsSql = $null -for ($nn = 0; $nn -lt $subnet.ServiceEndpoints.Count; $nn++) { - $endpointMsSql = $subnet.ServiceEndpoints[$nn] - if ($endpointMsSql.Service -eq $ServiceEndpointTypeName_SqlDb) { - $endpointMsSql - break - } - $endpointMsSql = $null -} - -if ($null -eq $endpointMsSql) { - Write-Host "Good: Subnet found, and is already tagged as an endpoint of type '$ServiceEndpointTypeName_SqlDb'." - return -} else { - Write-Host "Caution: Subnet found, but not yet tagged as an endpoint of type '$ServiceEndpointTypeName_SqlDb'." - - # Ask the user for confirmation. - $yesno = Read-Host 'Do you want the PS script to apply the endpoint type name to your subnet? [yes/no]' - if ('no' -eq $yesno) { return } -} - -### 4. Add a Virtual Service endpoint of type name 'Microsoft.Sql', on your subnet. -$setParams = @{ - Name = $SubnetName - AddressPrefix = $SubnetAddressPrefix - VirtualNetwork = $vnet - ServiceEndpoint = $ServiceEndpointTypeName_SqlDb -} -$vnet = Set-AzVirtualNetworkSubnetConfig @setParams - -# Persist the subnet update. -$vnet = Set-AzVirtualNetwork -VirtualNetwork $vnet - -for ($nn = 0; $nn -lt $vnet.Subnets.Count; $nn++) { - $vnet.Subnets[0].ServiceEndpoints # Display. -} -``` - - -[sql-db-vnet-service-endpoint-rule-overview-735r]:../vnet-service-endpoint-rule-overview.md -[http-azure-portal-link-ref-477t]: https://portal.azure.com/ diff --git a/articles/azure-sql/database/secure-database-tutorial.md b/articles/azure-sql/database/secure-database-tutorial.md deleted file mode 100644 index 3165380331b5a..0000000000000 --- a/articles/azure-sql/database/secure-database-tutorial.md +++ /dev/null @@ -1,349 +0,0 @@ ---- -title: Secure a database -description: This tutorial teaches you the about techniques and features to secure an Azure SQL Database, whether it's a single database, or pooled. 
-services: sql-database -ms.service: sql-database -ms.subservice: security -ms.topic: tutorial -author: VanMSFT -ms.author: vanto -ms.reviewer: kendralittle, mathoma -ms.date: 09/21/2020 -ms.custom: seoapril2019 sqldbrb=1 ---- -# Tutorial: Secure a database in Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this tutorial you learn how to: - -> [!div class="checklist"] -> -> - Create server-level and database-level firewall rules -> - Configure an Azure Active Directory (Azure AD) administrator -> - Manage user access with SQL authentication, Azure AD authentication, and secure connection strings -> - Enable security features, such as Microsoft Defender for SQL, auditing, data masking, and encryption - -Azure SQL Database secures data by allowing you to: - -- Limit access using firewall rules -- Use authentication mechanisms that require identity -- Use authorization with role-based memberships and permissions -- Enable security features - -> [!NOTE] -> Azure SQL Managed Instance is secured using network security rules and private endpoints as described in [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md) and [connectivity architecture](../managed-instance/connectivity-architecture-overview.md). - -To learn more, see the [Azure SQL Database security overview](./security-overview.md) and [capabilities](security-overview.md) articles. - -> [!TIP] -> The following Microsoft Learn module helps you learn for free about how to [Secure your database in Azure SQL Database](/learn/modules/secure-your-azure-sql-database/). 
- -## Prerequisites - -To complete the tutorial, make sure you have the following prerequisites: - -- [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) -- A [server](logical-servers.md) and a single database - - Create them with the [Azure portal](single-database-create-quickstart.md), [CLI](az-cli-script-samples-content-guide.md), or [PowerShell](powershell-script-content-guide.md) - -If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/) before you begin. - -## Sign in to the Azure portal - -For all steps in the tutorial, sign in to the [Azure portal](https://portal.azure.com/) - -## Create firewall rules - -Databases in SQL Database are protected by firewalls in Azure. By default, all connections to the server and database are rejected. To learn more, see [server-level and database-level firewall rules](firewall-configure.md). - -Set **Allow access to Azure services** to **OFF** for the most secure configuration. Then, create a [reserved IP (classic deployment)](/previous-versions/azure/virtual-network/virtual-networks-reserved-public-ip) for the resource that needs to connect, such as an Azure VM or cloud service, and only allow that IP address access through the firewall. If you're using the [Resource Manager](../../virtual-network/ip-services/public-ip-addresses.md) deployment model, a dedicated public IP address is required for each resource. - -> [!NOTE] -> SQL Database communicates over port 1433. If you're trying to connect from within a corporate network, outbound traffic over port 1433 may not be allowed by your network's firewall. If so, you can't connect to the server unless your administrator opens port 1433. - -### Set up server-level firewall rules - -Server-level IP firewall rules apply to all databases within the same server. - -To set up a server-level firewall rule: - -1. 
In the Azure portal, select **SQL databases** from the left-hand menu, and select your database on the **SQL databases** page. - - ![server firewall rule](./media/secure-database-tutorial/server-name.png) - - > [!NOTE] - > Be sure to copy your fully qualified server name (such as *yourserver.database.windows.net*) for use later in the tutorial. - -1. On the **Overview** page, select **Set server firewall**. The **Firewall settings** page for the server opens. - - 1. Select **Add client IP** on the toolbar to add your current IP address to a new firewall rule. The rule can open port 1433 for a single IP address or a range of IP addresses. Select **Save**. - - ![set server firewall rule](./media/secure-database-tutorial/server-firewall-rule2.png) - - 1. Select **OK** and close the **Firewall settings** page. - -You can now connect to any database in the server with the specified IP address or IP address range. - -### Setup database firewall rules - -Database-level firewall rules only apply to individual databases. The database will retain these rules during a server failover. Database-level firewall rules can only be configured using Transact-SQL (T-SQL) statements, and only after you've configured a server-level firewall rule. - -To set up a database-level firewall rule: - -1. Connect to the database, for example using [SQL Server Management Studio](connect-query-ssms.md). - -1. In **Object Explorer**, right-click the database and select **New Query**. - -1. In the query window, add this statement and modify the IP address to your public IP address: - - ```sql - EXECUTE sp_set_database_firewall_rule N'Example DB Rule','0.0.0.4','0.0.0.4'; - ``` - -1. On the toolbar, select **Execute** to create the firewall rule. 
- -> [!NOTE] -> You can also create a server-level firewall rule in SSMS by using the [sp_set_firewall_rule](/sql/relational-databases/system-stored-procedures/sp-set-firewall-rule-azure-sql-database?view=azuresqldb-current&preserve-view=true) command, though you must be connected to the *master* database. - -## Create an Azure AD admin - -Make sure you're using the appropriate Azure Active Directory (AD) managed domain. To select the AD domain, use the upper-right corner of the Azure portal. This process confirms the same subscription is used for both Azure AD and the logical SQL server hosting your database or data warehouse. - - ![choose-ad](./media/secure-database-tutorial/8choose-ad.png) - -To set the Azure AD administrator: - -1. In the Azure portal, on the **SQL server** page, select **Active Directory admin**. Next select **Set admin**. - - ![select active directory](./media/secure-database-tutorial/admin-settings.png) - - > [!IMPORTANT] - > You need to be a "Global Administrator" to perform this task. - -1. On the **Add admin** page, search and select the AD user or group and choose **Select**. All members and groups of your Active Directory are listed, and entries grayed out are not supported as Azure AD administrators. See [Azure AD features and limitations](authentication-aad-overview.md#azure-ad-features-and-limitations). - - ![select admin](./media/secure-database-tutorial/admin-select.png) - - > [!IMPORTANT] - > Azure role-based access control (Azure RBAC) only applies to the portal and isn't propagated to SQL Server. - -1. At the top of the **Active Directory admin** page, select **Save**. - - The process of changing an administrator may take several minutes. The new administrator will appear in the **Active Directory admin** box. - -> [!NOTE] -> When setting an Azure AD admin, the new admin name (user or group) cannot exist as a SQL Server login or user in the *master* database. 
If present, the setup will fail and roll back changes, indicating that such an admin name already exists. Since the SQL Server login or user is not part of Azure AD, any effort to connect the user using Azure AD authentication fails. - -For information about configuring Azure AD, see: - -- [Integrate your on-premises identities with Azure AD](../../active-directory/hybrid/whatis-hybrid-identity.md) -- [Add your own domain name to Azure AD](../../active-directory/fundamentals/add-custom-domain.md) -- [Microsoft Azure now supports federation with Windows Server AD](https://azure.microsoft.com/blog/20../../windows-azure-now-supports-federation-with-windows-server-active-directory/) -- [Administer your Azure AD directory](../../active-directory/fundamentals/active-directory-whatis.md) -- [Manage Azure AD using PowerShell](/powershell/azure/) -- [Hybrid identity required ports and protocols](../../active-directory/hybrid/reference-connect-ports.md) - -## Manage database access - -Manage database access by adding users to the database, or allowing user access with secure connection strings. Connection strings are useful for external applications. To learn more, see [Manage logins and user accounts](logins-create-manage.md) and [AD authentication](authentication-aad-overview.md). - -To add users, choose the database authentication type: - -- **SQL authentication**, use a username and password for logins and are only valid in the context of a specific database within the server - -- **Azure AD authentication**, use identities managed by Azure AD - -### SQL authentication - -To add a user with SQL authentication: - -1. Connect to the database, for example using [SQL Server Management Studio](connect-query-ssms.md). - -1. In **Object Explorer**, right-click the database and choose **New Query**. - -1. In the query window, enter the following command: - - ```sql - CREATE USER ApplicationUser WITH PASSWORD = 'YourStrongPassword1'; - ``` - -1. 
On the toolbar, select **Execute** to create the user. - -1. By default, the user can connect to the database, but has no permissions to read or write data. To grant these permissions, execute the following commands in a new query window: - - ```sql - ALTER ROLE db_datareader ADD MEMBER ApplicationUser; - ALTER ROLE db_datawriter ADD MEMBER ApplicationUser; - ``` - -> [!NOTE] -> Create non-administrator accounts at the database level, unless they need to execute administrator tasks like creating new users. - -### Azure AD authentication - -Azure Active Directory authentication requires that database users are created as contained. A contained database user maps to an identity in the Azure AD directory associated with the database and has no login in the *master* database. The Azure AD identity can either be for an individual user or a group. For more information, see [Contained database users, make your database portable](/sql/relational-databases/security/contained-database-users-making-your-database-portable) and review the [Azure AD tutorial](authentication-aad-configure.md) on how to authenticate using Azure AD. - -> [!NOTE] -> Database users (excluding administrators) cannot be created using the Azure portal. Azure roles do not propagate to SQL servers, databases, or data warehouses. They are only used to manage Azure resources and do not apply to database permissions. -> -> For example, the *SQL Server Contributor* role does not grant access to connect to a database or data warehouse. This permission must be granted within the database using T-SQL statements. - -> [!IMPORTANT] -> Special characters like colon `:` or ampersand `&` are not supported in user names in the T-SQL `CREATE LOGIN` and `CREATE USER` statements. - -To add a user with Azure AD authentication: - -1. Connect to your server in Azure using an Azure AD account with at least the *ALTER ANY USER* permission. - -1. In **Object Explorer**, right-click the database and select **New Query**. - -1. 
In the query window, enter the following command and modify `` to the principal name of the Azure AD user or the display name of the Azure AD group: - - ```sql - CREATE USER [] FROM EXTERNAL PROVIDER; - ``` - -> [!NOTE] -> Azure AD users are marked in the database metadata with type `E (EXTERNAL_USER)` and type `X (EXTERNAL_GROUPS)` for groups. For more information, see [sys.database_principals](/sql/relational-databases/system-catalog-views/sys-database-principals-transact-sql). - -### Secure connection strings - -To ensure a secure, encrypted connection between the client application and SQL Database, a connection string must be configured to: - -- Request an encrypted connection -- Not trust the server certificate - -The connection is established using Transport Layer Security (TLS) and reduces the risk of a man-in-the-middle attack. Connection strings are available per database and are pre-configured to support client drivers such as ADO.NET, JDBC, ODBC, and PHP. For information about TLS and connectivity, see [TLS considerations](connect-query-content-reference-guide.md#tls-considerations-for-database-connectivity). - -To copy a secure connection string: - -1. In the Azure portal, select **SQL databases** from the left-hand menu, and select your database on the **SQL databases** page. - -1. On the **Overview** page, select **Show database connection strings**. - -1. Select a driver tab and copy the complete connection string. - - ![ADO.NET connection string](./media/secure-database-tutorial/connection.png) - -## Enable security features - -Azure SQL Database provides security features that are accessed using the Azure portal. These features are available for both the database and server, except for data masking, which is only available on the database. 
To learn more, see [Microsoft Defender for SQL](azure-defender-for-sql.md), [Auditing](/azure/azure-sql/database/auditing-overview), [Dynamic data masking](dynamic-data-masking-overview.md), and [Transparent data encryption](transparent-data-encryption-tde-overview.md). - -### Microsoft Defender for SQL - -The Microsoft Defender for SQL feature detects potential threats as they occur and provides security alerts on anomalous activities. Users can explore these suspicious events using the auditing feature, and determine if the event was to access, breach, or exploit data in the database. Users are also provided a security overview that includes a vulnerability assessment and the data discovery and classification tool. - -> [!NOTE] -> An example threat is SQL injection, a process where attackers inject malicious SQL into application inputs. An application can then unknowingly execute the malicious SQL and allow attackers access to breach or modify data in the database. - -To enable Microsoft Defender for SQL: - -1. In the Azure portal, select **SQL databases** from the left-hand menu, and select your database on the **SQL databases** page. - -1. On the **Overview** page, select the **Server name** link. The server page will open. - -1. On the **SQL server** page, find the **Security** section and select **Defender for Cloud**. - - 1. Select **ON** under **Microsoft Defender for SQL** to enable the feature. Choose a storage account for saving vulnerability assessment results. Then select **Save**. - - ![Navigation pane](./media/secure-database-tutorial/threat-settings.png) - - You can also configure emails to receive security alerts, storage details, and threat detection types. - -1. Return to the **SQL databases** page of your database and select **Defender for Cloud** under the **Security** section. Here you'll find various security indicators available for the database. 
- - ![Threat status](./media/secure-database-tutorial/threat-status.png) - -If anomalous activities are detected, you receive an email with information on the event. This includes the nature of the activity, database, server, event time, possible causes, and recommended actions to investigate and mitigate the potential threat. If such an email is received, select the **Azure SQL Auditing Log** link to launch the Azure portal and show relevant auditing records for the time of the event. - - ![Threat detection email](./media/secure-database-tutorial/threat-email.png) - -### Auditing - -The auditing feature tracks database events and writes events to an audit log in either Azure storage, Azure Monitor logs, or to an event hub. Auditing helps maintain regulatory compliance, understand database activity, and gain insight into discrepancies and anomalies that could indicate potential security violations. - -To enable auditing: - -1. In the Azure portal, select **SQL databases** from the left-hand menu, and select your database on the **SQL databases** page. - -1. In the **Security** section, select **Auditing**. - -1. Under **Auditing** settings, set the following values: - - 1. Set **Auditing** to **ON**. - - 1. Select **Audit log destination** as any of the following: - - - **Storage**, an Azure storage account where event logs are saved and can be downloaded as *.xel* files - - > [!TIP] - > Use the same storage account for all audited databases to get the most from auditing report templates. - - - **Log Analytics**, which automatically stores events for query or further analysis - - > [!NOTE] - > A **Log Analytics workspace** is required to support advanced features such as analytics, custom alert rules, and Excel or Power BI exports. Without a workspace, only the query editor is available. - - - **Event Hub**, which allows events to be routed for use in other applications - - 1. Select **Save**. 
- - ![Audit settings](./media/secure-database-tutorial/audit-settings.png) - -1. Now you can select **View audit logs** to view database events data. - - ![Audit records](./media/secure-database-tutorial/audit-records.png) - -> [!IMPORTANT] -> See [SQL Database auditing](/azure/azure-sql/database/auditing-overview) on how to further customize audit events using PowerShell or REST API. - -### Dynamic data masking - -The data masking feature will automatically hide sensitive data in your database. - -To enable data masking: - -1. In the Azure portal, select **SQL databases** from the left-hand menu, and select your database on the **SQL databases** page. - -1. In the **Security** section, select **Dynamic Data Masking**. - -1. Under **Dynamic data masking** settings, select **Add mask** to add a masking rule. Azure will automatically populate available database schemas, tables, and columns to choose from. - - ![Mask settings](./media/secure-database-tutorial/mask-settings.png) - -1. Select **Save**. The selected information is now masked for privacy. - - ![Mask example](./media/secure-database-tutorial/mask-query.png) - -### Transparent data encryption - -The encryption feature automatically encrypts your data at rest, and requires no changes to applications accessing the encrypted database. For new databases, encryption is on by default. You can also encrypt data using SSMS and the [Always encrypted](always-encrypted-certificate-store-configure.md) feature. - -To enable or verify encryption: - -1. In the Azure portal, select **SQL databases** from the left-hand menu, and select your database on the **SQL databases** page. - -1. In the **Security** section, select **Transparent data encryption**. - -1. If necessary, set **Data encryption** to **ON**. Select **Save**. 
- - ![Transparent Data Encryption](./media/secure-database-tutorial/encryption-settings.png) - -> [!NOTE] -> To view encryption status, connect to the database using [SSMS](connect-query-ssms.md) and query the `encryption_state` column of the [sys.dm_database_encryption_keys](/sql/relational-databases/system-dynamic-management-views/sys-dm-database-encryption-keys-transact-sql) view. A state of `3` indicates the database is encrypted. - -## Next steps - -In this tutorial, you've learned to improve the security of your database with just a few simple steps. You learned how to: - -> [!div class="checklist"] -> -> - Create server-level and database-level firewall rules -> - Configure an Azure Active Directory (AD) administrator -> - Manage user access with SQL authentication, Azure AD authentication, and secure connection strings -> - Enable security features, such as Microsoft Defender for SQL, auditing, data masking, and encryption - -Advance to the next tutorial to learn how to implement geo-distribution. - -> [!div class="nextstepaction"] ->[Implement a geo-distributed database](geo-distributed-application-configure-tutorial.md) diff --git a/articles/azure-sql/database/security-best-practice.md b/articles/azure-sql/database/security-best-practice.md deleted file mode 100644 index 5579017532985..0000000000000 --- a/articles/azure-sql/database/security-best-practice.md +++ /dev/null @@ -1,796 +0,0 @@ ---- -title: Playbook for addressing common security requirements -titleSuffix: Azure SQL Database & Azure SQL Managed Instance -description: This article provides common security requirements and best practices in Azure SQL Database and Azure SQL Managed Instance. 
-ms.service: sql-db-mi -ms.subservice: security -ms.custom: sqldbrb=2 -author: VanMSFT -ms.author: vanto -ms.topic: article -ms.date: 04/13/2021 -ms.reviewer: kendralittle, mathoma ---- - -# Playbook for addressing common security requirements with Azure SQL Database and Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -This article provides best practices on how to solve common security requirements. Not all requirements are applicable to all environments, and you should consult your database and security team on which features to implement. - -## Solving common security requirements - -This document provides guidance on how to solve common security requirements for new or existing applications using Azure SQL Database and Azure SQL Managed Instance. It's organized by high-level security areas. For addressing specific threats, refer to the [Common security threats and potential mitigations](#common-security-threats-and-potential-mitigations) section. Although some of the presented recommendations are applicable when migrating applications from on-premises to Azure, migration scenarios are not the focus of this document. - -### Azure SQL Database deployment offers covered in this guide - -- [Azure SQL Database](./index.yml): [single databases](single-database-overview.md) and [elastic pools](elastic-pool-overview.md) in [servers](logical-servers.md) -- [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md) - -### Deployment offers not covered in this guide - -- Azure Synapse Analytics -- Azure SQL VMs (IaaS) -- SQL Server - -### Audience - -The intended audiences for this guide are customers facing questions on how to secure Azure SQL Database. 
The roles interested in this best practice article include, but not limited to: - -- Security Architects -- Security Managers -- Compliance Officers -- Privacy Officers -- Security Engineers - -### Using this guide - -This document is intended as a companion to our existing [Azure SQL Database security](security-overview.md) documentation. - -Unless otherwise stated, we recommend you follow all best practices listed in each section to achieve the respective goal or requirement. To meet specific security compliance standards or best practices, important regulatory compliance controls are listed under the Requirements or Goals section wherever applicable. These are the security standards and regulations that are referenced in this paper: - -- [FedRAMP](https://www.fedramp.gov/documents/): AC-04, AC-06 -- [SOC](https://www.aicpa.org/interestareas/frc/assuranceadvisoryservices/sorhome.html): CM-3, SDL-3 -- [ISO/IEC 27001](https://www.iso27001security.com/html/27001.html): Access Control, Cryptography -- [Microsoft Operational Security Assurance (OSA) practices](https://www.microsoft.com/securityengineering/osa/practices): Practice #1-6 and #9 -- [NIST Special Publication 800-53 Security Controls](https://nvd.nist.gov/800-53): AC-5, AC-6 -- [PCI DSS](https://www.pcisecuritystandards.org/document_library): 6.3.2, 6.4.2 - -We plan on continuing to update the recommendations and best practices listed here. Provide input or any corrections for this document using the **Feedback** link at the bottom of this article. - -## Authentication - -Authentication is the process of proving the user is who they claim to be. Azure SQL Database and SQL Managed Instance support two types of authentication: - -- SQL authentication -- Azure Active Directory authentication - -> [!NOTE] -> Azure Active Directory authentication may not be supported for all tools and 3rd party applications. 
- -### Central management for identities - -Central identity management offers the following benefits: - -- Manage group accounts and control user permissions without duplicating logins across servers, databases and managed instances. -- Simplified and flexible permission management. -- Management of applications at scale. - -**How to implement**: - -- Use Azure Active Directory (Azure AD) authentication for centralized identity management. - -**Best practices**: - -- Create an Azure AD tenant and [create users](../../active-directory/fundamentals/add-users-azure-active-directory.md) to represent human users and create [service principals](../../active-directory/develop/app-objects-and-service-principals.md) to represent apps, services, and automation tools. Service principals are equivalent to service accounts in Windows and Linux. - -- Assign access rights to resources to Azure AD principals via group assignment: Create Azure AD groups, grant access to groups, and add individual members to the groups. In your database, create contained database users that map your Azure AD groups. To assign permissions inside the database, put the users that are associated with your Azure AD groups in database roles with the appropriate permissions. - - See the articles, [Configure and manage Azure Active Directory authentication with SQL](authentication-aad-configure.md) and [Use Azure AD for authentication with SQL](authentication-aad-overview.md). - > [!NOTE] - > In SQL Managed Instance, you can also create logins that map to Azure AD principals in the master database. See [CREATE LOGIN (Transact-SQL)](/sql/t-sql/statements/create-login-transact-sql?view=azuresqldb-mi-current&preserve-view=true). - -- Using Azure AD groups simplifies permission management and both the group owner, and the resource owner can add/remove members to/from the group. - -- Create a separate group for Azure AD administrators for each server or managed instance. 
- - - See the article, [Provision an Azure Active Directory administrator for your server](authentication-aad-configure.md#provision-azure-ad-admin-sql-database). - -- Monitor Azure AD group membership changes using Azure AD audit activity reports. - -- For a managed instance, a separate step is required to create an Azure AD admin. - - See the article, [Provision an Azure Active Directory administrator for your managed instance](authentication-aad-configure.md#provision-azure-ad-admin-sql-managed-instance). - -> [!NOTE] -> -> - Azure AD authentication is recorded in Azure SQL audit logs, but not in Azure AD sign-in logs. -> - Azure RBAC permissions granted in Azure do not apply to Azure SQL Database or SQL Managed Instance permissions. Such permissions must be created/mapped manually using existing SQL permissions. -> - On the client-side, Azure AD authentication needs access to the internet or via User Defined Route (UDR) to a virtual network. -> - The Azure AD access token is cached on the client side and its lifetime depends on token configuration. See the article, [Configurable token lifetimes in Azure Active Directory](../../active-directory/develop/active-directory-configurable-token-lifetimes.md) -> - For guidance on troubleshooting Azure AD Authentication issues, see the following blog: [Troubleshooting Azure AD](https://techcommunity.microsoft.com/t5/azure-sql-database/troubleshooting-problems-related-to-azure-ad-authentication-with/ba-p/1062991). - -### Azure AD Multi-Factor Authentication - -> Mentioned in: OSA Practice #2, ISO Access Control (AC) - -Azure AD Multi-Factor Authentication helps provides additional security by requiring more than one form of authentication. - -**How to implement**: - -- [Enable Multi-Factor Authentication](../../active-directory/authentication/concept-mfa-howitworks.md) in Azure AD using Conditional Access and use interactive authentication. 
- -- The alternative is to enable Multi-Factor Authentication for the entire Azure AD or AD domain. - -**Best practices**: - -- Activate Conditional Access in Azure AD (requires Premium subscription). - - See the article, [Conditional Access in Azure AD](../../active-directory/conditional-access/overview.md). - -- Create Azure AD group(s) and enable Multi-Factor Authentication policy for selected groups using Azure AD Conditional Access. - - See the article, [Plan Conditional Access Deployment](../../active-directory/conditional-access/plan-conditional-access.md). - -- Multi-Factor Authentication can be enabled for the entire Azure AD or for the whole Active Directory federated with Azure AD. - -- Use Azure AD Interactive authentication mode for Azure SQL Database and Azure SQL Managed Instance where a password is requested interactively, followed by Multi-Factor Authentication: - - Use Universal Authentication in SSMS. See the article, [Using Multi-factor Azure AD authentication with Azure SQL Database, SQL Managed Instance, Azure Synapse (SSMS support for Multi-Factor Authentication)](authentication-mfa-ssms-overview.md). - - Use Interactive Authentication supported in SQL Server Data Tools (SSDT). See the article, [Azure Active Directory support in SQL Server Data Tools (SSDT)](/sql/ssdt/azure-active-directory?view=azuresqldb-current&preserve-view=true). - - Use other SQL tools supporting Multi-Factor Authentication. - - SSMS Wizard support for export/extract/deploy database - - [sqlpackage.exe](/sql/tools/sqlpackage): option '/ua' - - [sqlcmd Utility](/sql/tools/sqlcmd-utility): option -G (interactive) - - [bcp Utility](/sql/tools/bcp-utility): option -G (interactive) - -- Implement your applications to connect to Azure SQL Database or Azure SQL Managed Instance using interactive authentication with Multi-Factor Authentication support. 
- - See the article, [Connect to Azure SQL Database with Azure AD Multi-Factor Authentication](active-directory-interactive-connect-azure-sql-db.md). - > [!NOTE] - > This authentication mode requires user-based identities. In cases where a trusted identity model is used that is bypassing individual Azure AD user authentication (e.g. using managed identity for Azure resources), Multi-Factor Authentication does not apply. - -### Minimize the use of password-based authentication for users - -> Mentioned in: OSA Practice #4, ISO Access Control (AC) - -Password-based authentication methods are a weaker form of authentication. Credentials can be compromised or mistakenly given away. - -**How to implement**: - -- Use an Azure AD integrated authentication that eliminates the use of passwords. - -**Best practices**: - -- Use single sign-on authentication using Windows credentials. Federate the on-premises AD domain with Azure AD and use integrated Windows authentication (for domain-joined machines with Azure AD). - - See the article, [SSMS support for Azure AD Integrated authentication](authentication-aad-configure.md#active-directory-integrated-authentication). - -### Minimize the use of password-based authentication for applications - -> Mentioned in: OSA Practice #4, ISO Access Control (AC) - -**How to implement**: - -- Enable Azure Managed Identity. You can also use integrated or certificate-based authentication. - -**Best practices**: - -- Use [managed identities for Azure resources](../../active-directory/managed-identities-azure-resources/overview.md). 
- - [System-assigned managed identity](../../active-directory/managed-identities-azure-resources/tutorial-windows-vm-access-sql.md) - - [User-assigned managed identity](../../active-directory/managed-identities-azure-resources/how-to-manage-ua-identity-portal.md) - - [Use Azure SQL Database from Azure App Service with managed identity (without code changes)](https://github.com/Azure-Samples/app-service-msi-entityframework-dotnet) - -- Use cert-based authentication for an application. - - See this [code sample](https://github.com/Microsoft/sql-server-samples/tree/master/samples/features/security/azure-active-directory-auth/token). - -- Use Azure AD authentication for integrated federated domain and domain-joined machine (see section above). - - See the [sample application for integrated authentication](https://github.com/Microsoft/sql-server-samples/tree/master/samples/features/security/azure-active-directory-auth/integrated). - -### Protect passwords and secrets - -For cases when passwords aren't avoidable, make sure they're secured. - -**How to implement**: - -- Use Azure Key Vault to store passwords and secrets. Whenever applicable, use Multi-Factor Authentication for Azure SQL Database with Azure AD users. - -**Best practices**: - -- If avoiding passwords or secrets aren't possible, store user passwords and application secrets in Azure Key Vault and manage access through Key Vault access policies. - -- Various app development frameworks may also offer framework-specific mechanisms for protecting secrets in the app. For example: [ASP.NET core app](/aspnet/core/security/app-secrets?tabs=windows). - -### Use SQL authentication for legacy applications - -SQL authentication refers to the authentication of a user when connecting to Azure SQL Database or SQL Managed Instance using username and password. A login will need to be created in each server or managed instance, and a user created in each database. - -**How to implement**: - -- Use SQL authentication. 
- -**Best practices**: - -- As a server or instance admin, create logins and users. Unless using contained database users with passwords, all passwords are stored in master database. - - See the article, [Controlling and granting database access to SQL Database, SQL Managed Instance and Azure Synapse Analytics](logins-create-manage.md). - -## Access management - -Access management (also called Authorization) is the process of controlling and managing authorized users' access and privileges to Azure SQL Database or SQL Managed Instance. - -### Implement principle of least privilege - -> Mentioned in: FedRamp controls AC-06, NIST: AC-6, OSA Practice #3 - -The principle of least privilege states that users shouldn't have more privileges than needed to complete their tasks. For more information, see the article [Just enough administration](/powershell/scripting/learn/remoting/jea/overview). - -**How to implement**: - -Assign only the necessary [permissions](/sql/relational-databases/security/permissions-database-engine) to complete the required tasks: - -- In SQL Databases: - - Use granular permissions and user-defined database roles (or server-roles in Managed Instance): - 1. Create the required roles - - [CREATE ROLE](/sql/t-sql/statements/create-role-transact-sql) - - [CREATE SERVER ROLE](/sql/t-sql/statements/create-server-role-transact-sql) - 1. Create required users - - [CREATE USER](/sql/t-sql/statements/create-user-transact-sql) - 1. Add users as members to roles - - [ALTER ROLE](/sql/t-sql/statements/alter-role-transact-sql) - - [ALTER SERVER ROLE](/sql/t-sql/statements/alter-server-role-transact-sql) - 1. Then assign permissions to roles. - - [GRANT](/sql/t-sql/statements/grant-transact-sql) - - Make sure to not assign users to unnecessary roles. - -- In Azure Resource Manager: - - Use built-in roles if available or Azure custom roles and assign the necessary permissions. 
- - [Azure built-in roles](../../role-based-access-control/built-in-roles.md) - - [Azure custom roles](../../role-based-access-control/custom-roles.md) - -**Best practices**: - -The following best practices are optional but will result in better manageability and supportability of your security strategy: - -- If possible, start with the least possible set of permissions and start adding permissions one by one if there's a real necessity (and justification) – as opposed to the opposite approach: taking permissions away step by step. - -- Refrain from assigning permissions to individual users. Use roles (database or server roles) consistently instead. Roles helps greatly with reporting and troubleshooting permissions. (Azure RBAC only supports permission assignment via roles.) - -- Create and use custom roles with the exact permissions needed. Typical roles that are used in practice: - - Security deployment - - Administrator - - Developer - - Support personnel - - Auditor - - Automated processes - - End user - -- Use built-in roles only when the permissions of the roles match exactly the needed permissions for the user. You can assign users to multiple roles. - -- Remember that permissions in the database engine can be applied within the following scopes (the smaller the scope, the smaller the impact of the granted permissions): - - Server (special roles in master database) in Azure - - Database - - Schema - - It is a best practice to use schemas to grant permissions inside a database. (also see: [Schema-design: Recommendations for Schema design with security in mind](http://andreas-wolter.com/en/schema-design-for-sql-server-recommendations-for-schema-design-with-security-in-mind/)) - - Object (table, view, procedure, etc.) - - > [!NOTE] - > It is not recommended to apply permissions on the object level because this level adds unnecessary complexity to the overall implementation. If you decide to use object-level permissions, those should be clearly documented. 
The same applies to column-level-permissions, which are even less recommendable for the same reasons. Also be aware that by default a table-level [DENY](/sql/t-sql/statements/deny-object-permissions-transact-sql) does not override a column-level GRANT. This would require the [common criteria compliance Server Configuration](/sql/database-engine/configure-windows/common-criteria-compliance-enabled-server-configuration-option) to be activated. - -- Perform regular checks using [Vulnerability Assessment (VA)](/sql/relational-databases/security/sql-vulnerability-assessment) to test for too many permissions. - -### Implement Separation of Duties - -> Mentioned in: FedRamp: AC-04, NIST: AC-5, ISO: A.6.1.2, PCI 6.4.2, SOC: CM-3, SDL-3 - -Separation of Duties, also called Segregation of Duties describes the requirement to split sensitive tasks into multiple tasks that are assigned to different users. Separation of Duties helps prevent data breaches. - -**How to implement**: - -- Identify the required level of Separation of Duties. Examples: - - Between Development/Test and Production environments - - Security-wise sensitive tasks vs Database Administrator (DBA) management level tasks vs developer tasks. - - Examples: Auditor, creation of security policy for Role-level Security (RLS), Implementing SQL Database objects with DDL-permissions. - -- Identify a comprehensive hierarchy of users (and automated processes) that access the system. - -- Create roles according to the needed user-groups and assign permissions to roles. - - For management-level tasks in Azure portal or via PowerShell-automation use Azure roles. Either find a built-in role matching the requirement, or create an Azure custom role using the available permissions - - Create Server roles for server-wide tasks (creating new logins, databases) in a managed instance. - - Create Database Roles for database-level tasks. 
- -- For certain sensitive tasks, consider creating special stored procedures signed by a certificate to execute the tasks on behalf of the users. One important advantage of digitally signed stored procedures is that if the procedure is changed, the permissions that were granted to the previous version of the procedure are immediately removed. - - Example: [Tutorial: Signing Stored Procedures with a Certificate](/sql/relational-databases/tutorial-signing-stored-procedures-with-a-certificate) - -- Implement Transparent Data Encryption (TDE) with customer-managed keys in Azure Key Vault to enable Separation of Duties between data owner and security owner. - - See the article, [Configure customer-managed keys for Azure Storage encryption from the Azure portal](../../storage/common/customer-managed-keys-configure-key-vault.md). - -- To ensure that a DBA can't see data that is considered highly sensitive and can still do DBA tasks, you can use Always Encrypted with role separation. - - See the articles, [Overview of Key Management for Always Encrypted](/sql/relational-databases/security/encryption/overview-of-key-management-for-always-encrypted), [Key Provisioning with Role Separation](/sql/relational-databases/security/encryption/configure-always-encrypted-keys-using-powershell#KeyProvisionWithRoles), and [Column Master Key Rotation with Role Separation](/sql/relational-databases/security/encryption/rotate-always-encrypted-keys-using-powershell#column-master-key-rotation-with-role-separation). - -- In cases where the use of Always Encrypted isn't feasible, or at least not without major costs and efforts that may even render the system near unusable, compromises can be made and mitigated through the use of compensating controls such as: - - Human intervention in processes. - - Audit trails – for more information on Auditing, see, [Audit critical security events](#audit-critical-security-events). 
- -**Best practices**: - -- Make sure that different accounts are used for Development/Test and Production environments. Different accounts help to comply with separation of Test and Production systems. - -- Refrain from assigning permissions to individual users. Use roles (database or server roles) consistently instead. Having roles helps greatly with reporting and troubleshooting permissions. - -- Use built-in roles when the permissions match exactly the needed permissions – if the union of all permissions from multiple built-in roles leads to a 100% match, you can assign multiple roles concurrently as well. - -- Create and use user-defined roles when built-in roles grant too many permissions or insufficient permissions. - -- Role assignments can also be done temporarily, also known as Dynamic Separation of Duties (DSD), either within SQL Agent Job steps in T-SQL or using Azure PIM for Azure roles. - -- Make sure that DBAs don't have access to the encryption keys or key stores, and that Security Administrators with access to the keys have no access to the database in turn. The use of [Extensible Key Management (EKM)](/sql/relational-databases/security/encryption/extensible-key-management-ekm) can make this separation easier to achieve. [Azure Key Vault](https://azure.microsoft.com/services/key-vault/) can be used to implement EKM. - -- Always make sure to have an Audit trail for security-related actions. - -- You can retrieve the definition of the Azure built-in roles to see the permissions used and create a custom role based on excerpts and cumulations of these via PowerShell. - -- Because any member of the db_owner database role can change security settings like Transparent Data Encryption (TDE), or change the SLO, this membership should be granted with care. However, there are many tasks that require db_owner privileges. Task like changing any database setting such as changing DB options. Auditing plays a key role in any solution. 
- -- It is not possible to restrict permissions of a db_owner, and therefore prevent an administrative account from viewing user data. If there's highly sensitive data in a database, Always Encrypted can be used to safely prevent db_owners or any other DBA from viewing it. - -> [!NOTE] -> Achieving Separation of Duties (SoD) is challenging for security-related or troubleshooting tasks. Other areas like development and end-user roles are easier to segregate. Most compliance related controls allow the use of alternate control functions such as Auditing when other solutions aren't practical. - -For the readers that want to dive deeper into SoD, we recommend the following resources: - -- For Azure SQL Database and SQL Managed Instance: - - [Controlling and granting database access](logins-create-manage.md) - - [Engine Separation of Duties for the Application Developer](/previous-versions/sql/sql-server-2008/cc974525(v=sql.100)) - - [Separation of Duties](https://www.microsoft.com/download/details.aspx?id=39269) - - [Signing Stored Procedures](/dotnet/framework/data/adonet/sql/signing-stored-procedures-in-sql-server) - -- For Azure Resource Management: - - [Azure built-in roles](../../role-based-access-control/built-in-roles.md) - - [Azure custom roles](../../role-based-access-control/custom-roles.md) - - [Using Azure AD Privileged Identity Management for elevated access](https://www.microsoft.com/itshowcase/using-azure-ad-privileged-identity-management-for-elevated-access) - -### Perform regular code reviews - -> Mentioned in: PCI: 6.3.2, SOC: SDL-3 - -Separation of Duties is not limited to the data in a database, but includes application code. Malicious code can potentially circumvent security controls. Before deploying custom code to production, it is essential to review what's being deployed. - -**How to implement**: - -- Use a database tool like Azure Data Studio that supports source control. - -- Implement a segregated code deployment process. 
- -- Before committing to main branch, a person (other than the author of the code itself) has to inspect the code for potential elevation of privileges risks as well as malicious data modifications to protect against fraud and rogue access. This can be done using source control mechanisms. - -**Best practices**: - -- Standardization: It helps to implement a standard procedure that is to be followed for any code updates. - -- Vulnerability Assessment contains rules that check for excessive permissions, the use of old encryption algorithms, and other security problems within a database schema. - -- Further checks can be done in a QA or test environment using Advanced Threat Protection that scans for code that is vulnerable to SQL-injection. - -- Examples of what to look out for: - - Creation of a user or changing security settings from within an automated SQL-code-update deployment. - - A stored procedure, which, depending on the parameters provided, updates a monetary value in a cell in a non-conforming way. - -- Make sure the person conducting the review is an individual other than the originating code author and knowledgeable in code-reviews and secure coding. - -- Be sure to know all sources of code-changes. Code can be in T-SQL Scripts. It can be ad-hoc commands to be executed or be deployed in forms of Views, Functions, Triggers, and Stored Procedures. It can be part of SQL Agent Job definitions (Steps). It can also be executed from within SSIS packages, Azure Data Factory, and other services. - -## Data protection - -Data protection is a set of capabilities for safeguarding important information from compromise by encryption or obfuscation. - -> [!NOTE] -> Microsoft attests to Azure SQL Database and SQL Managed Instance as being FIPS 140-2 Level 1 compliant. 
This is done after verifying the strict use of FIPS 140-2 Level 1 acceptable algorithms and FIPS 140-2 Level 1 validated instances of those algorithms including consistency with required key lengths, key management, key generation, and key storage. This attestation is meant to allow our customers to respond to the need or requirement for the use of FIPS 140-2 Level 1 validated instances in the processing of data or delivery of systems or applications. We define the terms "FIPS 140-2 Level 1 compliant" and "FIPS 140-2 Level 1 compliance" used in the above statement to demonstrate their intended applicability to U.S. and Canadian government use of the different term "FIPS 140-2 Level 1 validated." - -### Encrypt data in transit - -> Mentioned in: OSA Practice #6, ISO Control Family: Cryptography - -Protects your data while data moves between your client and server. Refer to [Network Security](#network-security). - -### Encrypt data at rest - -> Mentioned in: OSA Practice #6, ISO Control Family: Cryptography - -Encryption at rest is the cryptographic protection of data when it is persisted in database, log, and backup files. - -**How to implement**: - -- [Transparent Database Encryption (TDE)](transparent-data-encryption-tde-overview.md) with service managed keys are enabled by default for any databases created after 2017 in Azure SQL Database and SQL Managed Instance. -- In a managed instance, if the database is created from a restore operation using an on-premises server, the TDE setting of the original database will be honored. If the original database doesn't have TDE enabled, we recommend that TDE be manually turned on for the managed instance. - -**Best practices**: - -- Don't store data that requires encryption-at-rest in the master database. The master database can't be encrypted with TDE. - -- Use customer-managed keys in Azure Key Vault if you need increased transparency and granular control over the TDE protection. 
Azure Key Vault allows the ability to revoke permissions at any time to render the database inaccessible. You can centrally manage TDE protectors along with other keys, or rotate the TDE protector at your own schedule using Azure Key Vault. - -- If you're using customer-managed keys in Azure Key Vault, follow the articles, [Guidelines for configuring TDE with Azure Key Vault](transparent-data-encryption-byok-overview.md#recommendations-when-configuring-akv) and [How to configure Geo-DR with Azure Key Vault](transparent-data-encryption-byok-overview.md#geo-dr-and-customer-managed-tde). - -### Protect sensitive data in use from high-privileged, unauthorized users - -Data in use is the data stored in memory of the database system during the execution of SQL queries. If your database stores sensitive data, your organization may be required to ensure that high-privileged users are prevented from viewing sensitive data in your database. High-privilege users, such as Microsoft operators or DBAs in your organization should be able to manage the database, but prevented from viewing and potentially exfiltrating sensitive data from the memory of the SQL process or by querying the database. - -The policies that determine which data is sensitive and whether the sensitive data must be encrypted in memory and not accessible to administrators in plaintext, are specific to your organization and compliance regulations you need to adhere to. Please see the related requirement: [Identify and tag sensitive data](#identify-and-tag-sensitive-data). - -**How to implement**: - -- Use [Always Encrypted](/sql/relational-databases/security/encryption/always-encrypted-database-engine) to ensure sensitive data isn't exposed in plaintext in Azure SQL Database or SQL Managed Instance, even in memory/in use. 
Always Encrypted protects the data from Database Administrators (DBAs) and cloud admins (or bad actors who can impersonate high-privileged but unauthorized users) and gives you more control over who can access your data. - -**Best practices**: - -- Always Encrypted isn't a substitute to encrypt data at rest (TDE) or in transit (SSL/TLS). Always Encrypted shouldn't be used for non-sensitive data to minimize performance and functionality impact. Using Always Encrypted in conjunction with TDE and Transport Layer Security (TLS) is recommended for comprehensive protection of data at-rest, in-transit, and in-use. - -- Assess the impact of encrypting the identified sensitive data columns before you deploy Always Encrypted in a production database. In general, Always Encrypted reduces the functionality of queries on encrypted columns and has other limitations, listed in [Always Encrypted - Feature Details](/sql/relational-databases/security/encryption/always-encrypted-database-engine#feature-details). Therefore, you may need to rearchitect your application to re-implement the functionality, a query does not support, on the client side or/and refactor your database schema, including the definitions of stored procedures, functions, views and triggers. Existing applications may not work with encrypted columns if they do not adhere to the restrictions and limitations of Always Encrypted. While the ecosystem of Microsoft tools, products and services supporting Always Encrypted is growing, a number of them do not work with encrypted columns. Encrypting a column may also impact query performance, depending on the characteristics of your workload. - -- Manage Always Encrypted keys with role separation if you're using Always Encrypted to protect data from malicious DBAs. With role separation, a security admin creates the physical keys. The DBA creates the column master key and column encryption key metadata objects describing the physical keys in the database. 
During this process, the security admin doesn't need access to the database, and the DBA doesn't need access to the physical keys in plaintext. - - See the article, [Managing Keys with Role Separation](/sql/relational-databases/security/encryption/overview-of-key-management-for-always-encrypted#managing-keys-with-role-separation) for details. - -- Store your column master keys in Azure Key Vault for ease of management. Avoid using Windows Certificate Store (and in general, distributed key store solutions, as opposed central key management solutions) that make key management hard. - -- Think carefully through the tradeoffs of using multiple keys (column master key or column encryption keys). Keep the number of keys small to reduce key management cost. One column master key and one column encryption key per database is typically sufficient in steady-state environments (not in the middle of a key rotation). You may need additional keys if you have different user groups, each using different keys and accessing different data. - -- Rotate column master keys per your compliance requirements. If you also need to rotate column encryption keys, consider using online encryption to minimize application downtime. - - See the article, [Performance and Availability Considerations](/sql/relational-databases/security/encryption/configure-column-encryption-using-powershell#performance-and-availability-considerations). - -- Use deterministic encryption if computations (equality) on data need to be supported. Otherwise, use randomized encryption. Avoid using deterministic encryption for low-entropy data sets, or data sets with publicly known distribution. - -- If you're concerned about third parties accessing your data legally without your consent, ensure that all application and tools that have access to the keys and data in plaintext run outside of Microsoft Azure Cloud. 
Without access to the keys, the third party will have no way of decrypting the data unless they bypass the encryption. - -- Always Encrypted doesn't easily support granting temporary access to the keys (and the protected data). For example, if you need to share the keys with a DBA to allow the DBA to do some cleansing operations on sensitive and encrypted data. The only way to reliability revoke the access to the data from the DBA will be to rotate both the column encryption keys and the column master keys protecting the data, which is an expensive operation. - -- To access the plaintext values in encrypted columns, a user needs to have access to the Column Master Key (CMK) that protects columns, which is configured in the key store holding the CMK. The user also needs to have the **VIEW ANY COLUMN MASTER KEY DEFINITION** and **VIEW ANY COLUMN ENCRYPTION KEY DEFINITION** database permissions. - -### Control access of application users to sensitive data through encryption - -Encryption can be used as a way to ensure that only specific application users who have access to cryptographic keys can view or update the data. - -**How to implement**: - -- Use Cell-level Encryption (CLE). See the article, [Encrypt a Column of Data](/sql/relational-databases/security/encryption/encrypt-a-column-of-data) for details. -- Use Always Encrypted, but be aware of its limitation. The limitations are listed below. - -**Best practices** - -When using CLE: - -- Control access to keys through SQL permissions and roles. - -- Use AES (AES 256 recommended) for data encryption. Algorithms, such RC4, DES and TripleDES, are deprecated and shouldn't be used because of known vulnerabilities. - -- Protect symmetric keys with asymmetric keys/certificates (not passwords) to avoid using 3DES. - -- Be careful when migrating a database using Cell-Level Encryption via export/import (bacpac files). 
- - See the article, [Recommendations for using Cell Level Encryption in Azure SQL Database](/archive/blogs/sqlsecurity/recommendations-for-using-cell-level-encryption-in-azure-sql-database) on how to prevent losing keys when migrating data, and for other best practice guidance. - -Keep in mind that Always Encrypted is primarily designed to protect sensitive data in use from high-privilege users of Azure SQL Database (cloud operators, DBAs) - see [Protect sensitive data in use from high-privileged, unauthorized users](#protect-sensitive-data-in-use-from-high-privileged-unauthorized-users). Be aware of the following challenges when using Always Encrypted to protect data from application users: - -- By default, all Microsoft client drivers supporting Always Encrypted maintain a global (one per application) cache of column encryption keys. Once a client driver acquires a plaintext column encryption key by contacting a key store holding a column master key, the plaintext column encryption key is cached. This makes isolating data from users of a multi-user application challenging. If your application impersonates end users when interacting with a key store (such as Azure Key Vault), after a user's query populates the cache with a column encryption key, a subsequent query that requires the same key but is triggered by another user will use the cached key. The driver won't call the key store and it won't check if the second user has a permission to access the column encryption key. As a result, the user can see the encrypted data even if the user doesn't have access to the keys. To achieve the isolation of users within a multi-user application, you can disable column encryption key caching. Disabling caching will cause additional performance overheads, as the driver will need to contact the key store for each data encryption or decryption operation. 
- -### Protect data against unauthorized viewing by application users while preserving data format - -Another technique for preventing unauthorized users from viewing data is to obfuscate or mask the data while preserving data types and formats to ensure that user applications can continue handle and display the data. - -**How to implement**: - -- Use [Dynamic Data Masking](/sql/relational-databases/security/dynamic-data-masking) to obfuscate table columns. - -> [!NOTE] -> Always Encrypted does not work with Dynamic Data Masking. It is not possible to encrypt and mask the same column, which implies that you need to prioritize protecting data in use vs. masking the data for your app users via Dynamic Data Masking. - -**Best practices**: - -> [!NOTE] -> Dynamic Data Masking cannot be used to protect data from high-privilege users. Masking policies do not apply to users with administrative access like db_owner. - -- Don't permit app users to run ad-hoc queries (as they may be able to work around Dynamic Data Masking). - - See the article, [Bypassing masking using inference or brute-force techniques](/sql/relational-databases/security/dynamic-data-masking#security-note-bypassing-masking-using-inference-or-brute-force-techniques) for details. - -- Use a proper access control policy (via SQL permissions, roles, RLS) to limit user permissions to make updates in the masked columns. Creating a mask on a column doesn't prevent updates to that column. Users that receive masked data when querying the masked column, can update the data if they have write-permissions. - -- Dynamic Data Masking doesn't preserve the statistical properties of the masked values. This may impact query results (for example, queries containing filtering predicates or joins on the masked data). - -## Network security - -Network security refers to access controls and best practices to secure your data in transit to Azure SQL Database. 
- -### Configure my client to connect securely to SQL Database/SQL Managed Instance - -Best practices on how to prevent client machines and applications with well-known vulnerabilities (for example, using older TLS protocols and cipher suites) from connecting to Azure SQL Database and SQL Managed Instance. - -**How to implement**: - -- Ensure that client machines connecting to Azure SQL Database and SQL Managed Instance are using the latest [Transport Layer Security (TLS)](security-overview.md#transport-layer-security-encryption-in-transit) version. - -**Best practices**: - -- Enforce a minimal TLS version at the [SQL Database server](connectivity-settings.md#minimal-tls-version) or [SQL Managed Instance](../managed-instance/minimal-tls-version-configure.md) level using the minimal TLS version setting. We recommend setting the minimal TLS version to 1.2, after testing to confirm your applications support it. TLS 1.2 includes fixes for vulnerabilities found in previous versions. - -- Configure all your apps and tools to connect to SQL Database with encryption enabled - - Encrypt = On, TrustServerCertificate = Off (or equivalent with non-Microsoft drivers). - -- If your app uses a driver that doesn't support TLS or supports an older version of TLS, replace the driver, if possible. If not possible, carefully evaluate the security risks. - - Reduce attack vectors via vulnerabilities in SSL 2.0, SSL 3.0, TLS 1.0, and TLS 1.1 by disabling them on client machines connecting to Azure SQL Database per [Transport Layer Security (TLS) registry settings](/windows-server/security/tls/tls-registry-settings#tls-10). - - Check cipher suites available on the client: [Cipher Suites in TLS/SSL (Schannel SSP)](/windows/desktop/SecAuthN/cipher-suites-in-schannel). Specifically, disable 3DES per [Configuring TLS Cipher Suite Order](/windows-server/security/tls/manage-tls#configuring-tls-cipher-suite-order). 
- -### Minimize attack surface - -Minimize the number of features that can be attacked by a malicious user. Implement network access controls for Azure SQL Database. - -> Mentioned in: OSA Practice #5 - -**How to implement**: - -In SQL Database: - -- Set Allow Access to Azure services to OFF at the server-level -- Use VNet Service endpoints and VNet Firewall Rules. -- Use Private Link. - -In SQL Managed Instance: - -- Follow the guidelines in [Network requirements](../managed-instance/connectivity-architecture-overview.md#network-requirements). - -**Best practices**: - -- Restricting access to Azure SQL Database and SQL Managed Instance by connecting on a private endpoint (for example, using a private data path): - - A managed instance can be isolated inside a virtual network to prevent external access. Applications and tools that are in the same or peered virtual network in the same region could access it directly. Applications and tools that are in different region could use virtual-network-to-virtual-network connection or ExpressRoute circuit peering to establish connection. Customer should use Network Security Groups (NSG) to restrict access over port 1433 only to resources that require access to a managed instance. - - For a SQL Database, use the [Private Link](../../private-link/private-endpoint-overview.md) feature that provides a dedicated private IP for the server inside your virtual network. You can also use [Virtual network service endpoints with virtual network firewall rules](vnet-service-endpoint-rule-overview.md) to restrict access to your servers. - - Mobile users should use point-to-site VPN connections to connect over the data path. - - Users connected to their on-premises network should use site-to-site VPN connection or ExpressRoute to connect over the data path. - -- You can access Azure SQL Database and SQL Managed Instance by connecting to a public endpoint (for example, using a public data path). 
The following best practices should be considered: - - For a server in SQL Database, use [IP firewall rules](firewall-configure.md) to restrict access to only authorized IP addresses. - - For SQL Managed Instance, use Network Security Groups (NSG) to restrict access over port 3342 only to required resources. For more information, see [Use a managed instance securely with public endpoints](../managed-instance/public-endpoint-overview.md). - -> [!NOTE] -> The SQL Managed Instance public endpoint is not enabled by default and it must be explicitly enabled. If company policy disallows the use of public endpoints, use [Azure Policy](../../governance/policy/overview.md) to prevent enabling public endpoints in the first place. - -- Set up Azure Networking components: - - Follow [Azure best practices for network security](../../security/fundamentals/network-best-practices.md). - - Plan Virtual Network configuration per best practices outlined in [Azure Virtual Network frequently asked questions (FAQ)](../../virtual-network/virtual-networks-faq.md). - - Segment a virtual network into multiple subnets and assign resources for similar role to the same subnet (for example, front-end vs back-end resources). - - Use [Network Security Groups (NSGs)](../../virtual-network/network-security-groups-overview.md) to control traffic between subnets inside the Azure virtual network boundary. - - Enable [Azure Network Watcher](../../network-watcher/network-watcher-monitoring-overview.md) for your subscription to monitor inbound and outbound network traffic. - -### Configure Power BI for secure connections to SQL Database/SQL Managed Instance - -**Best practices**: - -- For Power BI Desktop, use private data path whenever possible. - -- Ensure that Power BI Desktop is connecting using TLS1.2 by setting the registry key on the client machine as per [Transport Layer Security (TLS)](/windows-server/security/tls/tls-registry-settings) registry settings. 
- -- Restrict data access for specific users via [Row-level security (RLS) with Power BI](/power-bi/service-admin-rls). - -- For Power BI Service, use the [on-premises data gateway](/power-bi/service-gateway-onprem), keeping in mind [Limitations and Considerations](/power-bi/service-gateway-deployment-guidance#installation-considerations-for-the-on-premises-data-gateway). - -### Configure App Service for secure connections to SQL Database/SQL Managed Instance - -**Best practices**: - -- For a simple Web App, connecting over public endpoint requires setting **Allow Azure Services** to ON. - -- [Integrate your app with an Azure Virtual Network](../../app-service/overview-vnet-integration.md) for private data path connectivity to a managed instance. Optionally, you can also deploy a Web App with [App Service Environments (ASE)](../../app-service/environment/intro.md). - -- For Web App with ASE or virtual network Integrated Web App connecting to a database in SQL Database, you can use [virtual network service endpoints and virtual network firewall rules](vnet-service-endpoint-rule-overview.md) to limit access from a specific virtual network and subnet. Then set **Allow Azure Services** to OFF. You can also connect ASE to a managed instance in SQL Managed Instance over a private data path. - -- Ensure that your Web App is configured per the article, [Best practices for securing platform as a service (PaaS) web and mobile applications using Azure App Service](../../security/fundamentals/paas-applications-using-app-services.md). - -- Install [Web Application Firewall (WAF)](../../web-application-firewall/ag/ag-overview.md) to protect your web app from common exploits and vulnerabilities. - -### Configure Azure virtual machine hosting for secure connections to SQL Database/SQL Managed Instance - -**Best practices**: - -- Use a combination of Allow and Deny rules on the NSGs of Azure virtual machines to control which regions can be accessed from the VM. 
- -- Ensure that your VM is configured per the article, [Security best practices for IaaS workloads in Azure](../../security/fundamentals/iaas.md). - -- Ensure that all VMs are associated with a specific virtual network and subnet. - -- Evaluate if you need the default route 0.0.0.0/Internet per the guidance at [about forced tunneling](../../vpn-gateway/vpn-gateway-forced-tunneling-rm.md#about-forced-tunneling). - - If yes – for example, front-end subnet - then keep the default route. - - If no – for example, middle tier or back-end subnet – then enable force tunneling so no traffic goes over Internet to reach on-premises (a.k.a cross-premises). - -- Implement [optional default routes](../../virtual-network/virtual-networks-udr-overview.md#optional-default-routes) if you're using peering or connecting to on-premises. - -- Implement [User Defined Routes](../../virtual-network/virtual-networks-udr-overview.md#user-defined) if you need to send all traffic in the virtual network to a Network Virtual Appliance for packet inspection. - -- Use [virtual network service endpoints](vnet-service-endpoint-rule-overview.md) for secure access to PaaS services like Azure Storage via the Azure backbone network. - -### Protect against Distributed Denial of Service (DDoS) attacks - -Distributed Denial of Service (DDoS) attacks are attempts by a malicious user to send a flood of network traffic to Azure SQL Database with the aim of overwhelming the Azure infrastructure and causing it to reject valid logins and workload. - -> Mentioned in: OSA Practice #9 - -**How to implement**: - -DDoS protection is automatically enabled as part of the Azure Platform. It includes always-on traffic monitoring and real-time mitigation of network-level attacks on public endpoints. - -- Use [Azure DDoS Protection](../../ddos-protection/ddos-protection-overview.md) to monitor public IP addresses associated to resources deployed in virtual networks. 
- -- Use [Advanced Threat Protection for Azure SQL Database](threat-detection-overview.md) to detect Denial of Service (DoS) attacks against databases. - -**Best practices**: - -- Following the practices described in [Minimize Attack Surface](#minimize-attack-surface) helps minimize DDoS attack threats. - -- The Advanced Threat Protection **Brute force SQL credentials** alert helps to detect brute force attacks. In some cases, the alert can even distinguish penetration testing workloads. - -- For Azure VM hosting applications connecting to SQL Database: - - Follow recommendation to Restrict access through Internet-facing endpoints in Microsoft Defender for Cloud. - - Use virtual machine scale sets to run multiple instances of your application on Azure VMs. - - Disable RDP and SSH from Internet to prevent brute force attack. - -## Monitoring, Logging, and Auditing - -This section refers to capabilities to help you detect anomalous activities indicating unusual and potentially harmful attempts to access or exploit databases. It also describes best practices to configure database auditing to track and capture database events. - -### Protect databases against attacks - -Advanced threat protection enables you to detect and respond to potential threats as they occur by providing security alerts on anomalous activities. - -**How to implement**: - -- Use [Advanced Threat Protection for SQL](threat-detection-overview.md#alerts) to detect unusual and potentially harmful attempts to access or exploit databases, including: - - SQL injection attack. - - Credentials theft/leak. - - Privilege abuse. - - Data exfiltration. - -**Best practices**: - -- Configure [Microsoft Defender for SQL](azure-defender-for-sql.md) for a specific server or a managed instance. You can also configure Microsoft Defender for SQL for all servers and managed instances in a subscription by enabling [Microsoft Defender for Cloud](../../security-center/security-center-pricing.md). 
- -- For a full investigation experience, it's recommended to enable [SQL Database Auditing](/azure/azure-sql/database/auditing-overview). With auditing, you can track database events and write them to an audit log in an Azure Storage account or Azure Log Analytics workspace. - -### Audit critical security events - -Tracking of database events helps you understand database activity. You can gain insight into discrepancies and anomalies that could indicate business concerns or suspected security violations. It also enables and facilitates adherence to compliance standards. - -**How to implement**: - -- Enable [SQL Database Auditing](/azure/azure-sql/database/auditing-overview) or [Managed Instance Auditing](../managed-instance/auditing-configure.md) to track database events and write them to an audit log in your Azure Storage account, Log Analytics workspace (preview), or Event Hubs (preview). - -- Audit logs can be written to an Azure Storage account, to a Log Analytics workspace for consumption by Azure Monitor logs, or to event hub for consumption using event hub. You can configure any combination of these options, and audit logs will be written to each. - -**Best practices**: - -- By configuring [SQL Database Auditing](/azure/azure-sql/database/auditing-overview) on your server or [Managed Instance Auditing](../managed-instance/auditing-configure.md) to audit events, all existing and newly created databases on that server will be audited. -- By default auditing policy includes all actions (queries, stored procedures and successful and failed logins) against the databases, which may result in high volume of audit logs. It's recommended for customers to [configure auditing for different types of actions and action groups using PowerShell](./auditing-overview.md#manage-auditing). Configuring this will help control the number of audited actions, and minimize the risk of event loss. 
Custom audit configurations allow customers to capture only the audit data that is needed. -- Audit logs can be consumed directly in the [Azure portal](https://portal.azure.com/), or from the storage location that was configured. - -> [!NOTE] -> Enabling auditing to Log Analytics will incur cost based on ingestion rates. Please be aware of the associated cost with using this [option](https://azure.microsoft.com/pricing/details/monitor/), or consider storing the audit logs in an Azure storage account. - -**Further resources**: - -- [SQL Database Auditing](/azure/azure-sql/database/auditing-overview) -- [SQL Server Auditing](/sql/relational-databases/security/auditing/sql-server-audit-database-engine) - -### Secure audit logs - -Restrict access to the storage account to support Separation of Duties and to separate DBA from Auditors. - -**How to implement**: - -- When saving Audit logs to Azure Storage, make sure that access to the Storage Account is restricted to the minimal security principles. Control who has access to the storage account. -- For more information, see [Authorizing access to Azure Storage](../../storage/common/authorize-data-access.md?toc=%2fazure%2fstorage%2fblobs%2ftoc.json). - -**Best practices**: - -- Controlling Access to the Audit Target is a key concept in separating DBA from Auditors. - -- When auditing access to sensitive data, consider securing the data with data encryption to avoid information leakage to the Auditor. For more information, see the section [Protect sensitive data in use from high-privileged, unauthorized users](#protect-sensitive-data-in-use-from-high-privileged-unauthorized-users). - -## Security Management - -This section describes the different aspects and best practices for managing your databases security posture. It includes best practices for ensuring your databases are configured to meet security standards, for discovering and for classifying and tracking access to potentially sensitive data in your databases. 
- -### Ensure that the databases are configured to meet security best practices - -Proactively improve your database security by discovering and remediating potential database vulnerabilities. - -**How to implement**: - -- Enable [SQL Vulnerability Assessment](/sql/relational-databases/security/sql-vulnerability-assessment) (VA) to scan your database for security issues, and to automatically run periodically on your databases. - -**Best practices**: - -- Initially, run VA on your databases and iterate by remediating failing checks that oppose security best practices. Set up baselines for acceptable configurations until the scan comes out _clean_, or all checks have passed. - -- Configure periodic recurring scans to run once a week and configure the relevant person to receive summary emails. - -- Review the VA summary following each weekly scan. For any vulnerabilities found, evaluate the drift from the previous scan result and determine if the check should be resolved. Review if there's a legitimate reason for the change in configuration. - -- Resolve checks and update baselines where relevant. Create ticket items for resolving actions and track these until they're resolved. - -**Further resources**: - -- [SQL Vulnerability Assessment](/sql/relational-databases/security/sql-vulnerability-assessment) -- [SQL Vulnerability Assessment service helps you identify database vulnerabilities](sql-vulnerability-assessment.md) - -### Identify and tag sensitive data - -Discover columns that potentially contain sensitive data. What is considered sensitive data heavily depends on the customer, compliance regulation, etc., and needs to be evaluated by the users in charge of that data. Classify the columns to use advanced sensitivity-based auditing and protection scenarios. - -**How to implement**: - -- Use [SQL Data Discovery and Classification](data-discovery-and-classification-overview.md) to discover, classify, label, and protect the sensitive data in your databases. 
- - View the classification recommendations that are created by the automated discovery in the SQL Data Discovery and Classification dashboard. Accept the relevant classifications, such that your sensitive data is persistently tagged with classification labels. - - Manually add classifications for any additional sensitive data fields that were not discovered by the automated mechanism. -- For more information, see [SQL Data Discovery and Classification](/sql/relational-databases/security/sql-data-discovery-and-classification). - -**Best practices**: - -- Monitor the classification dashboard on a regular basis for an accurate assessment of the database's classification state. A report on the database classification state can be exported or printed to share for compliance and auditing purposes. - -- Continuously monitor the status of recommended sensitive data in SQL Vulnerability Assessment. Track the sensitive data discovery rule and identify any drift in the recommended columns for classification. - -- Use classification in a way that is tailored to the specific needs of your organization. Customize your Information Protection policy (sensitivity labels, information types, discovery logic) in the [SQL Information Protection](../../security-center/security-center-info-protection-policy.md) policy in Microsoft Defender for Cloud. - -### Track access to sensitive data - -Monitor who accesses sensitive data and capture queries on sensitive data in audit logs. - -**How to implement**: - -- Use SQL Audit and Data Classification in combination. - - In your [SQL Database Audit](/azure/azure-sql/database/auditing-overview) log, you can track access specifically to sensitive data. You can also view information such as the data that was accessed, as well as its sensitivity label. 
For more information, see [Data Discovery and Classification](data-discovery-and-classification-overview.md) and [Auditing access to sensitive data](data-discovery-and-classification-overview.md#audit-sensitive-data). - -**Best practices**: - -- See best practices for the Auditing and Data Classification sections: - - [Audit critical security events](#audit-critical-security-events) - - [Identify and tag sensitive data](#identify-and-tag-sensitive-data) - -### Visualize security and compliance status - -Use a unified infrastructure security management system that strengthens the security posture of your data centers (including databases in SQL Database). View a list of recommendations concerning the security of your databases and compliance status. - -**How to implement**: - -- Monitor SQL-related security recommendations and active threats in [Microsoft Defender for Cloud](https://azure.microsoft.com/documentation/services/security-center/). - -## Common security threats and potential mitigations - -This section helps you find security measures to protect against certain attack vectors. It's expected that most mitigations can be achieved by following one or more of the security guidelines above. - -### Security threat: Data exfiltration - -Data exfiltration is the unauthorized copying, transfer, or retrieval of data from a computer or server. See a definition for [data exfiltration](https://en.wikipedia.org/wiki/Data_exfiltration) on Wikipedia. - -Connecting to server over a public endpoint presents a data exfiltration risk as it requires customers open their firewalls to public IPs. - -**Scenario 1**: An application on an Azure VM connects to a database in Azure SQL Database. A rogue actor gets access to the VM and compromises it. In this scenario, data exfiltration means that an external entity using the rogue VM connects to the database, copies personal data, and stores it in a blob storage or a different SQL Database in a different subscription. 
- -**Scenario 2**: A Rogue DBA. This scenario is often raised by security sensitive customers from regulated industries. In this scenario, a high privilege user might copy data from Azure SQL Database to another subscription not controlled by the data owner. - -**Potential mitigations**: - -Today, Azure SQL Database and SQL Managed Instance offer the following techniques for mitigating data exfiltration threats: - -- Use a combination of Allow and Deny rules on the NSGs of Azure VMs to control which regions can be accessed from the VM. -- If using a server in SQL Database, set the following options: - - Allow Azure Services to OFF. - - Only allow traffic from the subnet containing your Azure VM by setting up a VNet Firewall rule. - - Use [Private Link](../../private-link/private-endpoint-overview.md) -- For SQL Managed Instance, using private IP access by default addresses the first data exfiltration concern of a rogue VM. Turn on the subnet delegation feature on a subnet to automatically set the most restrictive policy on a SQL Managed Instance subnet. -- The Rogue DBA concern is more exposed with SQL Managed Instance as it has a larger surface area and networking requirements are visible to customers. The best mitigation for this is applying all of the practices in this security guide to prevent the Rogue DBA scenario in the first place (not only for data exfiltration). Always Encrypted is one method to protect sensitive data by encrypting it and keeping the key inaccessible for the DBA. - -## Security aspects of business continuity and availability - -Most security standards address data availability in terms of operational continuity, achieved by implementing redundancy and fail-over capabilities to avoid single points of failure. For disaster scenarios, it's a common practice to keep backups of Data and Log files. The following section provides a high-level overview of the capabilities that are built-into Azure. 
It also provides additional options that can be configured to meet specific needs: - -- Azure offers built-in high-availability: [High-availability with SQL Database and SQL Managed Instance](high-availability-sla.md) - -- The Business Critical tier includes failover groups, full and differential log backups, and point-in-time-restore backups enabled by default: - - [Automated backups](automated-backups-overview.md) - - [Recover a database using automated database backups - Point-in-time restore](recovery-using-backups.md#point-in-time-restore) - -- Additional business continuity features such as the zone redundant configuration and auto-failover groups across different Azure geos can be configured: - - [High-availability - Zone redundant configuration for Premium & Business Critical service tiers](high-availability-sla.md#premium-and-business-critical-service-tier-zone-redundant-availability) - - [High-availability - Zone redundant configuration for General Purpose service tier](high-availability-sla.md#general-purpose-service-tier-zone-redundant-availability) - - [Overview of business continuity](business-continuity-high-availability-disaster-recover-hadr-overview.md) - -## Next steps - -- See [An overview of Azure SQL Database security capabilities](security-overview.md) diff --git a/articles/azure-sql/database/security-controls-policy.md b/articles/azure-sql/database/security-controls-policy.md deleted file mode 100644 index 43ea6761be20a..0000000000000 --- a/articles/azure-sql/database/security-controls-policy.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: Azure Policy Regulatory Compliance controls for Azure SQL Database -description: Lists Azure Policy Regulatory Compliance controls available for Azure SQL Database and SQL Managed Instance. These built-in policy definitions provide common approaches to managing the compliance of your Azure resources. 
-ms.date: 03/10/2022 -ms.topic: sample -author: LitKnd -ms.author: kendralittle -ms.service: sql-database -ms.subservice: security -ms.custom: subject-policy-compliancecontrols ---- -# Azure Policy Regulatory Compliance controls for Azure SQL Database & SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -[Regulatory Compliance in Azure Policy](../../governance/policy/concepts/regulatory-compliance.md) -provides Microsoft created and managed initiative definitions, known as _built-ins_, for the -**compliance domains** and **security controls** related to different compliance standards. This -page lists the **compliance domains** and **security controls** for Azure SQL Database and SQL -Managed Instance. You can assign the built-ins for a **security control** individually to help make -your Azure resources compliant with the specific standard. - -[!INCLUDE [azure-policy-compliancecontrols-introwarning](../../../includes/policy/standards/intro-warning.md)] - -[!INCLUDE [azure-policy-compliancecontrols-sql](../../../includes/policy/standards/byrp/microsoft.sql.md)] - -## Next steps - -- Learn more about [Azure Policy Regulatory Compliance](../../governance/policy/concepts/regulatory-compliance.md). -- See the built-ins on the [Azure Policy GitHub repo](https://github.com/Azure/azure-policy). diff --git a/articles/azure-sql/database/security-overview.md b/articles/azure-sql/database/security-overview.md deleted file mode 100644 index ea7e06a412010..0000000000000 --- a/articles/azure-sql/database/security-overview.md +++ /dev/null @@ -1,159 +0,0 @@ ---- -title: Security Overview -titleSuffix: Azure SQL Database & Azure SQL Managed Instance -description: Learn about security in Azure SQL Database and Azure SQL Managed Instance, including how it differs from SQL Server. 
-services: sql-database -ms.service: sql-db-mi -ms.subservice: security -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: conceptual -author: jaszymas -ms.author: jaszymas -ms.reviewer: kendralittle, vanto, emlisa, mathoma -ms.date: 08/23/2021 ---- -# An overview of Azure SQL Database and SQL Managed Instance security capabilities -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -This article outlines the basics of securing the data tier of an application using [Azure SQL Database](sql-database-paas-overview.md), [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md), and [Azure Synapse Analytics](../../synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-what-is.md). The security strategy described follows the layered defense-in-depth approach as shown in the picture below, and moves from the outside in: - -![Diagram of layered defense-in-depth. Customer data is encased in layers of network security, access management and threat and information protections.](./media/security-overview/sql-security-layer.png) - -## Network security - -Microsoft Azure SQL Database, SQL Managed Instance, and Azure Synapse Analytics provide a relational database service for cloud and enterprise applications. To help protect customer data, firewalls prevent network access to the server until access is explicitly granted based on IP address or Azure Virtual network traffic origin. - -### IP firewall rules - -IP firewall rules grant access to databases based on the originating IP address of each request. For more information, see [Overview of Azure SQL Database and Azure Synapse Analytics firewall rules](firewall-configure.md). 
- -### Virtual network firewall rules - -[Virtual network service endpoints](../../virtual-network/virtual-network-service-endpoints-overview.md) extend your virtual network connectivity over the Azure backbone and enable Azure SQL Database to identify the virtual network subnet that traffic originates from. To allow traffic to reach Azure SQL Database, use the SQL [service tags](../../virtual-network/network-security-groups-overview.md) to allow outbound traffic through Network Security Groups. - -[Virtual network rules](vnet-service-endpoint-rule-overview.md) enable Azure SQL Database to only accept communications that are sent from selected subnets inside a virtual network. - -> [!NOTE] -> Controlling access with firewall rules does *not* apply to **SQL Managed Instance**. For more information about the networking configuration needed, see [Connecting to a managed instance](../managed-instance/connect-application-instance.md) - -## Access management - -> [!IMPORTANT] -> Managing databases and servers within Azure is controlled by your portal user account's role assignments. For more information on this article, see [Azure role-based access control in the Azure portal](../../role-based-access-control/overview.md). - -### Authentication - -Authentication is the process of proving the user is who they claim to be. Azure SQL Database and SQL Managed Instance support SQL authentication and Azure AD authentication. SQL Managed instance additionally supports Windows Authentication for Azure AD principals. - -- **SQL authentication**: - - SQL authentication refers to the authentication of a user when connecting to Azure SQL Database or Azure SQL Managed Instance using username and password. A **server admin** login with a username and password must be specified when the server is being created. Using these credentials, a **server admin** can authenticate to any database on that server or instance as the database owner. 
After that, additional SQL logins and users can be created by the server admin, which enable users to connect using username and password. - -- **Azure Active Directory authentication**: - - Azure Active Directory authentication is a mechanism of connecting to [Azure SQL Database](sql-database-paas-overview.md), [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md) and [Azure Synapse Analytics](../../synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-what-is.md) by using identities in Azure Active Directory (Azure AD). Azure AD authentication allows administrators to centrally manage the identities and permissions of database users along with other Azure services in one central location. This includes the minimization of password storage and enables centralized password rotation policies. - - A server admin called the **Active Directory administrator** must be created to use Azure AD authentication with SQL Database. For more information, see [Connecting to SQL Database By Using Azure Active Directory Authentication](authentication-aad-overview.md). Azure AD authentication supports both managed and federated accounts. The federated accounts support Windows users and groups for a customer domain federated with Azure AD. - - Additional Azure AD authentication options available are [Active Directory Universal Authentication for SQL Server Management Studio](authentication-mfa-ssms-overview.md) connections including [multi-factor authentication](../../active-directory/authentication/concept-mfa-howitworks.md) and [Conditional Access](conditional-access-configure.md). - -- **Windows Authentication for Azure AD Principals (Preview)**: - - [Kerberos authentication for Azure AD Principals](../managed-instance/winauth-azuread-overview.md) (Preview) enables Windows Authentication for Azure SQL Managed Instance. 
Windows Authentication for managed instances empowers customers to move existing services to the cloud while maintaining a seamless user experience and provides the basis for infrastructure modernization. - - To enable Windows Authentication for Azure Active Directory (Azure AD) principals, you will turn your Azure AD tenant into an independent Kerberos realm and create an incoming trust in the customer domain. Learn [how Windows Authentication for Azure SQL Managed Instance is implemented with Azure Active Directory and Kerberos](../managed-instance/winauth-implementation-aad-kerberos.md). - -> [!IMPORTANT] -> Managing databases and servers within Azure is controlled by your portal user account's role assignments. For more information, see [Azure role-based access control in Azure portal](../../role-based-access-control/overview.md). Controlling access with firewall rules does *not* apply to **SQL Managed Instance**. Please see the following article on [connecting to a managed instance](../managed-instance/connect-application-instance.md) for more information about the networking configuration needed. - -## Authorization - -Authorization refers to controlling access on resources and commands within a database. This is done by assigning permissions to a user within a database in Azure SQL Database or Azure SQL Managed Instance. Permissions are ideally managed by adding user accounts to [database roles](/sql/relational-databases/security/authentication-access/database-level-roles) and assigning database-level permissions to those roles. Alternatively, an individual user can also be granted certain [object-level permissions](/sql/relational-databases/security/permissions-database-engine). For more information, see [Logins and users](logins-create-manage.md). - -As a best practice, create custom roles when needed. Add users to the role with the least privileges required to do their job function. Do not assign permissions directly to users. 
The server admin account is a member of the built-in db_owner role, which has extensive permissions and should only be granted to a few users with administrative duties. To further limit the scope of what a user can do, the [EXECUTE AS](/sql/t-sql/statements/execute-as-clause-transact-sql) clause can be used to specify the execution context of the called module. Following these best practices is also a fundamental step towards Separation of Duties. - -### Row-level security - -Row-Level Security enables customers to control access to rows in a database table based on the characteristics of the user executing a query (for example, group membership or execution context). Row-Level Security can also be used to implement custom Label-based security concepts. For more information, see [Row-Level security](/sql/relational-databases/security/row-level-security). - -![Diagram showing that Row-Level Security shields individual rows of a SQL database from access by users via a client app.](./media/security-overview/azure-database-rls.png) - -## Threat protection - -SQL Database and SQL Managed Instance secure customer data by providing auditing and threat detection capabilities. - -### SQL auditing in Azure Monitor logs and Event Hubs - -SQL Database and SQL Managed Instance auditing tracks database activities and helps maintain compliance with security standards by recording database events to an audit log in a customer-owned Azure storage account. Auditing allows users to monitor ongoing database activities, as well as analyze and investigate historical activity to identify potential threats or suspected abuse and security violations. For more information, see Get started with [SQL Database Auditing](/azure/azure-sql/database/auditing-overview). - -### Advanced Threat Protection - -Advanced Threat Protection analyzes your logs to detect unusual behavior and potentially harmful attempts to access or exploit databases. 
Alerts are created for suspicious activities such as SQL injection, potential data infiltration, and brute force attacks or for anomalies in access patterns to catch privilege escalations and breached credentials use. Alerts are viewed from the [Microsoft Defender for Cloud](https://azure.microsoft.com/services/security-center/), where the details of the suspicious activities are provided and recommendations for further investigation given along with actions to mitigate the threat. Advanced Threat Protection can be enabled per server for an additional fee. For more information, see [Get started with SQL Database Advanced Threat Protection](threat-detection-configure.md). - -![Diagram showing SQL Threat Detection monitoring access to the SQL database for a web app from an external attacker and malicious insider.](./media/security-overview/azure-database-td.jpg) - -## Information protection and encryption - -### Transport Layer Security (Encryption-in-transit) - -SQL Database, SQL Managed Instance, and Azure Synapse Analytics secure customer data by encrypting data in motion with [Transport Layer Security (TLS)](https://support.microsoft.com/help/3135244/tls-1-2-support-for-microsoft-sql-server). - -SQL Database, SQL Managed Instance, and Azure Synapse Analytics enforce encryption (SSL/TLS) at all times for all connections. This ensures all data is encrypted "in transit" between the client and server irrespective of the setting of **Encrypt** or **TrustServerCertificate** in the connection string. - -As a best practice, we recommend that in the connection string used by the application, you specify an encrypted connection and _**not**_ trust the server certificate. This forces your application to verify the server certificate and thus prevents your application from being vulnerable to man-in-the-middle attacks. - -For example, when using the ADO.NET driver this is accomplished via **Encrypt=True** and **TrustServerCertificate=False**. 
If you obtain your connection string from the Azure portal, it will have the correct settings. - -> [!IMPORTANT] -> Note that some non-Microsoft drivers may not use TLS by default or rely on an older version of TLS (<1.2) in order to function. In this case the server still allows you to connect to your database. However, we recommend that you evaluate the security risks of allowing such drivers and application to connect to SQL Database, especially if you store sensitive data. -> -> For further information about TLS and connectivity, see [TLS considerations](connect-query-content-reference-guide.md#tls-considerations-for-database-connectivity) - -### Transparent Data Encryption (Encryption-at-rest) - -[Transparent data encryption (TDE) for SQL Database, SQL Managed Instance, and Azure Synapse Analytics](transparent-data-encryption-tde-overview.md) adds a layer of security to help protect data at rest from unauthorized or offline access to raw files or backups. Common scenarios include data center theft or unsecured disposal of hardware or media such as disk drives and backup tapes. TDE encrypts the entire database using an AES encryption algorithm, which doesn't require application developers to make any changes to existing applications. - -In Azure, all newly created databases are encrypted by default and the database encryption key is protected by a built-in server certificate. Certificate maintenance and rotation are managed by the service and require no input from the user. Customers who prefer to take control of the encryption keys can manage the keys in [Azure Key Vault](../../key-vault/general/security-features.md). 
- -### Key management with Azure Key Vault - -[Bring Your Own Key](transparent-data-encryption-byok-overview.md) (BYOK) support for [Transparent Data Encryption](/sql/relational-databases/security/encryption/transparent-data-encryption) (TDE) allows customers to take ownership of key management and rotation using [Azure Key Vault](../../key-vault/general/security-features.md), Azure's cloud-based external key management system. If the database's access to the key vault is revoked, a database cannot be decrypted and read into memory. Azure Key Vault provides a central key management platform, leverages tightly monitored hardware security modules (HSMs), and enables separation of duties between management of keys and data to help meet security compliance requirements. - -### Always Encrypted (Encryption-in-use) - -![Diagram showing the basics of the Always Encrypted feature. An SQL database with a lock is only accessed by an app containing a key.](./media/security-overview/azure-database-ae.png) - -[Always Encrypted](/sql/relational-databases/security/encryption/always-encrypted-database-engine) is a feature designed to protect sensitive data stored in specific database columns from access (for example, credit card numbers, national identification numbers, or data on a _need to know_ basis). This includes database administrators or other privileged users who are authorized to access the database to perform management tasks, but have no business need to access the particular data in the encrypted columns. The data is always encrypted, which means the encrypted data is decrypted only for processing by client applications with access to the encryption key. The encryption key is never exposed to SQL Database or SQL Managed Instance and can be stored either in the [Windows Certificate Store](always-encrypted-certificate-store-configure.md) or in [Azure Key Vault](always-encrypted-azure-key-vault-configure.md). 
- -### Dynamic data masking - -![Diagram showing dynamic data masking. A business app sends data to a SQL database which masks the data before sending it back to the business app.](./media/security-overview/azure-database-ddm.png) - -Dynamic data masking limits sensitive data exposure by masking it to non-privileged users. Dynamic data masking automatically discovers potentially sensitive data in Azure SQL Database and SQL Managed Instance and provides actionable recommendations to mask these fields, with minimal impact to the application layer. It works by obfuscating the sensitive data in the result set of a query over designated database fields, while the data in the database is not changed. For more information, see [Get started with SQL Database and SQL Managed Instance dynamic data masking](dynamic-data-masking-overview.md). - -## Security management - -### Vulnerability assessment - -[Vulnerability assessment](sql-vulnerability-assessment.md) is an easy-to-configure service that can discover, track, and help remediate potential database vulnerabilities with the goal to proactively improve overall database security. Vulnerability assessment (VA) is part of the Microsoft Defender for SQL offering, which is a unified package for advanced SQL security capabilities. Vulnerability assessment can be accessed and managed via the central Microsoft Defender for SQL portal. - -### Data discovery and classification - -Data discovery and classification (currently in preview) provides basic capabilities built into Azure SQL Database and SQL Managed Instance for discovering, classifying and labeling the sensitive data in your databases. Discovering and classifying your most sensitive data (business/financial, healthcare, personal data, etc.) can play a pivotal role in your organizational information protection posture. It can serve as infrastructure for: - -- Various security scenarios, such as monitoring (auditing) and alerting on anomalous access to sensitive data. 
-- Controlling access to, and hardening the security of, databases containing highly sensitive data. -- Helping meet data privacy standards and regulatory compliance requirements. - -For more information, see [Get started with data discovery and classification](data-discovery-and-classification-overview.md). - -### Compliance - -In addition to the above features and functionality that can help your application meet various security requirements, Azure SQL Database also participates in regular audits, and has been certified against a number of compliance standards. For more information, see the [Microsoft Azure Trust Center](https://www.microsoft.com/trust-center/compliance/compliance-overview) where you can find the most current list of SQL Database compliance certifications. - -## Next steps - -- For a discussion of the use of logins, user accounts, database roles, and permissions in SQL Database and SQL Managed Instance, see [Manage logins and user accounts](logins-create-manage.md). -- For a discussion of database auditing, see [auditing](/azure/azure-sql/database/auditing-overview). -- For a discussion of threat detection, see [threat detection](threat-detection-configure.md). 
diff --git a/articles/azure-sql/database/security-server-roles.md b/articles/azure-sql/database/security-server-roles.md deleted file mode 100644 index a341b143b5834..0000000000000 --- a/articles/azure-sql/database/security-server-roles.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -title: Server roles -titleSuffix: Azure SQL Database -description: This article provides an overview of server roles for the logical server of Azure SQL Database -ms.service: sql-database -ms.subservice: security -author: AndreasWolter -ms.author: anwolter -ms.topic: conceptual -ms.date: 03/14/2022 -ms.reviewer: kendralittle, vanto, mathoma ---- - -# Azure SQL Database server roles for permission management - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In Azure SQL Database, the server is a logical concept and permissions cannot be granted on a server level. To simplify permission management, Azure SQL Database provides a set of fixed server-level roles to help you manage the permissions on a [logical server](logical-servers.md). Roles are security principals that group logins. - -> [!NOTE] -> The *roles* concept in this article are like *groups* in the Windows operating system. - -These special fixed server-level roles use the prefix **##MS_** and the suffix **##** to distinguish from other regular user-created principals. - -Like SQL Server on-premises, server permissions are organized hierarchically. The permissions that are held by these server-level roles can propagate to database permissions. For the permissions to be effectively propagated to the database, a login needs to have a user account in the database. - -For example, the server-level role **##MS_ServerStateReader##** holds the permission **VIEW SERVER STATE**. If a login who is member of this role has a user account in the databases *master* and *WideWorldImporters*, this user will have the permission, **VIEW DATABASE STATE** in those two databases. 
- -> [!NOTE] -> Any permission can be denied within user databases, in effect, overriding the server-wide grant via role membership. However, in the system database *master*, permissions cannot be granted or denied. - -Azure SQL Database currently provides three fixed server roles. The permissions that are granted to the fixed server roles cannot be changed and these roles can't have other fixed roles as members. You can add server-level logins as members to server-level roles. - -> [!IMPORTANT] -> Each member of a fixed server role can add other logins to that same role. - -For more information on Azure SQL Database logins and users, see [Authorize database access to SQL Database, SQL Managed Instance, and Azure Synapse Analytics](logins-create-manage.md). - -## Built-in server-level roles - -The following table shows the fixed server-level roles and their capabilities. - -|Built-in server-level role|Description| -|------------------------------|-----------------| -|**##MS_DefinitionReader##**|Members of the **##MS_DefinitionReader##** fixed server role can read all catalog views that are covered by **VIEW ANY DEFINITION**, respectively **VIEW DEFINITION** on any database on which the member of this role has a user account.| -|**##MS_ServerStateReader##**|Members of the **##MS_ServerStateReader##** fixed server role can read all dynamic management views (DMVs) and functions that are covered by **VIEW SERVER STATE**, respectively **VIEW DATABASE STATE** on any database on which the member of this role has a user account.| -|**##MS_ServerStateManager##**|Members of the **##MS_ServerStateManager##** fixed server role have the same permissions as the **##MS_ServerStateReader##** role. 
Additionally, this role holds the **ALTER SERVER STATE** permission, which allows access to several management operations, such as: `DBCC FREEPROCCACHE`, `DBCC FREESYSTEMCACHE ('ALL')`, `DBCC SQLPERF()`; | - - -## Permissions of fixed server roles - -Each built-in server-level role has certain permissions assigned to it. The following table shows the permissions assigned to the server-level roles. It also shows the database-level permissions inherited if a user account exists in the database. - -|Fixed server-level role|Server-level permissions|Database-level permissions (if a database user matching the login exists) -|-------------|----------|-----------------| -|**##MS_DefinitionReader##**|VIEW ANY DATABASE, VIEW ANY DEFINITION, VIEW ANY SECURITY DEFINITION|VIEW DEFINITION, VIEW SECURITY DEFINITION| -|**##MS_ServerStateReader##**|VIEW SERVER STATE, VIEW SERVER PERFORMANCE STATE, VIEW SERVER SECURITY STATE|VIEW DATABASE STATE, VIEW DATABASE PERFORMANCE STATE, VIEW DATABASE SECURITY STATE| -|**##MS_ServerStateManager##**|ALTER SERVER STATE, VIEW SERVER STATE, VIEW SERVER PERFORMANCE STATE, VIEW SERVER SECURITY STATE|VIEW DATABASE STATE, VIEW DATABASE PERFORMANCE STATE, VIEW DATABASE SECURITY STATE| - - -## Working with server-level roles - -The following table explains the system views and functions that you can use to work with server-level roles in Azure SQL Database. 
- -|Feature|Type|Description| -|-------------|----------|-----------------| -|[IS_SRVROLEMEMBER (Transact-SQL)](/sql/t-sql/functions/is-srvrolemember-transact-sql)|Metadata|Indicates whether a SQL login is a member of the specified server-level role.| -|[sys.server_role_members (Transact-SQL)](/sql/relational-databases/system-catalog-views/sys-server-role-members-transact-sql)|Metadata|Returns one row for each member of each server-level role.| -|[sys.sql_logins (Transact-SQL)](/sql/relational-databases/system-catalog-views/sys-sql-logins-transact-sql)|Metadata|Returns one row for each SQL login.| -|[ALTER SERVER ROLE (Transact-SQL)](/sql/t-sql/statements/alter-server-role-transact-sql)|Command|Changes the membership of a server role.| - -## Examples - -The examples in this section show how to work with server-level roles in Azure SQL Database. - -### A. Adding a SQL login to a server-level role - -The following example adds the SQL login 'Jiao' to the server-level role ##MS_ServerStateReader##. This statement has to be run in the virtual master database. - -```sql -ALTER SERVER ROLE ##MS_ServerStateReader## - ADD MEMBER Jiao; -GO -``` - -### B. Listing all principals (SQL authentication) which are members of a server-level role - -The following statement returns all members of any fixed server-level role using the `sys.server_role_members` and `sys.sql_logins` catalog views. This statement has to be run in the virtual master database. - -```sql -SELECT - sql_logins.principal_id AS MemberPrincipalID - , sql_logins.name AS MemberPrincipalName - , roles.principal_id AS RolePrincipalID - , roles.name AS RolePrincipalName -FROM sys.server_role_members AS server_role_members -INNER JOIN sys.server_principals AS roles - ON server_role_members.role_principal_id = roles.principal_id -INNER JOIN sys.sql_logins AS sql_logins - ON server_role_members.member_principal_id = sql_logins.principal_id -; -GO -``` - -### C. 
Complete example: Adding a login to a server-level role, retrieving metadata for role membership and permissions, and running a test query - -#### Part 1: Preparing role membership and user account - -Run this command from the virtual master database. - -```sql -ALTER SERVER ROLE ##MS_ServerStateReader## - ADD MEMBER Jiao - --- check membership in metadata: -select IS_SRVROLEMEMBER('##MS_ServerStateReader##', 'Jiao') ---> 1 = Yes - -SELECT - sql_logins.principal_id AS MemberPrincipalID - , sql_logins.name AS MemberPrincipalName - , roles.principal_id AS RolePrincipalID - , roles.name AS RolePrincipalName -FROM sys.server_role_members AS server_role_members -INNER JOIN sys.server_principals AS roles - ON server_role_members.role_principal_id = roles.principal_id -INNER JOIN sys.sql_logins AS sql_logins - ON server_role_members.member_principal_id = sql_logins.principal_id -; -GO -``` - -Here is the result set. - -``` -MemberPrincipalID MemberPrincipalName RolePrincipalID RolePrincipalName -------------- ------------- ------------------ ----------- -6 Jiao 11 ##MS_ServerStateReader## -``` - -Run this command from a user database. - -```sql --- Creating a database-User for 'Jiao' -CREATE USER Jiao - FROM LOGIN Jiao -; -GO -``` - -#### Part 2: Testing role membership - -Log in as login `Jiao` and connect to the user database used in the example. - -```sql --- retrieve server-level permissions of currently logged on User -SELECT * FROM sys.fn_my_permissions(NULL, 'Server') -; - --- check server-role membership for `##MS_ServerStateReader##` of currently logged on User -SELECT USER_NAME(), IS_SRVROLEMEMBER('##MS_ServerStateReader##') ---> 1 = Yes - --- Does the currently logged in User have the `VIEW DATABASE STATE`-permission? 
-SELECT HAS_PERMS_BY_NAME(NULL, 'DATABASE', 'VIEW DATABASE STATE'); ---> 1 = Yes - --- retrieve database-level permissions of currently logged on User -SELECT * FROM sys.fn_my_permissions(NULL, 'DATABASE') -GO - --- example query: -SELECT * FROM sys.dm_exec_query_stats ---> will return data since this user has the necessary permission - -``` - -### D. Check server-level roles for Azure AD logins - -Run this command in the virtual master database to see all Azure AD logins that are part of server-level roles in SQL Database. For more information on Azure AD server logins, see [Azure Active Directory server principals](authentication-azure-ad-logins.md). - -```sql -SELECT roles.principal_id AS RolePID,roles.name AS RolePName, - server_role_members.member_principal_id AS MemberPID, members.name AS MemberPName - FROM sys.server_role_members AS server_role_members - INNER JOIN sys.server_principals AS roles - ON server_role_members.role_principal_id = roles.principal_id - INNER JOIN sys.server_principals AS members - ON server_role_members.member_principal_id = members.principal_id; -``` - -### E. Check the virtual master database roles for specific logins - -Run this command in the virtual master database to check which roles `bob` has, or change the value to match your principal. - -```sql -SELECT DR1.name AS DbRoleName, isnull (DR2.name, 'No members') AS DbUserName - FROM sys.database_role_members AS DbRMem RIGHT OUTER JOIN sys.database_principals AS DR1 - ON DbRMem.role_principal_id = DR1.principal_id LEFT OUTER JOIN sys.database_principals AS DR2 - ON DbRMem.member_principal_id = DR2.principal_id - WHERE DR1.type = 'R' and DR2.name like 'bob%' -``` - -## Limitations of server-level roles - -- Role assignments may take up to 5 minutes to become effective. Also for existing sessions, changes to server role assignments don't take effect until the connection is closed and reopened. 
This is due to the distributed architecture between the *master* database and other databases on the same logical server. - - Partial workaround: to reduce the waiting period and ensure that server role assignments are current in a database, a server administrator, or an Azure AD administrator can run `DBCC FLUSHAUTHCACHE` in the user database(s) on which the login has access. Current logged on users still have to reconnect after running `DBCC FLUSHAUTHCACHE` for the membership changes to take effect on them. - -- `IS_SRVROLEMEMBER()` isn't supported in the *master* database. - - -## See also - -- [Database-Level Roles](/sql/relational-databases/security/authentication-access/database-level-roles) -- [Security Catalog Views (Transact-SQL)](/sql/relational-databases/system-catalog-views/security-catalog-views-transact-sql) -- [Security Functions (Transact-SQL)](/sql/t-sql/functions/security-functions-transact-sql) -- [Permissions (Database Engine)](/sql/relational-databases/security/permissions-database-engine) -- [DBCC FLUSHAUTHCACHE (Transact-SQL)](/sql/t-sql/database-console-commands/dbcc-flushauthcache-transact-sql) diff --git a/articles/azure-sql/database/serverless-tier-overview.md b/articles/azure-sql/database/serverless-tier-overview.md deleted file mode 100644 index 974df613ddac4..0000000000000 --- a/articles/azure-sql/database/serverless-tier-overview.md +++ /dev/null @@ -1,400 +0,0 @@ ---- -title: Serverless compute tier -description: This article describes the new serverless compute tier and compares it with the existing provisioned compute tier for Azure SQL Database. 
-services: sql-database -ms.service: sql-database -ms.subservice: service-overview -ms.custom: test sqldbrb=1, devx-track-azurecli, devx-track-azurepowershell -ms.topic: conceptual -author: oslake -ms.author: moslake -ms.reviewer: kendralittle, mathoma, wiassaf -ms.date: 04/06/2022 ---- -# Azure SQL Database serverless -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Serverless is a compute tier for single databases in Azure SQL Database that automatically scales compute based on workload demand and bills for the amount of compute used per second. The serverless compute tier also automatically pauses databases during inactive periods when only storage is billed and automatically resumes databases when activity returns. - -## Serverless compute tier - -The serverless compute tier for single databases in Azure SQL Database is parameterized by a compute autoscaling range and an auto-pause delay. The configuration of these parameters shapes the database performance experience and compute cost. - -![serverless billing](./media/serverless-tier-overview/serverless-billing.png) - -### Performance configuration - -- The **minimum vCores** and **maximum vCores** are configurable parameters that define the range of compute capacity available for the database. Memory and IO limits are proportional to the vCore range specified.  -- The **auto-pause delay** is a configurable parameter that defines the period of time the database must be inactive before it is automatically paused. The database is automatically resumed when the next login or other activity occurs. Alternatively, automatic pausing can be disabled. - -### Cost - -- The cost for a serverless database is the summation of the compute cost and storage cost. -- When compute usage is between the min and max limits configured, the compute cost is based on vCore and memory used. -- When compute usage is below the min limits configured, the compute cost is based on the min vCores and min memory configured. 
-- When the database is paused, the compute cost is zero and only storage costs are incurred. -- The storage cost is determined in the same way as in the provisioned compute tier. - -For more cost details, see [Billing](serverless-tier-overview.md#billing). - -## Scenarios - -Serverless is price-performance optimized for single databases with intermittent, unpredictable usage patterns that can afford some delay in compute warm-up after idle usage periods. In contrast, the provisioned compute tier is price-performance optimized for single databases or multiple databases in elastic pools with higher average usage that cannot afford any delay in compute warm-up. - -### Scenarios well suited for serverless compute - -- Single databases with intermittent, unpredictable usage patterns interspersed with periods of inactivity, and lower average compute utilization over time. -- Single databases in the provisioned compute tier that are frequently rescaled and customers who prefer to delegate compute rescaling to the service. -- New single databases without usage history where compute sizing is difficult or not possible to estimate prior to deployment in SQL Database. - -### Scenarios well suited for provisioned compute - -- Single databases with more regular, predictable usage patterns and higher average compute utilization over time. -- Databases that cannot tolerate performance trade-offs resulting from more frequent memory trimming or delays in resuming from a paused state. -- Multiple databases with intermittent, unpredictable usage patterns that can be consolidated into elastic pools for better price-performance optimization. 
- -## Comparison with provisioned compute tier - -The following table summarizes distinctions between the serverless compute tier and the provisioned compute tier: - -| | **Serverless compute** | **Provisioned compute** | -|:---|:---|:---| -|**Database usage pattern**| Intermittent, unpredictable usage with lower average compute utilization over time. | More regular usage patterns with higher average compute utilization over time, or multiple databases using elastic pools.| -| **Performance management effort** |Lower|Higher| -|**Compute scaling**|Automatic|Manual| -|**Compute responsiveness**|Lower after inactive periods|Immediate| -|**Billing granularity**|Per second|Per hour| - -## Purchasing model and service tier - -SQL Database serverless is currently only supported in the General Purpose tier on Generation 5 hardware in the vCore purchasing model. - -## Autoscaling - -### Scaling responsiveness - -In general, serverless databases are run on a machine with sufficient capacity to satisfy resource demand without interruption for any amount of compute requested within limits set by the max vCores value. Occasionally, load balancing automatically occurs if the machine is unable to satisfy resource demand within a few minutes. For example, if the resource demand is 4 vCores, but only 2 vCores are available, then it may take up to a few minutes to load balance before 4 vCores are provided. The database remains online during load balancing except for a brief period at the end of the operation when connections are dropped. - -### Memory management - -Memory for serverless databases is reclaimed more frequently than for provisioned compute databases. This behavior is important to control costs in serverless and can impact performance. - -#### Cache reclamation - -Unlike provisioned compute databases, memory from the SQL cache is reclaimed from a serverless database when CPU or active cache utilization is low. 
- -- Active cache utilization is considered low when the total size of the most recently used cache entries falls below a threshold for a period of time. -- When cache reclamation is triggered, the target cache size is reduced incrementally to a fraction of its previous size and reclaiming only continues if usage remains low. -- When cache reclamation occurs, the policy for selecting cache entries to evict is the same selection policy as for provisioned compute databases when memory pressure is high. -- The cache size is never reduced below the min memory limit as defined by min vCores, that can be configured. - -In both serverless and provisioned compute databases, cache entries may be evicted if all available memory is used. - -When CPU utilization is low, active cache utilization can remain high depending on the usage pattern and prevent memory reclamation. Also, there can be other delays after user activity stops before memory reclamation occurs due to periodic background processes responding to prior user activity. For example, delete operations and Query Store cleanup tasks generate ghost records that are marked for deletion, but are not physically deleted until the ghost cleanup process runs. Ghost cleanup may involve reading additional data pages into cache. - -#### Cache hydration - -The SQL cache grows as data is fetched from disk in the same way and with the same speed as for provisioned databases. When the database is busy, the cache is allowed to grow unconstrained up to the max memory limit. - -## Auto-pausing and auto-resuming - -### Auto-pausing - -Auto-pausing is triggered if all of the following conditions are true for the duration of the auto-pause delay: - -- Number of sessions = 0 -- CPU = 0 for user workload running in the user resource pool - -An option is provided to disable auto-pausing if desired. - -The following features do not support auto-pausing, but do support auto-scaling. 
If any of the following features are used, then auto-pausing must be disabled and the database will remain online regardless of the duration of database inactivity: - -- Geo-replication ([active geo-replication](active-geo-replication-overview.md) and [auto-failover groups](auto-failover-group-overview.md)). -- [Long-term backup retention](long-term-retention-overview.md) (LTR). -- The sync database used in [SQL Data Sync](sql-data-sync-data-sql-server-sql-database.md). Unlike sync databases, hub and member databases support auto-pausing. -- [DNS alias](dns-alias-overview.md) created for the logical server containing a serverless database. -- [Elastic Jobs (preview)](elastic-jobs-overview.md), when the job database is a serverless database. Databases targeted by elastic jobs support auto-pausing, and will be resumed by job connections. - -Auto-pausing is temporarily prevented during the deployment of some service updates which require the database be online. In such cases, auto-pausing becomes allowed again once the service update completes. - -#### Auto-pause troubleshooting - -If auto-pausing is enabled, but a database does not auto-pause after the delay period, and the features listed above are not used, the application or user sessions may be preventing auto-pausing. 
To see if there are any application or user sessions currently connected to the database, connect to the database using any client tool, and execute the following query: - -```sql -SELECT session_id, - host_name, - program_name, - client_interface_name, - login_name, - status, - login_time, - last_request_start_time, - last_request_end_time -FROM sys.dm_exec_sessions AS s -INNER JOIN sys.dm_resource_governor_workload_groups AS wg -ON s.group_id = wg.group_id -WHERE s.session_id <> @@SPID - AND - ( - ( - wg.name like 'UserPrimaryGroup.DB%' - AND - TRY_CAST(RIGHT(wg.name, LEN(wg.name) - LEN('UserPrimaryGroup.DB') - 2) AS int) = DB_ID() - ) - OR - wg.name = 'DACGroup' - ); -``` - -> [!TIP] -> After running the query, make sure to disconnect from the database. Otherwise, the open session used by the query will prevent auto-pausing. - -If the result set is non-empty, it indicates that there are sessions currently preventing auto-pausing. - -If the result set is empty, it is still possible that sessions were open, possibly for a short time, at some point earlier during the auto-pause delay period. To see if such activity has occurred during the delay period, you can use [Azure SQL Auditing](auditing-overview.md) and examine audit data for the relevant period. - -The presence of open sessions, with or without concurrent CPU utilization in the user resource pool, is the most common reason for a serverless database to not auto-pause as expected. - -### Auto-resuming - -Auto-resuming is triggered if any of the following conditions are true at any time: - -|Feature|Auto-resume trigger| -|---|---| -|Authentication and authorization|Login| -|Threat detection|Enabling/disabling threat detection settings at the database or server level.
    Modifying threat detection settings at the database or server level.| -|Data discovery and classification|Adding, modifying, deleting, or viewing sensitivity labels| -|Auditing|Viewing auditing records.
    Updating or viewing auditing policy.| -|Data masking|Adding, modifying, deleting, or viewing data masking rules| -|Transparent data encryption|Viewing state or status of transparent data encryption| -|Vulnerability assessment|Ad hoc scans and periodic scans if enabled| -|Query (performance) data store|Modifying or viewing query store settings| -|Performance recommendations|Viewing or applying performance recommendations| -|Auto-tuning|Application and verification of auto-tuning recommendations such as auto-indexing| -|Database copying|Create database as copy.
    Export to a BACPAC file.| -|SQL data sync|Synchronization between hub and member databases that run on a configurable schedule or are performed manually| -|Modifying certain database metadata|Adding new database tags.
    Changing max vCores, min vCores, or auto-pause delay.| -|SQL Server Management Studio (SSMS)|Using SSMS versions earlier than 18.1 and opening a new query window for any database in the server will resume any auto-paused database in the same server. This behavior does not occur if using SSMS version 18.1 or later.| - -Monitoring, management, or other solutions performing any of the operations listed above will trigger auto-resuming. - -Auto-resuming is also triggered during the deployment of some service updates that require the database be online. - -### Connectivity - -If a serverless database is paused, then the first login will resume the database and return an error stating that the database is unavailable with error code 40613. Once the database is resumed, the login must be retried to establish connectivity. Database clients with connection retry logic should not need to be modified. For connection retry logic options that are built-in to the SqlClient driver, see [configurable retry logic in SqlClient](/sql/connect/ado-net/configurable-retry-logic). - -### Latency - -The latency to auto-resume and auto-pause a serverless database is generally order of 1 minute to auto-resume and 1-10 minutes after the expiration of the delay period to auto-pause. - -### Customer managed transparent data encryption (BYOK) - -If using [customer managed transparent data encryption](transparent-data-encryption-byok-overview.md) (BYOK) and the serverless database is auto-paused when key deletion or revocation occurs, then the database remains in the auto-paused state. In this case, after the database is next resumed, the database becomes inaccessible within approximately 10 minutes. Once the database becomes inaccessible, the recovery process is the same as for provisioned compute databases. 
If the serverless database is online when key deletion or revocation occurs, then the database also becomes inaccessible within approximately 10 minutes in the same way as with provisioned compute databases. - -## Onboarding into serverless compute tier - -Creating a new database or moving an existing database into a serverless compute tier follows the same pattern as creating a new database in provisioned compute tier and involves the following two steps. - -1. Specify the service objective. The service objective prescribes the service tier, hardware configuration, and max vCores. For service objective options, see [serverless resource limits](resource-limits-vcore-single-databases.md#general-purpose---serverless-compute---gen5). - - -2. Optionally, specify the min vCores and auto-pause delay to change their default values. The following table shows the available values for these parameters. - - |Parameter|Value choices|Default value| - |---|---|---| - |Min vCores|Depends on max vCores configured - see [resource limits](resource-limits-vcore-single-databases.md#general-purpose---serverless-compute---gen5).|0.5 vCores| - |Autopause delay|Minimum: 60 minutes (1 hour)
    Maximum: 10080 minutes (7 days)
    Increments: 10 minutes
    Disable autopause: -1|60 minutes| - - -### Create a new database in the serverless compute tier - -The following examples create a new database in the serverless compute tier. - -#### Use Azure portal - -See [Quickstart: Create a single database in Azure SQL Database using the Azure portal](single-database-create-quickstart.md). - - -#### Use PowerShell - -```powershell -New-AzSqlDatabase -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName ` - -ComputeModel Serverless -Edition GeneralPurpose -ComputeGeneration Gen5 ` - -MinVcore 0.5 -MaxVcore 2 -AutoPauseDelayInMinutes 720 -``` -#### Use Azure CLI - -```azurecli -az sql db create -g $resourceGroupName -s $serverName -n $databaseName ` - -e GeneralPurpose -f Gen5 --min-capacity 0.5 -c 2 --compute-model Serverless --auto-pause-delay 720 -``` - - -#### Use Transact-SQL (T-SQL) - -When using T-SQL, default values are applied for the min vcores and autopause delay. They can later be changed from the portal or via other management APIs (PowerShell, Azure CLI, REST API). - -```sql -CREATE DATABASE testdb -( EDITION = 'GeneralPurpose', SERVICE_OBJECTIVE = 'GP_S_Gen5_1' ) ; -``` - -For details, see [CREATE DATABASE](/sql/t-sql/statements/create-database-transact-sql?view=azuresqldb-current&preserve-view=true). - -### Move a database from the provisioned compute tier into the serverless compute tier - -The following examples move a database from the provisioned compute tier into the serverless compute tier. 
- -#### Use PowerShell - -```powershell -Set-AzSqlDatabase -ResourceGroupName $resourceGroupName -ServerName $serverName -DatabaseName $databaseName ` - -Edition GeneralPurpose -ComputeModel Serverless -ComputeGeneration Gen5 ` - -MinVcore 1 -MaxVcore 4 -AutoPauseDelayInMinutes 1440 -``` - -#### Use Azure CLI - -```azurecli -az sql db update -g $resourceGroupName -s $serverName -n $databaseName ` - --edition GeneralPurpose --min-capacity 1 --capacity 4 --family Gen5 --compute-model Serverless --auto-pause-delay 1440 -``` - -#### Use Transact-SQL (T-SQL) - -When using T-SQL, default values are applied for the min vcores and auto-pause delay. They can later be changed from the portal or via other management APIs (PowerShell, Azure CLI, REST API). - -```sql -ALTER DATABASE testdb -MODIFY ( SERVICE_OBJECTIVE = 'GP_S_Gen5_1') ; -``` - -For details, see [ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql?view=azuresqldb-current&preserve-view=true). - -### Move a database from the serverless compute tier into the provisioned compute tier - -A serverless database can be moved into a provisioned compute tier in the same way as moving a provisioned compute database into a serverless compute tier. - -## Modifying serverless configuration - -### Use PowerShell - -Modifying the maximum or minimum vCores, and autopause delay, is performed by using the [Set-AzSqlDatabase](/powershell/module/az.sql/set-azsqldatabase) command in PowerShell using the `MaxVcore`, `MinVcore`, and `AutoPauseDelayInMinutes` arguments. - -### Use Azure CLI - -Modifying the maximum or minimum vCores, and autopause delay, is performed by using the [az sql db update](/cli/azure/sql/db#az-sql-db-update) command in Azure CLI using the `capacity`, `min-capacity`, and `auto-pause-delay` arguments. - -## Monitoring - -### Resources used and billed - -The resources of a serverless database are encapsulated by app package, SQL instance, and user resource pool entities. 
- -#### App package - -The app package is the outermost resource management boundary for a database, regardless of whether the database is in a serverless or provisioned compute tier. The app package contains the SQL instance and external services like Full-text Search that all together scope all user and system resources used by a database in SQL Database. The SQL instance generally dominates the overall resource utilization across the app package. - -#### User resource pool - -The user resource pool is an inner resource management boundary for a database, regardless of whether the database is in a serverless or provisioned compute tier. The user resource pool scopes CPU and IO for user workload generated by DDL queries such as CREATE and ALTER, DML queries such as INSERT, UPDATE, DELETE, and MERGE, and SELECT queries. These queries generally represent the most substantial proportion of utilization within the app package. - -### Metrics - -Metrics for monitoring the resource usage of the app package and user resource pool of a serverless database are listed in the following table: - -|Entity|Metric|Description|Units| -|---|---|---|---| -|App package|app_cpu_percent|Percentage of vCores used by the app relative to max vCores allowed for the app.|Percentage| -|App package|app_cpu_billed|The amount of compute billed for the app during the reporting period. The amount paid during this period is the product of this metric and the vCore unit price.

    Values of this metric are determined by aggregating over time the maximum of CPU used and memory used each second. If the amount used is less than the minimum amount provisioned as set by the min vCores and min memory, then the minimum amount provisioned is billed. In order to compare CPU with memory for billing purposes, memory is normalized into units of vCores by rescaling the amount of memory in GB by 3 GB per vCore.|vCore seconds| -|App package|app_memory_percent|Percentage of memory used by the app relative to max memory allowed for the app.|Percentage| -|User resource pool|cpu_percent|Percentage of vCores used by user workload relative to max vCores allowed for user workload.|Percentage| -|User resource pool|data_IO_percent|Percentage of data IOPS used by user workload relative to max data IOPS allowed for user workload.|Percentage| -|User resource pool|log_IO_percent|Percentage of log MB/s used by user workload relative to max log MB/s allowed for user workload.|Percentage| -|User resource pool|workers_percent|Percentage of workers used by user workload relative to max workers allowed for user workload.|Percentage| -|User resource pool|sessions_percent|Percentage of sessions used by user workload relative to max sessions allowed for user workload.|Percentage| - -### Pause and resume status - -In the Azure portal, the database status is displayed in the overview pane of the server that lists the databases it contains. The database status is also displayed in the overview pane for the database. 
- -Using the following commands to query the pause and resume status of a database: - -#### Use PowerShell - -```powershell -Get-AzSqlDatabase -ResourceGroupName $resourcegroupname -ServerName $servername -DatabaseName $databasename ` - | Select -ExpandProperty "Status" -``` - -#### Use Azure CLI - -```azurecli -az sql db show --name $databasename --resource-group $resourcegroupname --server $servername --query 'status' -o json -``` - -## Resource limits - -For resource limits, see [serverless compute tier](resource-limits-vcore-single-databases.md#general-purpose---serverless-compute---gen5). - -## Billing - -The amount of compute billed is the maximum of CPU used and memory used each second. If the amount of CPU used and memory used is less than the minimum amount provisioned for each, then the provisioned amount is billed. In order to compare CPU with memory for billing purposes, memory is normalized into units of vCores by rescaling the amount of memory in GB by 3 GB per vCore. - -- **Resource billed**: CPU and memory -- **Amount billed**: vCore unit price * max (min vCores, vCores used, min memory GB * 1/3, memory GB used * 1/3) -- **Billing frequency**: Per second - -The vCore unit price is the cost per vCore per second. Refer to the [Azure SQL Database pricing page](https://azure.microsoft.com/pricing/details/sql-database/single/) for specific unit prices in a given region. - -The amount of compute billed is exposed by the following metric: - -- **Metric**: app_cpu_billed (vCore seconds) -- **Definition**: max (min vCores, vCores used, min memory GB * 1/3, memory GB used * 1/3) -- **Reporting frequency**: Per minute - -This quantity is calculated each second and aggregated over 1 minute. - -### Minimum compute bill - -If a serverless database is paused, then the compute bill is zero. If a serverless database is not paused, then the minimum compute bill is no less than the amount of vCores based on max (min vCores, min memory GB * 1/3). 
- -Examples: - -- Suppose a serverless database is not paused and configured with 8 max vCores and 1 min vCore corresponding to 3.0 GB min memory. Then the minimum compute bill is based on max (1 vCore, 3.0 GB * 1 vCore / 3 GB) = 1 vCore. -- Suppose a serverless database is not paused and configured with 4 max vCores and 0.5 min vCores corresponding to 2.1 GB min memory. Then the minimum compute bill is based on max (0.5 vCores, 2.1 GB * 1 vCore / 3 GB) = 0.7 vCores. - -The [Azure SQL Database pricing calculator](https://azure.microsoft.com/pricing/calculator/?service=sql-database) for serverless can be used to determine the min memory configurable based on the number of max and min vCores configured. As a rule, if the min vCores configured is greater than 0.5 vCores, then the minimum compute bill is independent of the min memory configured and based only on the number of min vCores configured. - -### Example scenario - -Consider a serverless database configured with 1 min vCore and 4 max vCores. This configuration corresponds to around 3 GB min memory and 12 GB max memory. Suppose the auto-pause delay is set to 6 hours and the database workload is active during the first 2 hours of a 24-hour period and otherwise inactive. - -In this case, the database is billed for compute and storage during the first 8 hours. Even though the database is inactive starting after the second hour, it is still billed for compute in the subsequent 6 hours based on the minimum compute provisioned while the database is online. Only storage is billed during the remainder of the 24-hour period while the database is paused. 
- -More precisely, the compute bill in this example is calculated as follows: - -|Time Interval|vCores used each second|GB used each second|Compute dimension billed|vCore seconds billed over time interval| -|---|---|---|---|---| -|0:00-1:00|4|9|vCores used|4 vCores * 3600 seconds = 14400 vCore seconds| -|1:00-2:00|1|12|Memory used|12 GB * 1/3 * 3600 seconds = 14400 vCore seconds| -|2:00-8:00|0|0|Min memory provisioned|3 GB * 1/3 * 21600 seconds = 21600 vCore seconds| -|8:00-24:00|0|0|No compute billed while paused|0 vCore seconds| -|Total vCore seconds billed over 24 hours||||50400 vCore seconds| - -Suppose the compute unit price is $0.000145/vCore/second. Then the compute billed for this 24-hour period is the product of the compute unit price and vCore seconds billed: $0.000145/vCore/second * 50400 vCore seconds ~ $7.31. - -### Azure Hybrid Benefit and reserved capacity - -Azure Hybrid Benefit (AHB) and reserved capacity discounts do not apply to the serverless compute tier. - -## Available regions - -The serverless compute tier is available worldwide except the following regions: China East, China North, Germany Central, Germany Northeast, and US Gov Central (Iowa). - -## Next steps - -- To get started, see [Quickstart: Create a single database in Azure SQL Database using the Azure portal](single-database-create-quickstart.md). -- For resource limits, see [Serverless compute tier resource limits](resource-limits-vcore-single-databases.md#general-purpose---serverless-compute---gen5). 
diff --git a/articles/azure-sql/database/service-tier-business-critical.md b/articles/azure-sql/database/service-tier-business-critical.md deleted file mode 100644 index 1d128eabbe0e4..0000000000000 --- a/articles/azure-sql/database/service-tier-business-critical.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: Business Critical service tier -titleSuffix: Azure SQL Database & Azure SQL Managed Instance -description: Learn about the Business Critical service tier for Azure SQL Database and Azure SQL Managed Instance. -services: sql-database -ms.service: sql-db-mi -ms.subservice: service-overview -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: conceptual -author: danimir -ms.author: danil -ms.reviewer: kendralittle, mathoma, urmilano -ms.date: 04/13/2022 ---- -# Business Critical tier - Azure SQL Database and Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Azure SQL Database and Azure SQL Managed Instance are both based on the SQL Server database engine architecture adjusted for the cloud environment in order to ensure default SLA availability even in cases of infrastructure failures. - -This article describes and compares the Business Critical service tier used by Azure SQL Database and Azure SQL Managed instance. The Business Critical service tier is best used for applications requiring high transaction rate, low IO latency, and high IO throughput. This service tier offers the highest resilience to failures and fast failovers using multiple synchronously updated replicas. - -## Overview - -The Business Critical service tier model is based on a cluster of database engine processes. This architectural model relies on a fact that there's always a quorum of available database engine nodes and has minimal performance impact on your workload even during maintenance activities. 
- -Azure upgrades and patches underlying operating system, drivers, and SQL Server database engine transparently with minimal downtime for end users. - -Premium availability is enabled in the Business Critical service tier and is designed for intensive workloads that can't tolerate reduced availability due to the ongoing maintenance operations. - -Compute and storage is integrated on the single node in the premium model. High availability in this architectural model is achieved by replication of compute (SQL Server database engine process) and storage (locally attached SSD) deployed to a four node cluster, using technology similar to SQL Server [Always On availability groups](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server). - -![Cluster of database engine nodes](./media/service-tier-business-critical/business-critical-service-tier.png) - -Both the SQL Server database engine process and underlying .mdf/.ldf files are placed on the same node with locally attached SSD storage providing low latency to your workload. High availability is implemented using technology similar to SQL Server [Always On availability groups](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server). Every database is a cluster of database nodes with one primary database that is accessible for customer workloads, and three secondary processes containing copies of data. The primary node constantly pushes changes to the secondary nodes in order to ensure that the data is available on secondary replicas if the primary node fails for any reason. Failover is handled by the SQL Server database engine – one secondary replica becomes the primary node and a new secondary replica is created to ensure there are enough nodes in the cluster. The workload is automatically redirected to the new primary node. 
- -In addition, the Business Critical cluster has built-in [Read Scale-Out](read-scale-out.md) capability that provides a free-of-charge built-in read-only replica that can be used to run read-only queries (for example reports) that shouldn't affect performance of your primary workload. - -## When to choose this service tier - -The Business Critical service tier is designed for applications that require low-latency responses from the underlying SSD storage (1-2 ms on average), fast recovery if the underlying infrastructure fails, or need to off-load reports, analytics, and read-only queries to the free of charge readable secondary replica of the primary database. - -The key reasons why you should choose Business Critical service tier instead of General Purpose tier are: -- **Low I/O latency requirements** – workloads that need a fast response from the storage layer (1-2 milliseconds on average) should use Business Critical tier. -- **Workload with reporting and analytic queries** that can be redirected to the free-of-charge secondary read-only replica. -- **Higher resiliency and faster recovery from failures**. In a case of system failure, the database on primary instance will be disabled and one of the secondary replicas will immediately become the new read-write primary database that is ready to process queries. The database engine doesn't need to analyze and redo transactions from the log file and load all data in the memory buffer. -- **Advanced data corruption protection**. The Business Critical tier leverages database replicas behind-the-scenes for business continuity purposes, and so the service also then leverages automatic page repair, which is the same technology used for SQL Server database [mirroring and availability groups](/sql/sql-server/failover-clusters/automatic-page-repair-availability-groups-database-mirroring). 
In the event that a replica can't read a page due to a data integrity issue, a fresh copy of the page will be retrieved from another replica, replacing the unreadable page without data loss or customer downtime. This functionality is applicable in General Purpose tier if the database has geo-secondary replica. -- **Higher availability** - The Business Critical tier in Multi-AZ configuration provides resiliency to zonal failures and a higher availability SLA. -- **Fast geo-recovery** - When [active geo-replication](active-geo-replication-overview.md) is configured, the Business Critical tier has a guaranteed Recovery Point Objective (RPO) of 5 seconds and Recovery Time Objective (RTO) of 30 seconds for 100% of deployed hours. - -## Compare Business Critical resource limits - - - -Review the table in this section for a brief overview comparison of the resource limits for Azure SQL Database and Azure SQL managed Instance in the Business Critical service tier. - -For comprehensive details about resource limits, review: -- Azure SQL Database: [vCore single database](resource-limits-vcore-single-databases.md), [vCore pooled database ](resource-limits-vcore-elastic-pools.md), [Hyperscale](service-tier-hyperscale.md), [DTU single database](resource-limits-dtu-single-databases.md) and [DTU pooled databases](resource-limits-dtu-elastic-pools.md) -- Azure SQL Managed Instance: [vCore instance limits](../managed-instance/resource-limits.md) - -To compare features between SQL Database and SQL Managed Instance, see the [database engine features](features-comparison.md). - -The following table shows resource limits for both Azure SQL Database and Azure SQL Managed Instance in the Business Critical service tier. 
- -| **Category** | **Azure SQL Database** | **Azure SQL Managed Instance** | -|:--|:--|:--| -| **Compute size**|1 to 128 vCores | 4, 8, 16, 24, 32, 40, 64, 80 vCores| -| **Storage type** |Local SSD storage|Local SSD storage | -| **Storage size** | 1 GB – 4 TB |32 GB – 16 TB | -| **Tempdb size** | [32 GB per vCore](resource-limits-vcore-single-databases.md) |Up to 4 TB - [limited by storage size](../managed-instance/resource-limits.md#service-tier-characteristics) | -| **Log write throughput** | Single databases: [12 MB/s per vCore (max 96 MB/s)](resource-limits-vcore-single-databases.md)
    Elastic pools: [15 MB/s per vCore (max 120 MB/s)](resource-limits-vcore-elastic-pools.md) | [4 MB/s per vCore (max 48 MB/s)](../managed-instance/resource-limits.md#service-tier-characteristics) | -| **Availability** | [Default SLA](https://azure.microsoft.com/support/legal/sla/azure-sql-database/)
    99.995% SLA with [zone redundancy](high-availability-sla.md#premium-and-business-critical-service-tier-zone-redundant-availability) | [Default SLA](https://azure.microsoft.com/support/legal/sla/azure-sql-sql-managed-instance/)| -| **Backups** | RA-GRS, 1-35 days (7 days by default) | RA-GRS, 1-35 days (7 days by default)| -| [**Read-only replicas**](read-scale-out.md) |1 built-in high availability replica is readable
    0 - 4 [geo-replicas](active-geo-replication-overview.md) |1 built-in high availability replica is readable
    0 - 1 geo-replicas using [auto-failover groups](auto-failover-group-overview.md#best-practices-for-sql-managed-instance) | -| **Pricing/Billing** |[vCore, reserved storage, backup storage, and geo-replicas](https://azure.microsoft.com/pricing/details/sql-database/single/) are charged.
    High availability replicas aren't charged.
    IOPS isn't charged. |[vCore, reserved storage, backup storage, and geo-replicas](https://azure.microsoft.com/pricing/details/sql-database/managed/) are charged.
    High availability replicas aren't charged.
    IOPS isn't charged. | -| **Discount models** |[Reserved instances](reserved-capacity-overview.md)
    [Azure Hybrid Benefit](../azure-hybrid-benefit.md) (not available on dev/test subscriptions)
    [Enterprise](https://azure.microsoft.com/offers/ms-azr-0148p/) and [Pay-As-You-Go](https://azure.microsoft.com/offers/ms-azr-0023p/) Dev/Test subscriptions|[Reserved instances](reserved-capacity-overview.md)
    [Azure Hybrid Benefit](../azure-hybrid-benefit.md) (not available on dev/test subscriptions)
    [Enterprise](https://azure.microsoft.com/offers/ms-azr-0148p/) and [Pay-As-You-Go](https://azure.microsoft.com/offers/ms-azr-0023p/) Dev/Test subscriptions | - - - -## Next steps - -- Find resource characteristics (number of cores, I/O, memory) of Business Critical tier in [SQL Managed Instance](../managed-instance/resource-limits.md#service-tier-characteristics), Single database in [vCore model](resource-limits-vcore-single-databases.md) or [DTU model](resource-limits-dtu-single-databases.md#premium-service-tier), or Elastic pool in [vCore model](resource-limits-vcore-elastic-pools.md) and [DTU model](resource-limits-dtu-elastic-pools.md#premium-elastic-pool-limits). -- Learn about [General Purpose](service-tier-general-purpose.md) and [Hyperscale](service-tier-hyperscale.md) service tiers. -- Learn about [Service Fabric](../../service-fabric/service-fabric-overview.md). -- For more options for high availability and disaster recovery, see [Business Continuity](business-continuity-high-availability-disaster-recover-hadr-overview.md). diff --git a/articles/azure-sql/database/service-tier-general-purpose.md b/articles/azure-sql/database/service-tier-general-purpose.md deleted file mode 100644 index 65e26ebf60c89..0000000000000 --- a/articles/azure-sql/database/service-tier-general-purpose.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: General Purpose service tier -titleSuffix: Azure SQL Database & Azure SQL Managed Instance -description: Learn about the General Purpose service tier for Azure SQL Database and Azure SQL Managed Instance. 
-services: sql-database -ms.service: sql-db-mi -ms.subservice: service-overview -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: conceptual -author: danimir -ms.author: danil -ms.reviewer: kendralittle, mathoma, urmilano -ms.date: 04/13/2022 ---- -# General Purpose service tier - Azure SQL Database and Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Azure SQL Database and Azure SQL Managed Instance are based on the SQL Server database engine architecture adapted for the cloud environment in order to ensure default availability even in the cases of infrastructure failures. - -This article describes and compares the General Purpose service tier used by Azure SQL Database and Azure SQL Managed instance. The General Purpose service tier is best used for budget-oriented, balanced compute and storage options. - - -## Overview - -The architectural model for the General Purpose service tier is based on a separation of compute and storage. This architectural model relies on high availability and reliability of Azure Blob storage that transparently replicates database files and guarantees no data loss if underlying infrastructure failure happens. - -The following figure shows four nodes in standard architectural model with the separated compute and storage layers. - -![Separation of compute and storage](./media/service-tier-general-purpose/general-purpose-service-tier.png) - -In the architectural model for the General Purpose service tier, there are two layers: - -- A stateless compute layer that is running the `sqlservr.exe` process and contains only transient and cached data (for example – plan cache, buffer pool, column store pool). This stateless node is operated by Azure Service Fabric that initializes process, controls health of the node, and performs failover to another place if necessary. -- A stateful data layer with database files (.mdf/.ldf) that are stored in Azure Blob storage. 
Azure Blob storage guarantees that there will be no data loss of any record that is placed in any database file. Azure Storage has built-in data availability/redundancy that ensures that every record in log file or page in data file will be preserved even if the process crashes. - -Whenever the database engine or operating system is upgraded, some part of underlying infrastructure fails, or if some critical issue is detected in the `sqlservr.exe` process, Azure Service Fabric will move the stateless process to another stateless compute node. There is a set of spare nodes that is waiting to run new compute service if a failover of the primary node happens in order to minimize failover time. Data in Azure storage layer is not affected, and data/log files are attached to newly initialized process. This process guarantees 99.99% availability by default and 99.995% availability when [zone redundancy](high-availability-sla.md#general-purpose-service-tier-zone-redundant-availability) is enabled. There may be some performance impacts on heavy workloads that are running due to transition time and the fact the new node starts with cold cache. - -## When to choose this service tier - -The General Purpose service tier is a default service tier in Azure SQL Database and Azure SQL Managed Instance that is designed for most of generic workloads. If you need a fully managed database engine with a default SLA and storage latency between 5 and 10 ms, the General Purpose tier is the option for you. - -## Compare General Purpose resource limits - - - -Review the table in this section for a brief overview comparison of the resource limits for Azure SQL Database and Azure SQL managed Instance in the General Purpose service tier. 
- -For comprehensive details about resource limits, review: -- Azure SQL Database: [vCore single database](resource-limits-vcore-single-databases.md), [vCore pooled database ](resource-limits-vcore-elastic-pools.md), [Hyperscale](service-tier-hyperscale.md), [DTU single database](resource-limits-dtu-single-databases.md) and [DTU pooled databases](resource-limits-dtu-elastic-pools.md) -- Azure SQL Managed Instance: [vCore instance limits](../managed-instance/resource-limits.md) - - -To compare features between SQL Database and SQL Managed Instance, see the [database engine features](features-comparison.md). - -The following table shows resource limits for both Azure SQL Database and Azure SQL Managed Instance in the General Purpose service tier: - -| **Category** | **Azure SQL Database** | **Azure SQL Managed Instance** | -|:--|:--|:--| -| **Compute size**| 1 - 80 vCores | 4, 8, 16, 24, 32, 40, 64, 80 vCores| -| **Storage type** | Remote storage | Remote storage| -| **Storage size** | 1 GB - 4 TB | 2 GB - 16 TB| -| **Tempdb size** | [32 GB per vCore](resource-limits-vcore-single-databases.md) | [24 GB per vCore](../managed-instance/resource-limits.md#service-tier-characteristics) | -| **Log write throughput** | Single databases: [4.5 MB/s per vCore (max 50 MB/s)](resource-limits-vcore-single-databases.md)
    Elastic pools: [6 MB/s per vCore (max 62.5 MB/s)](resource-limits-vcore-elastic-pools.md) | [3 MB/s per vCore (max 22 MB/s)](../managed-instance/resource-limits.md#service-tier-characteristics)| -| **Availability** | [Default SLA](https://azure.microsoft.com/support/legal/sla/azure-sql-database/)
    99.995% SLA with [zone redundancy](high-availability-sla.md#general-purpose-service-tier-zone-redundant-availability) | [Default SLA](https://azure.microsoft.com/support/legal/sla/azure-sql-sql-managed-instance/)| -| **Backups** | 1-35 days (7 days by default) | 1-35 days (7 days by default)| -| [**Read-only replicas**](read-scale-out.md) | 0 built-in
    0 - 4 [geo-replicas](active-geo-replication-overview.md) | 0 built-in
    0 - 1 geo-replicas using [auto-failover groups](auto-failover-group-overview.md#best-practices-for-sql-managed-instance) | -| **Pricing/Billing** | [vCore, reserved storage, backup storage, and geo-replicas](https://azure.microsoft.com/pricing/details/sql-database/single/) are charged.
    IOPS is not charged.| [vCore, reserved storage, backup storage, and geo-replicas](https://azure.microsoft.com/pricing/details/sql-database/managed/) are charged.
    IOPS is not charged. | -| **Discount models** |[Reserved instances](reserved-capacity-overview.md)
    [Azure Hybrid Benefit](../azure-hybrid-benefit.md) (not available on dev/test subscriptions)
    [Enterprise](https://azure.microsoft.com/offers/ms-azr-0148p/) and [Pay-As-You-Go](https://azure.microsoft.com/offers/ms-azr-0023p/) Dev/Test subscriptions | [Reserved instances](reserved-capacity-overview.md)
    [Azure Hybrid Benefit](../azure-hybrid-benefit.md) (not available on dev/test subscriptions)
    [Enterprise](https://azure.microsoft.com/offers/ms-azr-0148p/) and [Pay-As-You-Go](https://azure.microsoft.com/offers/ms-azr-0023p/) Dev/Test subscriptions| - - - -## Next steps - -- Find resource characteristics (number of cores, I/O, memory) of the General Purpose/standard tier in [SQL Managed Instance](../managed-instance/resource-limits.md#service-tier-characteristics), single database in [vCore model](resource-limits-vcore-single-databases.md) or [DTU model](resource-limits-dtu-single-databases.md#single-database-storage-sizes-and-compute-sizes), or elastic pool in [vCore model](resource-limits-vcore-elastic-pools.md) and [DTU model](resource-limits-dtu-elastic-pools.md#standard-elastic-pool-limits). -- Learn about [Business Critical](service-tier-business-critical.md) and [Hyperscale](service-tier-hyperscale.md) service tiers. -- Learn about [Service Fabric](../../service-fabric/service-fabric-overview.md). -- For more options for high availability and disaster recovery, see [Business Continuity](business-continuity-high-availability-disaster-recover-hadr-overview.md). diff --git a/articles/azure-sql/database/service-tier-hyperscale-frequently-asked-questions-faq.yml b/articles/azure-sql/database/service-tier-hyperscale-frequently-asked-questions-faq.yml deleted file mode 100644 index d37783233b876..0000000000000 --- a/articles/azure-sql/database/service-tier-hyperscale-frequently-asked-questions-faq.yml +++ /dev/null @@ -1,476 +0,0 @@ -### YamlMime:FAQ -metadata: - title: Azure SQL Database Hyperscale FAQ - description: Answers to common questions customers ask about a database in SQL Database in the Hyperscale service tier - commonly called a Hyperscale database. 
- services: sql-database - ms.service: sql-database - ms.subservice: service-overview - ms.custom: sqldbrb=1 - ms.devlang: - ms.topic: faq - author: dimitri-furman - ms.author: dfurman - ms.reviewer: kendralittle, mathoma - ms.date: 03/02/2022 -title: Azure SQL Database Hyperscale FAQ -summary: | - [!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - - This article provides answers to frequently asked questions for customers considering a database in the Azure SQL Database Hyperscale service tier, referred to as just Hyperscale in the remainder of this FAQ. This article describes the scenarios that Hyperscale supports and the features that are compatible with Hyperscale. - - - This FAQ is intended for readers who have a brief understanding of the Hyperscale service tier and are looking to have their specific questions and concerns answered. - - This FAQ isn’t meant to be a guidebook or answer questions on how to use a Hyperscale database. For an introduction to Hyperscale, we recommend you refer to the [Azure SQL Database Hyperscale](service-tier-hyperscale.md) documentation. - - -sections: - - name: General questions - questions: - - question: | - What is a Hyperscale database? - answer: | - A Hyperscale database is a database in SQL Database in the Hyperscale service tier that is backed by the Hyperscale scale-out storage technology. A Hyperscale database supports up to 100 TB of data and provides high throughput and performance, as well as rapid scaling to adapt to the workload requirements. Scaling is transparent to the application – connectivity, query processing, etc. work like any other database in Azure SQL Database. - - - question: | - What resource types and purchasing models support Hyperscale? - answer: | - The Hyperscale service tier is only available for single databases using the vCore-based purchasing model in Azure SQL Database. 
- - - question: | - How does the Hyperscale service tier differ from the General Purpose and Business Critical service tiers? - answer: | - The vCore-based service tiers are differentiated based on database availability and storage type, performance, and maximum storage size as described in [resource limit comparison](service-tier-hyperscale.md#compare-resource-limits). - - - question: | - Who should use the Hyperscale service tier? - answer: | - The Hyperscale service tier is intended for customers who have large on-premises SQL Server databases and want to modernize their applications by moving to the cloud, or for customers who are already using Azure SQL Database and want to significantly expand the potential for database growth. Hyperscale is also intended for customers who seek both high performance and high scalability. With Hyperscale, you get: - - - Database size up to 100 TB - - Fast database backups regardless of database size (backups are based on storage snapshots) - - Fast database restores regardless of database size (restores are from storage snapshots) - - Higher log throughput regardless of database size and the number of vCores - - Read Scale-out using one or more read-only replicas, used for read offloading and as hot standbys. - - Rapid scale up of compute, in constant time, to be more powerful to accommodate the heavy workload and then scale down, in constant time. This is similar to scaling up and down between a P6 and a P11, for example, but much faster as this is not a size of data operation. - - - question: | - What regions currently support Hyperscale? - answer: | - The Hyperscale service tier is currently available in the regions listed under [Azure SQL Database Hyperscale Overview](service-tier-hyperscale.md#regions). - - - question: | - Can I create multiple Hyperscale databases per server? - answer: | - Yes. 
For more information and limits on the number of Hyperscale databases per server, see [SQL Database resource limits for single and pooled databases on a server](resource-limits-logical-server.md). - - - question: | - What are the performance characteristics of a Hyperscale database? - answer: | - The Hyperscale architecture provides high performance and throughput while supporting large database sizes. - - - question: | - What is the scalability of a Hyperscale database? - answer: | - Hyperscale provides rapid scalability based on your workload demand. - - - **Scaling Up/Down** - - With Hyperscale, you can scale up the primary compute size in terms of resources like CPU and memory, and then scale down, in constant time. Because the storage is shared, scaling up and scaling down is not a size of data operation. - - **Scaling In/Out** - - With Hyperscale, you also get the ability to provision one or more additional compute replicas that you can use to serve your read requests. This means that you can use these additional compute replicas as read-only replicas to offload your read workload from the primary compute. In addition to read-only, these replicas also serve as hot-standbys in case of a failover from the primary. - - Provisioning of each of these additional compute replicas can be done in constant time and is an online operation. You can connect to these additional read-only compute replicas by setting the `ApplicationIntent` argument on your connection string to `ReadOnly`. Any connections with the `ReadOnly` application intent are automatically routed to one of the additional read-only compute replicas. - - - name: Deep dive questions - questions: - - question: | - Can I mix Hyperscale and single databases in a single server? - answer: | - Yes, you can. - - - question: | - Does Hyperscale require my application programming model to change? - answer: | - No, your application programming model stays as is. 
You use your connection string as usual and the other regular ways to interact with your Hyperscale database. - - - question: | - What transaction isolation level is the default in a Hyperscale database? - answer: | - On the primary replica, the default transaction isolation level is RCSI (Read Committed Snapshot Isolation). On the Read Scale-out secondary replicas, the default isolation level is Snapshot. - - - question: | - Can I bring my on-premises or IaaS SQL Server license to Hyperscale? - answer: | - Yes, [Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/) is available for Hyperscale. Every SQL Server Standard core can map to 1 Hyperscale vCores. Every SQL Server Enterprise core can map to 4 Hyperscale vCores. You don’t need a SQL license for secondary replicas. The Azure Hybrid Benefit price will be automatically applied to Read Scale-out (secondary) replicas. - - - question: | - What kind of workloads is Hyperscale designed for? - answer: | - Hyperscale supports all workload types, including OLTP, Hybrid (HTAP), and Analytical (data mart) workloads. - - - question: | - How can I choose between Azure Synapse Analytics and Azure SQL Database Hyperscale? - answer: | - If you are currently running interactive analytics queries using SQL Server as a data warehouse, Hyperscale is a great option because you can host small and mid-size data warehouses (such as a few TB up to 100 TB) at a lower cost, and you can migrate your SQL Server data warehouse workloads to Hyperscale with minimal T-SQL code changes. - - If you are running data analytics on a large scale with complex queries and sustained ingestion rates higher than 100 MB/s, or using Parallel Data Warehouse (PDW), Teradata, or other Massively Parallel Processing (MPP) data warehouses, Azure Synapse Analytics may be the best choice. - - - name: Hyperscale compute questions - questions: - - question: | - Can I pause my compute at any time? 
- answer: | - Not at this time, however you can scale your compute and number of replicas down to reduce cost during non-peak times. - - - question: | - Can I provision a compute replica with extra RAM for my memory-intensive workload? - answer: | - No. To get more RAM, you need to upgrade to a higher compute size. For more information, see [Hyperscale storage and compute sizes](resource-limits-vcore-single-databases.md#hyperscale---provisioned-compute---gen5). - - - question: | - Can I provision multiple compute replicas of different sizes? - answer: | - Yes. This can be achieved using [named replicas](service-tier-hyperscale-replicas.md#named-replica-in-preview). - - - question: | - How many Read Scale-out replicas are supported? - answer: | - You can scale the number of read-only replicas between 0 and 4 using [Azure portal](https://portal.azure.com) or [REST API](/rest/api/sql/databases/createorupdate). Additionally, many read scale-out scenarios can be enabled using [named replicas](service-tier-hyperscale-replicas.md#named-replica-in-preview). - - - question: | - For high availability, do I need to provision additional compute replicas? - answer: | - In Hyperscale databases, data resiliency is provided at the storage level. You only need one replica to provide resiliency. When the compute replica is down, a new replica is created automatically with no data loss. - - However, if there's only one replica, it may take some time to build the local cache in the new replica after failover. During the cache rebuild phase, the database fetches data directly from the page servers, resulting in higher storage latency and degraded query performance. - - For mission-critical apps that require high availability with minimal failover impact, you should provision at least 2 compute replicas including the primary compute replica. That way there is a hot-standby replica available that serves as a failover target. 
- - - name: Data size and storage questions - questions: - - question: | - What is the maximum database size supported with Hyperscale? - answer: 100 TB. - - - question: | - What is the size of the transaction log with Hyperscale? - answer: | - The transaction log on hyperscale is practically infinite, with the restriction that a single transaction cannot generate more than 1TB of log. Additionally, if using [Change Data Capture](/sql/relational-databases/track-changes/about-change-data-capture-sql-server), at most 1 TB of log can be generated since the start of the oldest active transaction. It is recommended to avoid unnecessarily large transactions to stay below this limit. Other than the restrictions stated, you do not need to worry about running out of log space on a system that has a high log throughput. However, log generation rate might be throttled for continuous aggressively writing workloads. The peak sustained log generation rate is 100 MB/s. - - - question: | - Does my `tempdb` scale as my database grows? - answer: | - Your `tempdb` database is located on local SSD storage and is sized proportionally to the compute size that you provision. Your `tempdb` is optimized to provide maximum performance benefits. `tempdb` size is not configurable and is managed for you. To determine maximum `tempdb` size for your database, see [Hyperscale storage and compute sizes](resource-limits-vcore-single-databases.md#hyperscale---provisioned-compute---gen5). - - - question: | - Does my database size automatically grow, or do I have to manage the size of data files? - answer: | - Your database size automatically grows as you insert/ingest more data. - - - question: | - What is the smallest database size that Hyperscale supports? - answer: 40 GB. A Hyperscale database is created with a starting size of 10 GB. Then, it starts growing by 10 GB every 10 minutes, until it reaches the size of 40 GB. 
Each of these 10-GB chunks is allocated in a different page server in order to provide more IOPS and higher I/O parallelism. Because of this optimization, even if you choose initial database size smaller than 40 GB, the database will grow to at least 40 GB automatically. - - - question: | - In what increments does my database size grow? - answer: | - Each data file grows by 10 GB. Multiple data files may grow at the same time. - - - question: | - Is the storage in Hyperscale local or remote? - answer: | - In Hyperscale, data files are stored in Azure standard storage. Data is fully cached on local SSD storage, on page servers that are close to the compute replicas. In addition, compute replicas have data caches on local SSD and in memory, to reduce the frequency of fetching data from remote page servers. - - - question: | - Can I manage or define files or filegroups with Hyperscale? - answer: | - No. Data files are added automatically. The common reasons for creating additional filegroups do not apply in the Hyperscale storage architecture. - - - question: | - Can I provision a hard cap on the data growth for my database? - answer: | - No. - - - question: | - How are data files laid out with Hyperscale? - answer: | - The data files are controlled by page servers, with one page server per data file. As the data size grows, data files and associated page servers are added. - - - question: | - Is database shrink supported? - answer: | - No. - - - question: | - Is data compression supported? - answer: | - Yes, including row, page, and columnstore compression. - - - question: | - If I have a huge table, does my table data get spread out across multiple data files? - answer: | - Yes. The data pages associated with a given table can end up in multiple data files, which are all part of the same filegroup. 
SQL Server uses [proportional fill strategy](/sql/relational-databases/databases/database-files-and-filegroups#file-and-filegroup-fill-strategy) to distribute data over data files. - - - name: Data migration questions - questions: - - question: | - Can I move my existing databases in Azure SQL Database to the Hyperscale service tier? - answer: | - Yes. You can move your existing databases in Azure SQL Database to Hyperscale. For proofs of concept (POCs), we recommend you make a copy of your database and migrate the copy to Hyperscale. - - Reverse migration to the General Purpose service tier allows customers who have recently migrated an existing database in Azure SQL Database to the Hyperscale service tier to move back in an emergency, should Hyperscale not meet their needs. While reverse migration is initiated by a service tier change, it's essentially a size-of-data move between different architectures. Learn the [limitations for reverse migration](manage-hyperscale-database.md#limitations-for-reverse-migration). - - The time required to move an existing database to Hyperscale consists of the time to copy data, and the time to replay the changes made in the source database while copying data. The data copy time is proportional to data size. The time to replay changes will be shorter if the move is done during a period of low write activity. - - Get sample code to migrate existing Azure SQL Databases to Hyperscale in the Azure portal, Azure CLI, PowerShell, and Transact-SQL in [Migrate an existing database to Hyperscale](manage-hyperscale-database.md#migrate-an-existing-database-to-hyperscale). - - - question: | - Can I move my Hyperscale databases to other service tiers? - answer: | - If you previously migrated an existing Azure SQL Database to the Hyperscale service tier, you can reverse migrate a Hyperscale database to the General Purpose service tier within 45 days of the original migration to Hyperscale. 
If you wish to migrate the database to another service tier, such as Business Critical, first reverse migrate to the General Purpose service tier, then modify the service tier. Reverse migration is a size of data operation. - - Databases created in the Hyperscale service tier cannot be moved to other service tiers. - - Learn [how to reverse migrate from Hyperscale](manage-hyperscale-database.md#reverse-migrate-from-hyperscale), including the [limitations for reverse migration](manage-hyperscale-database.md#limitations-for-reverse-migration) and [Limitations for reverse migration](manage-hyperscale-database.md#limitations-for-reverse-migration) and impacted [backup policies](manage-hyperscale-database.md#backup-policies). - - - question: | - Do I lose any functionality or capabilities after migration to the Hyperscale service tier? - answer: | - Yes. Some Azure SQL Database features are not supported in Hyperscale yet, including but not limited to long term backup retention. After you migrate your databases to Hyperscale, those features stop working. We expect these limitations to be temporary. For details, see [Known limitations](service-tier-hyperscale.md#known-limitations). - - - question: | - Can I move my on-premises SQL Server database, or my SQL Server database in a cloud virtual machine to Hyperscale? - answer: | - Yes. You can use all existing migration technologies to migrate to Hyperscale, including transactional replication, and any other data movement technologies (Bulk Copy, Azure Data Factory, Azure Databricks, SSIS). See also the [Azure Database Migration Service](../../dms/dms-overview.md), which supports many migration scenarios. - - - question: | - What is my downtime during migration from an on-premises or virtual machine environment to Hyperscale, and how can I minimize it? - answer: | - Downtime for migration to Hyperscale is the same as the downtime when you migrate your databases to other Azure SQL Database service tiers. 
You can use [transactional replication](replication-to-sql-database.md#data-migration-scenario - ) to minimize downtime migration for databases up to few TB in size. For very large databases (10+ TB), you can consider to migrate data using ADF, Spark, or other data movement technologies. - - - question: | - How much time would it take to bring in X amount of data to Hyperscale? - answer: | - Hyperscale is capable of consuming 100 MB/s of new/changed data, but the time needed to move data into databases in Azure SQL Database is also affected by available network throughput, source read speed and the target database service level objective. - - - question: | - Can I read data from blob storage and do fast load (like Polybase in Azure Synapse Analytics)? - answer: | - You can have a client application read data from Azure Storage and load data load into a Hyperscale database (just like you can with any other database in Azure SQL Database). Polybase is currently not supported in Azure SQL Database. As an alternative to provide fast load, you can use [Azure Data Factory](../../data-factory/index.yml), or use a Spark job in [Azure Databricks](/azure/azure-databricks/) with the [Spark connector for SQL](spark-connector.md). The Spark connector to SQL supports bulk insert. - - It is also possible to bulk read data from Azure Blob store using BULK INSERT or OPENROWSET: [Examples of Bulk Access to Data in Azure Blob Storage](/sql/relational-databases/import-export/examples-of-bulk-access-to-data-in-azure-blob-storage#accessing-data-in-a-csv-file-referencing-an-azure-blob-storage-location). - - Simple recovery or bulk logging model is not supported in Hyperscale. Full recovery model is required to provide high availability and point-in-time recovery. However, Hyperscale log architecture provides better data ingest rate compared to other Azure SQL Database service tiers. 
- - - question: | - Does Hyperscale allow provisioning multiple nodes for parallel ingesting of large amounts of data? - answer: | - No. Hyperscale is a symmetric multi-processing (SMP) architecture and is not a massively parallel processing (MPP) or a multi-master architecture. You can only create multiple replicas to scale out read-only workloads. - - - question: | - What is the oldest SQL Server version supported for migration to Hyperscale? - answer: | - SQL Server 2005. For more information, see [Migrate to a single database or a pooled database](migrate-to-database-from-sql-server.md#migrate-to-a-single-database-or-a-pooled-database). For compatibility issues, see [Resolving database migration compatibility issues](migrate-to-database-from-sql-server.md#resolving-database-migration-compatibility-issues). - - - question: | - Does Hyperscale support migration from other data sources such as Amazon Aurora, MySQL, PostgreSQL, Oracle, DB2, and other database platforms? - answer: | - Yes. [Azure Database Migration Service](../../dms/dms-overview.md) supports many migration scenarios. - - - name: Business continuity and disaster recovery questions - questions: - - question: | - What SLAs are provided for a Hyperscale database? - answer: | - See [SLA for Azure SQL Database](https://azure.microsoft.com/support/legal/sla/azure-sql-database). Additional secondary compute replicas increase availability, up to 99.99% for a database with two or more secondary compute replicas. - - - question: | - Are the database backups managed for me by Azure SQL Database? - answer: | - Yes. - - - question: | - Does Hyperscale support Availability Zones? - answer: | - Yes, Hyperscale supports [zone redundant configuration](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview) in public preview. At least 1 HA compute replica and the use of zone-redundant backup storage is required for enabling the zone redundant configuration for Hyperscale. 
- - - question: | - How often are the database backups taken? - answer: | - There are no traditional full, differential, and transaction log backups for Hyperscale databases. Instead, there are regular storage snapshots of data files. Generated transaction log is retained as-is for the configured retention period. At restore time, relevant transaction log records are applied to the restored storage snapshot, resulting in a transactionally consistent database without any data loss as of the specified point in time within the retention period. - - - question: | - Does Hyperscale support point-in-time restore? - answer: | - Yes. - - - question: | - What is the Recovery Point Objective (RPO)/Recovery Time Objective (RTO) for database restore in Hyperscale? - answer: | - The RPO for point-in-time restore is 0 min. Most point-in-time restore operations complete within 60 minutes regardless of database size. Restore time may be longer for larger databases, and if the database had experienced significant write activity before and up to the restore point in time. - - - question: | - Does database backup affect compute performance on my primary or secondary replicas? - answer: | - No. Backups are managed by the storage subsystem, and leverage storage snapshots. They do not impact user workloads. - - - question: | - Can I perform geo-restore with a Hyperscale database? - answer: | - Yes. Geo-restore is fully supported. Unlike point-in-time restore, geo-restore requires a size-of-data operation. Data files are copied in parallel, so the duration of this operation depends primarily on the size of the largest file in the database, rather than on total database size. Geo-restore time will be significantly shorter if the database is restored in the Azure region that is [paired](../../availability-zones/cross-region-replication-azure.md) with the region of the source database. - - - question: | - Can I set up geo-replication with a Hyperscale database? - answer: | - Yes. 
[Geo-replication](active-geo-replication-overview.md) for Hyperscale databases is in public preview. - - - question: | - Can I take a Hyperscale database backup and restore it to my on-premises server, or on SQL Server in a VM? - answer: | - No. The storage format for Hyperscale databases is different from any released version of SQL Server, and you don’t control backups or have access to them. To take your data out of a Hyperscale database, you can extract data using any data movement technologies, i.e. Azure Data Factory, Azure Databricks, SSIS, etc. - - - name: Cross-feature questions - questions: - - question: | - Do I lose any functionality or capabilities after migration to the Hyperscale service tier? - answer: | - Yes. Some Azure SQL Database features are not supported in Hyperscale, including but not limited to long term backup retention. After you migrate your databases to Hyperscale, those features stop working. For details, see [Known limitations](service-tier-hyperscale.md#known-limitations). - - - question: | - Will PolyBase work with Hyperscale? - answer: | - No. PolyBase is not supported in Azure SQL Database. - - - question: | - Does Hyperscale have support for R and Python? - answer: | - Not at this time. - - - question: | - Are compute nodes containerized? - answer: | - No. Hyperscale processes run on [Service Fabric](https://azure.microsoft.com/services/service-fabric/) nodes (VMs), not in containers. - - - name: Performance questions - questions: - - question: | - How much write throughput can I push in a Hyperscale database? - answer: | - Transaction log throughput cap is set to 100 MB/s for any Hyperscale compute size. The ability to achieve this rate depends on multiple factors, including but not limited to workload type, client configuration, and having sufficient compute capacity on the primary compute replica to produce log at this rate. - - - question: | - How many IOPS do I get on the largest compute? 
- answer: | - IOPS and IO latency will vary depending on the workload patterns. If the data being accessed is cached in RBPEX on the compute replica, you will see similar IO performance as in Business Critical or Premium service tiers. - - - question: | - Does my throughput get affected by backups? - answer: | - No. Compute is decoupled from the storage layer. This eliminates performance impact of backup. - - - question: | - Does my throughput get affected as I provision additional compute replicas? - answer: | - Because the storage is shared and there is no direct physical replication happening between primary and secondary compute replicas, the throughput on primary replica will not be directly affected by adding secondary replicas. However, we may throttle continuous aggressively writing workload on the primary to allow log apply on secondary replicas and page servers to catch up, to avoid poor read performance on secondary replicas. - - - question: | - Is Hyperscale well suited for resource-intensive, long-running queries and transactions? - answer: | - Yes. However, please consider that very infrequently, connections might be terminated by transient errors, which may abort long-running queries and roll back transactions. One cause of transient errors is when the system quickly shifts the database to a different compute node to ensure continued compute and storage resource availability, or to perform planned maintenance. Most of these reconfiguration events finish in less than 10 seconds. Applications that connect to your database should be built to expect and tolerate these infrequent transient errors by implementing retry logic. Additionally, consider configuring a [maintenance window](maintenance-window.md) that matches your workload schedule to avoid transient errors due to planned maintenance. - - - question: | - How do I diagnose and troubleshoot performance problems in a Hyperscale database? 
- answer: | - For most performance problems, particularly the ones not rooted in storage performance, common SQL diagnostic and troubleshooting steps apply. For Hyperscale-specific storage diagnostics, see [SQL Hyperscale performance troubleshooting diagnostics](hyperscale-performance-diagnostics.md). - - - name: Scalability questions - questions: - - question: | - How long would it take to scale up and down a compute replica? - answer: | - Scaling compute up or down typically takes up to 2 minutes regardless of data size. - - - question: | - Is my database offline while the scaling up/down operation is in progress? - answer: | - No. The scaling up and down will be online. - - - question: | - Should I expect connection drop when the scaling operations are in progress? - answer: | - Scaling up or down results in existing connections being dropped when a failover happens at the end of the scaling operation. Adding or removing secondary replicas does not result in connection drops on the primary. - - - question: | - Is the scaling up and down of compute replicas automatic or end-user triggered operation? - answer: | - End-user. Not automatic. - - - question: | - Does the size of my `tempdb` database and RBPEX cache also grow as the compute is scaled up? - answer: | - Yes. The `tempdb` database and [RBPEX cache](service-tier-hyperscale.md#distributed-functions-architecture) size on compute nodes will scale up automatically as the number of cores is increased. For details, see [Hyperscale storage and compute sizes](resource-limits-vcore-single-databases.md#hyperscale---provisioned-compute---gen5). - - - question: | - Can I provision multiple primary compute replicas, such as a multi-master system, where multiple primary compute heads can drive a higher level of concurrency? - answer: | - No. Only the primary compute replica accepts read/write requests. Secondary compute replicas only accept read-only requests. 
- - - name: Read scale-out questions - questions: - - question: | - What kinds of secondary (read scale-out) replicas are available in Hyperscale? - answer: | - Hyperscale supports High Availability (HA) replicas, named replicas, and geo-replicas. See [Hyperscale secondary replicas](service-tier-hyperscale-replicas.md) for details. - - - question: | - How many secondary HA replicas can I provision? - answer: | - Between 0 and 4. If you want to adjust the number of replicas, you can do so using [Azure portal](https://portal.azure.com) or [REST API](/rest/api/sql/databases/createorupdate). - - - question: | - How do I connect to secondary HA replicas? - answer: | - You can connect to these additional read-only compute replicas by setting the `ApplicationIntent` argument in your connection string to `ReadOnly`. Any connections marked with `ReadOnly` are automatically routed to one of the secondary HA replicas, if they were added for your database. For details, see [Use read-only replicas to offload read-only query workloads](read-scale-out.md). - - - question: | - How do I validate if I have successfully connected to secondary compute replica using SSMS or other client tools? - answer: | - You can execute the following T-SQL query: `SELECT DATABASEPROPERTYEX ('', 'Updateability')`. The result is `READ_ONLY` if you are connected to a read-only secondary replica, and `READ_WRITE` if you are connected to the primary replica. Note that the database context must be set to the name of the Hyperscale database, not to the `master` database. - - - question: | - Can I create a dedicated endpoint for a secondary HA replica? - answer: | - No. You can only connect to HA replicas by specifying `ApplicationIntent=ReadOnly`. However, you can use dedicated endpoints for [named replicas](service-tier-hyperscale-replicas.md#named-replica-in-preview). - - - question: | - Does the system do intelligent load balancing of the read workload on HA replicas? - answer: | - No. 
A new connection with read-only intent is redirected to an arbitrary HA replica. - - - question: | - Can I scale up/down HA replicas independently of the primary replica? - answer: | - No. HA replicas are used as high availability failover targets, so they need to have the same configuration as the primary to provide expected performance after failover. [Named replicas](service-tier-hyperscale-replicas.md#named-replica-in-preview) provide the ability to scale each replica independently. - - - question: | - Do I get different `tempdb` sizing for my primary compute and my HA replicas? - answer: | - No. Your `tempdb` database is configured based on the provisioned compute size, your HA replicas are the same size, including `tempdb`, as the primary compute. On [named replicas](service-tier-hyperscale-replicas.md#named-replica-in-preview), `tempdb` is sized according to the compute size of the replica, thus it can be smaller or larger than `tempdb` on the primary. - - - question: | - Can I add indexes and views on my secondary compute replicas? - answer: | - No. Hyperscale databases have shared storage, meaning that all compute replicas see the same tables, indexes, and views. If you want additional indexes optimized for reads on secondary, you must add them on the primary. - - - question: | - How much delay is there going to be between the primary and secondary compute replicas? - answer: | - Data latency from the time a transaction is committed on the primary to the time it is readable on a secondary depends on current log generation rate, transaction size, load on the replica, and other factors. Typical data latency for small transactions is in tens of milliseconds, however there is no upper bound on data latency. Data on a given secondary replica is always transactionally consistent. However, at a given point in time data latency may be different for different secondary replicas. 
Workloads that need to read committed data immediately should run on the primary replica. - -additionalContent: | - - ## Next steps - - For more information about the Hyperscale service tier, see [Hyperscale service tier](service-tier-hyperscale.md). - diff --git a/articles/azure-sql/database/service-tier-hyperscale-named-replicas-faq.yml b/articles/azure-sql/database/service-tier-hyperscale-named-replicas-faq.yml deleted file mode 100644 index 7b73074b618e6..0000000000000 --- a/articles/azure-sql/database/service-tier-hyperscale-named-replicas-faq.yml +++ /dev/null @@ -1,83 +0,0 @@ -### YamlMime:FAQ -metadata: - title: Azure SQL Database Hyperscale named replicas FAQ - description: Answers to common questions customers ask about Hyperscale named replicas - services: sql-database - ms.service: sql-database - ms.subservice: service-overview - ms.custom: - ms.devlang: - ms.topic: faq - author: yorek - ms.author: damauri - ms.reviewer: kendralittle, mathoma - ms.date: 07/27/2021 -title: Azure SQL Database Hyperscale named replicas FAQ -summary: | - [!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - - This FAQ is intended for readers who have a general understanding of the Hyperscale [named replicas](service-tier-hyperscale-replicas.md#named-replica-in-preview) and are looking to have their specific questions and concerns answered. - -sections: - - name: General questions - questions: - - question: | - Can a named replica be used as a failover target? - answer: | - No, named replicas cannot be used as failover targets for the primary replica. Add HA replicas for that purpose. - - - question: | - How can I distribute a read-only workload across my named replicas? - answer: | - Since every named replica may have a different service level objective and thus be used for different use cases, there is no built-in way to direct read-only traffic sent to the primary to a set of named replicas. 
For example, you may have eight named replicas, and you may want to direct OLTP workload only to named replicas 1 to 4, while all the Power BI analytical workloads will use named replicas 5 and 6 and the data science workload will use replicas 7 and 8. Depending on which tool or programming language you use, strategies to distribute such workload may vary. One example of creating a workload routing solution to allow a REST backend to scale out is here: [OLTP scale-out sample](https://github.com/Azure-Samples/azure-sql-db-named-replica-oltp-scaleout). - - - question: | - Can a named replica be in a region different from the region of the primary replica? - answer: | - No, as named replicas use the same page servers of the primary replica, they must be in the same region. - - - question: | - Can a named replica impact availability or performance of the primary replica? - answer: | - A named replica cannot impact the availability of the primary replica. Named replicas, under normal circumstances, are unlikely to impact the primary's performance, but it can happen if there are intensive workloads running. Just like an HA replica, a named replica is kept in sync with the primary via the transaction log service. If a named replica, for any reason, is not able to consume the transaction log fast enough, it will start asking the primary replica to slow down (throttle) its log generation, so that it can catch up. While this behavior will not impact the primary's availability, it may impact performance of write workloads on the primary. To avoid this situation, make sure that your named replicas have enough resource headroom – mainly CPU – to process transaction log without delay. For example, if the primary is processing numerous data changes, it is recommended to have named replicas with at least the same Service Level Objective as the primary, to avoid saturating CPU on the replicas and thus forcing the primary to slow down. 
- - - question: | - What happens to named replicas if the primary replica is unavailable, for example because of planned maintenance? - answer: | - Named replicas will still be available for read-only access, as usual. - - - question: | - How do I validate if I have successfully connected to secondary compute replica using SQL Server Management Studio (SSMS) or other client tools? - answer: | - You can execute the following T-SQL query. The database context must be set to the name of the Hyperscale database, not to the `master` database. - - ```sql - SELECT @@SERVERNAME AS logical_server_name, DB_NAME() AS database_name, DATABASEPROPERTYEX(DB_NAME(), 'Updateability') AS replica_updateability; - ``` - - `replica_updateability` will be `READ_ONLY` if you are connected to a read-only secondary replica, and `READ_WRITE` if you are connected to the primary replica. - - - question: | - Can I create any objects or indexes on my secondary compute replicas? - answer: | - No. Hyperscale databases have shared storage, meaning that all compute replicas see the same tables, indexes, views, etc. If you want additional indexes optimized for reads on secondary, you must add them on the primary. - - You can still create temporary tables (table names prefixed with # or ##) on each secondary replica to store temporary data. Temporary tables are read-write. - - - question: | - How much delay is there between the primary and secondary compute replicas? - answer: | - Data latency from the time a transaction is committed on the primary to the time it is readable on a secondary depends on current log generation rate, transaction size, load on the replica, and other factors. Typical data latency for small transactions is in tens of milliseconds, however there is no upper bound on data latency. Data on a given secondary replica is always transactionally consistent. However, at a given point in time data latency may be different for different secondary replicas. 
Workloads that need to read committed data immediately should run on the primary replica. - - - question: | - How can I improve availability of named replicas? - answer: | - By default, named replicas do not have any HA replicas of their own. A failover of a named replica requires creating a new replica first, which typically takes about 1-2 minutes. However, named replicas can also benefit from higher availability and shorter failovers provided by HA replicas. To add HA replicas for a named replica, you can use the parameter `ha-replicas` with [AZ CLI](/cli/azure/sql/db/replica#az_sql_db_replica_create), or the parameter `HighAvailabilityReplicaCount` with [PowerShell](/powershell/module/az.sql/set-azsqldatabase), or the `highAvailabilityReplicaCount` property with [REST API](/rest/api/sql/2021-02-01-preview/databases/create-or-update). The number of HA replicas can be set during the creation of a named replica and can be changed – only via AZ CLI, PowerShell or REST API – anytime after the named replica has been created. Pricing of HA replicas for named replicas is the same of HA replicas for regular Hyperscale databases. - - -additionalContent: | - - ## Next steps - - For more information about the Hyperscale service tier, see [Hyperscale service tier](service-tier-hyperscale.md). - For more information about the Hyperscale replicas, see [Hyperscale Replicas](service-tier-hyperscale-replicas.md). diff --git a/articles/azure-sql/database/service-tier-hyperscale-replicas.md b/articles/azure-sql/database/service-tier-hyperscale-replicas.md deleted file mode 100644 index f4f2fe0e3db3f..0000000000000 --- a/articles/azure-sql/database/service-tier-hyperscale-replicas.md +++ /dev/null @@ -1,174 +0,0 @@ ---- -title: Hyperscale secondary replicas -description: This article describes the different types of secondary replicas available in the Hyperscale service tier. 
-services: sql-database -ms.service: sql-database -ms.subservice: service-overview -ms.topic: overview -author: yorek -ms.author: damauri -ms.reviewer: kendralittle, mathoma -ms.date: 9/24/2021 ---- - -# Hyperscale secondary replicas -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -As described in [Distributed functions architecture](service-tier-hyperscale.md), Azure SQL Database Hyperscale has two different types of compute nodes, also referred to as replicas: - -- Primary: serves read and write operations -- Secondary: provides [read scale-out](read-scale-out.md), [high availability](high-availability-sla.md), and [geo-replication](active-geo-replication-overview.md) - -Secondary replicas are always read-only, and can be of three different types: - -- High Availability replica -- Named replica (in [Preview](https://azure.microsoft.com/support/legal/preview-supplemental-terms/)) -- Geo-replica (in [Preview](https://azure.microsoft.com/support/legal/preview-supplemental-terms/)) - -Each type has a different architecture, feature set, purpose, and cost. Based on the features you need, you may use just one or even all of the three together. - -## High Availability replica - -A High Availability (HA) replica uses the same page servers as the primary replica, so no data copy is required to add an HA replica. HA replicas are mainly used to increase database availability; they act as hot standbys for failover purposes. If the primary replica becomes unavailable, failover to one of the existing HA replicas is automatic and quick. Connection string doesn't need to change; during failover applications may experience minimal downtime due to active connections being dropped. As usual for this scenario, proper retry logic is recommended. Several drivers already provide some degree of automatic retry logic. 
If you are using .NET, the [latest Microsoft.Data.SqlClient](https://devblogs.microsoft.com/azure-sql/configurable-retry-logic-for-microsoft-data-sqlclient/) library provides native full support for configurable automatic retry logic. - -HA replicas use the same server and database name as the primary replica. Their Service Level Objective is also always the same as for the primary replica. HA replicas are not visible or manageable as a stand-alone resource from the portal or from any API. - -There can be zero to four HA replicas. Their number can be changed during the creation of a database or after the database has been created, via the common management endpoints and tools (for example: PowerShell, AZ CLI, Portal, REST API). Creating or removing HA replicas does not affect active connections on the primary replica. - -### Connecting to an HA replica - -In Hyperscale databases, the `ApplicationIntent` argument in the connection string used by the client dictates whether the connection is routed to the read-write primary replica or to a read-only HA replica. If `ApplicationIntent` is set to `ReadOnly` and the database doesn't have a secondary replica, connection will be routed to the primary replica and will default to the `ReadWrite` behavior. - -```csharp --- Connection string with application intent -Server=tcp:.database.windows.net;Database=;ApplicationIntent=ReadOnly;User ID=;Password=;Trusted_Connection=False; Encrypt=True; -``` - -All HA replicas are identical in their resource capacity. If more than one HA replica is present, the read-intent workload is distributed arbitrarily across all available HA replicas. When there are multiple HA replicas, keep in mind that each one could have different data latency with respect to data changes made on the primary. Each HA replica uses the same data as the primary on the same set of page servers. 
However, local data caches on each HA replica reflect the changes made on the primary via the transaction log service, which forwards log records from the primary replica to HA replicas. As a result, depending on the workload being processed by an HA replica, application of log records may happen at different speeds, and thus different replicas could have different data latency relative to the primary replica. - -## Named replica (in Preview) - -A named replica, just like an HA replica, uses the same page servers as the primary replica. Similar to HA replicas, there is no data copy needed to add a named replica. - -The difference from HA replicas is that named replicas: - -- appear as regular (read-only) Azure SQL databases in the portal and in API (AZ CLI, PowerShell, T-SQL) calls; -- can have database name different from the primary replica, and optionally be located on a different logical server (as long as it is in the same region as the primary replica); -- have their own Service Level Objective that can be set and changed independently from the primary replica; -- support up to 30 named replicas (for each primary replica); -- support different authentication for each named replica by creating different logins on logical servers hosting named replicas. - -As a result, named replicas offer several benefits over HA replicas with regard to read-only workloads: - -- users connected to a named replica will suffer no disconnection if the primary replica is scaled up or down; at the same time users connected to primary replica will be unaffected by named replicas scaling up or down -- workloads running on any replica, primary or named, will be unaffected by long running queries running on other replicas - -The main goal of named replicas is to enable massive OLTP [read scale-out](read-scale-out.md) scenario, and to improve Hybrid Transactional and Analytical Processing (HTAP) workloads. 
Examples of how to create such solutions are available here: - -- [OLTP scale-out sample](https://github.com/Azure-Samples/azure-sql-db-named-replica-oltp-scaleout) -- [HTAP scale-out sample](https://github.com/Azure-Samples/azure-sql-db-named-replica-htap) - -Aside from the main scenarios listed above, named replicas offer flexibility and elasticity to also satisfy many other use cases: -- [Access Isolation](hyperscale-named-replica-security-configure.md): you can grant access to a specific named replica, but not the primary replica or other named replicas. -- Workload-dependent service level objective: as a named replica can have its own service level objective, it is possible to use different named replicas for different workloads and use cases. For example, one named replica could be used to serve Power BI requests, while another can be used to serve data to Apache Spark for Data Science tasks. Each one can have an independent service level objective and scale independently. -- Workload-dependent routing: with up to 30 named replicas, it is possible to use named replicas in groups so that an application can be isolated from another. For example, a group of four named replicas could be used to serve requests coming from mobile applications, while another group of two named replicas can be used to serve requests coming from a web application. This approach would allow a fine-grained tuning of performance and costs for each group. - -The following example creates a named replica `WideWorldImporters_NR` for database `WideWorldImporters`. The primary replica uses service level objective HS_Gen5_4, while the named replica uses HS_Gen5_2. Both use the same logical server `MyServer`. If you prefer to use REST API directly, this option is also possible: [Databases - Create A Database As Named Replica Secondary](/rest/api/sql/2020-11-01-preview/databases/createorupdate#creates-a-database-as-named-replica-secondary). 
- -# [T-SQL](#tab/tsql) -```sql -ALTER DATABASE [WideWorldImporters] -ADD SECONDARY ON SERVER [MyServer] -WITH (SERVICE_OBJECTIVE = 'HS_Gen5_2', SECONDARY_TYPE = Named, DATABASE_NAME = [WideWorldImporters_NR]); -``` -# [PowerShell](#tab/azure-powershell) -```azurepowershell -New-AzSqlDatabaseSecondary -ResourceGroupName "MyResourceGroup" -ServerName "MyServer" -DatabaseName "WideWorldImporters" -PartnerResourceGroupName "MyResourceGroup" -PartnerServerName "MyServer" -PartnerDatabaseName "WideWorldImporters_NR" -SecondaryType Named -SecondaryServiceObjectiveName HS_Gen5_2 -``` -# [Azure CLI](#tab/azure-cli) -```azurecli -az sql db replica create -g MyResourceGroup -n WideWorldImporters -s MyServer --secondary-type named --partner-database WideWorldImporters_NR --partner-server MyServer --service-objective HS_Gen5_2 -``` - ---- - -As there is no data movement involved, in most cases a named replica will be created in about a minute. Once the named replica is available, it will be visible from the portal or any command-line tool like AZ CLI or PowerShell. A named replica is usable as a regular read-only database. - -> [!NOTE] -> For frequently asked questions on Hyperscale named replicas, see [Azure SQL Database Hyperscale named replicas FAQ](service-tier-hyperscale-named-replicas-faq.yml). - -### Connecting to a named replica - -To connect to a named replica, you must use the connection string for that named replica, referencing its server and database names. There is no need to specify the option "ApplicationIntent=ReadOnly" as named replicas are always read-only. - -Just like for HA replicas, even though the primary, HA, and named replicas share the same data on the same set of page servers, data caches on each named replica are kept in sync with the primary via the transaction log service, which forwards log records from the primary to named replicas. 
As a result, depending on the workload being processed by a named replica, application of the log records may happen at different speeds, and thus different replicas could have different data latency relative to the primary replica. - -### Modifying a named replica - -You can define the service level objective of a named replica when you create it, via the `ALTER DATABASE` command or in any other supported way (AZ CLI, PowerShell, REST API). If you need to change the service level objective after the named replica has been created, you can do it using the `ALTER DATABASE ... MODIFY` command on the named replica itself. For example, if `WideWorldImporters_NR` is the named replica of `WideWorldImporters` database, you can do it as shown below. - -# [T-SQL](#tab/tsql) -```sql -ALTER DATABASE [WideWorldImporters_NR] MODIFY (SERVICE_OBJECTIVE = 'HS_Gen5_4') -``` -# [PowerShell](#tab/azure-powershell) -```azurepowershell -Set-AzSqlDatabase -ResourceGroupName "MyResourceGroup" -ServerName "MyServer" -DatabaseName "WideWorldImporters_NR" -RequestedServiceObjectiveName "HS_Gen5_4" -``` -# [Azure CLI](#tab/azure-cli) -```azurecli -az sql db update -g MyResourceGroup -s MyServer -n WideWorldImporters_NR --service-objective HS_Gen5_4 -``` - ---- - -### Removing a named replica - -To remove a named replica, you drop it just like you would a regular database. 
Make sure you are connected to the `master` database of the server with the named replica you want to drop, and then use the following command: - -# [T-SQL](#tab/tsql) -```sql -DROP DATABASE [WideWorldImporters_NR]; -``` -# [PowerShell](#tab/azure-powershell) -```azurepowershell -Remove-AzSqlDatabase -ResourceGroupName "MyResourceGroup" -ServerName "MyServer" -DatabaseName "WideWorldImporters_NR" -``` -# [Azure CLI](#tab/azure-cli) -```azurecli -az sql db delete -g MyResourceGroup -s MyServer -n WideWorldImporters_NR -``` ---- - -> [!IMPORTANT] -> Named replicas will be automatically removed when the primary replica from which they have been created is deleted. - -### Known issues - -#### Partially incorrect data returned from sys.databases -During Public Preview, row values returned from `sys.databases`, for named replicas, in columns other than `name` and `database_id`, may be inconsistent and incorrect. For example, the `compatibility_level` column for a named replica could be reported as 140 even if the primary database from which the named replica has been created is set to 150. A workaround, when possible, is to get the same data using the `DATABASEPROPERTYEX()` function, which will return correct data. - -## Geo-replica (in Preview) - -With [active geo-replication](active-geo-replication-overview.md), you can create a readable secondary replica of the primary Hyperscale database in the same or in a different Azure region. Geo-replicas must be created on a different logical server. The database name of a geo-replica always matches the database name of the primary. - -When creating a geo-replica, all data is copied from the primary to a different set of page servers. A geo-replica does not share page servers with the primary, even if they are in the same region. This architecture provides the necessary redundancy for geo-failovers. - -Geo-replicas are used to maintain a transactionally consistent copy of the database via asynchronous replication. 
If a geo-replica is in a different Azure region, it can be used for disaster recovery in case of a disaster or outage in the primary region. Geo-replicas can also be used for geographic read scale-out scenarios. - -In Hyperscale, a geo-failover must be initiated manually. After failover, the new primary will have a different connection end point, referencing the logical server name hosting the new primary replica. For more information, see [active geo-replication](active-geo-replication-overview.md). - -Geo-replication for Hyperscale databases is currently in preview, with the following limitations: -- Only one geo-replica can be created (in the same or different region). -- Failover groups are not supported. -- Planned failover is not supported. -- Point in time restore of the geo-replica is not supported. -- Creating a database copy of the geo-replica is not supported. -- Secondary of a secondary (also known as "geo-replica chaining") is not supported. - -## Next steps - -- [Hyperscale service tier](service-tier-hyperscale.md) -- [Active geo-replication](active-geo-replication-overview.md) -- [Configure Security to allow isolated access to Azure SQL Database Hyperscale Named Replicas](hyperscale-named-replica-security-configure.md) -- [Azure SQL Database Hyperscale named replicas FAQ](service-tier-hyperscale-named-replicas-faq.yml) diff --git a/articles/azure-sql/database/service-tier-hyperscale.md b/articles/azure-sql/database/service-tier-hyperscale.md deleted file mode 100644 index 4ffa542397b84..0000000000000 --- a/articles/azure-sql/database/service-tier-hyperscale.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -title: What is the Hyperscale service tier? -description: This article describes the Hyperscale service tier in the vCore-based purchasing model in Azure SQL Database and explains how it's different from the General Purpose and Business Critical service tiers. 
-services: sql-database -ms.service: sql-database -ms.subservice: service-overview -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: dimitri-furman -ms.author: dfurman -ms.reviewer: kendralittle, mathoma -ms.date: 03/02/2022 ---- - -# Hyperscale service tier -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Azure SQL Database is based on SQL Server Database Engine architecture that is adjusted for the cloud environment to ensure [high availability](https://azure.microsoft.com/support/legal/sla/azure-sql-database/) even in cases of infrastructure failures. There are three architectural models that are used in Azure SQL Database: - -- General Purpose/Standard -- Hyperscale -- Business Critical/Premium - -The Hyperscale service tier in Azure SQL Database is the newest service tier in the vCore-based purchasing model. This service tier is a highly scalable storage and compute performance tier that leverages the Azure architecture to scale out the storage and compute resources for an Azure SQL Database substantially beyond the limits available for the General Purpose and Business Critical service tiers. - -> [!NOTE] -> -> - For details on the General Purpose and Business Critical service tiers in the vCore-based purchasing model, see [General Purpose](service-tier-general-purpose.md) and [Business Critical](service-tier-business-critical.md) service tiers. For a comparison of the vCore-based purchasing model with the DTU-based purchasing model, see [Azure SQL Database purchasing models and resources](purchasing-models.md). -> - The Hyperscale service tier is currently only available for Azure SQL Database, and not Azure SQL Managed Instance. - -## What are the Hyperscale capabilities - -The Hyperscale service tier in Azure SQL Database provides the following additional capabilities: - -- Support for up to 100 TB of database size. 
-- Nearly instantaneous database backups (based on file snapshots stored in Azure Blob storage) regardless of size with no IO impact on compute resources. -- Fast database restores (based on file snapshots) in minutes rather than hours or days (not a size of data operation). -- Higher overall performance due to higher transaction log throughput and faster transaction commit times regardless of data volumes. -- Rapid scale out - you can provision one or more [read-only replicas](service-tier-hyperscale-replicas.md) for offloading your read workload and for use as hot-standbys. -- Rapid Scale up - you can, in constant time, scale up your compute resources to accommodate heavy workloads when needed, and then scale the compute resources back down when not needed. - -The Hyperscale service tier removes many of the practical limits traditionally seen in cloud databases. Where most other databases are limited by the resources available in a single node, databases in the Hyperscale service tier have no such limits. With its flexible storage architecture, storage grows as needed. In fact, Hyperscale databases aren't created with a defined max size. A Hyperscale database grows as needed - and you're billed only for the capacity you use. For read-intensive workloads, the Hyperscale service tier provides rapid scale-out by provisioning additional replicas as needed for offloading read workloads. - -Additionally, the time required to create database backups or to scale up or down is no longer tied to the volume of data in the database. Hyperscale databases can be backed up virtually instantaneously. You can also scale a database in the tens of terabytes up or down in minutes. This capability frees you from concerns about being boxed in by your initial configuration choices. - -For more information about the compute sizes for the Hyperscale service tier, see [Service tier characteristics](service-tiers-vcore.md#service-tiers). 
- The Hyperscale compute unit price is per replica. The [Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/) price is applied to high-availability and named replicas automatically. Users may adjust the total number of high-availability secondary replicas from 0 to 4, depending on [SLA](https://azure.microsoft.com/support/legal/sla/azure-sql-database/) requirements.
A Hyperscale database is created with a starting size of 10 GB and it starts growing by 10 GB every 10 minutes, until it reaches the size of 40 GB. - -For more information about Hyperscale pricing, see [Azure SQL Database Pricing](https://azure.microsoft.com/pricing/details/sql-database/single/) - -## Compare resource limits - - - -The vCore-based service tiers are differentiated based on database availability and storage type, performance, and maximum storage size, as described in the following table: - -|ㅤ| **General Purpose** | **Hyperscale** | **Business Critical** | -|:---:|:---:|:---:|:---:| -|**Best for** |Offers budget oriented balanced compute and storage options. |Most business workloads. Autoscaling storage size up to 100 TB, fast vertical and horizontal compute scaling, fast database restore. |OLTP applications with high transaction rate and low IO latency. Offers highest resilience to failures and fast failovers using multiple synchronously updated replicas. | -|**Compute size** |1 to 80 vCores |1 to 80 vCores1 |1 to 80 vCores | -|**Storage type** |Premium remote storage (per instance) |De-coupled storage with local SSD cache (per instance) |Super-fast local SSD storage (per instance) | -|**Storage size**1 |5 GB – 4 TB |Up to 100 TB |5 GB – 4 TB | -|**IOPS** |500 IOPS per vCore with 7,000 maximum IOPS. |Hyperscale is a multi-tiered architecture with caching at multiple levels. Effective IOPS will depend on the workload. |5,000 IOPS with 200,000 maximum IOPS. | -|**Availability** |1 replica, no Read Scale-out, zone-redundant HA (preview), no local cache. |Multiple replicas, up to 4 Read Scale-out, zone-redundant HA (preview), partial local cache. |3 replicas, 1 Read Scale-out, zone-redundant HA, full local storage. | -|**Backups** |A choice of geo-redundant, zone-redundant, or locally redundant backup storage, 1-35 day retention (default 7 days). |A choice of geo-redundant, zone-redundant, or locally redundant backup storage, 7 day retention. 
|A choice of geo-redundant, zone-redundant, or locally redundant backup storage, 1-35 day retention (default 7 days). |
| -| **Upgrade an existing database to Hyperscale** | Migrating an existing database in Azure SQL Database to the Hyperscale tier is a size of data operation. | Learn [how to migrate an existing database to Hyperscale](manage-hyperscale-database.md#migrate-an-existing-database-to-hyperscale).| -| **Reverse migrate a Hyperscale database to the General Purpose service tier (preview)** | If you previously migrated an existing Azure SQL Database to the Hyperscale service tier, you can reverse migrate the database to the General Purpose service tier within 45 days of the original migration to Hyperscale.

    If you wish to migrate the database to another service tier, such as Business Critical, first reverse migrate to the General Purpose service tier, then change the service tier. | Learn [how to reverse migrate from Hyperscale](manage-hyperscale-database.md#reverse-migrate-from-hyperscale), including the [limitations for reverse migration](manage-hyperscale-database.md#limitations-for-reverse-migration).| -| | | | - -## Database high availability in Hyperscale - -As in all other service tiers, Hyperscale guarantees data durability for committed transactions regardless of compute replica availability. The extent of downtime due to the primary replica becoming unavailable depends on the type of failover (planned vs. unplanned), [whether zone redundancy is configured](high-availability-sla.md#hyperscale-service-tier-zone-redundant-availability-preview), and on the presence of at least one high-availability replica. In a planned failover (i.e. a maintenance event), the system either creates the new primary replica before initiating a failover, or uses an existing high-availability replica as the failover target. In an unplanned failover (i.e. a hardware failure on the primary replica), the system uses a high-availability replica as a failover target if one exists, or creates a new primary replica from the pool of available compute capacity. In the latter case, downtime duration is longer due to extra steps required to create the new primary replica. - -For Hyperscale SLA, see [SLA for Azure SQL Database](https://azure.microsoft.com/support/legal/sla/azure-sql-database). - -## Backup and restore - -Backup and restore operations for Hyperscale databases are file-snapshot based. This enables these operations to be nearly instantaneous. Since Hyperscale architecture utilizes the storage layer for backup and restore, processing burden and performance impact to compute replicas are significantly reduced. 
Learn more in [Hyperscale backups and storage redundancy](automated-backups-overview.md#hyperscale-backups-and-storage-redundancy). - -## Disaster recovery for Hyperscale databases - -If you need to restore a Hyperscale database in Azure SQL Database to a region other than the one it's currently hosted in, as part of a disaster recovery operation or drill, relocation, or any other reason, the primary method is to do a geo-restore of the database. Geo-restore is only available when geo-redundant storage (RA-GRS) has been chosen for storage redundancy. - -Learn more in [restoring a Hyperscale database to a different region](automated-backups-overview.md#restoring-a-hyperscale-database-to-a-different-region). - -## Available regions - -The Azure SQL Database Hyperscale tier is enabled in the vast majority of Azure regions. If you want to create a Hyperscale database in a region where Hyperscale is not enabled by default, you can send an onboarding request via Azure portal. For instructions, see [Request quota increases for Azure SQL Database](quota-increase-request.md). When submitting your request, use the following guidelines: - -- Use the [Region access](quota-increase-request.md#region) SQL Database quota type. -- In the description, add the compute SKU/total cores including high-availability and named replicas, and indicate that you are requesting Hyperscale capacity. -- Also specify a projection of the total size of all databases over time in TB. - -## Known limitations - -These are the current limitations of the Hyperscale service tier. We're actively working to remove as many of these limitations as possible. - -| Issue | Description | -| :---- | :--------- | -| Backup retention is currently seven days; long-term retention policies aren't yet supported. | Hyperscale has a unique method for managing backups, so a non-Hyperscale database can't be restored as a Hyperscale database, and a Hyperscale database can't be restored as a non-Hyperscale database.

    For databases migrated to Hyperscale from other Azure SQL Database service tiers, pre-migration backups are kept for the duration of [backup retention](automated-backups-overview.md#backup-retention) period of the source database, including long-term retention policies. Restoring a pre-migration backup within the backup retention period of the database is supported [programmatically](recovery-using-backups.md#programmatic-recovery-using-automated-backups). You can restore these backups to any non-Hyperscale service tier.| -| Service tier change from Hyperscale to another tier is not supported directly | Reverse migration to the General Purpose service tier allows customers who have recently migrated an existing database in Azure SQL Database to the Hyperscale service tier to move back in an emergency, should Hyperscale not meet their needs. While reverse migration is initiated by a service tier change, it's essentially a size-of-data move between different architectures. Databases created in the Hyperscale service tier are not eligible for reverse migration. Learn the [limitations for reverse migration](manage-hyperscale-database.md#limitations-for-reverse-migration).

    For databases that don't qualify for reverse migration, the only way to migrate from Hyperscale to a non-Hyperscale service tier is to export/import using a bacpac file or other data movement technologies (Bulk Copy, Azure Data Factory, Azure Databricks, SSIS, etc.) Bacpac export/import from Azure portal, from PowerShell using [New-AzSqlDatabaseExport](/powershell/module/az.sql/new-azsqldatabaseexport) or [New-AzSqlDatabaseImport](/powershell/module/az.sql/new-azsqldatabaseimport), from Azure CLI using [az sql db export](/cli/azure/sql/db#az_sql_db_export) and [az sql db import](/cli/azure/sql/db#az_sql_db_import), and from [REST API](/rest/api/sql/) is not supported. Bacpac import/export for smaller Hyperscale databases (up to 200 GB) is supported using SSMS and [SqlPackage](/sql/tools/sqlpackage) version 18.4 and later. For larger databases, bacpac export/import may take a long time, and may fail for various reasons. | -| When changing Azure SQL Database service tier to Hyperscale, the operation fails if the database has any data files larger than 1 TB | In some cases, it may be possible to work around this issue by [shrinking](file-space-manage.md#shrinking-data-files) the large files to be less than 1 TB before attempting to change the service tier to Hyperscale. Use the following query to determine the current size of database files. `SELECT file_id, name AS file_name, size * 8. / 1024 / 1024 AS file_size_GB FROM sys.database_files WHERE type_desc = 'ROWS'`;| -| SQL Managed Instance | Azure SQL Managed Instance isn't currently supported with Hyperscale databases. | -| Elastic Pools | Elastic Pools aren't currently supported with Hyperscale.| -| Migration of databases with In-Memory OLTP objects | Hyperscale supports a subset of In-Memory OLTP objects, including memory-optimized table types, table variables, and natively compiled modules. 
However, when any kind of In-Memory OLTP objects are present in the database being migrated, migration from Premium and Business Critical service tiers to Hyperscale is not supported. To migrate such a database to Hyperscale, all In-Memory OLTP objects and their dependencies must be dropped. After the database is migrated, these objects can be recreated. Durable and non-durable memory-optimized tables are not currently supported in Hyperscale, and must be changed to disk tables.| -| Geo-replication | [Geo-replication](active-geo-replication-overview.md) and [auto-failover groups](auto-failover-group-overview.md) on Hyperscale is now in public preview. | -| Intelligent Database Features | With the exception of the "Force Plan" option, all other Automatic Tuning options aren't yet supported on Hyperscale: options may appear to be enabled, but there won't be any recommendations or actions made. | -| Query Performance Insights | Query Performance Insights is currently not supported for Hyperscale databases. | -| Shrink Database | DBCC SHRINKDATABASE or DBCC SHRINKFILE isn't currently supported for Hyperscale databases. | -| Database integrity check | DBCC CHECKDB isn't currently supported for Hyperscale databases. DBCC CHECKTABLE ('TableName') WITH TABLOCK and DBCC CHECKFILEGROUP WITH TABLOCK may be used as a workaround. See [Data Integrity in Azure SQL Database](https://azure.microsoft.com/blog/data-integrity-in-azure-sql-database/) for details on data integrity management in Azure SQL Database. | -| Elastic Jobs | Using a Hyperscale database as the Job database is not supported. However, elastic jobs can target Hyperscale databases in the same way as any other database in Azure SQL Database. | -|Data Sync| Using a Hyperscale database as a Hub or Sync Metadata database is not supported. However, a Hyperscale database can be a member database in a Data Sync topology. | -|Import Export | Import-Export service is currently not supported for Hyperscale databases. 
| - -## Next steps - -Learn more about Hyperscale in Azure SQL Database in the following articles: - -- For an FAQ on Hyperscale, see [Frequently asked questions about Hyperscale](service-tier-hyperscale-frequently-asked-questions-faq.yml). -- For information about service tiers, see [Service tiers](purchasing-models.md). -- See [Overview of resource limits on a server](resource-limits-logical-server.md) for information about limits at the server and subscription levels. -- For purchasing model limits for a single database, see [Azure SQL Database vCore-based purchasing model limits for a single database](resource-limits-vcore-single-databases.md). -- For a features and comparison list, see [SQL common features](features-comparison.md). -- Learn about the [Hyperscale distributed functions architecture](hyperscale-architecture.md). -- Learn [How to manage a Hyperscale database](manage-hyperscale-database.md). diff --git a/articles/azure-sql/database/service-tiers-dtu.md b/articles/azure-sql/database/service-tiers-dtu.md deleted file mode 100644 index 8cda8e6c04101..0000000000000 --- a/articles/azure-sql/database/service-tiers-dtu.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -title: DTU-based purchasing model -description: Learn about the DTU-based purchasing model for Azure SQL Database and compare compute and storage sizes based on service tiers. -services: sql-database -ms.service: sql-database -ms.subservice: service-overview -ms.custom: references_regions -ms.devlang: -ms.topic: conceptual -author: dimitri-furman -ms.author: dfurman -ms.reviewer: kendralittle, mathoma -ms.date: 04/06/2022 ---- -# DTU-based purchasing model overview -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this article, learn about the DTU-based purchasing model for Azure SQL Database. - -To learn more, review [vCore-based purchasing model](service-tiers-vcore.md) and [compare purchasing models](purchasing-models.md). 
- -## Database transaction units (DTUs) - -A database transaction unit (DTU) represents a blended measure of CPU, memory, reads, and writes. Service tiers in the DTU-based purchasing model are differentiated by a range of compute sizes with a fixed amount of included storage, fixed retention period for backups, and fixed price. All service tiers in the DTU-based purchasing model provide flexibility of changing compute sizes with minimal [downtime](https://azure.microsoft.com/support/legal/sla/azure-sql-database); however, there is a switch over period where connectivity is lost to the database for a short amount of time, which can be mitigated using retry logic. Single databases and elastic pools are billed hourly based on service tier and compute size. - -For a single database at a specific compute size within a [service tier](single-database-scale.md), Azure SQL Database guarantees a certain level of resources for that database (independent of any other database). This guarantee provides a predictable level of performance. The amount of resources allocated for a database is calculated as a number of DTUs and is a bundled measure of compute, storage, and I/O resources. - -The ratio among these resources is originally determined by an [online transaction processing (OLTP) benchmark workload](dtu-benchmark.md) designed to be typical of real-world OLTP workloads. When your workload exceeds the amount of any of these resources, your throughput is throttled, resulting in slower performance and time-outs. - -For single databases, the resources used by your workload don't impact the resources available to other databases in the Azure cloud. Likewise, the resources used by other workloads don't impact the resources available to your database. - -![Bounding box](./media/purchasing-models/bounding-box.png) - -DTUs are most useful for understanding the relative resources that are allocated for databases at different compute sizes and service tiers. 
For example: - -- Doubling the DTUs by increasing the compute size of a database equates to doubling the set of resources available to that database. -- A premium service tier P11 database with 1750 DTUs provides 350 times more DTU compute power than a basic service tier database with 5 DTUs. - -To gain deeper insight into the resource (DTU) consumption of your workload, use [query-performance insights](query-performance-insight-use.md) to: - -- Identify the top queries by CPU/duration/execution count that can potentially be tuned for improved performance. For example, an I/O-intensive query might benefit from [in-memory optimization techniques](../in-memory-oltp-overview.md) to make better use of the available memory at a certain service tier and compute size. -- Drill down into the details of a query to view its text and its history of resource usage. -- Access performance-tuning recommendations that show actions taken by [SQL Database Advisor](database-advisor-implement-performance-recommendations.md). - -### Elastic database transaction units (eDTUs) - -Rather than provide a dedicated set of resources (DTUs) that might not always be needed, you can place these databases into an [elastic pool](elastic-pool-overview.md). The databases in an elastic pool use a single instance of the database engine and share the same pool of resources. - -The shared resources in an elastic pool are measured by elastic database transaction units (eDTUs). Elastic pools provide a simple, cost-effective solution to manage performance goals for multiple databases that have widely varying and unpredictable usage patterns. An elastic pool guarantees that all the resources can't be consumed by one database in the pool, while ensuring that each database in the pool always has a minimum amount of necessary resources available. - -A pool is given a set number of eDTUs for a set price. In the elastic pool, individual databases can autoscale within the configured boundaries. 
A database under a heavier load will consume more eDTUs to meet demand. Databases under lighter loads will consume fewer eDTUs. Databases with no load will consume no eDTUs. Because resources are provisioned for the entire pool, rather than per database, elastic pools simplify your management tasks and provide a predictable budget for the pool. - -You can add additional eDTUs to an existing pool with minimal database downtime. Similarly, if you no longer need extra eDTUs, remove them from an existing pool at any time. You can also add databases to or remove databases from a pool at any time. To reserve eDTUs for other databases, limit the number of eDTUs databases can use under a heavy load. If a database has consistently high resource utilization that impacts other databases in the pool, move it out of the pool and configure it as a single database with a predictable amount of required resources. - -#### Workloads that benefit from an elastic pool of resources - -Pools are well suited for databases with a low resource-utilization average and relatively infrequent utilization spikes. For more information, see [When should you consider a SQL Database elastic pool?](elastic-pool-overview.md). - -## Determine the number of DTUs needed by a workload - -If you want to migrate an existing on-premises or SQL Server virtual machine workload to SQL Database, see [SKU recommendations](/sql/dma/dma-sku-recommend-sql-db) to approximate the number of DTUs needed. For an existing SQL Database workload, use [query-performance insights](query-performance-insight-use.md) to understand your database-resource consumption (DTUs) and gain deeper insights for optimizing your workload. The [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database) dynamic management view (DMV) lets you view resource consumption for the last hour. 
The [sys.resource_stats](/sql/relational-databases/system-catalog-views/sys-resource-stats-azure-sql-database) catalog view displays resource consumption for the last 14 days, but at a lower fidelity of five-minute averages. - -## Determine DTU utilization - -To determine the average percentage of DTU/eDTU utilization relative to the DTU/eDTU limit of a database or an elastic pool, use the following formula: - -`avg_dtu_percent = MAX(avg_cpu_percent, avg_data_io_percent, avg_log_write_percent)` - -The input values for this formula can be obtained from [sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database), [sys.resource_stats](/sql/relational-databases/system-catalog-views/sys-resource-stats-azure-sql-database), and [sys.elastic_pool_resource_stats](/sql/relational-databases/system-catalog-views/sys-elastic-pool-resource-stats-azure-sql-database) DMVs. In other words, to determine the percentage of DTU/eDTU utilization toward the DTU/eDTU limit of a database or an elastic pool, pick the largest percentage value from the following: `avg_cpu_percent`, `avg_data_io_percent`, and `avg_log_write_percent` at a given point in time. - -> [!NOTE] -> The DTU limit of a database is determined by CPU, reads, writes, and memory available to the database. However, because the SQL Database engine typically uses all available memory for its data cache to improve performance, the `avg_memory_usage_percent` value will usually be close to 100 percent, regardless of current database load. Therefore, even though memory does indirectly influence the DTU limit, it is not used in the DTU utilization formula. - -## Hardware configuration - -In the DTU-based purchasing model, customers cannot choose the hardware configuration used for their databases. 
While a given database usually stays on a specific type of hardware for a long time (commonly for multiple months), there are certain events that can cause a database to be moved to different hardware. - -For example, a database can be moved to different hardware if it's scaled up or down to a different service objective, or if the current infrastructure in a datacenter is approaching its capacity limits, or if the currently used hardware is being decommissioned due to its end of life. - -If a database is moved to different hardware, workload performance can change. The DTU model guarantees that the throughput and response time of the [DTU benchmark](dtu-benchmark.md) workload will remain substantially identical as the database moves to a different hardware type, as long as its service objective (the number of DTUs) stays the same. - -However, across the wide spectrum of customer workloads running in Azure SQL Database, the impact of using different hardware for the same service objective can be more pronounced. Different workloads may benefit from different hardware configurations and features. Therefore, for workloads other than the [DTU benchmark](dtu-benchmark.md), it's possible to see performance differences if the database moves from one type of hardware to another. - -For example, an application that is sensitive to network latency can see better performance on Gen5 hardware vs. Gen4 due to the use of Accelerated Networking in Gen5, but an application using intensive read IO can see better performance on Gen4 hardware versus Gen5 due to a higher memory per core ratio on Gen4. - -Customers can use the [vCore](service-tiers-vcore.md) model to choose their preferred hardware configuration during database creation and scaling. 
In the vCore model, detailed resource limits of each service objective in each hardware configuration are documented for [single databases](resource-limits-vcore-single-databases.md) and [elastic pools](resource-limits-vcore-elastic-pools.md). For more information about hardware in the vCore model, see [Hardware configuration for SQL Database](./service-tiers-sql-database-vcore.md#hardware-configuration) or [Hardware configuration for SQL Managed Instance](../managed-instance/service-tiers-managed-instance-vcore.md#hardware-configurations). - -## Compare service tiers - -Choosing a service tier depends primarily on business continuity, storage, and performance requirements. - -||Basic|Standard|Premium| -| :-- | --: |--:| --:| -|**Target workload**|Development and production|Development and production|Development and production| -|**Uptime SLA**|99.99%|99.99%|99.99%| -|**Maximum backup retention**|7 days|35 days|35 days| -|**CPU**|Low|Low, Medium, High|Medium, High| -|**IOPS (approximate)**\* |1-4 IOPS per DTU| 1-4 IOPS per DTU | >25 IOPS per DTU| -|**IO latency (approximate)**|5 ms (read), 10 ms (write)|5 ms (read), 10 ms (write)|2 ms (read/write)| -|**Columnstore indexing** |N/A|S3 and above|Supported| -|**In-memory OLTP**|N/A|N/A|Supported| - -\* All read and write IOPS against data files, including background IO (checkpoint and lazy writer) - -> [!IMPORTANT] -> The Basic, S0, S1 and S2 service objectives provide less than one vCore (CPU). For CPU-intensive workloads, a service objective of S3 or greater is recommended. -> -> In the Basic, S0, and S1 service objectives, database files are stored in Azure Standard Storage, which uses hard disk drive (HDD)-based storage media. These service objectives are best suited for development, testing, and other infrequently accessed workloads that are less sensitive to performance variability. 
-> - -> [!TIP] -> To see actual [resource governance](resource-limits-logical-server.md#resource-governance) limits for a database or elastic pool, query the [sys.dm_user_db_resource_governance](/sql/relational-databases/system-dynamic-management-views/sys-dm-user-db-resource-governor-azure-sql-database) view. - -> [!NOTE] -> You can get a free database in Azure SQL Database at the Basic service tier in conjunction with an Azure free account to explore Azure. For information, see [Create a managed cloud database with your Azure free account](https://azure.microsoft.com/free/services/sql-database/). - -## Resource limits - -Resource limits differ for single and pooled databases. - -### Single database storage limits - -Compute sizes are expressed in terms of Database Transaction Units (DTUs) for single databases and elastic Database Transaction Units (eDTUs) for elastic pools. To learn more, review [Resource limits for single databases](resource-limits-dtu-single-databases.md). - -||Basic|Standard|Premium| -| :-- | --: | --: | --: | -| **Maximum storage size** | 2 GB | 1 TB | 4 TB | -| **Maximum DTUs** | 5 | 3000 | 4000 | - -> [!IMPORTANT] -> Under some circumstances, you may need to shrink a database to reclaim unused space. For more information, see [Manage file space in Azure SQL Database](file-space-manage.md). - -### Elastic pool limits - -To learn more, review [Resource limits for pooled databases](resource-limits-dtu-elastic-pools.md). 
- -|| **Basic** | **Standard** | **Premium** | -| :-- | --: | --: | --: | -| **Maximum storage size per database** | 2 GB | 1 TB | 1 TB | -| **Maximum storage size per pool** | 156 GB | 4 TB | 4 TB | -| **Maximum eDTUs per database** | 5 | 3000 | 4000 | -| **Maximum eDTUs per pool** | 1600 | 3000 | 4000 | -| **Maximum number of databases per pool** | 500 | 500 | 100 | - -> [!IMPORTANT] -> More than 1 TB of storage in the Premium tier is currently available in all regions except: China East, China North, Germany Central, and Germany Northeast. In these regions, the storage max in the Premium tier is limited to 1 TB. For more information, see [P11-P15 current limitations](single-database-scale.md#p11-and-p15-constraints-when-max-size-greater-than-1-tb). -> [!IMPORTANT] -> Under some circumstances, you may need to shrink a database to reclaim unused space. For more information, see [manage file space in Azure SQL Database](file-space-manage.md). - -## DTU Benchmark - -Physical characteristics (CPU, memory, IO) associated with each DTU measure are calibrated using a benchmark that simulates real-world database workload. - -Learn about the schema, transaction types used, workload mix, users and pacing, scaling rules, and metrics associated with the [DTU benchmark](dtu-benchmark.md). - -## Compare DTU-based and vCore purchasing models - -While the DTU-based purchasing model is based on a bundled measure of compute, storage, and I/O resources, by comparison the [vCore purchasing model for Azure SQL Database](service-tiers-sql-database-vcore.md) allows you to independently choose and scale compute and storage resources. - -The vCore-based purchasing model also allows you to use [Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/) for SQL Server to save costs, and offers [Serverless](serverless-tier-overview.md) and [Hyperscale](service-tier-hyperscale.md) options for Azure SQL Database that are not available in the DTU-based purchasing model. 
- -Learn more in [Compare vCore and DTU-based purchasing models of Azure SQL Database](purchasing-models.md). - -## Next steps - -Learn more about purchasing models and related concepts in the following articles: - -- For details on specific compute sizes and storage size choices available for single databases, see [SQL Database DTU-based resource limits for single databases](resource-limits-dtu-single-databases.md#single-database-storage-sizes-and-compute-sizes). -- For details on specific compute sizes and storage size choices available for elastic pools, see [SQL Database DTU-based resource limits](resource-limits-dtu-elastic-pools.md#elastic-pool-storage-sizes-and-compute-sizes). -- For information on the benchmark associated with the DTU-based purchasing model, see [DTU benchmark](dtu-benchmark.md). -- [Compare vCore and DTU-based purchasing models of Azure SQL Database](purchasing-models.md). diff --git a/articles/azure-sql/database/service-tiers-sql-database-vcore.md b/articles/azure-sql/database/service-tiers-sql-database-vcore.md deleted file mode 100644 index af302bb5298d5..0000000000000 --- a/articles/azure-sql/database/service-tiers-sql-database-vcore.md +++ /dev/null @@ -1,237 +0,0 @@ ---- -title: vCore purchasing model -description: The vCore purchasing model lets you independently scale compute and storage resources, match on-premises performance, and optimize price for Azure SQL Database -services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.topic: conceptual -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, sashan, moslake, mathoma -ms.date: 04/22/2022 -ms.custom: references_regions, ignite-fall-2021 ---- -# vCore purchasing model - Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -> [!div class="op_single_selector"] -> * [Azure SQL Database](service-tiers-sql-database-vcore.md) -> * [Azure SQL Managed 
Instance](../managed-instance/service-tiers-managed-instance-vcore.md) - -This article reviews the [vCore purchasing model](service-tiers-vcore.md) for [Azure SQL Database](sql-database-paas-overview.md). For help choosing between the vCore and DTU purchasing models, see the [differences between the vCore and DTU purchasing models](purchasing-models.md). - -## Overview - -[!INCLUDE [vcore-overview](../includes/vcore-overview.md)] - -> [!IMPORTANT] -> Compute resources, I/O, and data and log storage are charged per database or elastic pool. Backup storage is charged per each database. - -The vCore purchasing model used by Azure SQL Database provides several benefits over the DTU purchasing model: - -- Higher compute, memory, I/O, and storage limits. -- Choice of hardware configuration to better match compute and memory requirements of the workload. -- Pricing discounts for [Azure Hybrid Benefit (AHB)](../azure-hybrid-benefit.md). -- Greater transparency in the hardware details that power the compute, that facilitates planning for migrations from on-premises deployments. -- [Reserved instance pricing](reserved-capacity-overview.md) is only available for vCore purchasing model. -- Higher scaling granularity with multiple compute sizes available. - - -## Service tiers - -Service tier options in the vCore purchasing model include General Purpose, Business Critical, and Hyperscale. The service tier generally defines hardware, storage type and IOPS, high availability and disaster recovery options, and other features like memory-optimized object types. - -For greater details, review resource limits for [logical server](resource-limits-logical-server.md), [single databases](resource-limits-vcore-single-databases.md), and [pooled databases](resource-limits-vcore-elastic-pools.md). - -|**Use case**|**General Purpose**|**Business Critical**|**Hyperscale**| -|---|---|---|---| -|**Best for**|Most business workloads. 
Offers budget-oriented, balanced, and scalable compute and storage options. |Offers business applications the highest resilience to failures by using several isolated replicas, and provides the highest I/O performance per database replica.|Most business workloads with highly scalable storage and read-scale requirements. Offers higher resilience to failures by allowing configuration of more than one isolated database replica. | -|**Availability**|1 replica, no read-scale replicas,
    zone-redundant high availability (HA) |3 replicas, 1 [read-scale replica](read-scale-out.md),
    zone-redundant high availability (HA)|zone-redundant high availability (HA) (preview)| -|**Pricing/billing** | [vCore, reserved storage, and backup storage](https://azure.microsoft.com/pricing/details/sql-database/single/) are charged.
    IOPS is not charged. |[vCore, reserved storage, and backup storage](https://azure.microsoft.com/pricing/details/sql-database/single/) are charged.
    IOPS is not charged. | [vCore for each replica and used storage](https://azure.microsoft.com/pricing/details/sql-database/single/) are charged.
    IOPS not yet charged. | -|**Discount models**| [Reserved instances](reserved-capacity-overview.md)
    [Azure Hybrid Benefit](../azure-hybrid-benefit.md) (not available on dev/test subscriptions)
    [Enterprise](https://azure.microsoft.com/offers/ms-azr-0148p/) and [Pay-As-You-Go](https://azure.microsoft.com/offers/ms-azr-0023p/) Dev/Test subscriptions|[Reserved instances](reserved-capacity-overview.md)
    [Azure Hybrid Benefit](../azure-hybrid-benefit.md) (not available on dev/test subscriptions)
    [Enterprise](https://azure.microsoft.com/offers/ms-azr-0148p/) and [Pay-As-You-Go](https://azure.microsoft.com/offers/ms-azr-0023p/) Dev/Test subscriptions | [Azure Hybrid Benefit](../azure-hybrid-benefit.md) (not available on dev/test subscriptions)
    [Enterprise](https://azure.microsoft.com/offers/ms-azr-0148p/) and [Pay-As-You-Go](https://azure.microsoft.com/offers/ms-azr-0023p/) Dev/Test subscriptions| - - - -> [!NOTE] -> For more information on the Service Level Agreement (SLA), see [SLA for Azure SQL Database](https://azure.microsoft.com/support/legal/sla/azure-sql-database/) - -### Choosing a service tier - -For information on selecting a service tier for your particular workload, see the following articles: - -- [When to choose the General Purpose service tier](service-tier-general-purpose.md#when-to-choose-this-service-tier) -- [When to choose the Business Critical service tier](service-tier-business-critical.md#when-to-choose-this-service-tier) -- [When to choose the Hyperscale service tier](service-tier-hyperscale.md#who-should-consider-the-hyperscale-service-tier) - -## Resource limits - -For vCore resource limits, see [logical servers](resource-limits-logical-server.md), [single databases](resource-limits-vcore-single-databases.md), [pooled databases](resource-limits-vcore-elastic-pools.md). - -## Compute tiers - -Compute tier options in the vCore model include the provisioned and [serverless](serverless-tier-overview.md) compute tiers. - -- While the **provisioned compute tier** provides a specific amount of compute resources that are continuously provisioned independent of workload activity, the **serverless compute tier** auto-scales compute resources based on workload activity. -- While the **provisioned compute tier** bills for the amount of compute provisioned at a fixed price per hour, the **serverless compute tier** bills for the amount of compute used, per second. - - -## Hardware configuration - -Hardware configurations in the vCore model include Gen4, Gen5, M-series, Fsv2-series, and DC-series. Hardware configuration defines compute and memory limits and other characteristics that impact workload performance. 
- -Certain hardware configurations such as Gen5 may use more than one type of processor (CPU), as described in [Compute resources (CPU and memory)](#compute-resources-cpu-and-memory). While a given database or elastic pool tends to stay on the hardware with the same CPU type for a long time (commonly for multiple months), there are certain events that can cause a database or pool to be moved to hardware that uses a different CPU type. For example, a database or pool can be moved if it is scaled up or down to a different service objective, or if the current infrastructure in a datacenter is approaching its capacity limits, or if the currently used hardware is being decommissioned due to its end of life. - -For some workloads, a move to a different CPU type can change performance. SQL Database configures hardware with the goal to provide predictable workload performance even if CPU type changes, keeping performance changes within a narrow band. However, across the wide spectrum of customer workloads running in SQL Database, and as new types of CPUs become available, it is possible to occasionally see more noticeable changes in performance if a database or pool moves to a different CPU type. - -Regardless of CPU type used, resource limits for a database or elastic pool remain the same as long as the database stays on the same service objective. - -### Gen4/Gen5 - -- Gen4/Gen5 hardware provides balanced compute and memory resources, and is suitable for most database workloads that do not have higher memory, higher vCore, or faster single vCore requirements as provided by Fsv2-series or M-series. - -For regions where Gen4/Gen5 is available, see [Gen4/Gen5 availability](#gen4gen5-1). - -### Fsv2-series - -- Fsv2-series is a compute optimized hardware configuration delivering low CPU latency and high clock speed for the most CPU demanding workloads. -- Depending on the workload, Fsv2-series can deliver more CPU performance per vCore than other types of hardware. 
For example, the 72 vCore Fsv2 compute size can provide more CPU performance than 80 vCores on Gen5, at lower cost. -- Fsv2 provides less memory and tempdb per vCore than other hardware, so workloads sensitive to those limits may perform better on Gen5 or M-series. - -Fsv2-series is only supported in the General Purpose tier. For regions where Fsv2-series is available, see [Fsv2-series availability](#fsv2-series-1). - -### M-series - -- M-series is a memory optimized hardware configuration for workloads demanding more memory and higher compute limits than provided by other types of hardware. -- M-series provides 29 GB per vCore and up to 128 vCores, which increases the memory limit relative to Gen5 by 8x to nearly 4 TB. - -M-series is only supported in the Business Critical tier and does not support zone redundancy. For regions where M-series is available, see [M-series availability](#m-series-1). - -#### Azure offer types supported by M-series - -To create databases or elastic pools on M-series hardware, the subscription must be a paid offer type including Pay-As-You-Go or Enterprise Agreement (EA). For a complete list of Azure offer types supported by M-series, see [current offers without spending limits](https://azure.microsoft.com/support/legal/offer-details). - - - -### DC-series - -- DC-series hardware uses Intel processors with Software Guard Extensions (Intel SGX) technology. -- DC-series is required for [Always Encrypted with secure enclaves](/sql/relational-databases/security/encryption/always-encrypted-enclaves), which is not supported with other hardware configurations. -- DC-series is designed for workloads that process sensitive data and demand confidential query processing capabilities, provided by Always Encrypted with secure enclaves. -- DC-series hardware provides balanced compute and memory resources. - -DC-series is only supported for Provisioned compute (Serverless is not supported) and does not support zone redundancy. 
For regions where DC-series is available, see [DC-series availability](#dc-series-1). - -#### Azure offer types supported by DC-series - -To create databases or elastic pools on DC-series hardware, the subscription must be a paid offer type including Pay-As-You-Go or Enterprise Agreement (EA). For a complete list of Azure offer types supported by DC-series, see [current offers without spending limits](https://azure.microsoft.com/support/legal/offer-details). - -### Selecting hardware configuration - -You can select hardware configuration for a database or elastic pool in SQL Database at the time of creation. You can also change hardware configuration of an existing database or elastic pool. - -**To select a hardware configuration when creating a SQL Database or pool** - -For detailed information, see [Create a SQL Database](single-database-create-quickstart.md). - -On the **Basics** tab, select the **Configure database** link in the **Compute + storage** section, and then select the **Change configuration** link: - -:::image type="content" source="./media/service-tiers-vcore/configure-sql-database.png" alt-text="configure SQL database" loc-scope="azure-portal"::: - -Select the desired hardware configuration: - -:::image type="content" source="./media/service-tiers-vcore/select-hardware.png" alt-text="select hardware for SQL database" loc-scope="azure-portal"::: - -**To change hardware configuration of an existing SQL Database or pool** - -For a database, on the Overview page, select the **Pricing tier** link: - -:::image type="content" source="./media/service-tiers-vcore/change-hardware.png" alt-text="change hardware for SQL Database" loc-scope="azure-portal"::: - -For a pool, on the Overview page, select **Configure**. - -Follow the steps to change configuration, and select hardware configuration as described in the previous steps. 
- -### Hardware availability - -#### Gen4/Gen5 - -Gen4 hardware is [being retired](https://azure.microsoft.com/updates/gen-4-hardware-on-azure-sql-database-approaching-end-of-life-in-2020/) and is no longer available for new deployments. All new databases must be deployed on other hardware configurations. - -Gen5 hardware is available in all public regions worldwide. - -#### Fsv2-series - -Fsv2-series is available in the following regions: -Australia Central, Australia Central 2, Australia East, Australia Southeast, Brazil South, Canada Central, East Asia, East US, France Central, India Central, Korea Central, Korea South, North Europe, South Africa North, Southeast Asia, UK South, UK West, West Europe, West US 2. - -#### M-series - -M-series is available in the following regions: -East US, North Europe, West Europe, West US 2. - - -#### DC-series - -DC-series is available in the following regions: -Canada Central, Canada East, East US, North Europe, UK South, West Europe, West US. - -If you need DC-series in a currently unsupported region, [submit a support ticket](https://portal.azure.com/#blade/Microsoft_Azure_Support/HelpAndSupportBlade/newsupportrequest). On the **Basics** page, provide the following: - -1. For **Issue type**, select **Technical**. -1. For **Service type**, select **SQL Database**. -1. For **Problem type**, select **Security, Private and Compliance**. -1. For **Problem subtype**, select **Always Encrypted**. - -:::image type="content" source="./media/service-tiers-vcore/request-dc-series.png" alt-text="Request DC-series in a new region" loc-scope="azure-portal"::: - -## Compute resources (CPU and memory) - -The following table compares compute resources in different hardware configurations and compute tiers: - -|Hardware configuration |CPU |Memory | -|:---------|:---------|:---------| -|Gen4 |- Intel® E5-2673 v3 (Haswell) 2.4-GHz processors
    - Provision up to 24 vCores (physical) |- 7 GB per vCore
    - Provision up to 168 GB| -|Gen5 |**Provisioned compute**
    - Intel® E5-2673 v4 (Broadwell) 2.3 GHz, Intel® SP-8160 (Skylake)\*, Intel® 8272CL (Cascade Lake) 2.5 GHz\*, and Intel® Xeon Platinum 8307C (Ice Lake)\* processors
    - Provision up to 80 vCores (hyper-threaded)

    **Serverless compute**
    - Intel® E5-2673 v4 (Broadwell) 2.3 GHz, Intel® SP-8160 (Skylake)\*, Intel® 8272CL (Cascade Lake) 2.5 GHz\*, and Intel Xeon® Platinum 8307C (Ice Lake)\* processors
    - Auto-scale up to 40 vCores (hyper-threaded)|**Provisioned compute**
    - 5.1 GB per vCore
    - Provision up to 408 GB

    **Serverless compute**
    - Auto-scale up to 24 GB per vCore
    - Auto-scale up to 120 GB max| -|Fsv2-series |- Intel® 8168 (Skylake) processors
    - Featuring a sustained all core turbo clock speed of 3.4 GHz and a maximum single core turbo clock speed of 3.7 GHz.
    - Provision up to 72 vCores (hyper-threaded)|- 1.9 GB per vCore
    - Provision up to 136 GB| -|M-series |- Intel® E7-8890 v3 2.5 GHz and Intel® 8280M 2.7 GHz (Cascade Lake) processors
    - Provision up to 128 vCores (hyper-threaded)|- 29 GB per vCore
    - Provision up to 3.7 TB| -|DC-series | - Intel® XEON E-2288G processors
    - Featuring Intel Software Guard Extensions (Intel SGX)
    - Provision up to 8 vCores (physical) | 4.5 GB per vCore | - -\* In the [sys.dm_user_db_resource_governance](/sql/relational-databases/system-dynamic-management-views/sys-dm-user-db-resource-governor-azure-sql-database) dynamic management view, hardware generation for databases using Intel® SP-8160 (Skylake) processors appears as Gen6, hardware generation for databases using Intel® 8272CL (Cascade Lake) appears as Gen7, and hardware generation for databases using Intel Xeon® Platinum 8307C (Ice Lake) appear as Gen8. For a given compute size and hardware configuration, resource limits are the same regardless of CPU type (Broadwell, Skylake, Ice Lake, or Cascade Lake). - -For more information see resource limits for [single databases](resource-limits-vcore-single-databases.md) and [elastic pools](resource-limits-vcore-elastic-pools.md). - -## Next steps - -- To get started, see [Creating a SQL Database using the Azure portal](single-database-create-quickstart.md) -- For pricing details, see the [Azure SQL Database pricing page](https://azure.microsoft.com/pricing/details/sql-database/single/) -- For details about the specific compute and storage sizes available, see: - - [vCore-based resource limits for Azure SQL Database](resource-limits-vcore-single-databases.md) - - [vCore-based resource limits for pooled Azure SQL Database](resource-limits-vcore-elastic-pools.md) diff --git a/articles/azure-sql/database/service-tiers-vcore.md b/articles/azure-sql/database/service-tiers-vcore.md deleted file mode 100644 index 32d3b768c48c7..0000000000000 --- a/articles/azure-sql/database/service-tiers-vcore.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: vCore purchasing model -titleSuffix: Azure SQL Database & SQL Managed Instance -description: The vCore purchasing model lets you independently scale compute and storage resources, match on-premises performance, and optimize price for Azure SQL Database and Azure SQL Managed Instance. 
-services: sql-database -ms.service: sql-db-mi -ms.subservice: service-overview -ms.topic: conceptual -author: dimitri-furman -ms.author: dfurman -ms.reviewer: kendralittle, mathoma -ms.date: 04/06/2022 -ms.custom: devx-track-azurepowershell ---- -# vCore purchasing model overview - Azure SQL Database and Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -This article provides a brief overview of the vCore purchasing model used by both Azure SQL Database and Azure SQL Managed Instance. To learn more about the vCore model for each product, review [Azure SQL Database](service-tiers-sql-database-vcore.md) and [Azure SQL Managed Instance](../managed-instance/service-tiers-managed-instance-vcore.md). - -## Overview - -[!INCLUDE [vcore-overview](../includes/vcore-overview.md)] - -> [!IMPORTANT] -> In Azure SQL Database, compute resources (CPU and memory), I/O, and data and log storage are charged per database or elastic pool. Backup storage is charged per each database. - -The vCore purchasing model provides transparency in database CPU, memory, and storage resource allocation, hardware configuration, higher scaling granularity, and pricing discounts with the [Azure Hybrid Benefit (AHB)](../azure-hybrid-benefit.md) and [Reserved Instance (RI)](../database/reserved-capacity-overview.md). - -In the case of Azure SQL Database, the vCore purchasing model provides higher compute, memory, I/O, and storage limits than the DTU model. - -## Service tiers - -Two vCore service tiers are available in both Azure SQL Database and Azure SQL Managed Instance: - -- [General purpose](service-tier-general-purpose.md) is a budget-friendly tier designed for most workloads with common performance and availability requirements. -- [Business critical](service-tier-business-critical.md) tier is designed for performance-sensitive workloads with strict availability requirements. 
- -The [Hyperscale service tier](service-tier-hyperscale.md) is also available for single databases in Azure SQL Database. This service tier is designed for most business workloads, providing highly scalable storage, read scale-out, fast scaling, and fast database restore capabilities. - -## Resource limits - -For more information on resource limits, see: - - - Azure SQL Database: [logical server](resource-limits-logical-server.md), [single databases](resource-limits-vcore-single-databases.md), [pooled databases](resource-limits-vcore-elastic-pools.md) - - [Azure SQL Managed Instance](../managed-instance/resource-limits.md) - -## Compute cost - -The vCore-based purchasing model has a provisioned compute tier for both Azure SQL Database and Azure SQL Managed Instance, and a serverless compute tier for Azure SQL Database. - -In the provisioned compute tier, the compute cost reflects the total compute capacity continuously provisioned for the application independent of workload activity. Choose the resource allocation that best suits your business needs based on vCore and memory requirements, then scale resources up and down as needed by your workload. - -In the serverless compute tier for Azure SQL database, compute resources are auto-scaled based on workload capacity and billed for the amount of compute used, per second. - -Since three additional replicas are automatically allocated in the Business Critical service tier, the price is approximately 2.7 times higher than it is in the General Purpose service tier. Likewise, the higher storage price per GB in the Business Critical service tier reflects the higher IO limits and lower latency of the local SSD storage. - -## Data and log storage - -The following factors affect the amount of storage used for data and log files, and apply to General Purpose and Business Critical tiers. - -- Each compute size supports a configurable maximum data size, with a default of 32 GB. 
-- When you configure maximum data size, an additional 30 percent of billable storage is automatically added for the log file. -- In the General Purpose service tier, `tempdb` uses local SSD storage, and this storage cost is included in the vCore price. -- In the Business Critical service tier, `tempdb` shares local SSD storage with data and log files, and `tempdb` storage cost is included in the vCore price. -- In the General Purpose and Business Critical tiers, you are charged for the maximum storage size configured for a database, elastic pool, or managed instance. -- For SQL Database, you can select any maximum data size between 1 GB and the supported storage size maximum, in 1 GB increments. For SQL Managed Instance, select data sizes in multiples of 32 GB up to the supported storage size maximum. - -To monitor the current allocated and used data storage size in SQL Database, use the *allocated_data_storage* and *storage* Azure Monitor [metrics](../../azure-monitor/essentials/metrics-supported.md#microsoftsqlserversdatabases) respectively. - -For both SQL Database and SQL Managed instance, to monitor the current allocated and used storage size of individual data and log files in a database by using T-SQL, use the [sys.database_files](/sql/relational-databases/system-catalog-views/sys-database-files-transact-sql) view and the [FILEPROPERTY(... , 'SpaceUsed')](/sql/t-sql/functions/fileproperty-transact-sql) function. - -> [!TIP] -> Under some circumstances, you may need to shrink a database to reclaim unused space. For more information, see [Manage file space in Azure SQL Database](file-space-manage.md). - -## Backup storage - -Storage for database backups is allocated to support the [point-in-time restore (PITR)](recovery-using-backups.md) and [long-term retention (LTR)](long-term-retention-overview.md) capabilities of SQL Database and SQL Managed Instance. This storage is separate from data and log file storage, and is billed separately. 
- -- **PITR**: In General Purpose and Business Critical tiers, individual database backups are copied to [Azure storage](automated-backups-overview.md#restore-capabilities) automatically. The storage size increases dynamically as new backups are created. The storage is used by full, differential, and transaction log backups. The storage consumption depends on the rate of change of the database and the retention period configured for backups. You can configure a separate retention period for each database between 1 and 35 days for SQL Database, and 0 to 35 days for SQL Managed Instance. A backup storage amount equal to the configured maximum data size is provided at no extra charge. -- **LTR**: You also have the option to configure long-term retention of full backups for up to 10 years. If you set up an LTR policy, these backups are stored in Azure Blob storage automatically, but you can control how often the backups are copied. To meet different compliance requirements, you can select different retention periods for weekly, monthly, and/or yearly backups. The configuration you choose determines how much storage will be used for LTR backups. For more information, see [Long-term backup retention](long-term-retention-overview.md). 
- -## Next steps - -To get started, see: -- [Creating a SQL Database using the Azure portal](single-database-create-quickstart.md) -- [Creating a SQL Managed Instance using the Azure portal](../managed-instance/instance-create-quickstart.md) - -- For pricing details, see - - [Azure SQL Database pricing page](https://azure.microsoft.com/pricing/details/sql-database/single/) - - [Azure SQL Managed Instance single instance pricing page](https://azure.microsoft.com/pricing/details/azure-sql-managed-instance/single/) - - [Azure SQL Managed Instance pools pricing page](https://azure.microsoft.com/pricing/details/azure-sql-managed-instance/pools/) - -For details about the specific compute and storage sizes available in the General Purpose and Business Critical service tiers, see: - -- [vCore-based resource limits for Azure SQL Database](resource-limits-vcore-single-databases.md). -- [vCore-based resource limits for pooled Azure SQL Database](resource-limits-vcore-elastic-pools.md). -- [vCore-based resource limits for Azure SQL Managed Instance](../managed-instance/resource-limits.md). diff --git a/articles/azure-sql/database/single-database-create-arm-template-quickstart.md b/articles/azure-sql/database/single-database-create-arm-template-quickstart.md deleted file mode 100644 index c1a73af79a95f..0000000000000 --- a/articles/azure-sql/database/single-database-create-arm-template-quickstart.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: "Azure Resource Manager: Create a single database" -description: Create a single database in Azure SQL Database using an Azure Resource Manager template. 
-services: sql-database -ms.service: sql-database -ms.subservice: deployment-configuration -ms.custom: subject-armqs sqldbrb=1, devx-track-azurepowershell, mode-arm -ms.topic: quickstart -author: LitKnd -ms.author: kendralittle -ms.date: 06/24/2020 ---- - -# Quickstart: Create a single database in Azure SQL Database using an ARM template - -Creating a [single database](single-database-overview.md) is the quickest and simplest option for creating a database in Azure SQL Database. This quickstart shows you how to create a single database using an Azure Resource Manager template (ARM template). - -[!INCLUDE [About Azure Resource Manager](../../../includes/resource-manager-quickstart-introduction.md)] - -If your environment meets the prerequisites and you're familiar with using ARM templates, select the **Deploy to Azure** button. The template will open in the Azure portal. - -[![Deploy to Azure](../../media/template-deployments/deploy-to-azure.svg)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-quickstart-templates%2Fmaster%2Fquickstarts%2Fmicrosoft.sql%2Fsql-database%2Fazuredeploy.json) - -## Prerequisites - -If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/). - -## Review the template - -A single database has a defined set of compute, memory, IO, and storage resources using one of two [purchasing models](purchasing-models.md). When you create a single database, you also define a [server](logical-servers.md) to manage it and place it within [Azure resource group](../../active-directory-b2c/overview.md) in a specified region. - -The template used in this quickstart is from [Azure Quickstart Templates](https://azure.microsoft.com/resources/templates/sql-database/). 
- -:::code language="json" source="~/quickstart-templates/quickstarts/microsoft.sql/sql-database/azuredeploy.json"::: - -These resources are defined in the template: - -- [**Microsoft.Sql/servers**](/azure/templates/microsoft.sql/servers) -- [**Microsoft.Sql/servers/databases**](/azure/templates/microsoft.sql/servers/databases) - -More Azure SQL Database template samples can be found in [Azure Quickstart Templates](https://azure.microsoft.com/resources/templates/?resourceType=Microsoft.Sql&pageNumber=1&sort=Popular). - -## Deploy the template - -Select **Try it** from the following PowerShell code block to open Azure Cloud Shell. - -```azurepowershell-interactive -$projectName = Read-Host -Prompt "Enter a project name that is used for generating resource names" -$location = Read-Host -Prompt "Enter an Azure location (i.e. centralus)" -$adminUser = Read-Host -Prompt "Enter the SQL server administrator username" -$adminPassword = Read-Host -Prompt "Enter the SQL server administrator password" -AsSecureString - -$resourceGroupName = "${projectName}rg" - -New-AzResourceGroup -Name $resourceGroupName -Location $location -New-AzResourceGroupDeployment -ResourceGroupName $resourceGroupName -TemplateUri "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/quickstarts/microsoft.sql/sql-database/azuredeploy.json" -administratorLogin $adminUser -administratorLoginPassword $adminPassword - -Read-Host -Prompt "Press [ENTER] to continue ..." -``` - -## Validate the deployment - -To query the database, see [Query the database](single-database-create-quickstart.md#query-the-database). - -## Clean up resources - -Keep this resource group, server, and single database if you want to go to the [Next steps](#next-steps). The next steps show you how to connect and query your database using different methods. 
- -To delete the resource group: - -```azurepowershell-interactive -$resourceGroupName = Read-Host -Prompt "Enter the Resource Group name" -Remove-AzResourceGroup -Name $resourceGroupName -``` - -## Next steps - -- Create a server-level firewall rule to connect to the single database from on-premises or remote tools. For more information, see [Create a server-level firewall rule](firewall-create-server-level-portal-quickstart.md). -- After you create a server-level firewall rule, [connect and query](connect-query-content-reference-guide.md) your database using several different tools and languages. - - [Connect and query using SQL Server Management Studio](connect-query-ssms.md) - - [Connect and query using Azure Data Studio](/sql/azure-data-studio/quickstart-sql-database?toc=%2fazure%2fsql-database%2ftoc.json) -- To create a single database using the Azure CLI, see [Azure CLI samples](az-cli-script-samples-content-guide.md). -- To create a single database using Azure PowerShell, see [Azure PowerShell samples](powershell-script-content-guide.md). -- To learn how to create ARM templates, see [Create your first template](../../azure-resource-manager/templates/template-tutorial-create-first-template.md). diff --git a/articles/azure-sql/database/single-database-create-quickstart.md b/articles/azure-sql/database/single-database-create-quickstart.md deleted file mode 100644 index 237286af6a062..0000000000000 --- a/articles/azure-sql/database/single-database-create-quickstart.md +++ /dev/null @@ -1,351 +0,0 @@ ---- -title: Create a single database -description: Create a single database in Azure SQL Database using the Azure portal, PowerShell, or the Azure CLI. 
-services: sql-database -ms.service: sql-database -ms.subservice: deployment-configuration -ms.custom: contperf-fy21q1, devx-track-azurecli, devx-track-azurepowershell, mode-ui -ms.topic: quickstart -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 01/26/2022 ---- -# Quickstart: Create an Azure SQL Database single database - -In this quickstart, you create a [single database](single-database-overview.md) in Azure SQL Database using either the Azure portal, a PowerShell script, or an Azure CLI script. You then query the database using **Query editor** in the Azure portal. - -## Prerequisites - -- An active Azure subscription. If you don't have one, [create a free account](https://azure.microsoft.com/free/). -- The latest version of either [Azure PowerShell](/powershell/azure/install-az-ps) or [Azure CLI](/cli/azure/install-azure-cli-windows). - -## Create a single database - -This quickstart creates a single database in the [serverless compute tier](serverless-tier-overview.md). - -# [Portal](#tab/azure-portal) - -To create a single database in the Azure portal, this quickstart starts at the Azure SQL page. - -1. Browse to the [Select SQL Deployment option](https://portal.azure.com/#create/Microsoft.AzureSQL) page. -1. Under **SQL databases**, leave **Resource type** set to **Single database**, and select **Create**. - - :::image type="content" source="./media/single-database-create-quickstart/select-deployment.png" alt-text="Add to Azure SQL" lightbox="media/single-database-create-quickstart/select-deployment.png"::: - -1. On the **Basics** tab of the **Create SQL Database** form, under **Project details**, select the desired Azure **Subscription**. -1. For **Resource group**, select **Create new**, enter *myResourceGroup*, and select **OK**. -1. For **Database name**, enter *mySampleDatabase*. -1. 
For **Server**, select **Create new**, and fill out the **New server** form with the following values: - - **Server name**: Enter *mysqlserver*, and add some characters for uniqueness. We can't provide an exact server name to use because server names must be globally unique for all servers in Azure, not just unique within a subscription. So enter something like mysqlserver12345, and the portal lets you know if it's available or not. - - **Location**: Select a location from the dropdown list. - - **Authentication method**: Select **Use SQL authentication**. - - **Server admin login**: Enter *azureuser*. - - **Password**: Enter a password that meets requirements, and enter it again in the **Confirm password** field. - - - Select **OK**. - -1. Leave **Want to use SQL elastic pool** set to **No**. -1. Under **Compute + storage**, select **Configure database**. -1. This quickstart uses a serverless database, so leave **Service tier** set to **General Purpose (Scalable compute and storage options)** and set **Compute tier** to **Serverless**. Select **Apply**. - - :::image type="content" source="./media/single-database-create-quickstart/configure-database.png" alt-text="configure serverless database" lightbox="media/single-database-create-quickstart/configure-database.png"::: - -1. Select **Next: Networking** at the bottom of the page. - - :::image type="content" source="./media/single-database-create-quickstart/new-sql-database-basics.png" alt-text="New SQL database - Basic tab"::: - -1. On the **Networking** tab, for **Connectivity method**, select **Public endpoint**. -1. For **Firewall rules**, set **Add current client IP address** to **Yes**. Leave **Allow Azure services and resources to access this server** set to **No**. -1. Select **Next: Security** at the bottom of the page. - - :::image type="content" source="./media/single-database-create-quickstart/networking.png" alt-text="Networking tab"::: - -1. 
On the **Security tab**, you have the option to enable [Microsoft Defender for SQL](../database/azure-defender-for-sql.md). Select **Next: Additional settings** at the bottom of the page. -1. On the **Additional settings** tab, in the **Data source** section, for **Use existing data**, select **Sample**. This creates an AdventureWorksLT sample database so there's some tables and data to query and experiment with, as opposed to an empty blank database. - -1. Select **Review + create** at the bottom of the page: - - :::image type="content" source="./media/single-database-create-quickstart/additional-settings.png" alt-text="Additional settings tab"::: - -1. On the **Review + create** page, after reviewing, select **Create**. - -# [Azure CLI](#tab/azure-cli) - -The Azure CLI code blocks in this section create a resource group, server, single database, and server-level IP firewall rule for access to the server. Make sure to record the generated resource group and server names, so you can manage these resources later. - -[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment-h3.md)] - -[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../includes/cli-launch-cloud-shell-sign-in.md)] - -### Set parameter values - -The following values are used in subsequent commands to create the database and required resources. Server names need to be globally unique across all of Azure so the $RANDOM function is used to create the server name. - -Change the location as appropriate for your environment. Replace `0.0.0.0` with the IP address range to match your specific environment. Use the public IP address of the computer you're using to restrict access to the server to only your IP address. 
- -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/create-and-configure-database/create-and-configure-database.sh" id="SetParameterValues"::: - -### Create a resource group - -Create a resource group with the [az group create](/cli/azure/group) command. An Azure resource group is a logical container into which Azure resources are deployed and managed. The following example creates a resource group named *myResourceGroup* in the *eastus* location: - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/create-and-configure-database/create-and-configure-database.sh" id="CreateResourceGroup"::: - -### Create a server - -Create a server with the [az sql server create](/cli/azure/sql/server) command. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/create-and-configure-database/create-and-configure-database.sh" id="CreateServer"::: - -### Configure a server-based firewall rule - -Create a firewall rule with the [az sql server firewall-rule create](/cli/azure/sql/server/firewall-rule) command. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/create-and-configure-database/create-and-configure-database.sh" id="CreateFirewallRule"::: - -### Create a single database - -Create a database with the [az sql db create](/cli/azure/sql/db) command in the [serverless compute tier](serverless-tier-overview.md). - -```azurecli -echo "Creating $database in serverless tier" -az sql db create \ - --resource-group $resourceGroup \ - --server $server \ - --name $database \ - --sample-name AdventureWorksLT \ - --edition GeneralPurpose \ - --compute-model Serverless \ - --family Gen5 \ - --capacity 2 -``` - -# [Azure CLI (sql up)](#tab/azure-cli-sql-up) - -The Azure CLI code blocks in this section use the [az sql up](/cli/azure/sql#az-sql-up) command to simplify the database creation process. With it, you can create a database and all of its associated resources with a single command. 
This includes the resource group, server name, server location, database name, and login information. The database is created with a default pricing tier of General Purpose, Provisioned, Gen5, 2 vCores. - -[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment-h3.md](../../../includes/azure-cli-prepare-your-environment-h3.md)] - -[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../includes/cli-launch-cloud-shell-sign-in.md)] - -### Set parameter values - -The following values are used in subsequent commands to create the database and required resources. Server names need to be globally unique across all of Azure so the $RANDOM function is used to create the server name. - -Change the location as appropriate for your environment. Replace `0.0.0.0` with the IP address range to match your specific environment. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/create-and-configure-database/create-and-configure-database.sh" id="SetParameterValues"::: - -> [!NOTE] -> [az sql up](/cli/azure/sql#az-sql-up) is currently in preview and does not currently support the serverless compute tier. Also, the use of non-alphabetic and non-numeric characters in the database name are not currently supported. - -### Create a database and resources - -Use the [az sql up](/cli/azure/sql#az-sql-up) command to create and configure a [logical server](logical-servers.md) for Azure SQL Database for immediate use. Make sure to record the generated resource group and server names, so you can manage these resources later. - -> [!NOTE] -> When running the `az sql up` command for the first time, Azure CLI prompts you to install the `db-up` extension. This extension is currently in preview. Accept the installation to continue. For more information about extensions, see [Use extensions with Azure CLI](/cli/azure/azure-cli-extensions-overview). - -1. Run the `az sql up` command. 
If any required parameters aren't used, like `--server-name`, that resource is created with a random name and login information assigned to it. - - ```azurecli - az sql up \ - --resource-group $resourceGroup \ - --location $location \ - --server-name $server \ - --database-name $database \\ - --admin-user $login \ - --admin-password $password - ``` - -2. A server firewall rule is automatically created. If the server declines your IP address, create a new firewall rule using the `az sql server firewall-rule create` command and specifying appropriate start and end IP addresses. - - ```azurecli - startIp=0.0.0.0 - endIp=0.0.0.0 - az sql server firewall-rule create \ - --resource-group $resourceGroup \ - --server $server \ - -n AllowYourIp \ - --start-ip-address $startIp \ - --end-ip-address $endIp - - ``` - -3. All required resources are created, and the database is ready for queries. - -# [PowerShell](#tab/azure-powershell) - -You can create a resource group, server, and single database using Azure PowerShell. - -### Launch Azure Cloud Shell - -The Azure Cloud Shell is a free interactive shell that you can use to run the steps in this article. It has common Azure tools preinstalled and configured to use with your account. - -To open the Cloud Shell, just select **Try it** from the upper right corner of a code block. You can also launch Cloud Shell in a separate browser tab by going to [https://shell.azure.com](https://shell.azure.com). - -When Cloud Shell opens, verify that **PowerShell** is selected for your environment. Subsequent sessions will use Azure CLI in a Bash environment, Select **Copy** to copy the blocks of code, paste it into the Cloud Shell, and press **Enter** to run it. - -### Set parameter values - -The following values are used in subsequent commands to create the database and required resources. Server names need to be globally unique across all of Azure so the Get-Random cmdlet is used to create the server name. 
Replace the 0.0.0.0 values in the ip address range to match your specific environment. - -```azurepowershell-interactive - # Set variables for your server and database - $resourceGroupName = "myResourceGroup" - $location = "eastus" - $adminLogin = "azureuser" - $password = "Azure1234567!" - $serverName = "mysqlserver-$(Get-Random)" - $databaseName = "mySampleDatabase" - - # The ip address range that you want to allow to access your server - $startIp = "0.0.0.0" - $endIp = "0.0.0.0" - - # Show randomized variables - Write-host "Resource group name is" $resourceGroupName - Write-host "Server name is" $serverName - - -``` - -### Create resource group - -Create an Azure resource group with [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup). A resource group is a logical container into which Azure resources are deployed and managed. - -```azurepowershell-interactive - Write-host "Creating resource group..." - $resourceGroup = New-AzResourceGroup -Name $resourceGroupName -Location $location -Tag @{Owner="SQLDB-Samples"} - $resourceGroup -``` - -### Create a server - -Create a server with the [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) cmdlet. - -```azurepowershell-interactive - Write-host "Creating primary server..." - $server = New-AzSqlServer -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -Location $location ` - -SqlAdministratorCredentials $(New-Object -TypeName System.Management.Automation.PSCredential ` - -ArgumentList $adminLogin, $(ConvertTo-SecureString -String $password -AsPlainText -Force)) - $server -``` - -### Create a firewall rule - -Create a server firewall rule with the [New-AzSqlServerFirewallRule](/powershell/module/az.sql/new-azsqlserverfirewallrule) cmdlet. - -```azurepowershell-interactive - Write-host "Configuring server firewall rule..." 
- $serverFirewallRule = New-AzSqlServerFirewallRule -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -FirewallRuleName "AllowedIPs" -StartIpAddress $startIp -EndIpAddress $endIp - $serverFirewallRule -``` - -### Create a single database with PowerShell - -Create a single database with the [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase) cmdlet. - -```azurepowershell-interactive - Write-host "Creating a gen5 2 vCore serverless database..." - $database = New-AzSqlDatabase -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -DatabaseName $databaseName ` - -Edition GeneralPurpose ` - -ComputeModel Serverless ` - -ComputeGeneration Gen5 ` - -VCore 2 ` - -MinimumCapacity 2 ` - -SampleName "AdventureWorksLT" - $database -``` - ---- - -## Query the database - -Once your database is created, you can use the **Query editor (preview)** in the Azure portal to connect to the database and query data. - -1. In the portal, search for and select **SQL databases**, and then select your database from the list. -1. On the page for your database, select **Query editor (preview)** in the left menu. -1. Enter your server admin login information, and select **OK**. - - :::image type="content" source="./media/single-database-create-quickstart/query-editor-login.png" alt-text="Sign in to Query editor"::: - -1. Enter the following query in the **Query editor** pane. - - ```sql - SELECT TOP 20 pc.Name as CategoryName, p.name as ProductName - FROM SalesLT.ProductCategory pc - JOIN SalesLT.Product p - ON pc.productcategoryid = p.productcategoryid; - ``` - -1. Select **Run**, and then review the query results in the **Results** pane. - - :::image type="content" source="./media/single-database-create-quickstart/query-editor-results.png" alt-text="Query editor results" lightbox="media/single-database-create-quickstart/query-editor-results.png"::: - -1. 
Close the **Query editor** page, and select **OK** when prompted to discard your unsaved edits. - -## Clean up resources - -Keep the resource group, server, and single database to go on to the next steps, and learn how to connect and query your database with different methods. - -When you're finished using these resources, you can delete the resource group you created, which will also delete the server and single database within it. - -# [Portal](#tab/azure-portal) - -To delete **myResourceGroup** and all its resources using the Azure portal: - -1. In the portal, search for and select **Resource groups**, and then select **myResourceGroup** from the list. -1. On the resource group page, select **Delete resource group**. -1. Under **Type the resource group name**, enter *myResourceGroup*, and then select **Delete**. - -# [Azure CLI](#tab/azure-cli) - -Use the following command to remove the resource group and all resources associated with it using the [az group delete](/cli/azure/vm/extension#az-vm-extension-set) command - unless you have an ongoing need for these resources. Some of these resources may take a while to create, as well as to delete. 
- -```azurecli -az group delete --name $resourceGroup -``` - -# [Azure CLI (sql up)](#tab/azure-cli-sql-up) - -[!INCLUDE [cli-clean-up-resources.md](../../../includes/cli-clean-up-resources.md)] - -```azurecli -az group delete --name $resourceGroup -``` - -# [PowerShell](#tab/azure-powershell) - -To delete the resource group and all its resources, run the following PowerShell cmdlet, using the name of your resource group: - -```azurepowershell-interactive -Remove-AzResourceGroup -Name $resourceGroupName -``` - ---- - -## Next steps - -[Connect and query](connect-query-content-reference-guide.md) your database using different tools and languages: -> [!div class="nextstepaction"] -> [Connect and query using SQL Server Management Studio](connect-query-ssms.md) -> -> [Connect and query using Azure Data Studio](/sql/azure-data-studio/quickstart-sql-database?toc=/azure/sql-database/toc.json) - -Want to optimize and save on your cloud spending? - -> [!div class="nextstepaction"] -> [Start analyzing costs with Cost Management](../../cost-management-billing/costs/quick-acm-cost-analysis.md?WT.mc_id=costmanagementcontent_docsacmhorizontal_-inproduct-learn) diff --git a/articles/azure-sql/database/single-database-manage.md b/articles/azure-sql/database/single-database-manage.md deleted file mode 100644 index a61531bab0cba..0000000000000 --- a/articles/azure-sql/database/single-database-manage.md +++ /dev/null @@ -1,160 +0,0 @@ ---- -title: Create & manage servers and single databases -description: Learn about creating and managing servers and single databases in Azure SQL Database using the Azure portal, PowerShell, the Azure CLI, Transact-SQL (T-SQL), and Rest-API. 
-services: sql-database -ms.service: sql-database -ms.subservice: deployment-configuration -ms.custom: sqldbrb=1, devx-track-azurecli -ms.topic: conceptual -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 03/12/2019 ---- -# Create and manage servers and single databases in Azure SQL Database - -You can create and manage servers and single databases in Azure SQL Database using the Azure portal, PowerShell, the Azure CLI, REST API, and Transact-SQL. - -## The Azure portal - -You can create the resource group for Azure SQL Database ahead of time or while creating the server itself. - -### Create a server - -To create a server using the [Azure portal](https://portal.azure.com), create a new [server](logical-servers.md) resource from Azure Marketplace. Alternatively, you can create the server when you deploy an Azure SQL Database. - - ![create server](./media/single-database-manage/create-logical-sql-server.png) - -### Create a blank or sample database - -To create a single Azure SQL Database using the [Azure portal](https://portal.azure.com), choose the Azure SQL Database resource in Azure Marketplace. You can create the resource group and server ahead of time or while creating the single database itself. You can create a blank database or create a sample database based on Adventure Works LT. - - ![create database-1](./media/single-database-manage/create-database-1.png) - -> [!IMPORTANT] -> For information on selecting the pricing tier for your database, see [DTU-based purchasing model](service-tiers-dtu.md) and [vCore-based purchasing model](service-tiers-vcore.md). - -## Manage an existing server - -To manage an existing server, navigate to the server using a number of methods - such as from a specific database page, the **SQL servers** page, or the **All resources** page. - -To manage an existing database, navigate to the **SQL databases** page and select the database you wish to manage. 
The following screenshot shows how to begin setting a server-level firewall for a database from the **Overview** page for a database. - - ![server firewall rule](./media/single-database-manage/server-firewall-rule.png) - -> [!IMPORTANT] -> To configure performance properties for a database, see [DTU-based purchasing model](service-tiers-dtu.md) and [vCore-based purchasing model](service-tiers-vcore.md). -> [!TIP] -> For an Azure portal quickstart, see [Create a database in SQL Database in the Azure portal](single-database-create-quickstart.md). - -## PowerShell - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported by Azure SQL Database, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. - -To create and manage servers, single and pooled databases, and server-level firewalls with Azure PowerShell, use the following PowerShell cmdlets. If you need to install or upgrade PowerShell, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). - -> [!TIP] -> For PowerShell example scripts, see [Use PowerShell to create a database in SQL Database and configure a server-level firewall rule](scripts/create-and-configure-database-powershell.md) and [Monitor and scale a database in SQL Database using PowerShell](scripts/monitor-and-scale-database-powershell.md). 
- -| Cmdlet | Description | -| --- | --- | -|[New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase)|Creates a database | -|[Get-AzSqlDatabase](/powershell/module/az.sql/get-azsqldatabase)|Gets one or more databases| -|[Set-AzSqlDatabase](/powershell/module/az.sql/set-azsqldatabase)|Sets properties for a database, or moves an existing database into an elastic pool| -|[Remove-AzSqlDatabase](/powershell/module/az.sql/remove-azsqldatabase)|Removes a database| -|[New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup)|Creates a resource group| -|[New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver)|Creates a server| -|[Get-AzSqlServer](/powershell/module/az.sql/get-azsqlserver)|Returns information about servers| -|[Set-AzSqlServer](/powershell/module/az.sql/set-azsqlserver)|Modifies properties of a server| -|[Remove-AzSqlServer](/powershell/module/az.sql/remove-azsqlserver)|Removes a server| -|[New-AzSqlServerFirewallRule](/powershell/module/az.sql/new-azsqlserverfirewallrule)|Creates a server-level firewall rule | -|[Get-AzSqlServerFirewallRule](/powershell/module/az.sql/get-azsqlserverfirewallrule)|Gets firewall rules for a server| -|[Set-AzSqlServerFirewallRule](/powershell/module/az.sql/set-azsqlserverfirewallrule)|Modifies a firewall rule in a server| -|[Remove-AzSqlServerFirewallRule](/powershell/module/az.sql/remove-azsqlserverfirewallrule)|Deletes a firewall rule from a server.| -| New-AzSqlServerVirtualNetworkRule | Creates a [*virtual network rule*](vnet-service-endpoint-rule-overview.md), based on a subnet that is a Virtual Network service endpoint. | - -## Azure CLI - -To create and manage the servers, databases, and firewalls with [Azure CLI](/cli/azure), use the following [Azure CLI](/cli/azure/sql/db) commands. Use the [Cloud Shell](../../cloud-shell/overview.md) to run Azure CLI in your browser, or [install](/cli/azure/install-azure-cli) it on macOS, Linux, or Windows. 
For creating and managing elastic pools, see [Elastic pools](elastic-pool-overview.md). - -> [!TIP] -> For an Azure CLI quickstart, see [Create a single Azure SQL Database using Azure CLI](az-cli-script-samples-content-guide.md). For Azure CLI example scripts, see [Use CLI to create a database in Azure SQL Database and configure a SQL Database firewall rule](scripts/create-and-configure-database-cli.md) and [Use CLI to monitor and scale a database in Azure SQL Database](scripts/monitor-and-scale-database-cli.md). -> - -| Cmdlet | Description | -| --- | --- | -|[az sql db create](/cli/azure/sql/db#az-sql-db-create) |Creates a database| -|[az sql db list](/cli/azure/sql/db#az-sql-db-list)|Lists all databases and data warehouses in a server, or all databases in an elastic pool| -|[az sql db list-editions](/cli/azure/sql/db#az-sql-db-list-editions)|Lists available service objectives and storage limits| -|[az sql db list-usages](/cli/azure/sql/db#az-sql-db-list-usages)|Returns database usages| -|[az sql db show](/cli/azure/sql/db#az-sql-db-show)|Gets a database or data warehouse| -|[az sql db update](/cli/azure/sql/db#az-sql-db-update)|Updates a database| -|[az sql db delete](/cli/azure/sql/db#az-sql-db-delete)|Removes a database| -|[az group create](/cli/azure/group#az-group-create)|Creates a resource group| -|[az sql server create](/cli/azure/sql/server#az-sql-server-create)|Creates a server| -|[az sql server list](/cli/azure/sql/server#az-sql-server-list)|Lists servers| -|[az sql server list-usages](/cli/azure/sql/server#az-sql-server-list-usages)|Returns server usages| -|[az sql server show](/cli/azure/sql/server#az-sql-server-show)|Gets a server| -|[az sql server update](/cli/azure/sql/server#az-sql-server-update)|Updates a server| -|[az sql server delete](/cli/azure/sql/server#az-sql-server-delete)|Deletes a server| -|[az sql server firewall-rule create](/cli/azure/sql/server/firewall-rule#az-sql-server-firewall-rule-create)|Creates a server firewall rule| -|[az 
sql server firewall-rule list](/cli/azure/sql/server/firewall-rule#az-sql-server-firewall-rule-list)|Lists the firewall rules on a server| -|[az sql server firewall-rule show](/cli/azure/sql/server/firewall-rule#az-sql-server-firewall-rule-show)|Shows the detail of a firewall rule| -|[az sql server firewall-rule update](/cli/azure/sql/server/firewall-rule##az-sql-server-firewall-rule-update)|Updates a firewall rule| -|[az sql server firewall-rule delete](/cli/azure/sql/server/firewall-rule#az-sql-server-firewall-rule-delete)|Deletes a firewall rule| - -## Transact-SQL (T-SQL) - -To create and manage the servers, databases, and firewalls with Transact-SQL, use the following T-SQL commands. You can issue these commands using the Azure portal, [SQL Server Management Studio](/sql/ssms/use-sql-server-management-studio), [Visual Studio Code](https://code.visualstudio.com/docs), or any other program that can connect to a server in SQL Database and pass Transact-SQL commands. For managing elastic pools, see [Elastic pools](elastic-pool-overview.md). - -> [!TIP] -> For a quickstart using SQL Server Management Studio on Microsoft Windows, see [Azure SQL Database: Use SQL Server Management Studio to connect and query data](connect-query-ssms.md). For a quickstart using Visual Studio Code on the macOS, Linux, or Windows, see [Azure SQL Database: Use Visual Studio Code to connect and query data](connect-query-vscode.md). -> [!IMPORTANT] -> You cannot create or delete a server using Transact-SQL. - -| Command | Description | -| --- | --- | -|[CREATE DATABASE](/sql/t-sql/statements/create-database-transact-sql?view=azuresqldb-current&preserve-view=true)|Creates a new single database. You must be connected to the master database to create a new database.| -| [ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql?view=azuresqldb-current&preserve-view=true) |Modifies a database or elastic pool. 
| -|[DROP DATABASE](/sql/t-sql/statements/drop-database-transact-sql)|Deletes a database.| -|[sys.database_service_objectives](/sql/relational-databases/system-catalog-views/sys-database-service-objectives-azure-sql-database)|Returns the edition (service tier), service objective (pricing tier), and elastic pool name, if any, for Azure SQL Database or a dedicated SQL pool in Azure Synapse Analytics. If logged on to the master database in a server in SQL Database, returns information on all databases. For Azure Synapse Analytics, you must be connected to the master database.| -|[sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database)| Returns CPU, IO, and memory consumption for a database in Azure SQL Database. One row exists for every 15 seconds, even if there's no activity in the database.| -|[sys.resource_stats](/sql/relational-databases/system-catalog-views/sys-resource-stats-azure-sql-database)|Returns CPU usage and storage data for a database in Azure SQL Database. The data is collected and aggregated within five-minute intervals.| -|[sys.database_connection_stats](/sql/relational-databases/system-catalog-views/sys-database-connection-stats-azure-sql-database)|Contains statistics for SQL Database connectivity events, providing an overview of database connection successes and failures. | -|[sys.event_log](/sql/relational-databases/system-catalog-views/sys-event-log-azure-sql-database)|Returns successful Azure SQL Database connections and connection failures. You can use this information to track or troubleshoot your database activity with SQL Database.| -|[sp_set_firewall_rule](/sql/relational-databases/system-stored-procedures/sp-set-firewall-rule-azure-sql-database)|Creates or updates the server-level firewall settings for your server. This stored procedure is only available in the master database to the server-level principal login. 
A server-level firewall rule can only be created using Transact-SQL after the first server-level firewall rule has been created by a user with Azure-level permissions| -|[sys.firewall_rules](/sql/relational-databases/system-catalog-views/sys-firewall-rules-azure-sql-database)|Returns information about the server-level firewall settings associated with your database in Azure SQL Database.| -|[sp_delete_firewall_rule](/sql/relational-databases/system-stored-procedures/sp-delete-firewall-rule-azure-sql-database)|Removes server-level firewall settings from your server. This stored procedure is only available in the master database to the server-level principal login.| -|[sp_set_database_firewall_rule](/sql/relational-databases/system-stored-procedures/sp-set-database-firewall-rule-azure-sql-database)|Creates or updates the database-level firewall rules for your database in Azure SQL Database. Database firewall rules can be configured for the master database, and for user databases on SQL Database. Database firewall rules are useful when using contained database users. | -|[sys.database_firewall_rules](/sql/relational-databases/system-catalog-views/sys-database-firewall-rules-azure-sql-database)|Returns information about the database-level firewall settings associated with your database in Azure SQL Database. | -|[sp_delete_database_firewall_rule](/sql/relational-databases/system-stored-procedures/sp-delete-database-firewall-rule-azure-sql-database)|Removes database-level firewall setting from a database. | - -## REST API - -To create and manage the servers, databases, and firewalls, use these REST API requests. 
- -| Command | Description | -| --- | --- | -|[Servers - Create or update](/rest/api/sql/servers/createorupdate)|Creates or updates a new server.| -|[Servers - Delete](/rest/api/sql/servers/delete)|Deletes a SQL server.| -|[Servers - Get](/rest/api/sql/servers/get)|Gets a server.| -|[Servers - List](/rest/api/sql/servers/list)|Returns a list of servers in a subscription.| -|[Servers - List by resource group](/rest/api/sql/servers/listbyresourcegroup)|Returns a list of servers in a resource group.| -|[Servers - Update](/rest/api/sql/servers/update)|Updates an existing server.| -|[Databases - Create or update](/rest/api/sql/databases/createorupdate)|Creates a new database or updates an existing database.| -|[Databases - Delete](/rest/api/sql/databases/delete)|Deletes a database.| -|[Databases - Get](/rest/api/sql/databases/get)|Gets a database.| -|[Databases - List by elastic pool](/rest/api/sql/databases/listbyelasticpool)|Returns a list of databases in an elastic pool.| -|[Databases - List by server](/rest/api/sql/databases/listbyserver)|Returns a list of databases in a server.| -|[Databases - Update](/rest/api/sql/databases/update)|Updates an existing database.| -|[Firewall rules - Create or update](/rest/api/sql/firewallrules/createorupdate)|Creates or updates a firewall rule.| -|[Firewall rules - Delete](/rest/api/sql/firewallrules/delete)|Deletes a firewall rule.| -|[Firewall rules - Get](/rest/api/sql/firewallrules/get)|Gets a firewall rule.| -|[Firewall rules - List by server](/rest/api/sql/firewallrules/listbyserver)|Returns a list of firewall rules.| - -## Next steps - -- To learn about migrating a SQL Server database to Azure, see [Migrate to Azure SQL Database](migrate-to-database-from-sql-server.md). -- For information about supported features, see [Features](features-comparison.md). 
diff --git a/articles/azure-sql/database/single-database-overview.md b/articles/azure-sql/database/single-database-overview.md deleted file mode 100644 index 9bbdc76a800b6..0000000000000 --- a/articles/azure-sql/database/single-database-overview.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: What is a single database? -description: Learn about the single database resource type in Azure SQL Database. -services: sql-database -ms.service: sql-database -ms.subservice: service-overview -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 04/08/2019 ---- -# What is a single database in Azure SQL Database? -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -The single database resource type creates a database in Azure SQL Database with its own set of resources and is managed via a [server](logical-servers.md). With a single database, each database is isolated, using a dedicated database engine. Each has its own service tier within the [DTU-based purchasing model](service-tiers-dtu.md) or [vCore-based purchasing model](service-tiers-vcore.md) and a compute size defining the resources allocated to the database engine. - -Single database is a deployment model for Azure SQL Database. The other is [elastic pools](elastic-pool-overview.md). - -## Dynamic scalability - -You can build your first app on a small, single database at low cost in the serverless compute tier or a small compute size in the provisioned compute tier. You change the [compute or service tier](single-database-scale.md) manually or programmatically at any time to meet the needs of your solution. You can adjust performance without downtime to your app or to your customers. Dynamic scalability enables your database to transparently respond to rapidly changing resource requirements and enables you to only pay for the resources that you need when you need them. 
- -## Single databases and elastic pools - -A single database can be moved into or out of an [elastic pool](elastic-pool-overview.md) for resource sharing. For many businesses and applications, being able to create single databases and dial performance up or down on demand is enough, especially if usage patterns are relatively predictable. But if you have unpredictable usage patterns, it can make it hard to manage costs and your business model. Elastic pools are designed to solve this problem. The concept is simple. You allocate performance resources to a pool rather than an individual database and pay for the collective performance resources of the pool rather than for single database performance. - -## Monitoring and alerting - -You use the built-in [performance monitoring](performance-guidance.md) and [alerting tools](alerts-insights-configure-portal.md), combined with the performance ratings. Using these tools, you can quickly assess the impact of scaling up or down based on your current or project performance needs. Additionally, SQL Database can [emit metrics and resource logs](metrics-diagnostic-telemetry-logging-streaming-export-configure.md) for easier monitoring. - -## Availability capabilities - -Single databases and elastic pools provide many availability characteristics. For information, see [Availability characteristics](sql-database-paas-overview.md#availability-capabilities). - -## Transact-SQL differences - -Most Transact-SQL features that applications use are fully supported in both Microsoft SQL Server and Azure SQL Database. For example, the core SQL components such as data types, operators, string, arithmetic, logical, and cursor functions, work identically in SQL Server and SQL Database. There are, however, a few T-SQL differences in DDL (data-definition language) and DML (data manipulation language) elements resulting in T-SQL statements and queries that are only partially supported (which we discuss later in this article). 
- -In addition, there are some features and syntax that are not supported because Azure SQL Database is designed to isolate features from dependencies on the master database and the operating system. As such, most server-level activities are inappropriate for SQL Database. T-SQL statements and options are not available if they configure server-level options, configure operating system components, or specify file system configuration. When such capabilities are required, an appropriate alternative is often available in some other way from SQL Database or from another Azure feature or service. - -For more information, see [Resolving Transact-SQL differences during migration to SQL Database](transact-sql-tsql-differences-sql-server.md). - -## Security - -SQL Database provides a range of [built-in security and compliance](security-overview.md) features to help your application meet various security and compliance requirements. - -> [!IMPORTANT] -> Azure SQL Database has been certified against a number of compliance standards. For more information, see the [Microsoft Azure Trust Center](https://gallery.technet.microsoft.com/Overview-of-Azure-c1be3942), where you can find the most current list of SQL Database compliance certifications. - -## Next steps - -- To quickly get started with a single database, start with the [Single database quickstart guide](quickstart-content-reference-guide.md). -- To learn about migrating a SQL Server database to Azure, see [Migrate to Azure SQL Database](migrate-to-database-from-sql-server.md). -- For information about supported features, see [Features](features-comparison.md). 
diff --git a/articles/azure-sql/database/single-database-scale.md b/articles/azure-sql/database/single-database-scale.md deleted file mode 100644 index 01d2359987bfd..0000000000000 --- a/articles/azure-sql/database/single-database-scale.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -title: Scale single database resources -description: This article describes how to scale the compute and storage resources available for a single database in Azure SQL Database. -services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.custom: sqldbrb=1, references_regions, devx-track-azurepowershell -ms.devlang: -ms.topic: conceptual -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, mathoma -ms.date: 04/09/2021 ---- -# Scale single database resources in Azure SQL Database - -This article describes how to scale the compute and storage resources available for an Azure SQL Database in the provisioned compute tier. Alternatively, the [serverless compute tier](serverless-tier-overview.md) provides compute autoscaling and bills per second for compute used. - -After initially picking the number of vCores or DTUs, you can scale a single database up or down dynamically based on actual experience using: - -* [Transact-SQL](/sql/t-sql/statements/alter-database-transact-sql#overview-sql-database) -* [Azure portal](single-database-manage.md#the-azure-portal) -* [PowerShell](/powershell/module/az.sql/set-azsqldatabase) -* [Azure CLI](/cli/azure/sql/db#az-sql-db-update) -* [REST API](/rest/api/sql/databases/update) - -> [!IMPORTANT] -> Under some circumstances, you may need to shrink a database to reclaim unused space. For more information, see [Manage file space in Azure SQL Database](file-space-manage.md). - -## Impact - -Changing the service tier or compute size of mainly involves the service performing the following steps: - -1. Create a new compute instance for the database. 
- - A new compute instance is created with the requested service tier and compute size. For some combinations of service tier and compute size changes, a replica of the database must be created in the new compute instance, which involves copying data and can strongly influence the overall latency. Regardless, the database remains online during this step, and connections continue to be directed to the database in the original compute instance. - -2. Switch routing of connections to a new compute instance. - - Existing connections to the database in the original compute instance are dropped. Any new connections are established to the database in the new compute instance. For some combinations of service tier and compute size changes, database files are detached and reattached during the switch. Regardless, the switch can result in a brief service interruption when the database is unavailable generally for less than 30 seconds and often for only a few seconds. If there are long-running transactions running when connections are dropped, the duration of this step may take longer in order to recover aborted transactions. [Accelerated Database Recovery](../accelerated-database-recovery.md) can reduce the impact from aborting long running transactions. - -> [!IMPORTANT] -> No data is lost during any step in the workflow. Make sure that you have implemented some [retry logic](troubleshoot-common-connectivity-issues.md) in the applications and components that are using Azure SQL Database while the service tier is changed. - -## Latency - -The estimated latency to change the service tier, scale the compute size of a single database or elastic pool, move a database in/out of an elastic pool, or move a database between elastic pools is parameterized as follows: - -|Service tier|Basic single database,
    Standard (S0-S1)|Basic elastic pool,
    Standard (S2-S12),
    General Purpose single database or elastic pool|Premium or Business Critical single database or elastic pool|Hyperscale -|:---|:---|:---|:---|:---| -|**Basic single database,
    Standard (S0-S1)**|•  Constant time latency independent of space used
    •  Typically, less than 5 minutes|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used| -|**Basic elastic pool,
    Standard (S2-S12),
    General Purpose single database or elastic pool**|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used|•  For single databases, constant time latency independent of space used
    •  Typically, less than 5 minutes for single databases
    •  For elastic pools, proportional to the number of databases|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used| -|**Premium or Business Critical single database or elastic pool**|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used|•  Latency proportional to database space used due to data copying
    •  Typically, less than 1 minute per GB of space used| -|**Hyperscale**|N/A|N/A|N/A|•  Constant time latency independent of space used
    •  Typically, less than 2 minutes| - -> [!NOTE] -> Additionally, for Standard (S2-S12) and General Purpose databases, latency for moving a database in/out of an elastic pool or between elastic pools will be proportional to database size if the database is using Premium File Share ([PFS](../../storage/files/storage-files-introduction.md)) storage. -> -> To determine if a database is using PFS storage, execute the following query in the context of the database. If the value in the AccountType column is `PremiumFileStorage` or `PremiumFileStorage-ZRS`, the database is using PFS storage. - -```sql -SELECT s.file_id, - s.type_desc, - s.name, - FILEPROPERTYEX(s.name, 'AccountType') AS AccountType -FROM sys.database_files AS s -WHERE s.type_desc IN ('ROWS', 'LOG'); -``` - -> [!NOTE] -> The zone redundant property will remain the same by default when scaling from the Business Critical to the General Purpose tier. Latency for this downgrade when zone redundancy is enabled as well as latency for switching to zone redundancy for the General Purpose tier will be proportional to database size. - -> [!TIP] -> To monitor in-progress operations, see: [Manage operations using the SQL REST API](/rest/api/sql/operations/list), [Manage operations using CLI](/cli/azure/sql/db/op), [Monitor operations using T-SQL](/sql/relational-databases/system-dynamic-management-views/sys-dm-operation-status-azure-sql-database) and these two PowerShell commands: [Get-AzSqlDatabaseActivity](/powershell/module/az.sql/get-azsqldatabaseactivity) and [Stop-AzSqlDatabaseActivity](/powershell/module/az.sql/stop-azsqldatabaseactivity). - -## Cancelling changes - -A service tier change or compute rescaling operation can be canceled. 
- -### The Azure portal - -In the database overview blade, navigate to **Notifications** and click on the tile indicating there's an ongoing operation: - -![Ongoing operation](./media/single-database-scale/ongoing-operations.png) - -Next, click on the button labeled **Cancel this operation**. - -![Cancel ongoing operation](./media/single-database-scale/cancel-ongoing-operation.png) - -### PowerShell - -From a PowerShell command prompt, set the `$resourceGroupName`, `$serverName`, and `$databaseName`, and then run the following command: - -```azurecli -$operationName = (az sql db op list --resource-group $resourceGroupName --server $serverName --database $databaseName --query "[?state=='InProgress'].name" --out tsv) -if (-not [string]::IsNullOrEmpty($operationName)) { - (az sql db op cancel --resource-group $resourceGroupName --server $serverName --database $databaseName --name $operationName) - "Operation " + $operationName + " has been canceled" -} -else { - "No service tier change or compute rescaling operation found" -} -``` - -## Additional considerations - -- If you're upgrading to a higher service tier or compute size, the database max size doesn't increase unless you explicitly specify a larger size (maxsize). -- To downgrade a database, the database used space must be smaller than the maximum allowed size of the target service tier and compute size. -- When downgrading from **Premium** to the **Standard** tier, an extra storage cost applies if both (1) the max size of the database is supported in the target compute size, and (2) the max size exceeds the included storage amount of the target compute size. For example, if a P1 database with a max size of 500 GB is downsized to S3, then an extra storage cost applies since S3 supports a max size of 1 TB and its included storage amount is only 250 GB. So, the extra storage amount is 500 GB – 250 GB = 250 GB. 
For pricing of extra storage, see [Azure SQL Database pricing](https://azure.microsoft.com/pricing/details/sql-database/). If the actual amount of space used is less than the included storage amount, then this extra cost can be avoided by reducing the database max size to the included amount. -- When upgrading a database with [geo-replication](active-geo-replication-configure-portal.md) enabled, upgrade its secondary databases to the desired service tier and compute size before upgrading the primary database (general guidance for best performance). When upgrading to a different edition, it's a requirement that the secondary database is upgraded first. -- When downgrading a database with [geo-replication](active-geo-replication-configure-portal.md) enabled, downgrade its primary databases to the desired service tier and compute size before downgrading the secondary database (general guidance for best performance). When downgrading to a different edition, it's a requirement that the primary database is downgraded first. -- The restore service offerings are different for the various service tiers. If you're downgrading to the **Basic** tier, there's a lower backup retention period. See [Azure SQL Database Backups](automated-backups-overview.md). -- The new properties for the database aren't applied until the changes are complete. -- When data copying is required to scale a database (see [Latency](#latency)) when changing the service tier, high resource utilization concurrent to the scaling operation may cause longer scaling times. With [Accelerated Database Recovery (ADR)](/sql/relational-databases/accelerated-database-recovery-concepts), rollback of long running transactions is not a significant source of delay, but high concurrent resource usage may leave less compute, storage, and network bandwidth resources for scaling, particularly for smaller compute sizes. 
- -## Billing - -You're billed for each hour a database exists using the highest service tier + compute size that applied during that hour, regardless of usage or whether the database was active for less than an hour. For example, if you create a single database and delete it five minutes later your bill reflects a charge for one database hour. - -## Change storage size - -### vCore-based purchasing model - -- Storage can be provisioned up to the data storage max size limit using 1-GB increments. The minimum configurable data storage is 1 GB. For data storage max size limits in each service objective, see resource limit documentation pages for [Resource limits for single databases using the vCore purchasing model](resource-limits-vcore-single-databases.md) and [Resource limits for single databases using the DTU purchasing model](resource-limits-dtu-single-databases.md). -- Data storage for a single database can be provisioned by increasing or decreasing its max size using the [Azure portal](https://portal.azure.com), [Transact-SQL](/sql/t-sql/statements/alter-database-transact-sql#examples-1), [PowerShell](/powershell/module/az.sql/set-azsqldatabase), [Azure CLI](/cli/azure/sql/db#az-sql-db-update), or [REST API](/rest/api/sql/databases/update). If the max size value is specified in bytes, it must be a multiple of 1 GB (1073741824 bytes). -- The amount of data that can be stored in the data files of a database is limited by the configured data storage max size. In addition to that storage, Azure SQL Database automatically allocates 30% more storage to be used for the transaction log. -- Azure SQL Database automatically allocates 32 GB per vCore for the `tempdb` database. `tempdb` is located on the local SSD storage in all service tiers. -- The price of storage for a single database or an elastic pool is the sum of data storage and transaction log storage amounts multiplied by the storage unit price of the service tier. The cost of `tempdb` is included in the price. 
For details on storage price, see [Azure SQL Database pricing](https://azure.microsoft.com/pricing/details/sql-database/). - -> [!IMPORTANT] -> Under some circumstances, you may need to shrink a database to reclaim unused space. For more information, see [Manage file space in Azure SQL Database](file-space-manage.md). - -### DTU-based purchasing model - -- The DTU price for a single database includes a certain amount of storage at no additional cost. Extra storage beyond the included amount can be provisioned for an additional cost up to the max size limit in increments of 250 GB up to 1 TB, and then in increments of 256 GB beyond 1 TB. For included storage amounts and max size limits, see [Single database: Storage sizes and compute sizes](resource-limits-dtu-single-databases.md#single-database-storage-sizes-and-compute-sizes). -- Extra storage for a single database can be provisioned by increasing its max size using the Azure portal, [Transact-SQL](/sql/t-sql/statements/alter-database-transact-sql#examples-1), [PowerShell](/powershell/module/az.sql/set-azsqldatabase), the [Azure CLI](/cli/azure/sql/db#az-sql-db-update), or the [REST API](/rest/api/sql/databases/update). -- The price of extra storage for a single database is the extra storage amount multiplied by the extra storage unit price of the service tier. For details on the price of extra storage, see [Azure SQL Database pricing](https://azure.microsoft.com/pricing/details/sql-database/). - -> [!IMPORTANT] -> Under some circumstances, you may need to shrink a database to reclaim unused space. For more information, see [Manage file space in Azure SQL Database](file-space-manage.md). - -### Geo-replicated database - -To change the database size of a replicated secondary database, change the size of the primary database. This change will then be replicated and implemented on the secondary database as well. 
- -## P11 and P15 constraints when max size greater than 1 TB - -More than 1 TB of storage in the Premium tier is currently available in all regions except: China East, China North, Germany Central, and Germany Northeast. In these regions, the storage max in the Premium tier is limited to 1 TB. The following considerations and limitations apply to P11 and P15 databases with a maximum size greater than 1 TB: - -- If the max size for a P11 or P15 database was ever set to a value greater than 1 TB, then can it only be restored or copied to a P11 or P15 database. Subsequently, the database can be rescaled to a different compute size provided the amount of space allocated at the time of the rescaling operation doesn't exceed max size limits of the new compute size. -- For active geo-replication scenarios: - - Setting up a geo-replication relationship: If the primary database is P11 or P15, the secondary(ies) must also be P11 or P15. Lower compute size are rejected as secondaries since they aren't capable of supporting more than 1 TB. - - Upgrading the primary database in a geo-replication relationship: Changing the maximum size to more than 1 TB on a primary database triggers the same change on the secondary database. Both upgrades must be successful for the change on the primary to take effect. Region limitations for the more than 1-TB option apply. If the secondary is in a region that doesn't support more than 1 TB, the primary isn't upgraded. -- Using the Import/Export service for loading P11/P15 databases with more than 1 TB isn't supported. Use SqlPackage.exe to [import](database-import.md) and [export](database-export.md) data. - -## Next steps - -For overall resource limits, see [Azure SQL Database vCore-based resource limits - single databases](resource-limits-vcore-single-databases.md) and [Azure SQL Database DTU-based resource limits - single databases](resource-limits-dtu-single-databases.md). 
diff --git a/articles/azure-sql/database/spark-connector.md b/articles/azure-sql/database/spark-connector.md deleted file mode 100644 index 6622f401c07ce..0000000000000 --- a/articles/azure-sql/database/spark-connector.md +++ /dev/null @@ -1,234 +0,0 @@ ---- -title: Use the Spark connector with Microsoft Azure SQL and SQL Server -description: Learn how to use the Spark Connector with Azure SQL Database, Azure SQL Managed Instance, and SQL Server. -services: sql-database -ms.service: sql-db-mi -ms.subservice: development -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: conceptual -author: LitKnd -ms.author: kendralittle -ms.date: 09/02/2020 ---- -# Accelerate real-time big data analytics using the Spark connector -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -> [!NOTE] -> As of Sep 2020, this connector is not actively maintained. However, [Apache Spark Connector for SQL Server and Azure SQL](/sql/connect/spark/connector) is now available, with support for Python and R bindings, an easier-to use interface to bulk insert data, and many other improvements. We strongly encourage you to evaluate and use the new connector instead of this one. The information about the old connector (this page) is only retained for archival purposes. - -The Spark connector enables databases in Azure SQL Database, Azure SQL Managed Instance, and SQL Server to act as the input data source or output data sink for Spark jobs. It allows you to utilize real-time transactional data in big data analytics and persist results for ad hoc queries or reporting. Compared to the built-in JDBC connector, this connector provides the ability to bulk insert data into your database. It can outperform row-by-row insertion with 10x to 20x faster performance. The Spark connector supports Azure Active Directory (Azure AD) authentication to connect to Azure SQL Database and Azure SQL Managed Instance, allowing you to connect your database from Azure Databricks using your Azure AD account. 
It provides similar interfaces with the built-in JDBC connector. It is easy to migrate your existing Spark jobs to use this new connector. - -## Download and build a Spark connector - -The GitHub repo for the old connector previously linked to from this page is not actively maintained. Instead, we strongly encourage you to evaluate and use the [new connector](https://github.com/microsoft/sql-spark-connector). - -### Official supported versions - -| Component | Version | -| :----------------------------------- | :----------------------- | -| Apache Spark | 2.0.2 or later | -| Scala | 2.10 or later | -| Microsoft JDBC Driver for SQL Server | 6.2 or later | -| Microsoft SQL Server | SQL Server 2008 or later | -| Azure SQL Database | Supported | -| Azure SQL Managed Instance | Supported | - -The Spark connector utilizes the Microsoft JDBC Driver for SQL Server to move data between Spark worker nodes and databases: - -The dataflow is as follows: - -1. The Spark master node connects to databases in SQL Database or SQL Server and loads data from a specific table or using a specific SQL query. -2. The Spark master node distributes data to worker nodes for transformation. -3. The Worker node connects to databases that connect to SQL Database and SQL Server and writes data to the database. User can choose to use row-by-row insertion or bulk insert. - -The following diagram illustrates the data flow. - - ![Diagram shows the described flow, with a master node connecting directly to the database and connecting to three worker nodes, which connect to the database.](./media/spark-connector/architecture.png) - -### Build the Spark connector - -Currently, the connector project uses maven. 
To build the connector without dependencies, you can run: - -- mvn clean package -- Download the latest versions of the JAR from the release folder -- Include the SQL Database Spark JAR - -## Connect and read data using the Spark connector - -You can connect to databases in SQL Database and SQL Server from a Spark job to read or write data. You can also run a DML or DDL query in databases in SQL Database and SQL Server. - -### Read data from Azure SQL and SQL Server - -```scala -import com.microsoft.azure.sqldb.spark.config.Config -import com.microsoft.azure.sqldb.spark.connect._ - -val config = Config(Map( - "url" -> "mysqlserver.database.windows.net", - "databaseName" -> "MyDatabase", - "dbTable" -> "dbo.Clients", - "user" -> "username", - "password" -> "*********", - "connectTimeout" -> "5", //seconds - "queryTimeout" -> "5" //seconds -)) - -val collection = sqlContext.read.sqlDB(config) -collection.show() -``` - -### Read data from Azure SQL and SQL Server with specified SQL query - -```scala -import com.microsoft.azure.sqldb.spark.config.Config -import com.microsoft.azure.sqldb.spark.connect._ - -val config = Config(Map( - "url" -> "mysqlserver.database.windows.net", - "databaseName" -> "MyDatabase", - "queryCustom" -> "SELECT TOP 100 * FROM dbo.Clients WHERE PostalCode = 98074" //Sql query - "user" -> "username", - "password" -> "*********", -)) - -//Read all data in table dbo.Clients -val collection = sqlContext.read.sqlDB(config) -collection.show() -``` - -### Write data to Azure SQL and SQL Server - -```scala -import com.microsoft.azure.sqldb.spark.config.Config -import com.microsoft.azure.sqldb.spark.connect._ - -// Aquire a DataFrame collection (val collection) - -val config = Config(Map( - "url" -> "mysqlserver.database.windows.net", - "databaseName" -> "MyDatabase", - "dbTable" -> "dbo.Clients", - "user" -> "username", - "password" -> "*********" -)) - -import org.apache.spark.sql.SaveMode -collection.write.mode(SaveMode.Append).sqlDB(config) -``` - 
-### Run DML or DDL query in Azure SQL and SQL Server - -```scala -import com.microsoft.azure.sqldb.spark.config.Config -import com.microsoft.azure.sqldb.spark.query._ -val query = """ - |UPDATE Customers - |SET ContactName = 'Alfred Schmidt', City = 'Frankfurt' - |WHERE CustomerID = 1; - """.stripMargin - -val config = Config(Map( - "url" -> "mysqlserver.database.windows.net", - "databaseName" -> "MyDatabase", - "user" -> "username", - "password" -> "*********", - "queryCustom" -> query -)) - -sqlContext.sqlDBQuery(config) -``` - -## Connect from Spark using Azure AD authentication - -You can connect to Azure SQL Database and SQL Managed Instance using Azure AD authentication. Use Azure AD authentication to centrally manage identities of database users and as an alternative to SQL Server authentication. - -### Connecting using ActiveDirectoryPassword Authentication Mode - -#### Setup requirement - -If you are using the ActiveDirectoryPassword authentication mode, you need to download [azure-activedirectory-library-for-java](https://github.com/AzureAD/azure-activedirectory-library-for-java) and its dependencies, and include them in the Java build path. - -```scala -import com.microsoft.azure.sqldb.spark.config.Config -import com.microsoft.azure.sqldb.spark.connect._ - -val config = Config(Map( - "url" -> "mysqlserver.database.windows.net", - "databaseName" -> "MyDatabase", - "user" -> "username", - "password" -> "*********", - "authentication" -> "ActiveDirectoryPassword", - "encrypt" -> "true" -)) - -val collection = sqlContext.read.sqlDB(config) -collection.show() -``` - -### Connecting using an access token - -#### Setup requirement - -If you are using the access token-based authentication mode, you need to download [azure-activedirectory-library-for-java](https://github.com/AzureAD/azure-activedirectory-library-for-java) and its dependencies, and include them in the Java build path. 
- -See [Use Azure Active Directory Authentication for authentication](authentication-aad-overview.md) to learn how to get an access token to your database in Azure SQL Database or Azure SQL Managed Instance. - -```scala -import com.microsoft.azure.sqldb.spark.config.Config -import com.microsoft.azure.sqldb.spark.connect._ - -val config = Config(Map( - "url" -> "mysqlserver.database.windows.net", - "databaseName" -> "MyDatabase", - "accessToken" -> "access_token", - "hostNameInCertificate" -> "*.database.windows.net", - "encrypt" -> "true" -)) - -val collection = sqlContext.read.sqlDB(config) -collection.show() -``` - -## Write data using bulk insert - -The traditional jdbc connector writes data into your database using row-by-row insertion. You can use the Spark connector to write data to Azure SQL and SQL Server using bulk insert. It significantly improves the write performance when loading large data sets or loading data into tables where a column store index is used. - -```scala -import com.microsoft.azure.sqldb.spark.bulkcopy.BulkCopyMetadata -import com.microsoft.azure.sqldb.spark.config.Config -import com.microsoft.azure.sqldb.spark.connect._ - -/** - Add column Metadata. - If not specified, metadata is automatically added - from the destination table, which may suffer performance. 
-*/ -var bulkCopyMetadata = new BulkCopyMetadata -bulkCopyMetadata.addColumnMetadata(1, "Title", java.sql.Types.NVARCHAR, 128, 0) -bulkCopyMetadata.addColumnMetadata(2, "FirstName", java.sql.Types.NVARCHAR, 50, 0) -bulkCopyMetadata.addColumnMetadata(3, "LastName", java.sql.Types.NVARCHAR, 50, 0) - -val bulkCopyConfig = Config(Map( - "url" -> "mysqlserver.database.windows.net", - "databaseName" -> "MyDatabase", - "user" -> "username", - "password" -> "*********", - "dbTable" -> "dbo.Clients", - "bulkCopyBatchSize" -> "2500", - "bulkCopyTableLock" -> "true", - "bulkCopyTimeout" -> "600" -)) - -df.bulkCopyToSqlDB(bulkCopyConfig, bulkCopyMetadata) -//df.bulkCopyToSqlDB(bulkCopyConfig) if no metadata is specified. -``` - -## Next steps - -If you haven't already, download the Spark connector from [azure-sqldb-spark GitHub repository](https://github.com/Azure/azure-sqldb-spark) and explore the additional resources in the repo: - -- [Sample Azure Databricks notebooks](https://github.com/Azure/azure-sqldb-spark/tree/master/samples/notebooks) -- [Sample scripts (Scala)](https://github.com/Azure/azure-sqldb-spark/tree/master/samples/scripts) - -You might also want to review the [Apache Spark SQL, DataFrames, and Datasets Guide](https://spark.apache.org/docs/latest/sql-programming-guide.html) and the [Azure Databricks documentation](/azure/azure-databricks/). 
\ No newline at end of file diff --git a/articles/azure-sql/database/sql-data-sync-agent-overview.md b/articles/azure-sql/database/sql-data-sync-agent-overview.md deleted file mode 100644 index 2d1a1d246572f..0000000000000 --- a/articles/azure-sql/database/sql-data-sync-agent-overview.md +++ /dev/null @@ -1,335 +0,0 @@ ---- -title: Data Sync Agent for SQL Data Sync -description: Learn how to install and run the Data Sync Agent for SQL Data Sync in Azure to sync data with SQL Server databases -services: sql-database -ms.service: sql-database -ms.subservice: sql-data-sync -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: rothja -ms.author: jroth -ms.reviewer: kendralittle, mathoma -ms.date: 12/20/2018 ---- -# Data Sync Agent for SQL Data Sync -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Sync data with SQL Server databases by installing and configuring the Data Sync Agent for SQL Data Sync in Azure. For more info about SQL Data Sync, see [Sync data across multiple cloud and on-premises databases with SQL Data Sync](sql-data-sync-data-sql-server-sql-database.md). - -> [!IMPORTANT] -> SQL Data Sync does **not** support Azure SQL Managed Instance at this time. - -## Download and install - -To download the Data Sync Agent, go to [SQL Data Sync Agent](https://www.microsoft.com/download/details.aspx?id=27693). To upgrade the Data Sync Agent, install the Agent in the same location as the old Agent and it will override the original one. - -### Install silently - -To install the Data Sync Agent silently from the command prompt, enter a command similar to the following example. Check the file name of the downloaded .msi file, and provide your own values for the **TARGETDIR** and **SERVICEACCOUNT** arguments. - -- If you don't provide a value for **TARGETDIR**, the default value is `C:\Program Files (x86)\Microsoft SQL Data Sync 2.0`. 
- -- If you provide `LocalSystem` as the value of **SERVICEACCOUNT**, use SQL Server authentication when you configure the agent to connect to SQL Server. - -- If you provide a domain user account or a local user account as the value of **SERVICEACCOUNT**, you also have to provide the password with the **SERVICEPASSWORD** argument. For example, `SERVICEACCOUNT="\" SERVICEPASSWORD=""`. - -```cmd -msiexec /i "SQLDataSyncAgent-2.0-x86-ENU.msi" TARGETDIR="C:\Program Files (x86)\Microsoft SQL Data Sync 2.0" SERVICEACCOUNT="LocalSystem" /qn -``` - -## Sync data with a SQL Server database - -To configure the Data Sync Agent so you can sync data with one or more SQL Server databases, see [Add a SQL Server database](sql-data-sync-sql-server-configure.md#add-on-prem). - -## Data Sync Agent FAQ - -### Why do I need a client agent - -The SQL Data Sync service communicates with SQL Server databases via the client agent. This security feature prevents direct communication with databases behind a firewall. When the SQL Data Sync service communicates with the agent, it does so using encrypted connections and a unique token or *agent key*. The SQL Server databases authenticate the agent using the connection string and agent key. This design provides a high level of security for your data. - -### How many instances of the local agent UI can be run - -Only one instance of the UI can be run. - -### How can I change my service account - -After you install a client agent, the only way to change the service account is to uninstall it and install a new client agent with the new service account. - -### How do I change my agent key - -An agent key can only be used once by an agent. It cannot be reused when you remove then reinstall a new agent, nor can it be used by multiple agents. If you need to create a new key for an existing agent, you must be sure that the same key is recorded with the client agent and with the SQL Data Sync service. 
- -### How do I retire a client agent - -To immediately invalidate or retire an agent, regenerate its key in the portal but do not submit it in the Agent UI. Regenerating a key invalidates the previous key irrespective if the corresponding agent is online or offline. - -### How do I move a client agent to another computer - -If you want to run the local agent from a different computer than it is currently on, do the following things: - -1. Install the agent on desired computer. -2. Log in to the SQL Data Sync portal and regenerate an agent key for the new agent. -3. Use the new agent's UI to submit the new agent key. -4. Wait while the client agent downloads the list of on-premises databases that were registered earlier. -5. Provide database credentials for all databases that display as unreachable. These databases must be reachable from the new computer on which the agent is installed. - -### How do I delete the Sync metadata database if the Sync agent is still associated with it - -In order to delete a Sync metadata database that has a Sync agent associated with it, you must first delete the Sync agent. To delete the agent, do the following things: - -1. Select the Sync database. -2. Go to the **Sync to other databases** page. -3. Select the Sync agent and click on **Delete**. - -## Troubleshoot Data Sync Agent issues - -- [The client agent install, uninstall, or repair fails](#agent-install) - -- [The client agent doesn't work after I cancel the uninstall](#agent-uninstall) - -- [My database isn't listed in the agent list](#agent-list) - -- [Client agent doesn't start (Error 1069)](#agent-start) - -- [I can't submit the agent key](#agent-key) - -- [The client agent can't be deleted from the portal if its associated on-premises database is unreachable](#agent-delete) - -- [Local Sync Agent app can't connect to the local sync service](#agent-connect) - -### The client agent install, uninstall, or repair fails - -- **Cause**. 
Many scenarios might cause this failure. To determine the specific cause for this failure, look at the logs. - -- **Resolution**. To find the specific cause of the failure, generate and look at the Windows Installer logs. You can turn on logging at a command prompt. For example, if the downloaded installation file is `SQLDataSyncAgent-2.0-x86-ENU.msi`, generate and examine log files by using the following command lines: - - - For installs: `msiexec.exe /i SQLDataSyncAgent-2.0-x86-ENU.msi /l*v LocalAgentSetup.Log` - - For uninstalls: `msiexec.exe /x SQLDataSyncAgent-2.0-x86-ENU.msi /l*v LocalAgentSetup.Log` - - You can also turn on logging for all installations that are performed by Windows Installer. The Microsoft Knowledge Base article [How to enable Windows Installer logging](https://support.microsoft.com/help/223300/how-to-enable-windows-installer-logging) provides a one-click solution to turn on logging for Windows Installer. It also provides the location of the logs. - -### The client agent doesn't work after I cancel the uninstall - -The client agent doesn't work, even after you cancel its uninstallation. - -- **Cause**. This occurs because the SQL Data Sync client agent doesn't store credentials. - -- **Resolution**. You can try these two solutions: - - - Use services.msc to reenter the credentials for the client agent. - - Uninstall this client agent and then install a new one. Download and install the latest client agent from [Download Center](https://www.microsoft.com/download/details.aspx?id=27693). - -### My database isn't listed in the agent list - -When you attempt to add an existing SQL Server database to a sync group, the database doesn't appear in the list of agents. - -These scenarios might cause this issue: - -- **Cause**. The client agent and sync group are in different datacenters. - -- **Resolution**. The client agent and the sync group must be in the same datacenter. 
To set this up, you have two options: - - - Create a new agent in the datacenter where the sync group is located. Then, register the database with that agent. - - Delete the current sync group. Then, re-create the sync group in the datacenter where the agent is located. - -- **Cause**. The client agent's list of databases isn't current. - -- **Resolution**. Stop and then restart the client agent service. - - The local agent downloads the list of associated databases only on the first submission of the agent key. It doesn't download the list of associated databases on subsequent agent key submissions. Databases that are registered during an agent move don't show up in the original agent instance. - -### Client agent doesn't start (Error 1069) - -You discover that the agent isn't running on a computer that hosts SQL Server. When you attempt to manually start the agent, you see a dialog box that displays the message, "Error 1069: The service did not start due to a logon failure." - -![Data Sync error 1069 dialog box](./media/sql-data-sync-agent-overview/sync-error-1069.png) - -- **Cause**. A likely cause of this error is that the password on the local server has changed since you created the agent and agent password. - -- **Resolution**. Update the agent's password to your current server password: - - 1. Locate the SQL Data Sync client agent service. - a. Select **Start**. - b. In the search box, enter **services.msc**. - c. In the search results, select **Services**. - d. In the **Services** window, scroll to the entry for **SQL Data Sync Agent**. - 1. Right-click **SQL Data Sync Agent**, and then select **Stop**. - 1. Right-click **SQL Data Sync Agent**, and then select **Properties**. - 1. On **SQL Data Sync Agent Properties**, select the **Log in** tab. - 1. In the **Password** box, enter your password. - 1. In the **Confirm Password** box, reenter your password. - 1. Select **Apply**, and then select **OK**. - 1. 
In the **Services** window, right-click the **SQL Data Sync Agent** service, and then click **Start**. - 1. Close the **Services** window. - -### I can't submit the agent key - -After you create or re-create a key for an agent, you try to submit the key through the SqlAzureDataSyncAgent application. The submission fails to complete. - -![Sync Error dialog box - Can't submit agent key](./media/sql-data-sync-agent-overview/sync-error-cant-submit-agent-key.png) - -- **Prerequisites**. Before you proceed, check the following prerequisites: - - - The SQL Data Sync Windows service is running. - - - The service account for SQL Data Sync Windows service has network access. - - - The outbound 1433 port is open in your local firewall rule. - - - The local ip is added to the server or database firewall rule for the sync metadata database. - -- **Cause**. The agent key uniquely identifies each local agent. The key must meet two conditions: - - - The client agent key on the SQL Data Sync server and the local computer must be identical. - - The client agent key can be used only once. - -- **Resolution**. If your agent isn't working, it's because one or both of these conditions are not met. To get your agent to work again: - - 1. Generate a new key. - 1. Apply the new key to the agent. - - To apply the new key to the agent: - - 1. In File Explorer, go to your agent installation directory. The default installation directory is C:\\Program Files (x86)\\Microsoft SQL Data Sync. - 1. Double-click the bin subdirectory. - 1. Open the SqlAzureDataSyncAgent application. - 1. Select **Submit Agent Key**. - 1. In the space provided, paste the key from your clipboard. - 1. Select **OK**. - 1. Close the program. - -### The client agent can't be deleted from the portal if its associated on-premises database is unreachable - -If a local endpoint (that is, a database) that is registered with a SQL Data Sync client agent becomes unreachable, the client agent can't be deleted. - -- **Cause**. 
The local agent can't be deleted because the unreachable database is still registered with the agent. When you try to delete the agent, the deletion process tries to reach the database, which fails. - -- **Resolution**. Use "force delete" to delete the unreachable database. - -> [!NOTE] -> If sync metadata tables remain after a "force delete", use `deprovisioningutil.exe` to clean them up. - -### Local Sync Agent app can't connect to the local sync service - -- **Resolution**. Try the following steps: - - 1. Exit the app. - 1. Open the Component Services Panel. - a. In the search box on the taskbar, enter **services.msc**. - b. In the search results, double-click **Services**. - 1. Stop the **SQL Data Sync** service. - 1. Restart the **SQL Data Sync** service. - 1. Reopen the app. - -## Run the Data Sync Agent from the command prompt - -You can run the following Data Sync Agent commands from the command prompt: - -### Ping the service - -#### Usage - -```cmd -SqlDataSyncAgentCommand.exe -action pingsyncservice -``` - -#### Example - -```cmd -SqlDataSyncAgentCommand.exe -action "pingsyncservice" -``` - -### Display registered databases - -#### Usage - -```cmd -SqlDataSyncAgentCommand.exe -action displayregistereddatabases -``` - -#### Example - -```cmd -SqlDataSyncAgentCommand.exe -action "displayregistereddatabases" -``` - -### Submit the agent key - -#### Usage - -```cmd -Usage: SqlDataSyncAgentCommand.exe -action submitagentkey -agentkey [agent key] -username [user name] -password [password] -``` - -#### Example - -```cmd -SqlDataSyncAgentCommand.exe -action submitagentkey -agentkey [agent key generated from portal, PowerShell, or API] -username [user name to sync metadata database] -password [user name to sync metadata database] -``` - -### Register a database - -#### Usage - -```cmd -SqlDataSyncAgentCommand.exe -action registerdatabase -servername [on-premisesdatabase server name] -databasename [on-premisesdatabase name] -username [domain\\username] -password 
[password] -authentication [sql or windows] -encryption [true or false] -``` - -#### Examples - -```cmd -SqlDataSyncAgentCommand.exe -action "registerdatabase" -serverName localhost -databaseName testdb -authentication sql -username -password -encryption true - -SqlDataSyncAgentCommand.exe -action "registerdatabase" -serverName localhost -databaseName testdb -authentication windows -encryption true - -``` - -### Unregister a database - -When you use this command to unregister a database, it deprovisions the database completely. If the database participates in other sync groups, this operation breaks the other sync groups. - -#### Usage - -```cmd -SqlDataSyncAgentCommand.exe -action unregisterdatabase -servername [on-premisesdatabase server name] -databasename [on-premisesdatabase name] -``` - -#### Example - -```cmd -SqlDataSyncAgentCommand.exe -action "unregisterdatabase" -serverName localhost -databaseName testdb -``` - -### Update credentials - -#### Usage - -```cmd -SqlDataSyncAgentCommand.exe -action updatecredential -servername [on-premisesdatabase server name] -databasename [on-premisesdatabase name] -username [domain\\username] -password [password] -authentication [sql or windows] -encryption [true or false] -``` - -#### Examples - -```cmd -SqlDataSyncAgentCommand.exe -action "updatecredential" -serverName localhost -databaseName testdb -authentication sql -username -password -encryption true - -SqlDataSyncAgentCommand.exe -action "updatecredential" -serverName localhost -databaseName testdb -authentication windows -encryption true -``` - -## Next steps - -For more info about SQL Data Sync, see the following articles: - -- Overview - [Sync data across multiple cloud and on-premises databases with SQL Data Sync in Azure](sql-data-sync-data-sql-server-sql-database.md) -- Set up Data Sync - - In the portal - [Tutorial: Set up SQL Data Sync to sync data between Azure SQL Database and SQL Server](sql-data-sync-sql-server-configure.md) - - With PowerShell - - 
[Use PowerShell to sync between multiple databases in Azure SQL Database](scripts/sql-data-sync-sync-data-between-sql-databases.md) - - [Use PowerShell to sync between a database in Azure SQL Database and a database in a SQL Server instance](scripts/sql-data-sync-sync-data-between-azure-onprem.md) -- Best practices - [Best practices for Azure SQL Data Sync](sql-data-sync-best-practices.md) -- Monitor - [Monitor SQL Data Sync with Azure Monitor logs](./monitor-tune-overview.md) -- Troubleshoot - [Troubleshoot issues with Azure SQL Data Sync]sql-data-sync-troubleshoot.md) -- Update the sync schema - - With Transact-SQL - [Automate replication of schema changes with SQL Data Sync in Azure](sql-data-sync-update-sync-schema.md) - - With PowerShell - [Use PowerShell to update the sync schema in an existing sync group](scripts/update-sync-schema-in-sync-group.md) diff --git a/articles/azure-sql/database/sql-data-sync-best-practices.md b/articles/azure-sql/database/sql-data-sync-best-practices.md deleted file mode 100644 index e2a5ab0d46d21..0000000000000 --- a/articles/azure-sql/database/sql-data-sync-best-practices.md +++ /dev/null @@ -1,246 +0,0 @@ ---- -title: Best practices for Azure SQL Data Sync -description: "Learn about best practices for configuring and running Azure SQL Data Sync." -ms.service: sql-database -ms.subservice: sql-data-sync -ms.custom: sqldbrb=1 -ms.topic: conceptual -author: rothja -ms.author: jroth -ms.reviewer: kendralittle, mathoma -ms.date: 12/20/2018 ---- -# Best practices for Azure SQL Data Sync - -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article describes best practices for Azure SQL Data Sync. - -For an overview of SQL Data Sync, see [Sync data across multiple cloud and on-premises databases with Azure SQL Data Sync](sql-data-sync-data-sql-server-sql-database.md). - -> [!IMPORTANT] -> Azure SQL Data Sync does **not** support Azure SQL Managed Instance at this time. 
- -## Security and reliability - -### Client agent - -- Install the client agent by using the least privileged user account that has network service access. -- Install the client agent on a computer that isn't the SQL Server computer. -- Don't register an on-premises database with more than one agent. - - Avoid this even if you are syncing different tables for different sync groups. - - Registering an on-premises database with multiple client agents poses challenges when you delete one of the sync groups. - -### Database accounts with least required privileges - -- **For sync setup**. Create/Alter Table; Alter Database; Create Procedure; Select/ Alter Schema; Create User-Defined Type. - -- **For ongoing sync**. Select/ Insert/ Update/ Delete on tables that are selected for syncing, and on sync metadata and tracking tables; Execute permission on stored procedures created by the service; Execute permission on user-defined table types. - -- **For deprovisioning**. Alter on tables part of sync; Select/ Delete on sync metadata tables; Control on sync tracking tables, stored procedures, and user-defined types. - -Azure SQL Database supports only a single set of credentials. To accomplish these tasks within this constraint, consider the following options: - -- Change the credentials for different phases (for example, *credentials1* for setup and *credentials2* for ongoing). -- Change the permission of the credentials (that is, change the permission after sync is set up). - -### Auditing - -It is recommended to enable auditing at the level of the databases in the sync groups. Learn how to [enable auditing on your Azure SQL database](./auditing-overview.md) or [enable auditing on your SQL Server database](/sql/relational-databases/security/auditing/sql-server-audit-database-engine). - -## Setup - -### Database considerations and constraints - -#### Database size - -When you create a new database, set the maximum size so that it's always larger than the database you deploy. 
If you don't set the maximum size to larger than the deployed database, sync fails. Although SQL Data Sync doesn't offer automatic growth, you can run the `ALTER DATABASE` command to increase the size of the database after it has been created. Ensure that you stay within the database size limits. - -> [!IMPORTANT] -> SQL Data Sync stores additional metadata with each database. Ensure that you account for this metadata when you calculate space needed. The amount of added overhead is related to the width of the tables (for example, narrow tables require more overhead) and the amount of traffic. - -### Table considerations and constraints - -#### Selecting tables - -You don't have to include all the tables that are in a database in a sync group. The tables that you include in a sync group affect efficiency and costs. Include tables, and the tables they are dependent on, in a sync group only if business needs require it. - -#### Primary keys - -Each table in a sync group must have a primary key. SQL Data Sync can't sync a table that doesn't have a primary key. - -Before using SQL Data Sync in production, test initial and ongoing sync performance. - -#### Empty tables provide the best performance - -Empty tables provide the best performance at initialization time. If the target table is empty, Data Sync uses bulk insert to load the data. Otherwise, Data Sync does a row-by-row comparison and insertion to check for conflicts. If performance is not a concern, however, you can set up sync between tables that already contain data. - -### Provisioning destination databases - -SQL Data Sync provides basic database autoprovisioning. - -This section discusses the limitations of provisioning in SQL Data Sync. - -#### Autoprovisioning limitations - -SQL Data Sync has the following limitations for autoprovisioning: - -- Select only the columns that are created in the destination table. Any columns that aren't part of the sync group aren't provisioned in the destination tables. 
-- Indexes are created only for selected columns. If the source table index has columns that aren't part of the sync group, those indexes aren't provisioned in the destination tables. -- Indexes on XML type columns aren't provisioned. -- CHECK constraints aren't provisioned. -- Existing triggers on the source tables aren't provisioned. -- Views and stored procedures aren't created on the destination database. -- ON UPDATE CASCADE and ON DELETE CASCADE actions on foreign key constraints aren't recreated in the destination tables. -- If you have decimal or numeric columns with a precision greater than 28, SQL Data Sync may encounter a conversion overflow issue during sync. We recommend that you limit the precision of decimal or numeric columns to 28 or less. - -#### Recommendations - -- Use the SQL Data Sync autoprovisioning capability only when you are trying out the service. -- For production, provision the database schema. - -### Where to locate the hub database - -#### Enterprise-to-cloud scenario - -To minimize latency, keep the hub database close to the greatest concentration of the sync group's database traffic. - -#### Cloud-to-cloud scenario - -- When all the databases in a sync group are in one datacenter, the hub should be located in the same datacenter. This configuration reduces latency and the cost of data transfer between datacenters. -- When the databases in a sync group are in multiple datacenters, the hub should be located in the same datacenter as the majority of the databases and database traffic. - -#### Mixed scenarios - -Apply the preceding guidelines to complex sync group configurations, such as those that are a mix of enterprise-to-cloud and cloud-to-cloud scenarios. - -## Sync - -### Avoid slow and costly initial sync - -In this section, we discuss the initial sync of a sync group. Learn how to help prevent an initial sync from taking longer and being more costly than necessary. 
- -#### How initial sync works - -When you create a sync group, start with data in only one database. If you have data in multiple databases, SQL Data Sync treats each row as a conflict that needs to be resolved. This conflict resolution causes the initial sync to go slowly. If you have data in multiple databases, initial sync might take between several days and several months, depending on the database size. - -If the databases are in different datacenters, each row must travel between the different datacenters. This increases the cost of an initial sync. - -#### Recommendation - -If possible, start with data in only one of the sync group's databases. - -### Design to avoid sync loops - -A sync loop occurs when there are circular references within a sync group. In that scenario, each change in one database is endlessly and circularly replicated through the databases in the sync group. - -Ensure that you avoid sync loops, because they cause performance degradation and might significantly increase costs. - -### Changes that fail to propagate - -#### Reasons that changes fail to propagate - -Changes might fail to propagate for one of the following reasons: - -- Schema/datatype incompatibility. -- Inserting null in non-nullable columns. -- Violating foreign key constraints. - -#### What happens when changes fail to propagate? - -- Sync group shows that it's in a **Warning** state. -- Details are listed in the portal UI log viewer. -- If the issue is not resolved for 45 days, the database becomes out of date. - -> [!NOTE] -> These changes never propagate. The only way to recover in this scenario is to re-create the sync group. - -#### Recommendation - -Monitor the sync group and database health regularly through the portal and log interface. - - -## Maintenance - -### Avoid out-of-date databases and sync groups - -A sync group or a database in a sync group can become out of date. When a sync group's status is **Out-of-date**, it stops functioning. 
When a database's status is **Out-of-date**, data might be lost. It's best to avoid this scenario instead of trying to recover from it. - -#### Avoid out-of-date databases - -A database's status is set to **Out-of-date** when it has been offline for 45 days or more. To avoid an **Out-of-date** status on a database, ensure that none of the databases are offline for 45 days or more. - -#### Avoid out-of-date sync groups - -A sync group's status is set to **Out-of-date** when any change in the sync group fails to propagate to the rest of the sync group for 45 days or more. To avoid an **Out-of-date** status on a sync group, regularly check the sync group's history log. Ensure that all conflicts are resolved, and that changes are successfully propagated throughout the sync group databases. - -A sync group might fail to apply a change for one of these reasons: - -- Schema incompatibility between tables. -- Data incompatibility between tables. -- Inserting a row with a null value in a column that doesn't allow null values. -- Updating a row with a value that violates a foreign key constraint. - -To prevent out-of-date sync groups: - -- Update the schema to allow the values that are contained in the failed rows. -- Update the foreign key values to include the values that are contained in the failed rows. -- Update the data values in the failed row so they are compatible with the schema or foreign keys in the target database. - -### Avoid deprovisioning issues - -In some circumstances, unregistering a database with a client agent might cause sync to fail. - -#### Scenario - -1. Sync group A was created by using a SQL Database instance and a SQL Server database, which is associated with local agent 1. -2. The same on-premises database is registered with local agent 2 (this agent is not associated with any sync group). -3. Unregistering the on-premises database from local agent 2 removes the tracking and meta tables for sync group A for the on-premises database. -4. 
Sync group A operations fail, with this error: "The current operation could not be completed because the database is not provisioned for sync or you do not have permissions to the sync configuration tables." - -#### Solution - -To avoid this scenario, don't register a database with more than one agent. - -To recover from this scenario: - -1. Remove the database from each sync group that it belongs to. -2. Add the database back into each sync group that you removed it from. -3. Deploy each affected sync group (this action provisions the database). - -### Modifying a sync group - -Don't attempt to remove a database from a sync group and then edit the sync group without first deploying one of the changes. - -Instead, first remove a database from a sync group. Then, deploy the change and wait for deprovisioning to finish. When deprovisioning is finished, you can edit the sync group and deploy the changes. - -If you attempt to remove a database and then edit a sync group without first deploying one of the changes, one or the other operation fails. The portal interface might become inconsistent. If this happens, refresh the page to restore the correct state. - -### Avoid schema refresh timeout - -If you have a complex schema to sync, you may encounter an "operation timeout" during a schema refresh if the sync metadata database has a lower SKU (example: basic). - -#### Solution - -To mitigate this issue, please scale up your sync metadata database to have a higher SKU, such as S3. 
- -## Next steps -For more information about SQL Data Sync, see: - -- Overview - [Sync data across multiple cloud and on-premises databases with Azure SQL Data Sync](sql-data-sync-data-sql-server-sql-database.md) -- Set up SQL Data Sync - - In the portal - [Tutorial: Set up SQL Data Sync to sync data between Azure SQL Database and SQL Server](sql-data-sync-sql-server-configure.md) - - With PowerShell - - [Use PowerShell to sync between multiple databases in Azure SQL Database](scripts/sql-data-sync-sync-data-between-sql-databases.md) - - [Use PowerShell to sync between a database in SQL Database and a database in a SQL Server instance](scripts/sql-data-sync-sync-data-between-azure-onprem.md) -- Data Sync Agent - [Data Sync Agent for Azure SQL Data Sync](sql-data-sync-agent-overview.md) -- Monitor - [Monitor SQL Data Sync with Azure Monitor logs](./monitor-tune-overview.md) -- Troubleshoot - [Troubleshoot issues with Azure SQL Data Sync](sql-data-sync-troubleshoot.md) -- Update the sync schema - - With Transact-SQL - [Automate the replication of schema changes in Azure SQL Data Sync](sql-data-sync-update-sync-schema.md) - - With PowerShell - [Use PowerShell to update the sync schema in an existing sync group](scripts/update-sync-schema-in-sync-group.md) - -For more information about SQL Database, see: - -- [SQL Database overview](sql-database-paas-overview.md) -- [Database lifecycle management](/previous-versions/sql/sql-server-guides/jj907294(v=sql.110)) \ No newline at end of file diff --git a/articles/azure-sql/database/sql-data-sync-data-sql-server-sql-database.md b/articles/azure-sql/database/sql-data-sync-data-sql-server-sql-database.md deleted file mode 100644 index 6903abc07b6e0..0000000000000 --- a/articles/azure-sql/database/sql-data-sync-data-sql-server-sql-database.md +++ /dev/null @@ -1,289 +0,0 @@ ---- -title: What is SQL Data Sync for Azure? 
-description: This overview introduces SQL Data Sync for Azure, which allows you to sync data across multiple cloud and on-premises databases. -services: sql-database -ms.service: sql-database -ms.subservice: sql-data-sync -ms.custom: data sync, sqldbrb=1, fasttrack-edit -ms.devlang: -ms.topic: conceptual -author: rothja -ms.author: jroth -ms.reviewer: kendralittle, mathoma -ms.date: 2/2/2022 ---- -# What is SQL Data Sync for Azure? - -SQL Data Sync is a service built on Azure SQL Database that lets you synchronize the data you select bi-directionally across multiple databases, both on-premises and in the cloud. - -> [!IMPORTANT] -> Azure SQL Data Sync does not support Azure SQL Managed Instance at this time. - - -## Overview - -Data Sync is based around the concept of a sync group. A sync group is a group of databases that you want to synchronize. - -Data Sync uses a hub and spoke topology to synchronize data. You define one of the databases in the sync group as the hub database. The rest of the databases are member databases. Sync occurs only between the hub and individual members. - -- The **Hub Database** must be an Azure SQL Database. -- The **member databases** can be either databases in Azure SQL Database or in instances of SQL Server. -- The **Sync Metadata Database** contains the metadata and log for Data Sync. The Sync Metadata Database has to be an Azure SQL Database located in the same region as the Hub Database. The Sync Metadata Database is customer created and customer owned. You can only have one Sync Metadata Database per region and subscription. Sync Metadata Database cannot be deleted or renamed while sync groups or sync agents exist. Microsoft recommends to create a new, empty database for use as the Sync Metadata Database. Data Sync creates tables in this database and runs a frequent workload. 
- -> [!NOTE] -> If you're using an on premises database as a member database, you have to [install and configure a local sync agent](sql-data-sync-sql-server-configure.md#add-on-prem). - -![Sync data between databases](./media/sql-data-sync-data-sql-server-sql-database/sync-data-overview.png) - -A sync group has the following properties: - -- The **Sync Schema** describes which data is being synchronized. -- The **Sync Direction** can be bi-directional or can flow in only one direction. That is, the Sync Direction can be *Hub to Member*, or *Member to Hub*, or both. -- The **Sync Interval** describes how often synchronization occurs. -- The **Conflict Resolution Policy** is a group level policy, which can be *Hub wins* or *Member wins*. - -## When to use - -Data Sync is useful in cases where data needs to be kept updated across several databases in Azure SQL Database or SQL Server. Here are the main use cases for Data Sync: - -- **Hybrid Data Synchronization:** With Data Sync, you can keep data synchronized between your databases in SQL Server and Azure SQL Database to enable hybrid applications. This capability may appeal to customers who are considering moving to the cloud and would like to put some of their application in Azure. -- **Distributed Applications:** In many cases, it's beneficial to separate different workloads across different databases. For example, if you have a large production database, but you also need to run a reporting or analytics workload on this data, it's helpful to have a second database for this additional workload. This approach minimizes the performance impact on your production workload. You can use Data Sync to keep these two databases synchronized. -- **Globally Distributed Applications:** Many businesses span several regions and even several countries/regions. To minimize network latency, it's best to have your data in a region close to you. With Data Sync, you can easily keep databases in regions around the world synchronized. 
- -Data Sync isn't the preferred solution for the following scenarios: - -| Scenario | Some recommended solutions | -|----------|----------------------------| -| Disaster Recovery | [Azure geo-redundant backups](automated-backups-overview.md) | -| Read Scale | [Use read-only replicas to load balance read-only query workloads](read-scale-out.md) | -| ETL (OLTP to OLAP) | [Azure Data Factory](https://azure.microsoft.com/services/data-factory/) or [SQL Server Integration Services](/sql/integration-services/sql-server-integration-services) | -| Migration from SQL Server to Azure SQL Database. However, SQL Data Sync can be used after the migration is completed, to ensure that the source and target are kept in sync. | [Azure Database Migration Service](https://azure.microsoft.com/services/database-migration/) | - - -## How it works - -- **Tracking data changes:** Data Sync tracks changes using insert, update, and delete triggers. The changes are recorded in a side table in the user database. Note that BULK INSERT doesn't fire triggers by default. If FIRE_TRIGGERS isn't specified, no insert triggers execute. Add the FIRE_TRIGGERS option so Data Sync can track those inserts. -- **Synchronizing data:** Data Sync is designed in a hub and spoke model. The hub syncs with each member individually. Changes from the hub are downloaded to the member and then changes from the member are uploaded to the hub. -- **Resolving conflicts:** Data Sync provides two options for conflict resolution, *Hub wins* or *Member wins*. - - If you select *Hub wins*, the changes in the hub always overwrite changes in the member. - - If you select *Member wins*, the changes in the member overwrite changes in the hub. If there's more than one member, the final value depends on which member syncs first. - -## Compare with Transactional Replication - -| | Data Sync | Transactional Replication | -|---|---|---| -| **Advantages** | - Active-active support
    - Bi-directional between on-premises and Azure SQL Database | - Lower latency
    - Transactional consistency
    - Reuse existing topology after migration
    -Azure SQL Managed Instance support | -| **Disadvantages** | - No transactional consistency
    - Higher performance impact | - Can't publish from Azure SQL Database
    - High maintenance cost | - -## Private link for Data Sync - -> [!NOTE] -> The SQL Data Sync private link is different from the [Azure Private Link](https://azure.microsoft.com/services/private-link/). - -The new private link feature allows you to choose a service managed private endpoint to establish a secure connection between the sync service and your member/hub databases during the data synchronization process. A service managed private endpoint is a private IP address within a specific virtual network and subnet. Within Data Sync, the service managed private endpoint is created by Microsoft and is exclusively used by the Data Sync service for a given sync operation. -Before setting up the private link, read the [general requirements](sql-data-sync-data-sql-server-sql-database.md#general-requirements) for the feature. - -![Private link for Data Sync](./media/sql-data-sync-data-sql-server-sql-database/sync-private-link-overview.png) - -> [!NOTE] -> You must manually approve the service managed private endpoint in the **Private endpoint connections** page of the Azure portal during the sync group deployment or by using PowerShell. 
- -## Get started - -### Set up Data Sync in the Azure portal - -- [Set up Azure SQL Data Sync](sql-data-sync-sql-server-configure.md) -- Data Sync Agent - [Data Sync Agent for Azure SQL Data Sync](sql-data-sync-agent-overview.md) - -### Set up Data Sync with PowerShell - -- [Use PowerShell to sync between multiple databases in Azure SQL Database](scripts/sql-data-sync-sync-data-between-sql-databases.md) -- [Use PowerShell to sync between a database in Azure SQL Database and a databases in a SQL Server instance](scripts/sql-data-sync-sync-data-between-azure-onprem.md) - -### Set up Data Sync with REST API -- [Use REST API to sync between multiple databases in Azure SQL Database](scripts/sql-data-sync-sync-data-between-sql-databases-rest-api.md) - -### Review the best practices for Data Sync - -- [Best practices for Azure SQL Data Sync](sql-data-sync-best-practices.md) - -### Did something go wrong - -- [Troubleshoot issues with Azure SQL Data Sync](./sql-data-sync-troubleshoot.md) - -## Consistency and performance - -### Eventual consistency - -Since Data Sync is trigger-based, transactional consistency isn't guaranteed. Microsoft guarantees that all changes are made eventually and that Data Sync doesn't cause data loss. - -### Performance impact - -Data Sync uses insert, update, and delete triggers to track changes. It creates side tables in the user database for change tracking. These change tracking activities have an impact on your database workload. Assess your service tier and upgrade if needed. - -Provisioning and deprovisioning during sync group creation, update, and deletion may also impact the database performance. - -## Requirements and limitations - -### General requirements - -- Each table must have a primary key. Don't change the value of the primary key in any row. If you have to change a primary key value, delete the row and recreate it with the new primary key value. 
- -> [!IMPORTANT] -> Changing the value of an existing primary key will result in the following faulty behavior: -> - Data between hub and member can be lost even though sync does not report any issue. -> - Sync can fail because the tracking table has a non-existing row from source due to the primary key change. - -- Snapshot isolation must be enabled for both Sync members and hub. For more info, see [Snapshot Isolation in SQL Server](/dotnet/framework/data/adonet/sql/snapshot-isolation-in-sql-server). - -- In order to use Data Sync private link, both the member and hub databases must be hosted in Azure (same or different regions), in the same cloud type (e.g. both in public cloud or both in government cloud). Additionally, to use private link, Microsoft.Network resource providers must be Registered for the subscriptions that host the hub and member servers. Lastly, you must manually approve the private link for Data Sync during the sync configuration, within the “Private endpoint connections” section in the Azure portal or through PowerShell. For more details on how to approve the private link, see [Set up SQL Data Sync](./sql-data-sync-sql-server-configure.md). Once you approve the service managed private endpoint, all communication between the sync service and the member/hub databases will happen over the private link. Existing sync groups can be updated to have this feature enabled. - -### General limitations - -- A table can't have an identity column that isn't the primary key. -- A primary key can't have the following data types: sql_variant, binary, varbinary, image, xml. -- Be cautious when you use the following data types as a primary key, because the supported precision is only to the second: time, datetime, datetime2, datetimeoffset. -- The names of objects (databases, tables, and columns) can't contain the printable characters period (.), left square bracket ([), or right square bracket (]). -- A table name can't contain printable characters: ! 
" # $ % ' ( ) * + - space -- Azure Active Directory authentication isn't supported. -- If there are tables with the same name but different schema (for example, dbo.customers and sales.customers) only one of the tables can be added into sync. -- Columns with User-Defined Data Types aren't supported -- Moving servers between different subscriptions isn't supported. -- If two primary keys are only different in case (e.g. Foo and foo), Data Sync won't support this scenario. -- Truncating tables is not an operation supported by Data Sync (changes won't be tracked). -- Using a Hyperscale database as a Hub or Sync Metadata database is not supported. However, a Hyperscale database can be a member database in a Data Sync topology. -- Memory-optimized tables are not supported. - -#### Unsupported data types - -- FileStream -- SQL/CLR UDT -- XMLSchemaCollection (XML supported) -- Cursor, RowVersion, Timestamp, Hierarchyid - -#### Unsupported column types - -Data Sync can't sync read-only or system-generated columns. For example: - -- Computed columns. -- System-generated columns for temporal tables. - -#### Limitations on service and database dimensions - -| **Dimensions** | **Limit** | **Workaround** | -|-----------------------------------------------------------------|------------------------|-----------------------------| -| Maximum number of sync groups any database can belong to. | 5 | | -| Maximum number of endpoints in a single sync group | 30 | | -| Maximum number of on-premises endpoints in a single sync group. | 5 | Create multiple sync groups | -| Database, table, schema, and column names | 50 characters per name | | -| Tables in a sync group | 500 | Create multiple sync groups | -| Columns in a table in a sync group | 1000 | | -| Data row size on a table | 24 Mb | | - -> [!NOTE] -> There may be up to 30 endpoints in a single sync group if there is only one sync group. 
If there is more than one sync group, the total number of endpoints across all sync groups cannot exceed 30. If a database belongs to multiple sync groups, it is counted as multiple endpoints, not one. - -### Network requirements - -> [!NOTE] -> If you use Sync private link, these network requirements do not apply. - -When the sync group is established, the Data Sync service needs to connect to the hub database. At the time when you establish the sync group, the Azure SQL server must have the following configuration in its `Firewalls and virtual networks` settings: - - * *Deny public network access* must be set to *Off*. - * *Allow Azure services and resources to access this server* must be set to *Yes*, or you must create IP rules for the [IP addresses used by Data Sync service](network-access-controls-overview.md#data-sync). - -Once the sync group is created and provisioned, you can then disable these settings. The sync agent will connect directly to the hub database, and you can use the server's [firewall IP rules](firewall-configure.md) or [private endpoints](private-endpoint-overview.md) to allow the agent to access the hub server. - -> [!NOTE] -> If you change the sync group's schema settings, you will need to allow the Data Sync service to access the server again so that the hub database can be re-provisioned. - -### Region data residency - -If you synchronize data within the same region, SQL Data Sync doesn't store/process customer data outside that region in which the service instance is deployed. If you synchronize data across different regions, SQL Data Sync will replicate customer data to the paired regions. - -## FAQ about SQL Data Sync - -### How much does the SQL Data Sync service cost - -There's no charge for the SQL Data Sync service itself. However, you still collect data transfer charges for data movement in and out of your SQL Database instance. 
For more information, see [data transfer charges](https://azure.microsoft.com/pricing/details/bandwidth/). - -### What regions support Data Sync - -SQL Data Sync is available in all regions. - -### Is a SQL Database account required - -Yes. You must have a SQL Database account to host the hub database. - -### Can I use Data Sync to sync between SQL Server databases only - -Not directly. You can sync between SQL Server databases indirectly, however, by creating a Hub database in Azure, and then adding the on-premises databases to the sync group. - -### Can I configure Data Sync to sync between databases in Azure SQL Database that belong to different subscriptions - -Yes. You can configure sync between databases that belong to resource groups owned by different subscriptions, even if the subscriptions belong to different tenants. - -- If the subscriptions belong to the same tenant and you have permission to all subscriptions, you can configure the sync group in the Azure portal. -- Otherwise, you have to use PowerShell to add the sync members. - -### Can I setup Data Sync to sync between databases in SQL Database that belong to different clouds (like Azure Public Cloud and Azure China 21Vianet) - -Yes. You can setup sync between databases that belong to different clouds. You have to use PowerShell to add the sync members that belong to the different subscriptions. - -### Can I use Data Sync to seed data from my production database to an empty database, and then sync them - -Yes. Create the schema manually in the new database by scripting it from the original. After you create the schema, add the tables to a sync group to copy the data and keep it synced. - -### Should I use SQL Data Sync to back up and restore my databases - -It isn't recommended to use SQL Data Sync to create a backup of your data. You can't back up and restore to a specific point in time because SQL Data Sync synchronizations aren't versioned. 
Furthermore, SQL Data Sync doesn't back up other SQL objects, such as stored procedures, and doesn't do the equivalent of a restore operation quickly. - -For one recommended backup technique, see [Copy a database in Azure SQL Database](database-copy.md). - -### Can Data Sync sync encrypted tables and columns - -- If a database uses Always Encrypted, you can sync only the tables and columns that are *not* encrypted. You can't sync the encrypted columns, because Data Sync can't decrypt the data. -- If a column uses Column-Level Encryption (CLE), you can sync the column, as long as the row size is less than the maximum size of 24 Mb. Data Sync treats the column encrypted by key (CLE) as normal binary data. To decrypt the data on other sync members, you need to have the same certificate. - -### Is collation supported in SQL Data Sync - -Yes. SQL Data Sync supports collation in the following scenarios: - -- If the selected sync schema tables aren't already in your hub or member databases, then when you deploy the sync group, the service automatically creates the corresponding tables and columns with the collation settings selected in the empty destination databases. -- If the tables to be synced already exist in both your hub and member databases, SQL Data Sync requires that the primary key columns have the same collation between hub and member databases to successfully deploy the sync group. There are no collation restrictions on columns other than the primary key columns. - -### Is federation supported in SQL Data Sync - -Federation Root Database can be used in the SQL Data Sync Service without any limitation. You can't add the Federated Database endpoint to the current version of SQL Data Sync. - -### Can I use Data Sync to sync data exported from Dynamics 365 using bring your own database (BYOD) feature? - -The Dynamics 365 bring your own database feature lets administrators export data entities from the application into their own Microsoft Azure SQL database. 
Data Sync can be used to sync this data into other databases if data is exported using **incremental push** (full push is not supported) and **enable triggers in target database** is set to **yes**. - -### How do I create Data Sync in Failover group to support Disaster Recovery? - -- To ensure data sync operations in failover region are at par with Primary region, after failover you have to manually re-create the Sync Group in failover region with same settings as primary region. - -## Next steps - -### Update the schema of a synced database - -Do you have to update the schema of a database in a sync group? Schema changes aren't automatically replicated. For some solutions, see the following articles: - -- [Automate the replication of schema changes with SQL Data Sync in Azure](./sql-data-sync-update-sync-schema.md) -- [Use PowerShell to update the sync schema in an existing sync group](scripts/update-sync-schema-in-sync-group.md) - -### Monitor and troubleshoot - -Is SQL Data Sync doing as expected? 
To monitor activity and troubleshoot issues, see the following articles: - -- [Monitor SQL Data Sync with Azure Monitor logs](./monitor-tune-overview.md) -- [Troubleshoot issues with Azure SQL Data Sync](./sql-data-sync-troubleshoot.md) - -### Learn more about Azure SQL Database - -For more info about Azure SQL Database, see the following articles: - -- [SQL Database Overview](sql-database-paas-overview.md) -- [Database Lifecycle Management](/previous-versions/sql/sql-server-guides/jj907294(v=sql.110)) diff --git a/articles/azure-sql/database/sql-data-sync-sql-server-configure.md b/articles/azure-sql/database/sql-data-sync-sql-server-configure.md deleted file mode 100644 index 36b4de2362dd1..0000000000000 --- a/articles/azure-sql/database/sql-data-sync-sql-server-configure.md +++ /dev/null @@ -1,257 +0,0 @@ ---- -title: Set up SQL Data Sync -description: This tutorial shows you how to set up SQL Data Sync for Azure -services: sql-database -ms.service: sql-database -ms.subservice: sql-data-sync -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: rothja -ms.author: jroth -ms.reviewer: kendralittle, mathoma -ms.date: 01/14/2019 ---- -# Tutorial: Set up SQL Data Sync between databases in Azure SQL Database and SQL Server -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -In this tutorial, you learn how to set up SQL Data Sync by creating a sync group that contains both Azure SQL Database and SQL Server instances. The sync group is custom configured and synchronizes on the schedule you set. - -The tutorial assumes you have at least some prior experience with SQL Database and SQL Server. - -For an overview of SQL Data Sync, see [Sync data across cloud and on-premises databases with SQL Data Sync](sql-data-sync-data-sql-server-sql-database.md). 
- -For PowerShell examples on how to configure SQL Data Sync, see [How to sync between databases in SQL Database](scripts/sql-data-sync-sync-data-between-sql-databases.md) or [between databases in Azure SQL Database and SQL Server](scripts/sql-data-sync-sync-data-between-azure-onprem.md) - -> [!IMPORTANT] -> SQL Data Sync does **not** support Azure SQL Managed Instance at this time. - -## Create sync group - -1. Go to the [Azure portal](https://portal.azure.com) to find your database in SQL Database. Search for and select **SQL databases**. - - ![Search for databases, Microsoft Azure portal](./media/sql-data-sync-sql-server-configure/search-for-sql-databases.png) - -1. Select the database you want to use as the hub database for Data Sync. - - :::image type="content" source="./media/sql-data-sync-sql-server-configure/select-sql-database.png" alt-text = "Select from the database list, Microsoft Azure portal"::: - - > [!NOTE] - > The hub database is a sync topology's central endpoint, in which a sync group has multiple database endpoints. All other member databases with endpoints in the sync group, sync with the hub database. - -1. On the **SQL database** menu for the selected database, select **Sync to other databases**. - - :::image type="content" source="./media/sql-data-sync-sql-server-configure/sync-to-other-databases.png" alt-text = "Sync to other databases, Microsoft Azure portal"::: - -1. On the **Sync to other databases** page, select **New Sync Group**. The **New sync group** page opens with **Create sync group**. - - :::image type="content" source="./media/sql-data-sync-sql-server-configure/create-sync-group.png" alt-text = "Set up new sync group with private link"::: - - On the **Create Data Sync Group** page, change the following settings: - - | Setting       | Description | - | ------------------------------ | ------------------------------------------------- | - | **Sync Group Name** | Enter a name for the new sync group. 
This name is distinct from the name of the database itself. | - | **Sync Metadata Database** | Choose to create a database (recommended) or to use an existing database.

    If you choose **New database**, select **Create new database.** Then on the **SQL Database** page, name and configure the new database and select **OK**.

    If you choose **Use existing database**, select the database from the list. | - | **Automatic Sync** | Select **On** or **Off**.

    If you choose **On**, enter a number and select **Seconds**, **Minutes**, **Hours**, or **Days** in the **Sync Frequency** section.
    The first sync begins after the selected interval period elapses from the time the configuration is saved.| - | **Conflict Resolution** | Select **Hub win** or **Member win**.

    **Hub win** means when conflicts occur, data in the hub database overwrites conflicting data in the member database.

    **Member win** means when conflicts occur, data in the member database overwrites conflicting data in the hub database. | - | **Use private link** | Choose a service managed private endpoint to establish a secure connection between the sync service and the hub database. | - - > [!NOTE] - > Microsoft recommends to create a new, empty database for use as the **Sync Metadata Database**. Data Sync creates tables in this database and runs a frequent workload. This database is shared as the **Sync Metadata Database** for all sync groups in a selected region and subscription. You can't change the database or its name without removing all sync groups and sync agents in the region. Additionally, an Elastic jobs database cannot be used as the SQL Data Sync Metadata database and vice versa. - - Select **OK** and wait for the sync group to be created and deployed. - -1. On the **New Sync Group** page, if you selected **Use private link**, you will need to approve the private endpoint connection. The link in the info message will take you to the private endpoint connections experience where you can approve the connection. - - :::image type="content" source="./media/sql-data-sync-sql-server-configure/approve-private-link-update.png" alt-text = "Approve private link"::: - - > [!NOTE] - > The private links for the syng group and the sync members neet to be created, approved, and disabled separately. - -## Add sync members - -After the new sync group is created and deployed, open the sync group and access the **Databases** page, where you will select sync members. - - :::image type="content" source="./media/sql-data-sync-sql-server-configure/add-sync-members.png" alt-text = "Select sync members"::: - - > [!NOTE] - > To update or insert the username and password to your hub database, go to the **Hub Database** section in the **Select sync members** page. 
- -### To add a database in Azure SQL Database - -In the **Select sync members** section, optionally add a database in Azure SQL Database to the sync group by selecting **Add an Azure Database**. The **Configure Azure Database** page opens. - - :::image type="content" source="./media/sql-data-sync-sql-server-configure/step-two-configure.png" alt-text = "Add a database to the sync group"::: - - On the **Configure Azure SQL Database** page, change the following settings: - - | Setting       | Description | - | ----------------------------- | ------------------------------------------------- | - | **Sync Member Name** | Provide a name for the new sync member. This name is distinct from the database name itself. | - | **Subscription** | Select the associated Azure subscription for billing purposes. | - | **Azure SQL Server** | Select the existing server. | - | **Azure SQL Database** | Select the existing database in SQL Database. | - | **Sync Directions** | Select **Bi-directional Sync**, **To the Hub**, or **From the Hub**. | - | **Username** and **Password** | Enter the existing credentials for the server on which the member database is located. Don't enter *new* credentials in this section. | - | **Use private link** | Choose a service managed private endpoint to establish a secure connection between the sync service and the member database. | - - Select **OK** and wait for the new sync member to be created and deployed. - - - -### To add a SQL Server database - -In the **Member Database** section, optionally add a SQL Server database to the sync group by selecting **Add an On-Premises Database**. The **Configure On-Premises** page opens where you can do the following things: - -1. Select **Choose the Sync Agent Gateway**. The **Select Sync Agent** page opens. - - :::image type="content" source="./media/sql-data-sync-sql-server-configure/steptwo-agent.png" alt-text = "Creating a sync agent"::: - -1. 
On the **Choose the Sync Agent** page, choose whether to use an existing agent or create an agent. - - If you choose **Existing agents**, select the existing agent from the list. - - If you choose **Create a new agent**, do the following things: - - 1. Download the data sync agent from the link provided and install it on the computer where the SQL Server is located. You can also download the agent directly from [Azure SQL Data Sync Agent](https://www.microsoft.com/download/details.aspx?id=27693). - - > [!IMPORTANT] - > You have to open outbound TCP port 1433 in the firewall to let the client agent communicate with the server. - - 1. Enter a name for the agent. - - 1. Select **Create and Generate Key** and copy the agent key to the clipboard. - - 1. Select **OK** to close the **Select Sync Agent** page. - -1. On the SQL Server computer, locate and run the Client Sync Agent app. - - ![The data sync client agent app](./media/sql-data-sync-sql-server-configure/datasync-preview-clientagent.png) - - 1. In the sync agent app, select **Submit Agent Key**. The **Sync Metadata Database Configuration** dialog box opens. - - 1. In the **Sync Metadata Database Configuration** dialog box, paste in the agent key copied from the Azure portal. Also provide the existing credentials for the server on which the metadata database is located. (If you created a metadata database, this database is on the same server as the hub database.) Select **OK** and wait for the configuration to finish. - - ![Enter the agent key and server credentials](./media/sql-data-sync-sql-server-configure/datasync-preview-agent-enterkey.png) - - > [!NOTE] - > If you get a firewall error, create a firewall rule on Azure to allow incoming traffic from the SQL Server computer. You can create the rule manually in the portal or in SQL Server Management Studio (SSMS). In SSMS, connect to the hub database on Azure by entering its name as .database.windows.net. - - 1. 
Select **Register** to register a SQL Server database with the agent. The **SQL Server Configuration** dialog box opens. - - ![Add and configure a SQL Server database](./media/sql-data-sync-sql-server-configure/datasync-preview-agent-adddb.png) - - 1. In the **SQL Server Configuration** dialog box, choose to connect using SQL Server authentication or Windows authentication. If you choose SQL Server authentication, enter the existing credentials. Provide the SQL Server name and the name of the database that you want to sync and select **Test connection** to test your settings. Then select **Save** and the registered database appears in the list. - - ![SQL Server database is now registered](./media/sql-data-sync-sql-server-configure/datasync-preview-agent-dbadded.png) - - 1. Close the Client Sync Agent app. - -1. In the portal, on the **Configure On-Premises** page, select **Select the Database**. - -1. On the **Select Database** page, in the **Sync Member Name** field, provide a name for the new sync member. This name is distinct from the name of the database itself. Select the database from the list. In the **Sync Directions** field, select **Bi-directional Sync**, **To the Hub**, or **From the Hub**. - - ![Select the on premises database](./media/sql-data-sync-sql-server-configure/datasync-preview-selectdb.png) - -1. Select **OK** to close the **Select Database** page. Then select **OK** to close the **Configure On-Premises** page and wait for the new sync member to be created and deployed. Finally, select **OK** to close the **Select sync members** page. - -> [!NOTE] -> To connect to SQL Data Sync and the local agent, add your user name to the role *DataSync_Executor*. Data Sync creates this role on the SQL Server instance. - -## Configure sync group - -After the new sync group members are created and deployed, go to the **Tables** section in the **Database Sync Group** page. 
- -![Step 3 settings](./media/sql-data-sync-sql-server-configure/configure-sync-group.png) - -1. On the **Tables** page, select a database from the list of sync group members and select **Refresh schema**. Please expect a few minutes delay in refresh schema, the delay might be a few minutes longer if using private link. - -1. From the list, select the tables you want to sync. By default, all columns are selected, so disable the checkbox for the columns you don't want to sync. Be sure to leave the primary key column selected. - -1. Select **Save**. - -1. By default, databases are not synced until scheduled or manually run. To run a manual sync, navigate to your database in SQL Database in the Azure portal, select **Sync to other databases**, and select the sync group. The **Data Sync** page opens. Select **Sync**. - - ![Manual sync](./media/sql-data-sync-sql-server-configure/datasync-sync.png) - -## FAQ - -**Does SQL Data Sync fully create tables?** - -If sync schema tables are missing in the destination database, SQL Data Sync creates them with the columns you selected. However, this doesn't result in a full-fidelity schema for the following reasons: - -- Only columns you select are created in the destination table. Columns not selected are ignored. -- Only selected column indexes are created in the destination table. For columns not selected, those indexes are ignored. -- Indexes on XML type columns aren't created. -- CHECK constraints aren't created. -- Triggers on the source tables aren't created. -- Views and stored procedures aren't created. - -Because of these limitations, we recommend the following things: - -- For production environments, create the full-fidelity schema yourself. -- When experimenting with the service, use the auto-provisioning feature. - -**Why do I see tables I didn't create?** - -Data Sync creates additional tables in the database for change tracking. Don't delete these or Data Sync stops working. 
- -**Is my data convergent after a sync?** - -Not necessarily. Take a sync group with a hub and three spokes (A, B, and C) where synchronizations are Hub to A, Hub to B, and Hub to C. If a change is made to database A *after* the Hub to A sync, that change isn't written to database B or database C until the next sync task. - -**How do I get schema changes into a sync group?** - -Make and propagate all schema changes manually. - -1. Replicate the schema changes manually to the hub and to all sync members. -1. Update the sync schema. - -For adding new tables and columns: - -New tables and columns don't impact the current sync and Data Sync ignores them until they're added to the sync schema. When adding new database objects, follow the sequence: - -1. Add new tables or columns to the hub and to all sync members. -1. Add new tables or columns to the sync schema. -1. Begin inserting values into the new tables and columns. - -For changing the data type of a column: - -When you change the data type of an existing column, Data Sync continues to work as long as the new values fit the original data type defined in the sync schema. For example, if you change the type in the source database from **int** to **bigint**, Data Sync continues to work until you insert a value too large for the **int** data type. To complete the change, replicate the schema change manually to the hub and to all sync members, then update the sync schema. - -**How can I export and import a database with Data Sync?** - -After you export a database as a *.bacpac* file and import the file to create a database, do the following to use Data Sync in the new database: - -1. Clean up the Data Sync objects and additional tables on the new database by using [this script](https://github.com/vitomaz-msft/DataSyncMetadataCleanup/blob/master/Data%20Sync%20complete%20cleanup.sql). The script deletes all the required Data Sync objects from the database. -1. Recreate the sync group with the new database. 
If you no longer need the old sync group, delete it. - -**Where can I find information on the client agent?** - -For frequently asked questions about the client agent, see [Agent FAQ](sql-data-sync-agent-overview.md#agent-faq). - -**Is it necessary to manually approve the link before I can start using it?** - -Yes, you must manually approve the service managed private endpoint in the Private endpoint connections page of the Azure portal during the sync group deployment or by using PowerShell. - -**Why do I get a firewall error when the Sync job is provisioning my Azure database?** - -This may happen because Azure resources are not allowed to access your server. Ensure that the firewall on the Azure database has the "Allow Azure services and resources to access this server" setting set to "Yes". - - -## Next steps - -Congratulations. You've created a sync group that includes both a SQL Database instance and a SQL Server database. - -For more info about SQL Data Sync, see: - -- [Data Sync Agent for Azure SQL Data Sync](sql-data-sync-agent-overview.md) -- [Best practices](sql-data-sync-best-practices.md) and [How to troubleshoot issues with Azure SQL Data Sync](sql-data-sync-troubleshoot.md) -- [Monitor SQL Data Sync with Azure Monitor logs](./monitor-tune-overview.md) -- [Update the sync schema with Transact-SQL](sql-data-sync-update-sync-schema.md) or [PowerShell](scripts/update-sync-schema-in-sync-group.md) - -For more info about SQL Database, see: - -- [SQL Database Overview](sql-database-paas-overview.md) -- [Database Lifecycle Management](/previous-versions/sql/sql-server-guides/jj907294(v=sql.110)) diff --git a/articles/azure-sql/database/sql-data-sync-troubleshoot.md b/articles/azure-sql/database/sql-data-sync-troubleshoot.md deleted file mode 100644 index bda5e8b22cd3d..0000000000000 --- a/articles/azure-sql/database/sql-data-sync-troubleshoot.md +++ /dev/null @@ -1,255 +0,0 @@ ---- -title: Troubleshoot SQL Data Sync -description: "Learn how to identify, 
troubleshoot, and resolve common issues with SQL Data Sync in Azure." -services: sql-database -ms.service: sql-database -ms.subservice: sql-data-sync -ms.custom: data sync, sqldbrb=1 -ms.devlang: -ms.topic: troubleshooting -author: rothja -ms.author: jroth -ms.reviewer: kendralittle, mathoma -ms.date: 12/20/2018 ---- -# Troubleshoot issues with SQL Data Sync -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This article describes how to troubleshoot known issues with SQL Data Sync in Azure. If there is a resolution for an issue, it's provided here. - -For an overview of SQL Data Sync, see [Sync data across multiple cloud and on-premises databases with SQL Data Sync in Azure](sql-data-sync-data-sql-server-sql-database.md). - -> [!IMPORTANT] -> SQL Data Sync does **not** support Azure SQL Managed Instance at this time. - -## Sync issues - -- [Sync fails in the portal UI for on-premises databases that are associated with the client agent](#sync-fails) - -- [My sync group is stuck in the processing state](#sync-stuck) - -- [I see erroneous data in my tables](#sync-baddata) - -- [I see inconsistent primary key data after a successful sync](#sync-pkdata) - -- [I see a significant degradation in performance](#sync-perf) - -- [I see this message: "Cannot insert the value NULL into the column \. Column does not allow nulls." What does this mean, and how can I fix it?](#sync-nulls) - -- [How does Data Sync handle circular references? That is, when the same data is synced in multiple sync groups, and keeps changing as a result?](#sync-circ) - -### Sync fails in the portal UI for on-premises databases that are associated with the client agent - -Sync fails in the SQL Data Sync portal UI for on-premises databases that are associated with the client agent. On the local computer that's running the agent, you see System.IO.IOException errors in the Event Log. The errors say that the disk has insufficient space. - -- **Cause**. The drive has insufficient space. 
- -- **Resolution**. Create more space on the drive on which the %TEMP% directory is located. - -### My sync group is stuck in the processing state - -A sync group in SQL Data Sync has been in the processing state for a long time. It doesn't respond to the **stop** command, and the logs show no new entries. - -Any of the following conditions might result in a sync group being stuck in the processing state: - -- **Cause**. The client agent is offline - -- **Resolution**. Be sure that the client agent is online and then try again. - -- **Cause**. The client agent is uninstalled or missing. - -- **Resolution**. If the client agent is uninstalled or otherwise missing: - - 1. Remove the agent XML file from the SQL Data Sync installation folder, if the file exists. - 1. Install the agent on an on-premises computer (it can be the same or a different computer). Then, submit the agent key that's generated in the portal for the agent that's showing as offline. - -- **Cause**. The SQL Data Sync service is stopped. - -- **Resolution**. Restart the SQL Data Sync service. - - 1. In the **Start** menu, search for **Services**. - 1. In the search results, select **Services**. - 1. Find the **SQL Data Sync** service. - 1. If the service status is **Stopped**, right-click the service name, and then select **Start**. - -> [!NOTE] -> If the preceding information doesn't move your sync group out of the processing state, Microsoft Support can reset the status of your sync group. To have your sync group status reset, in the [Microsoft Q&A question page for Azure SQL Database](/answers/topics/azure-sql-database.html), create a post. In the post, include your subscription ID and the sync group ID for the group that needs to be reset. A Microsoft Support engineer will respond to your post, and will let you know when the status has been reset. 
- -### I see erroneous data in my tables - -If tables that have the same name but which are from different database schemas are included in a sync, you see erroneous data in the tables after the sync. - -- **Cause**. The SQL Data Sync provisioning process uses the same tracking tables for tables that have the same name but which are in different schemas. Because of this, changes from both tables are reflected in the same tracking table. This causes erroneous data changes during sync. - -- **Resolution**. Ensure that the names of tables that are involved in a sync are different, even if the tables belong to different schemas in a database. - -### I see inconsistent primary key data after a successful sync - -A sync is reported as successful, and the log shows no failed or skipped rows, but you observe that primary key data is inconsistent among the databases in the sync group. - -- **Cause**. This result is by design. Changes in any primary key column result in inconsistent data in the rows where the primary key was changed. - -- **Resolution**. To prevent this issue, ensure that no data in a primary key column is changed. To fix this issue after it has occurred, delete the row that has inconsistent data from all endpoints in the sync group. Then, reinsert the row. - -### I see a significant degradation in performance - -Your performance degrades significantly, possibly to the point where you can't even open the Data Sync UI. - -- **Cause**. The most likely cause is a sync loop. A sync loop occurs when a sync by sync group A triggers a sync by sync group B, which then triggers a sync by sync group A. The actual situation might be more complex, and it might involve more than two sync groups in the loop. The issue is that there is a circular triggering of syncing that's caused by sync groups overlapping one another. - -- **Resolution**. The best fix is prevention. Ensure that you don't have circular references in your sync groups. 
Any row that is synced by one sync group can't be synced by another sync group. - -### I see this message: "Cannot insert the value NULL into the column \. Column does not allow nulls." What does this mean, and how can I fix it? -This error message indicates that one of the two following issues has occurred: -- A table doesn't have a primary key. To fix this issue, add a primary key to all the tables that you're syncing. -- There's a WHERE clause in your CREATE INDEX statement. Data Sync doesn't handle this condition. To fix this issue, remove the WHERE clause or manually make the changes to all databases. - -### How does Data Sync handle circular references? That is, when the same data is synced in multiple sync groups, and keeps changing as a result? -Data Sync doesn't handle circular references. Be sure to avoid them. - -## Client agent issues - -To troubleshoot issues with the client agent, see [Troubleshoot Data Sync Agent issues](sql-data-sync-agent-overview.md#agent-tshoot). - -## Setup and maintenance issues - -- [I get a "disk out of space" message](#setup-space) - -- [I can't delete my sync group](#setup-delete) - -- [I can't unregister a SQL Server database](#setup-unreg) - -- [I don't have sufficient privileges to start system services](#setup-perms) - -- [A database has an "Out-of-Date" status](#setup-date) - -- [A sync group has an "Out-of-Date" status](#setup-date2) - -- [A sync group can't be deleted within three minutes of uninstalling or stopping the agent](#setup-delete2) - -- [What happens when I restore a lost or corrupted database?](#setup-restore) - -### I get a "disk out of space" message - -- **Cause**. The "disk out of space" message might appear if leftover files need to be deleted. This might be caused by antivirus software, or files are open when delete operations are attempted. - -- **Resolution**. Manually delete the sync files that are in the %temp% folder (`del \*sync\* /s`). Then, delete the subdirectories in the %temp% folder. 
- -> [!IMPORTANT] -> Don't delete any files while sync is in progress. - -### I can't delete my sync group - -Your attempt to delete a sync group fails. Any of the following scenarios might result in failure to delete a sync group: - -- **Cause**. The client agent is offline. - -- **Resolution**. Ensure that the client agent is online and then try again. - -- **Cause**. The client agent is uninstalled or missing. - -- **Resolution**. If the client agent is uninstalled or otherwise missing: - a. Remove the agent XML file from the SQL Data Sync installation folder, if the file exists. - b. Install the agent on an on-premises computer (it can be the same or a different computer). Then, submit the agent key that's generated in the portal for the agent that's showing as offline. - -- **Cause**. A database is offline. - -- **Resolution**. Ensure that your databases are all online. - -- **Cause**. The sync group is provisioning or syncing. - -- **Resolution**. Wait until the provisioning or sync process finishes and then retry deleting the sync group. - -### I can't unregister a SQL Server database - -- **Cause**. Most likely, you are trying to unregister a database that has already been deleted. - -- **Resolution**. To unregister a SQL Server database, select the database and then select **Force Delete**. - - If this operation fails to remove the database from the sync group: - - 1. Stop and then restart the client agent host service: - a. Select the **Start** menu. - b. In the search box, enter **services.msc**. - c. In the **Programs** section of the search results pane, double-click **Services**. - d. Right-click the **SQL Data Sync** service. - e. If the service is running, stop it. - f. Right-click the service, and then select **Start**. - g. Check whether the database is still registered. If it is no longer registered, you're done. Otherwise, proceed with the next step. - 1. Open the client agent app (SqlAzureDataSyncAgent). - 1. 
Select **Edit Credentials**, and then enter the credentials for the database. - 1. Proceed with unregistration. - -### I don't have sufficient privileges to start system services - -- **Cause**. This error occurs in two situations: - - The user name and/or the password are incorrect. - - The specified user account doesn't have sufficient privileges to log on as a service. - -- **Resolution**. Grant log-on-as-a-service credentials to the user account: - - 1. Go to **Start** > **Control Panel** > **Administrative Tools** > **Local Security Policy** > **Local Policy** > **User Rights Management**. - 1. Select **Log on as a service**. - 1. In the **Properties** dialog box, add the user account. - 1. Select **Apply**, and then select **OK**. - 1. Close all windows. - -### A database has an "Out-of-Date" status - -- **Cause**. SQL Data Sync removes databases that have been offline from the service for 45 days or more (as counted from the time the database went offline). If a database is offline for 45 days or more and then comes back online, its status is **Out-of-Date**. - -- **Resolution**. You can avoid an **Out-of-Date** status by ensuring that none of your databases go offline for 45 days or more. - - If a database's status is **Out-of-Date**: - - 1. Remove the database that has an **Out-of-Date** status from the sync group. - 1. Add the database back in to the sync group. - - > [!WARNING] - > You lose all changes made to this database while it was offline. - -### A sync group has an "Out-of-Date" status - -- **Cause**. If one or more changes fail to apply for the whole retention period of 45 days, a sync group can become outdated. - -- **Resolution**. To avoid an **Out-of-Date** status for a sync group, examine the results of your sync jobs in the history viewer on a regular basis. Investigate and resolve any changes that fail to apply. - - If a sync group's status is **Out-of-Date**, delete the sync group and then re-create it. 
- -### A sync group can't be deleted within three minutes of uninstalling or stopping the agent - -You can't delete a sync group within three minutes of uninstalling or stopping the associated SQL Data Sync client agent. - -- **Resolution**. - - 1. Remove a sync group while the associated sync agents are online (recommended). - 1. If the agent is offline but is installed, bring it online on the on-premises computer. Wait for the status of the agent to appear as **Online** in the SQL Data Sync portal. Then, remove the sync group. - 1. If the agent is offline because it was uninstalled: - a. Remove the agent XML file from the SQL Data Sync installation folder, if the file exists. - b. Install the agent on an on-premises computer (it can be the same or a different computer). Then, submit the agent key that's generated in the portal for the agent that's showing as offline. - c. Try to delete the sync group. - -### What happens when I restore a lost or corrupted database? - -If you restore a lost or corrupted database from a backup, there might be a non-convergence of data in the sync groups to which the database belongs. 
- -## Next steps -For more information about SQL Data Sync, see: - -- Overview - [Sync data across multiple cloud and on-premises databases with SQL Data Sync in Azure](sql-data-sync-data-sql-server-sql-database.md) -- Set up Data Sync - - In the portal - [Tutorial: Set up SQL Data Sync to sync data between Azure SQL Database and SQL Server](sql-data-sync-sql-server-configure.md) - - With PowerShell - - [Use PowerShell to sync between multiple databases in Azure SQL Database](scripts/sql-data-sync-sync-data-between-sql-databases.md) - - [Use PowerShell to sync between a database in Azure SQL Database and a database in a SQL Server instance](scripts/sql-data-sync-sync-data-between-azure-onprem.md) -- Data Sync Agent - [Data Sync Agent for SQL Data Sync in Azure](sql-data-sync-agent-overview.md) -- Best practices - [Best practices for SQL Data Sync in Azure](sql-data-sync-best-practices.md) -- Monitor - [Monitor SQL Data Sync with Azure Monitor logs](./monitor-tune-overview.md) -- Update the sync schema - - With Transact-SQL - [Automate the replication of schema changes in SQL Data Sync in Azure](sql-data-sync-update-sync-schema.md) - - With PowerShell - [Use PowerShell to update the sync schema in an existing sync group](scripts/update-sync-schema-in-sync-group.md) - -For more information about SQL Database, see: - -- [SQL Database Overview](sql-database-paas-overview.md) -- [Database Lifecycle Management](/previous-versions/sql/sql-server-guides/jj907294(v=sql.110)) \ No newline at end of file diff --git a/articles/azure-sql/database/sql-data-sync-update-sync-schema.md b/articles/azure-sql/database/sql-data-sync-update-sync-schema.md deleted file mode 100644 index facf6456dd5d7..0000000000000 --- a/articles/azure-sql/database/sql-data-sync-update-sync-schema.md +++ /dev/null @@ -1,231 +0,0 @@ ---- -title: Automate the replication of schema changes in SQL Data Sync -description: Learn how to automate the replication of schema changes in Azure SQL Data Sync. 
-services: sql-database -ms.service: sql-database -ms.subservice: sql-data-sync -ms.custom: data sync -ms.devlang: -ms.topic: how-to -author: rothja -ms.author: jroth -ms.reviewer: kendralittle, mathoma -ms.date: 11/14/2018 ---- -# Automate the replication of schema changes in Azure SQL Data Sync -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -SQL Data Sync lets users synchronize data between databases in Azure SQL Database and SQL Server instances in one direction or in both directions. One of the current limitations of SQL Data Sync is a lack of support for the replication of schema changes. Every time you change the table schema, you need to apply the changes manually on all endpoints, including the hub and all members, and then update the sync schema. - -This article introduces a solution to automatically replicate schema changes to all SQL Data Sync endpoints. - -1. This solution uses a DDL trigger to track schema changes. -1. The trigger inserts the schema change commands in a tracking table. -1. This tracking table is synced to all endpoints using the Data Sync service. -1. DML triggers after insertion are used to apply the schema changes on the other endpoints. - -This article uses ALTER TABLE as an example of a schema change, but this solution also works for other types of schema changes. - -> [!IMPORTANT] -> We recommend that you read this article carefully, especially the sections about [Troubleshooting](#troubleshoot) and [Other considerations](#other), before you start to implement automated schema change replication in your sync environment. We also recommend that you read [Sync data across multiple cloud and on-premises databases with SQL Data Sync](sql-data-sync-data-sql-server-sql-database.md). Some database operations may break the solution described in this article. Additional domain knowledge of SQL Server and Transact-SQL may be required to troubleshoot those issues. 
- -![Automating the replication of schema changes](./media/sql-data-sync-update-sync-schema/automate-schema-changes.png) - -## Set up automated schema change replication - -### Create a table to track schema changes - -Create a table to track schema changes in all databases in the sync group: - -```sql -CREATE TABLE SchemaChanges ( -ID bigint IDENTITY(1,1) PRIMARY KEY, -SqlStmt nvarchar(max), -[Description] nvarchar(max) -) -``` - -This table has an identity column to track the order of schema changes. You can add more fields to log more information if needed. - -### Create a table to track the history of schema changes - -On all endpoints, create a table to track the ID of the most recently applied schema change command. - -```sql -CREATE TABLE SchemaChangeHistory ( -LastAppliedId bigint PRIMARY KEY -) -GO - -INSERT INTO SchemaChangeHistory VALUES (0) -``` - -### Create an ALTER TABLE DDL trigger in the database where schema changes are made - -Create a DDL trigger for ALTER TABLE operations. You only need to create this trigger in the database where schema changes are made. To avoid conflicts, only allow schema changes in one database in a sync group. - -```sql -CREATE TRIGGER AlterTableDDLTrigger -ON DATABASE -FOR ALTER_TABLE -AS - --- You can add your own logic to filter ALTER TABLE commands instead of replicating all of them. - -IF NOT (EVENTDATA().value('(/EVENT_INSTANCE/SchemaName)[1]', 'nvarchar(512)') like 'DataSync') - -INSERT INTO SchemaChanges (SqlStmt, Description) - VALUES (EVENTDATA().value('(/EVENT_INSTANCE/TSQLCommand/CommandText)[1]', 'nvarchar(max)'), 'From DDL trigger') -``` - -The trigger inserts a record in the schema change tracking table for each ALTER TABLE command. This example adds a filter to avoid replicating schema changes made under schema **DataSync**, because these are most likely made by the Data Sync service. Add more filters if you only want to replicate certain types of schema changes. 
- -You can also add more triggers to replicate other types of schema changes. For example, create CREATE_PROCEDURE, ALTER_PROCEDURE and DROP_PROCEDURE triggers to replicate changes to stored procedures. - -### Create a trigger on other endpoints to apply schema changes during insertion - -This trigger executes the schema change command when it is synced to other endpoints. You need to create this trigger on all the endpoints, except the one where schema changes are made (that is, in the database where the DDL trigger `AlterTableDDLTrigger` is created in the previous step). - -```sql -CREATE TRIGGER SchemaChangesTrigger -ON SchemaChanges -AFTER INSERT -AS -DECLARE @lastAppliedId bigint -DECLARE @id bigint -DECLARE @sqlStmt nvarchar(max) -SELECT TOP 1 @lastAppliedId=LastAppliedId FROM SchemaChangeHistory -SELECT TOP 1 @id = id, @SqlStmt = SqlStmt FROM SchemaChanges WHERE id > @lastAppliedId ORDER BY id -IF (@id = @lastAppliedId + 1) -BEGIN - EXEC sp_executesql @SqlStmt - UPDATE SchemaChangeHistory SET LastAppliedId = @id - WHILE (1 = 1) - BEGIN - SET @id = @id + 1 - IF exists (SELECT id FROM SchemaChanges WHERE ID = @id) - BEGIN - SELECT @sqlStmt = SqlStmt FROM SchemaChanges WHERE ID = @id - EXEC sp_executesql @SqlStmt - UPDATE SchemaChangeHistory SET LastAppliedId = @id - END - ELSE - BREAK; - END -END -``` - -This trigger runs after the insertion and checks whether the current command should run next. The code logic ensures that no schema change statement is skipped, and all changes are applied even if the insertion is out of order. - -### Sync the schema change tracking table to all endpoints - -You can sync the schema change tracking table to all endpoints using the existing sync group or a new sync group. Make sure the changes in the tracking table can be synced to all endpoints, especially when you're using one-direction sync. - -Don't sync the schema change history table, since that table maintains different state on different endpoints. 
- -### Apply the schema changes in a sync group - -Only schema changes made in the database where the DDL trigger is created are replicated. Schema changes made in other databases are not replicated. - -After the schema changes are replicated to all endpoints, you also need to take extra steps to update the sync schema to start or stop syncing the new columns. - -#### Add new columns - -1. Make the schema change. - -1. Avoid any data change where the new columns are involved until you've completed the step that creates the trigger. - -1. Wait until the schema changes are applied to all endpoints. - -1. Refresh the database schema and add the new column to the sync schema. - -1. Data in the new column is synced during next sync operation. - -#### Remove columns - -1. Remove the columns from the sync schema. Data Sync stops syncing data in these columns. - -1. Make the schema change. - -1. Refresh the database schema. - -#### Update data types - -1. Make the schema change. - -1. Wait until the schema changes are applied to all endpoints. - -1. Refresh the database schema. - -1. If the new and old data types are not fully compatible - for example, if you change from `int` to `bigint` - sync may fail before the steps that create the triggers are completed. Sync succeeds after a retry. - -#### Rename columns or tables - -Renaming columns or tables makes Data Sync stop working. Create a new table or column, backfill the data, and then delete the old table or column instead of renaming. - -#### Other types of schema changes - -For other types of schema changes - for example, creating stored procedures or dropping an index- updating the sync schema is not required. - -## Troubleshoot automated schema change replication - -The replication logic described in this article stops working in some situations- for example, if you made a schema change in an on-premises database which is not supported in Azure SQL Database. 
In that case, syncing the schema change tracking table fails. You need to fix this problem manually: - -1. Disable the DDL trigger and avoid any further schema changes until the issue is fixed. - -1. In the endpoint database where the issue is happening, disable the AFTER INSERT trigger on the endpoint where the schema change can't be made. This action allows the schema change command to be synced. - -1. Trigger sync to sync the schema change tracking table. - -1. In the endpoint database where the issue is happening, query the schema change history table to get the ID of the last applied schema change command. - -1. Query the schema change tracking table to list all the commands with an ID greater than the ID value you retrieved in the previous step. - - a. Ignore those commands that can't be executed in the endpoint database. You need to deal with the schema inconsistency. Revert the original schema changes if the inconsistency impacts your application. - - b. Manually apply those commands that should be applied. - -1. Update the schema change history table and set the last applied ID to the correct value. - -1. Double-check whether the schema is up-to-date. - -1. Re-enable the AFTER INSERT trigger disabled in the second step. - -1. Re-enable the DDL trigger disabled in the first step. - -If you want to clean up the records in the schema change tracking table, use DELETE instead of TRUNCATE. Never reseed the identity column in the schema change tracking table by using DBCC CHECKIDENT. You can create new schema change tracking tables and update the table name in the DDL trigger if reseeding is required. - -## Other considerations - -- Database users who configure the hub and member databases need to have enough permission to execute the schema change commands. - -- You can add more filters in the DDL trigger to only replicate schema changes in selected tables or operations. - -- You can only make schema changes in the database where the DDL trigger is created. 
- -- If you are making a change in a SQL Server database, make sure the schema change is supported in Azure SQL Database. - -- If schema changes are made in databases other than the database where the DDL trigger is created, the changes are not replicated. To avoid this issue, you can create DDL triggers to block changes on other endpoints. - -- If you need to change the schema of the schema change tracking table, disable the DDL trigger before you make the change, and then manually apply the change to all endpoints. Updating the schema in an AFTER INSERT trigger on the same table does not work. - -- Don't reseed the identity column by using DBCC CHECKIDENT. - -- Don't use TRUNCATE to clean up data in the schema change tracking table. - -## Next steps - -For more info about SQL Data Sync, see: - -- Overview - [Sync data across multiple cloud and on-premises databases with Azure SQL Data Sync](sql-data-sync-data-sql-server-sql-database.md) -- Set up Data Sync - - In the portal - [Tutorial: Set up SQL Data Sync to sync data between Azure SQL Database and SQL Server](sql-data-sync-sql-server-configure.md) - - With PowerShell - - [Use PowerShell to sync between multiple databases in Azure SQL Database](scripts/sql-data-sync-sync-data-between-sql-databases.md) - - [Use PowerShell to sync between a database in Azure SQL Database and a database in a SQL Server instance](scripts/sql-data-sync-sync-data-between-azure-onprem.md) -- Data Sync Agent - [Data Sync Agent for Azure SQL Data Sync](sql-data-sync-agent-overview.md) -- Best practices - [Best practices for Azure SQL Data Sync](sql-data-sync-best-practices.md) -- Monitor - [Monitor SQL Data Sync with Azure Monitor logs](./monitor-tune-overview.md) -- Troubleshoot - [Troubleshoot issues with Azure SQL Data Sync]() -- Update the sync schema - - With PowerShell - [Use PowerShell to update the sync schema in an existing sync group](scripts/update-sync-schema-in-sync-group.md) \ No newline at end of file diff --git 
a/articles/azure-sql/database/sql-database-paas-overview.md b/articles/azure-sql/database/sql-database-paas-overview.md deleted file mode 100644 index 89675cb035cca..0000000000000 --- a/articles/azure-sql/database/sql-database-paas-overview.md +++ /dev/null @@ -1,244 +0,0 @@ ---- -title: What is the Azure SQL Database service? -description: 'Get an introduction to SQL Database: technical details and capabilities of the Microsoft relational database management system (RDBMS) in the cloud.' -keywords: introduction to sql,intro to sql,what is sql database -services: sql-database -ms.service: sql-database -ms.subservice: service-overview -ms.custom: sqldbrb=3 -ms.devlang: -ms.topic: overview -author: LitKnd -ms.author: kendralittle -ms.reviewer: mathoma -ms.date: 03/18/2022 ---- - -# What is Azure SQL Database? -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -Azure SQL Database is a fully managed platform as a service (PaaS) database engine that handles most of the database management functions such as upgrading, patching, backups, and monitoring without user involvement. Azure SQL Database is always running on the latest stable version of the SQL Server database engine and patched OS with [99.99% availability](https://azure.microsoft.com/support/legal/sla/azure-sql-database). PaaS capabilities built into Azure SQL Database enable you to focus on the domain-specific database administration and optimization activities that are critical for your business. - -With Azure SQL Database, you can create a highly available and high-performance data storage layer for the applications and solutions in Azure. SQL Database can be the right choice for a variety of modern cloud applications because it enables you to process both relational data and [non-relational structures](../multi-model-features.md), such as graphs, JSON, spatial, and XML. 
- -Azure SQL Database is based on the latest stable version of the [Microsoft SQL Server database engine](/sql/sql-server/sql-server-technical-documentation?toc=%2fazure%2fsql-database%2ftoc.json). You can use advanced query processing features, such as [high-performance in-memory technologies](../in-memory-oltp-overview.md) and [intelligent query processing](/sql/relational-databases/performance/intelligent-query-processing?toc=%2fazure%2fsql-database%2ftoc.json). In fact, the newest capabilities of SQL Server are released first to SQL Database, and then to SQL Server itself. You get the newest SQL Server capabilities with no overhead for patching or upgrading, tested across millions of databases. - -SQL Database enables you to easily define and scale performance within two different purchasing models: a [vCore-based purchasing model](service-tiers-vcore.md) and a [DTU-based purchasing model](service-tiers-dtu.md). SQL Database is a fully managed service that has built-in high availability, backups, and other common maintenance operations. Microsoft handles all patching and updating of the SQL and operating system code. You don't have to manage the underlying infrastructure. - -If you're new to Azure SQL Database, check out the *Azure SQL Database Overview* video from our in-depth [Azure SQL video series](/shows/Azure-SQL-for-Beginners/?WT.mc_id=azuresql4beg_azuresql-ch9-niner): -> [!VIDEO https://docs.microsoft.com/shows/Azure-SQL-for-Beginners/Azure-SQL-Database-Overview-7-of-61/player] - - - -## Deployment models - -Azure SQL Database provides the following deployment options for a database: - -- [Single database](single-database-overview.md) represents a fully managed, isolated database. You might use this option if you have modern cloud applications and microservices that need a single reliable data source. 
A single database is similar to a [contained database](/sql/relational-databases/databases/contained-databases?toc=%2fazure%2fsql-database%2ftoc.json) in the [SQL Server database engine](/sql/sql-server/sql-server-technical-documentation?toc=%2fazure%2fsql-database%2ftoc.json). -- [Elastic pool](elastic-pool-overview.md) is a collection of [single databases](single-database-overview.md) with a shared set of resources, such as CPU or memory. Single databases can be moved into and out of an elastic pool. - -> [!IMPORTANT] -> To understand the feature differences between SQL Database, SQL Server, and Azure SQL Managed Instance, as well as the differences among different Azure SQL Database options, see [SQL Database features](features-comparison.md). - -SQL Database delivers predictable performance with multiple resource types, service tiers, and compute sizes. It provides dynamic scalability with no downtime, built-in intelligent optimization, global scalability and availability, and advanced security options. These capabilities allow you to focus on rapid app development and accelerating your time-to-market, rather than on managing virtual machines and infrastructure. SQL Database is currently in 38 datacenters around the world, so you can run your database in a datacenter near you. - -## Scalable performance and pools - -You can define the amount of resources assigned. -- With single databases, each database is isolated from others and is portable. Each has its own guaranteed amount of compute, memory, and storage resources. The amount of the resources assigned to the database is dedicated to that database, and isn't shared with other databases in Azure. You can dynamically [scale single database resources](single-database-scale.md) up and down. The single database option provides different compute, memory, and storage resources for different needs. For example, you can get 1 to 128 vCores, or 32 GB to 4 TB. 
The [Hyperscale service tier](service-tier-hyperscale.md) for single databases enables you to scale to 100 TB, with fast backup and restore capabilities. -- With elastic pools, you can assign resources that are shared by all databases in the pool. You can create a new database, or move the existing single databases into a resource pool to maximize the use of resources and save money. This option also gives you the ability to dynamically [scale elastic pool resources](elastic-pool-scale.md) up and down. - -You can build your first app on a small, single database at a low cost per month in the [General Purpose](service-tier-general-purpose.md) service tier. You can then change its service tier manually or programmatically at any time to the [Business Critical](service-tier-business-critical.md) or [Hyperscale](service-tier-hyperscale.md) service tier, to meet the needs of your solution. You can adjust performance without downtime to your app or to your customers. Dynamic scalability enables your database to transparently respond to rapidly changing resource requirements. You pay for only the resources that you need when you need them. - -*Dynamic scalability* is different from *autoscale*. Autoscale is when a service scales automatically based on criteria, whereas dynamic scalability allows for manual scaling without downtime. The single database option supports manual dynamic scalability, but not autoscale. For a more automatic experience, consider using elastic pools, which allow databases to share resources in a pool based on individual database needs. Another option is to use scripts that can help automate scalability for a single database. For an example, see [Use PowerShell to monitor and scale a single database](scripts/monitor-and-scale-database-powershell.md). 
- -### Purchasing models - -SQL Database offers the following purchasing models: -- The [vCore-based purchasing model](service-tiers-vcore.md) lets you choose the number of vCores, the amount of memory, and the amount and speed of storage. The vCore-based purchasing model also allows you to use [Azure Hybrid Benefit for SQL Server](https://azure.microsoft.com/pricing/hybrid-benefit/) to gain cost savings. For more information about the Azure Hybrid Benefit, see the [Frequently asked questions](#sql-database-frequently-asked-questions) section later in this article. - -- The [DTU-based purchasing model](service-tiers-dtu.md) offers a blend of compute, memory, and I/O resources in three service tiers, to support light to heavy database workloads. Compute sizes within each tier provide a different mix of these resources, to which you can add additional storage resources. - -### Service tiers - -Azure SQL Database offers three service tiers: -- The [General Purpose/Standard](service-tier-general-purpose.md) service tier is designed for common workloads. It offers budget-oriented balanced compute and storage options. -- The [Business Critical/Premium](service-tier-business-critical.md) service tier is designed for OLTP applications with high transaction rates and low latency I/O requirements. It offers the highest resilience to failures by using several isolated replicas. -- The [Hyperscale](service-tier-hyperscale.md) service tier is designed for most business workloads. Hyperscale provides great flexibility and high performance with independently scalable compute and storage resources. It offers higher resilience to failures by allowing configuration of more than one isolated database replica. - -### Serverless compute - -The [serverless compute tier](serverless-tier-overview.md) is available within the vCore-based purchasing model when you select the [General Purpose service tier](service-tier-general-purpose.md). 
- -The serverless compute tier automatically scales compute based on workload demand, and bills for the amount of compute used per second. The serverless compute tier automatically pauses databases during inactive periods when only storage is billed, and automatically resumes databases when activity returns. - -### Elastic pools to maximize resource utilization - -For many businesses and applications, being able to create single databases and dial performance up or down on demand is enough, especially if usage patterns are relatively predictable. Unpredictable usage patterns can make it hard to manage costs and your business model. [Elastic pools](elastic-pool-overview.md) are designed to solve this problem. You allocate performance resources to a pool rather than an individual database. You pay for the collective performance resources of the pool rather than for single database performance. - - ![Graphic that shows elastic pools in basic, standard, and premium editions](./media/sql-database-paas-overview/sqldb_elastic_pools.png) - -With elastic pools, you don't need to focus on dialing database performance up and down as demand for resources fluctuates. The pooled databases consume the performance resources of the elastic pool as needed. Pooled databases consume but don't exceed the limits of the pool, so your cost remains predictable even if individual database usage doesn't. - -You can [add and remove databases to the pool](elastic-pool-overview.md), scaling your app from a handful of databases to thousands, all within a budget that you control. You can also control the minimum and maximum resources available to databases in the pool, to ensure that no database in the pool uses all the pool resources, and that every pooled database has a guaranteed minimum amount of resources. 
To learn more about design patterns for software as a service (SaaS) applications that use elastic pools, see [Design patterns for multi-tenant SaaS applications with SQL Database](saas-tenancy-app-design-patterns.md). - -Scripts can help with monitoring and scaling elastic pools. For an example, see [Use PowerShell to monitor and scale an elastic pool in Azure SQL Database](scripts/monitor-and-scale-pool-powershell.md). - - -### Blend single databases with pooled databases - -You can blend single databases with elastic pools, and change the service tiers of single databases and elastic pools to adapt to your situation. You can also mix and match other Azure services with SQL Database to meet your unique app design needs, drive cost and resource efficiencies, and unlock new business opportunities. - -## Extensive monitoring and alerting capabilities - -Azure SQL Database provides advanced monitoring and troubleshooting features that help you get deeper insights into workload characteristics. These features and tools include: - - The built-in monitoring capabilities provided by the latest version of the SQL Server database engine. They enable you to find real-time performance insights. - - PaaS monitoring capabilities provided by Azure that enable you to monitor and troubleshoot a large number of database instances. - -[Query Store](/sql/relational-databases/performance/best-practice-with-the-query-store), a built-in SQL Server monitoring feature, records the performance of your queries in real time, and enables you to identify the potential performance issues and the top resource consumers. [Automatic tuning and recommendations](automatic-tuning-overview.md) provide advice regarding the queries with the regressed performance and missing or duplicated indexes. Automatic tuning in SQL Database enables you to either manually apply the scripts that can fix the issues, or let SQL Database apply the fix. 
SQL Database can also test and verify that the fix provides some benefit, and retain or revert the change depending on the outcome. In addition to Query Store and automatic tuning capabilities, you can use standard [DMVs and XEvents](monitoring-with-dmvs.md) to monitor the workload performance. - -Azure provides [built-in performance monitoring](performance-guidance.md) and [alerting](alerts-insights-configure-portal.md) tools, combined with performance ratings, that enable you to monitor the status of thousands of databases. Using these tools, you can quickly assess the impact of scaling up or down, based on your current or projected performance needs. Additionally, SQL Database can [emit metrics and resource logs](metrics-diagnostic-telemetry-logging-streaming-export-configure.md) for easier monitoring. You can configure SQL Database to store resource usage, workers and sessions, and connectivity into one of these Azure resources: - -- **Azure Storage**: For archiving vast amounts of telemetry for a small price. -- **Azure Event Hubs**: For integrating SQL Database telemetry with your custom monitoring solution or hot pipelines. -- **Azure Monitor logs**: For a built-in monitoring solution with reporting, alerting, and mitigating capabilities. - -![Diagram of Azure monitoring architecture](./media/sql-database-paas-overview/architecture.png) - -## Availability capabilities - -Azure SQL Database enables your business to continue operating during disruptions. In a traditional SQL Server environment, you generally have at least two machines locally set up. These machines have exact, synchronously maintained, copies of the data to protect against a failure of a single machine or component. This environment provides high availability, but it doesn't protect against a natural disaster destroying your datacenter. 
- -Disaster recovery assumes that a catastrophic event is geographically localized enough to have another machine or set of machines with a copy of your data far away. In SQL Server, you can use Always On Availability Groups running in async mode to get this capability. People often don't want to wait for replication to happen that far away before committing a transaction, so there's potential for data loss when you do unplanned failovers. - -Databases in the Premium and Business Critical service tiers already [do something similar](high-availability-sla.md#premium-and-business-critical-service-tier-locally-redundant-availability) to the synchronization of an availability group. Databases in lower service tiers provide redundancy through storage by using a [different but equivalent mechanism](high-availability-sla.md#basic-standard-and-general-purpose-service-tier-locally-redundant-availability). Built-in logic helps protect against a single machine failure. The active geo-replication feature gives you the ability to protect against disaster where a whole region is destroyed. - -Azure Availability Zones tries to protect against the outage of a single datacenter building within a single region. It helps you protect against the loss of power or network to a building. In SQL Database, you place the different replicas in different availability zones (different buildings, effectively). - -In fact, the service level agreement [(SLA)](https://azure.microsoft.com/support/legal/sla/) of Azure, powered by a global network of Microsoft-managed datacenters, helps keep your app running 24/7. The Azure platform fully manages every database, and it guarantees no data loss and a high percentage of data availability. Azure automatically handles patching, backups, replication, failure detection, underlying potential hardware, software or network failures, deploying bug fixes, failovers, database upgrades, and other maintenance tasks. 
Standard availability is achieved by a separation of compute and storage layers. Premium availability is achieved by integrating compute and storage on a single node for performance, and then implementing technology similar to Always On Availability Groups. For a full discussion of the high availability capabilities of Azure SQL Database, see [SQL Database availability](high-availability-sla.md). - -In addition, SQL Database provides built-in [business continuity and global scalability](business-continuity-high-availability-disaster-recover-hadr-overview.md) features. These include: - -- [Automatic backups](automated-backups-overview.md): - - SQL Database automatically performs full, differential, and transaction log backups of databases to enable you to restore to any point in time. For single databases and pooled databases, you can configure SQL Database to store full database backups to Azure Storage for long-term backup retention. For managed instances, you can also perform copy-only backups for long-term backup retention. - -- [Point-in-time restores](recovery-using-backups.md): - - All SQL Database deployment options support recovery to any point in time within the automatic backup retention period for any database. -- [Active geo-replication](active-geo-replication-overview.md): - - The single database and pooled databases options allow you to configure up to four readable secondary databases in either the same or globally distributed Azure datacenters. For example, if you have a SaaS application with a catalog database that has a high volume of concurrent read-only transactions, use active geo-replication to enable global read scale. This removes bottlenecks on the primary that are due to read workloads. For managed instances, use auto-failover groups. -- [Auto-failover groups](auto-failover-group-overview.md): - - All SQL Database deployment options allow you to use failover groups to enable high availability and load balancing at global scale. 
This includes transparent geo-replication and failover of large sets of databases, elastic pools, and managed instances. Failover groups enable the creation of globally distributed SaaS applications, with minimal administration overhead. This leaves all the complex monitoring, routing, and failover orchestration to SQL Database. -- [Zone-redundant databases](high-availability-sla.md): - - SQL Database allows you to provision Premium or Business Critical databases or elastic pools across multiple availability zones. Because these databases and elastic pools have multiple redundant replicas for high availability, placing these replicas into multiple availability zones provides higher resilience. This includes the ability to recover automatically from the datacenter scale failures, without data loss. - -## Built-in intelligence - -With SQL Database, you get built-in intelligence that helps you dramatically reduce the costs of running and managing databases, and that maximizes both performance and security of your application. Running millions of customer workloads around the clock, SQL Database collects and processes a massive amount of telemetry data, while also fully respecting customer privacy. Various algorithms continuously evaluate the telemetry data so that the service can learn and adapt with your application. - -### Automatic performance monitoring and tuning - -SQL Database provides detailed insight into the queries that you need to monitor. SQL Database learns about your database patterns, and enables you to adapt your database schema to your workload. SQL Database provides [performance tuning recommendations](database-advisor-implement-performance-recommendations.md), where you can review tuning actions and apply them. - -However, constantly monitoring a database is a hard and tedious task, especially when you're dealing with many databases. 
[Intelligent Insights](intelligent-insights-overview.md) does this job for you by automatically monitoring SQL Database performance at scale. It informs you of performance degradation issues, it identifies the root cause of each issue, and it provides performance improvement recommendations when possible. - -Managing a huge number of databases might be impossible to do efficiently even with all available tools and reports that SQL Database and Azure provide. Instead of monitoring and tuning your database manually, you might consider delegating some of the monitoring and tuning actions to SQL Database by using [automatic tuning](automatic-tuning-overview.md). SQL Database automatically applies recommendations, tests, and verifies each of its tuning actions to ensure the performance keeps improving. This way, SQL Database automatically adapts to your workload in a controlled and safe way. Automatic tuning means that the performance of your database is carefully monitored and compared before and after every tuning action. If the performance doesn't improve, the tuning action is reverted. - -Many of our partners that run [SaaS multi-tenant apps](saas-tenancy-app-design-patterns.md) on top of SQL Database are relying on automatic performance tuning to make sure their applications always have stable and predictable performance. For them, this feature tremendously reduces the risk of having a performance incident in the middle of the night. In addition, because part of their customer base also uses SQL Server, they're using the same indexing recommendations provided by SQL Database to help their SQL Server customers. - -Two automatic tuning aspects are [available in SQL Database](automatic-tuning-overview.md): - -- **Automatic index management**: Identifies indexes that should be added in your database, and indexes that should be removed. -- **Automatic plan correction**: Identifies problematic plans and fixes SQL plan performance problems. 
- -### Adaptive query processing - -You can use [adaptive query processing](/sql/relational-databases/performance/intelligent-query-processing), including interleaved execution for multi-statement table-valued functions, batch mode memory grant feedback, and batch mode adaptive joins. Each of these adaptive query processing features applies similar "learn and adapt" techniques, helping further address performance issues related to historically intractable query optimization problems. - -## Advanced security and compliance - -SQL Database provides a range of [built-in security and compliance features](../../active-directory/identity-protection/concept-identity-protection-security-overview.md) to help your application meet various security and compliance requirements. - -> [!IMPORTANT] -> Microsoft has certified Azure SQL Database (all deployment options) against a number of compliance standards. For more information, see the [Microsoft Azure Trust Center](https://gallery.technet.microsoft.com/Overview-of-Azure-c1be3942), where you can find the most current list of SQL Database compliance certifications. - -### Advanced threat protection - -Microsoft Defender for SQL is a unified package for advanced SQL security capabilities. It includes functionality for managing your database vulnerabilities, and detecting anomalous activities that might indicate a threat to your database. It provides a single location for enabling and managing these capabilities. - -- [Vulnerability assessment](sql-vulnerability-assessment.md): - - This service can discover, track, and help you remediate potential database vulnerabilities. It provides visibility into your security state, and includes actionable steps to resolve security issues, and enhance your database fortifications. -- [Threat detection](threat-detection-configure.md): - - This feature detects anomalous activities that indicate unusual and potentially harmful attempts to access or exploit your database. 
It continuously monitors your database for suspicious activities, and provides immediate security alerts on potential vulnerabilities, SQL injection attacks, and anomalous database access patterns. Threat detection alerts provide details of the suspicious activity, and recommend action on how to investigate and mitigate the threat. - -### Auditing for compliance and security - -[Auditing](/azure/azure-sql/database/auditing-overview) tracks database events and writes them to an audit log in your Azure storage account. Auditing can help you maintain regulatory compliance, understand database activity, and gain insight into discrepancies and anomalies that might indicate business concerns or suspected security violations. - -### Data encryption - -SQL Database helps secure your data by providing encryption. For data in motion, it uses [transport layer security](https://support.microsoft.com/kb/3135244). For data at rest, it uses [transparent data encryption](/sql/relational-databases/security/encryption/transparent-data-encryption-azure-sql). For data in use, it uses [Always Encrypted](/sql/relational-databases/security/encryption/always-encrypted-database-engine). - -### Data discovery and classification - -[Data discovery and classification](data-discovery-and-classification-overview.md) provides capabilities built into Azure SQL Database for discovering, classifying, labeling, and protecting the sensitive data in your databases. It provides visibility into your database classification state, and tracks the access to sensitive data within the database and beyond its borders. - -### Azure Active Directory integration and multi-factor authentication - -SQL Database enables you to centrally manage identities of database user and other Microsoft services with [Azure Active Directory integration](authentication-aad-overview.md). This capability simplifies permission management and enhances security. 
Azure Active Directory supports [multi-factor authentication](authentication-mfa-ssms-overview.md) to increase data and application security, while supporting a single sign-in process. - -## Easy-to-use tools - -SQL Database makes building and maintaining applications easier and more productive. SQL Database allows you to focus on what you do best: building great apps. You can manage and develop in SQL Database by using tools and skills you already have. - -|Tool|Description| -|:---|:---| -|[The Azure portal](https://portal.azure.com/)|A web-based application for managing all Azure services.| -|[Azure Data Studio](/sql/azure-data-studio/)|A cross-platform database tool that runs on Windows, macOS, and Linux.| -|[SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms)|A free, downloadable client application for managing any SQL infrastructure, from SQL Server to SQL Database.| -|[SQL Server Data Tools in Visual Studio](/sql/ssdt/download-sql-server-data-tools-ssdt)|A free, downloadable client application for developing SQL Server relational databases, databases in Azure SQL Database, Integration Services packages, Analysis Services data models, and Reporting Services reports.| -|[Visual Studio Code](https://code.visualstudio.com/docs)|A free, downloadable, open-source code editor for Windows, macOS, and Linux. It supports extensions, including the [mssql extension](https://aka.ms/mssql-marketplace) for querying Microsoft SQL Server, Azure SQL Database, and Azure Synapse Analytics.| - -SQL Database supports building applications with Python, Java, Node.js, PHP, Ruby, and .NET on macOS, Linux, and Windows. SQL Database supports the same [connection libraries](connect-query-content-reference-guide.md#libraries) as SQL Server. - -[!INCLUDE [sql-database-create-manage-portal](../includes/sql-database-create-manage-portal.md)] - -## SQL Database frequently asked questions - -### Can I control when patching downtime occurs? 
- -The [maintenance window feature](maintenance-window.md) allows you to configure predictable maintenance window schedules for eligible databases in Azure SQL Database. [Maintenance window advance notifications](../database/advance-notifications.md) are available for databases configured to use a non-default [maintenance window](maintenance-window.md). - -### How do I plan for maintenance events? - -Patching is generally not noticeable if you [employ retry logic](develop-overview.md#resiliency) in your app. For more information, see [Planning for Azure maintenance events in Azure SQL Database](planned-maintenance.md). - -## Engage with the SQL Server engineering team - -- [DBA Stack Exchange](https://dba.stackexchange.com/questions/tagged/sql-server): Ask database administration questions. -- [Stack Overflow](https://stackoverflow.com/questions/tagged/sql-server): Ask development questions. -- [Microsoft Q&A question page](/answers/topics/azure-sql-database.html): Ask technical questions. -- [Feedback](https://aka.ms/sqlfeedback): Report bugs and request features. -- [Reddit](https://www.reddit.com/r/SQLServer/): Discuss SQL Server. - -## Next steps - -- See the [pricing page](https://azure.microsoft.com/pricing/details/sql-database/) for cost comparisons and calculators regarding single databases and elastic pools. 
-- See these quickstarts to get started: - - - [Create a database in the Azure portal](single-database-create-quickstart.md) - - [Create a database with the Azure CLI](az-cli-script-samples-content-guide.md) - - [Create a database using PowerShell](powershell-script-content-guide.md) - -- For a set of Azure CLI and PowerShell samples, see: - - [Azure CLI samples for SQL Database](az-cli-script-samples-content-guide.md) - - [Azure PowerShell samples for SQL Database](powershell-script-content-guide.md) - -- For information about new capabilities as they're announced, see [Azure Roadmap for SQL Database](https://azure.microsoft.com/roadmap/?category=databases). -- See the [Azure SQL Database blog](https://azure.microsoft.com/blog/topics/database), where SQL Server product team members blog about SQL Database news and features. diff --git a/articles/azure-sql/database/sql-database-vulnerability-assessment-rules-changelog.md b/articles/azure-sql/database/sql-database-vulnerability-assessment-rules-changelog.md deleted file mode 100644 index 613dff557b578..0000000000000 --- a/articles/azure-sql/database/sql-database-vulnerability-assessment-rules-changelog.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: SQL Vulnerability assessment rules changelog -description: "Changelog for SQL Vulnerability assessment rules with SQL Server, Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics" -services: sql-database -ms.service: sql-db-mi -ms.subservice: security -ms.topic: reference -author: davidtrigano -ms.author: datrigan -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 01/16/2022 ---- - -# SQL Vulnerability assessment rules changelog - -This article details the changes made to the SQL Vulnerability Assessment service rules. Rules that are updated, removed, or added will be outlined below. For an updated list of SQL Vulnerability assessment rules, see [SQL Vulnerability Assessment rules](sql-database-vulnerability-assessment-rules.md). 
- -## January 2022 - -|Rule ID |Rule Title |Change details | -|---------|---------|---------| -|VA1054 |Minimal set of principals should be members of fixed high impact database roles |Logic change | -|VA1220 |Database communication using TDS should be protected through TLS |Logic change | -|VA2120 |Features that may affect security should be disabled |Logic change | -|VA2129 |Changes to signed modules should be authorized |Logic change | - -## June 2021 - -|Rule ID |Rule Title |Change details | -|---------|---------|---------| -|VA1220 |Database communication using TDS should be protected through TLS |Logic change | -|VA2108 |Minimal set of principals should be members of fixed high impact database roles |Logic change | - - -## December 2020 - -|Rule ID |Rule Title |Change details | -|---------|---------|---------| -|VA1017 |Execute permissions on xp_cmdshell from all users (except dbo) should be revoked |Title and description change| -|VA1021 |Global temporary stored procedures should be removed |Removed rule | -|VA1024 |C2 Audit Mode should be enabled |Removed rule | -|VA1042 |Database ownership chaining should be disabled for all databases except for `master`, `msdb`, and `tempdb` |Description change | -|VA1044 |Remote Admin Connections should be disabled unless specifically required |Title and description change | -|VA1047 |Password expiration check should be enabled for all SQL logins |Title and description change | -|VA1051 |AUTO_CLOSE should be disabled on all databases |Description change | -|VA1053 |Account with default name 'sa' should be renamed or disabled |Description change | -|VA1067 |Database Mail XPs should be disabled when it is not in use | Title and description change | -|VA1068 |Server permissions shouldn't be granted directly to principals |Logic change | -|VA1069 |Permissions to select from system tables and views should be revoked from non-sysadmins |Removed rule | -|VA1090 |Ensure all Government Off The Shelf (GOTS) and Custom Stored 
Procedures are encrypted |Removed rule | -|VA1091 |Auditing of both successful and failed login attempts (default trace) should be enabled when 'Login auditing' is set up to track logins |Description change | -|VA1098 |Any Existing SSB or Mirroring endpoint should require AES connection |Logic change | -|VA1103 |Use only CLR with SAFE_ACCESS permission |Removed rule | -|VA1219 |Transparent data encryption should be enabled |Description change | -|VA1229 |Filestream setting in registry and in SQL Server configuration should match |Removed rule | -|VA1230 |Filestream should be disabled |Description change | -|VA1231 |Filestream should be disabled (SQL) |Removed rule | -|VA1234 |Common Criteria setting should be enabled |Removed rule | -|VA1235 |Replication XPs should be disabled |Title, description, and Logic change | -|VA1252 |List of events being audited and centrally managed via server audit specifications. |Removed rule | -|VA1253 |List of DB-scoped events being audited and centrally managed via server audit specifications. 
|Removed rule | -|VA1263 |List all the active audits in the system |Removed rule | -|VA1264 |Auditing of both successful and failed login attempts should be enabled |Description change | -|VA1266 |The 'MUST_CHANGE' option should be set on all SQL logins |Removed rule | -|VA1276 |Agent XPs feature should be disabled |Removed rule | -|VA1281 |All memberships for user-defined roles should be intended |Logic change | -|VA1282 |Orphan roles should be removed |Logic change | -|VA1286 |Database permissions shouldn't be granted directly to principals (OBJECT or COLUMN) |Removed rule | -|VA1288 |Sensitive data columns should be classified |Description change | -|VA2030 |Minimal set of principals should be granted database-scoped SELECT or EXECUTE permissions |Removed rule | -|VA2033 |Minimal set of principals should be granted database-scoped EXECUTE permission on objects or columns |Description change | -|VA2062 |Database-level firewall rules should not grant excessive access |Description change | -|VA2063 |Server-level firewall rules should not grant excessive access |Description change | -|VA2100 |Minimal set of principals should be granted high impact server-scoped permissions |Removed rule | -|VA2101 |Minimal set of principals should be granted medium impact server-scoped permissions |Removed rule | -|VA2102 |Minimal set of principals should be granted low impact server-scoped permissions |Removed rule | -|VA2103 |Unnecessary execute permissions on extended stored procedures should be revoked |Logic change | -|VA2104 |Execute permissions on extended stored procedures should be revoked from PUBLIC |Removed rule | -|VA2105 |Login password should not be easily guessed |Removed rule | -|VA2108 |Minimal set of principals should be members of fixed high impact database roles |Logic change | -|VA2111 |Sample databases should be removed |Logic change | -|VA2112 |Permissions from PUBLIC for Data Transformation Services (DTS) should be revoked |Removed rule | -|VA2113 |Data 
Transformation Services (DTS) permissions should only be granted to SSIS roles |Description and logic change | -|VA2114 |Minimal set of principals should be members of high impact fixed server roles |Logic change | -|VA2115 |Minimal set of principals should be members of medium impact fixed server roles |Removed rule | -|VA2120 |Features that may affect security should be disabled | Logic change | -|VA2121 |'OLE Automation Procedures' feature should be disabled |Title and description change | -|VA2123 |'Remote Access' feature should be disabled |Removed rule | -|VA2126 |Features that may affect security should be disabled |Title, description, and logic change | -|VA2127 |'External Scripts' feature should be disabled |Removed rule | -|VA2129 |Changes to signed modules should be authorized |Platform update | -|VA2130 |Track all users with access to the database |Description and logic change | - -## Next steps - -- [SQL Vulnerability Assessment rules](sql-database-vulnerability-assessment-rules.md) -- [SQL Vulnerability Assessment overview](sql-vulnerability-assessment.md) -- [Store Vulnerability Assessment scan results in a storage account accessible behind firewalls and VNets](sql-database-vulnerability-assessment-storage.md) diff --git a/articles/azure-sql/database/sql-database-vulnerability-assessment-rules.md b/articles/azure-sql/database/sql-database-vulnerability-assessment-rules.md deleted file mode 100644 index 02da2624ddb56..0000000000000 --- a/articles/azure-sql/database/sql-database-vulnerability-assessment-rules.md +++ /dev/null @@ -1,200 +0,0 @@ ---- -title: SQL Vulnerability Assessment rules reference guide -description: "List of rule titles and descriptions for SQL Server, Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics" -services: sql-database -ms.service: sql-db-mi -ms.subservice: security -ms.topic: reference -author: davidtrigano -ms.author: datrigan -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 05/06/2021 ---- - 
-# SQL Vulnerability Assessment rules reference guide -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] :::image type="icon" source="../media/applies-to/yes.png" border="false"::: SQL Server (all supported versions) - -This article lists the set of built-in rules that are used to flag security vulnerabilities and highlight deviations from best practices, such as misconfigurations, excessive permissions, and unprotected sensitive data. The rules are based on Microsoft's best practices and focus on the security issues that present the biggest risks to your database and its valuable data. They cover both database-level issues as well as server-level security issues, like server firewall settings and server-level permissions. These rules also represent many of the requirements from various regulatory bodies to meet their compliance standards. - -The rules shown in your database scans depend on the SQL version and platform that was scanned. - -To learn about how to implement Vulnerability Assessment in Azure, see [Implement Vulnerability Assessment](./sql-vulnerability-assessment.md#configure-vulnerability-assessment). - -For a list of changes to these rules, see [SQL Vulnerability Assessment rules changelog](sql-database-vulnerability-assessment-rules-changelog.md). - -## Rule categories - -SQL Vulnerability Assessment rules have five categories, which are in the following sections: - -- [Authentication and Authorization](#authentication-and-authorization) -- [Auditing and Logging](#auditing-and-logging) -- [Data Protection](#data-protection) -- [Installation Updates and Patches](#installation-updates-and-patches) -- [Surface Area Reduction](#surface-area-reduction) - -1 **SQL Server 2012+** refers to all versions of SQL Server 2012 and above. - -2 **SQL Server 2017+** refers to all versions of SQL Server 2017 and above. - -3 **SQL Server 2016+** refers to all versions of SQL Server 2016 and above. 
- -## Authentication and Authorization - -|Rule ID |Rule Title |Rule Severity |Rule Description |Platform | -|---------|---------|---------|---------|---------| -| VA1017 |Execute permissions on xp_cmdshell from all users (except dbo) should be revoked |High |The xp_cmdshell extended stored procedure spawns a Windows command shell, passing in a string for execution. This rule checks that no users (other than users with the CONTROL SERVER permission like members of the sysadmin server role) have permission to execute the xp_cmdshell extended stored procedure. |SQL Server 2012+1 | -|VA1020 |Database user GUEST should not be a member of any role |High |The guest user permits access to a database for any logins that are not mapped to a specific database user. This rule checks that no database roles are assigned to the Guest user. |SQL Server 2012+

    SQL Database | -|VA1042 |Database ownership chaining should be disabled for all databases except for `master`, `msdb`, and `tempdb` |High |Cross database ownership chaining is an extension of ownership chaining, except it does cross the database boundary. This rule checks that this option is disabled for all databases except for `master`, `msdb`, and `tempdb` . For `master`, `msdb`, and `tempdb`, cross database ownership chaining is enabled by default. |
    SQL Server 2012+

    SQL Managed Instance | -|VA1043 |Principal GUEST should not have access to any user database |Medium |The guest user permits access to a database for any logins that are not mapped to a specific database user. This rule checks that the guest user cannot connect to any database. |
    SQL Server 2012+

    SQL Managed Instance | -|VA1046 |CHECK_POLICY should be enabled for all SQL logins |Low |CHECK_POLICY option enables verifying SQL logins against the domain policy. This rule checks that CHECK_POLICY option is enabled for all SQL logins. |
    SQL Server 2012+

    SQL Managed Instance | -|VA1047 |Password expiration check should be enabled for all SQL logins |Low |Password expiration policies are used to manage the lifespan of a password. When SQL Server enforces password expiration policy, users are reminded to change old passwords, and accounts that have expired passwords are disabled. This rule checks that password expiration policy is enabled for all SQL logins. |
    SQL Server 2012+

    SQL Managed Instance | -|VA1048 |Database principals should not be mapped to the `sa` account |High |A database principal that is mapped to the `sa` account can be exploited by an attacker to elevate permissions to `sysadmin` |
    SQL Server 2012+

    SQL Managed Instance | -|VA1052 |Remove BUILTIN\Administrators as a server login |Low |The BUILTIN\Administrators group contains the Windows Local Administrators group. In older versions of Microsoft SQL Server this group has administrator rights by default. This rule checks that this group is removed from SQL Server. |
    SQL Server 2012+ | -|VA1053 |Account with default name `sa` should be renamed or disabled |Low |`sa` is a well-known account with principal ID 1. This rule verifies that the `sa` account is either renamed or disabled. |SQL Server 2012+

    SQL Managed Instance | -|VA1054 |Excessive permissions should not be granted to PUBLIC role on objects or columns |Low |Every SQL Server login belongs to the public server role. When a server principal has not been granted or denied specific permissions on a securable object the user inherits the permissions granted to public on that object. This rule displays a list of all securable objects or columns that are accessible to all users through the PUBLIC role. |
    SQL Server 2012+

    SQL Database | -|VA1058 |`sa` login should be disabled |High |`sa` is a well-known account with principal ID 1. This rule verifies that the `sa` account is disabled. |
    SQL Server 2012+

    SQL Managed Instance | -|VA1059 |xp_cmdshell should be disabled |High |xp_cmdshell spawns a Windows command shell and passes it a string for execution. This rule checks that xp_cmdshell is disabled. |
    SQL Server 2012+

    SQL Managed Instance | -|VA1067 |Database Mail XPs should be disabled when it is not in use |Medium |This rule checks that Database Mail is disabled when no database mail profile is configured. Database Mail can be used for sending e-mail messages from the SQL Server Database Engine and is disabled by default. If you are not using this feature, it is recommended to disable it to reduce the surface area. |
    SQL Server 2012+ | -|VA1068 |Server permissions shouldn't be granted directly to principals |Low |Server level permissions are associated with a server level object to regulate which users can gain access to the object. This rule checks that there are no server level permissions granted directly to logins. |SQL Server 2012+

    SQL Managed Instance | -|VA1070 |Database users shouldn't share the same name as a server login |Low |Database users may share the same name as a server login. This rule validates that there are no such users. |
    SQL Server 2012+

    SQL Managed Instance | -|VA1072 |Authentication mode should be Windows Authentication |Medium |There are two possible authentication modes: Windows Authentication mode and mixed mode. Mixed mode means that SQL Server enables both Windows authentication and SQL Server authentication. This rule checks that the authentication mode is set to Windows Authentication. |
    SQL Server 2012+ | -|VA1094 |Database permissions shouldn't be granted directly to principals |Low |Permissions are rules associated with a securable object to regulate which users can gain access to the object. This rule checks that there are no DB permissions granted directly to users. |SQL Server 2012+

    SQL Managed Instance | -|VA1095 |Excessive permissions should not be granted to PUBLIC role |Medium |Every SQL Server login belongs to the public server role. When a server principal has not been granted or denied specific permissions on a securable object the user inherits the permissions granted to public on that object. This displays a list of all permissions that are granted to the PUBLIC role. |
    SQL Server 2012+

    SQL Managed Instance

    SQL Database | -|VA1096 |Principal GUEST should not be granted permissions in the database |Low |Each database includes a user called GUEST. Permissions granted to GUEST are inherited by users who have access to the database but who do not have a user account in the database. This rule checks that all permissions have been revoked from the GUEST user. |
    SQL Server 2012+

    SQL Managed Instance

    SQL Database | -|VA1097 |Principal GUEST should not be granted permissions on objects or columns |Low |Each database includes a user called GUEST. Permissions granted to GUEST are inherited by users who have access to the database but who do not have a user account in the database. This rule checks that all permissions have been revoked from the GUEST user. |
    SQL Server 2012+

    SQL Managed Instance

    SQL Database | -|VA1099 |GUEST user should not be granted permissions on database securables |Low |Each database includes a user called GUEST. Permissions granted to GUEST are inherited by users who have access to the database but who do not have a user account in the database. This rule checks that all permissions have been revoked from the GUEST user. |
    SQL Server 2012+

    SQL Managed Instance

    SQL Database | -|VA1246 |Application roles should not be used |Low |An application role is a database principal that enables an application to run with its own user-like permissions. Application roles enable that only users connecting through a particular application can access specific data. Application roles are password-based (which applications typically hardcode) and not permission based which exposes the database to app role impersonation by password-guessing. This rule checks that no application roles are defined in the database. |
    SQL Server 2012+

    SQL Managed Instance

    SQL Database | -|VA1248 |User-defined database roles should not be members of fixed roles |Medium |To easily manage the permissions in your databases SQL Server provides several roles, which are security principals that group other principals. They are like groups in the Microsoft Windows operating system. Database accounts and other SQL Server roles can be added into database-level roles. Each member of a fixed-database role can add other users to that same role. This rule checks that no user-defined roles are members of fixed roles. |
    SQL Server 2012+

    SQL Managed Instance

    SQL Database

    Azure Synapse | -|VA1267 |Contained users should use Windows Authentication |Medium |Contained users are users that exist within the database and do not require a login mapping. This rule checks that contained users use Windows Authentication. |
    SQL Server 2012+

    SQL Managed Instance | -|VA1280 |Server Permissions granted to public should be minimized |Medium |Every SQL Server login belongs to the public server role. When a server principal has not been granted or denied specific permissions on a securable object the user inherits the permissions granted to public on that object. This rule checks that server permissions granted to public are minimized. |
    SQL Server 2012+

    SQL Managed Instance | -|VA1282 |Orphan roles should be removed |Low |Orphan roles are user-defined roles that have no members. Eliminate orphaned roles as they are not needed on the system. This rule checks whether there are any orphan roles. |
    SQL Server 2012+

    SQL Managed Instance

    SQL Database

    Azure Synapse | -|VA2020 |Minimal set of principals should be granted ALTER or ALTER ANY USER database-scoped permissions |High |Every SQL Server securable has permissions associated with it that can be granted to principals. Permissions can be scoped at the server level (assigned to logins and server roles) or at the database level (assigned to database users and database roles). These rules check that only a minimal set of principals are granted ALTER or ALTER ANY USER database-scoped permissions. |
    SQL Server 2012+

    SQL Managed Instance

    SQL Database

    Azure Synapse | -|VA2033 |Minimal set of principals should be granted database-scoped EXECUTE permission on objects or columns |Low |This rule checks which principals are granted EXECUTE permission on objects or columns to ensure this permission is granted to a minimal set of principals. Every SQL Server securable has permissions associated with it that can be granted to principals. Permissions can be scoped at the server level (assigned to logins and server roles) or at the database level (assigned to database users, database roles, or application roles). The EXECUTE permission applies to both stored procedures and scalar functions, which can be used in computed columns. |
    SQL Server 2012+

    SQL Managed Instance

    SQL Database

    Azure Synapse | -|VA2103 |Unnecessary execute permissions on extended stored procedures should be revoked |Medium |Extended stored procedures are DLLs that an instance of SQL Server can dynamically load and run. SQL Server is packaged with many extended stored procedures that allow for interaction with the system DLLs. This rule checks that unnecessary execute permissions on extended stored procedures have been revoked. |
    SQL Server 2012+

    SQL Managed Instance | -|VA2107 |Minimal set of principals should be members of fixed Azure SQL DB master database roles |High |SQL Database provides two restricted administrative roles in the master database to which user accounts can be added that grant permissions to either create databases or manage logins. This rule check that a minimal set of principals are members of these administrative roles. |
    SQL Database

    Azure Synapse | -|VA2108 |Minimal set of principals should be members of fixed high impact database roles |High |SQL Server provides roles to help manage the permissions. Roles are security principals that group other principals. Database-level roles are database-wide in their permission scope. This rule checks that a minimal set of principals are members of the fixed database roles. |
    SQL Server 2012+

    SQL Managed Instance

    SQL Database

    Azure Synapse | -|VA2109 |Minimal set of principals should be members of fixed low impact database roles |Low |SQL Server provides roles to help manage the permissions. Roles are security principals that group other principals. Database-level roles are database-wide in their permission scope. This rule checks that a minimal set of principals are members of the fixed database roles. |
    SQL Server 2012+

    SQL Managed Instance

    SQL Database

    Azure Synapse | -|VA2110 |Execute permissions to access the registry should be revoked |High |Registry extended stored procedures allow Microsoft SQL Server to read write and enumerate values and keys in the registry. They are used by Enterprise Manager to configure the server. This rule checks that the permissions to execute registry extended stored procedures have been revoked from all users (other than dbo). |
    SQL Server 2012+

    SQL Managed Instance | -|VA2113 |Data Transformation Services (DTS) permissions should only be granted to SSIS roles |Medium |Data Transformation Services (DTS), is a set of objects and utilities that allow the automation of extract, transform, and load operations to or from a database. The objects are DTS packages and their components, and the utilities are called DTS tools. This rule checks that only the SSIS roles are granted permissions to use the DTS system stored procedures and the permissions for the PUBLIC role to use the DTS system stored procedures have been revoked. |
    SQL Server 2012+

    SQL Managed Instance | -|VA2114 |Minimal set of principals should be members of high impact fixed server roles |High |SQL Server provides roles to help manage permissions. Roles are security principals that group other principals. Server-level roles are server-wide in their permission scope. This rule checks that a minimal set of principals are members of the fixed server roles. |
    SQL Server 2012+

    SQL Managed Instance | -|VA2129 |Changes to signed modules should be authorized |High |You can sign a stored procedure, function, or trigger with a certificate or an asymmetric key. This is designed for scenarios when permissions cannot be inherited through ownership chaining or when the ownership chain is broken, such as dynamic SQL. This rule checks for changes made to signed modules, which could be an indication of malicious use. |
    SQL Server 2012+

    SQL Database

    SQL Managed Instance | -|VA2130 |Track all users with access to the database |Low |This check tracks all users with access to a database. Make sure that these users are authorized according to their current role in the organization. |
    SQL Database

    Azure Synapse | - -## Auditing and Logging - -|Rule ID |Rule Title |Rule Severity |Rule Description |Platform | -|---------|---------|---------|---------|---------| -|VA1045 |Default trace should be enabled |Medium |Default trace provides troubleshooting assistance to database administrators by ensuring that they have the log data necessary to diagnose problems the first time they occur. This rule checks that the default trace is enabled. |
    SQL Server 2012+

    SQL Managed Instance | -|VA1091 |Auditing of both successful and failed login attempts (default trace) should be enabled when 'Login auditing' is set up to track logins |Low |SQL Server Login auditing configuration enables administrators to track the users logging into SQL Server instances. If the user chooses to count on 'Login auditing' to track users logging into SQL Server instances, then it is important to enable it for both successful and failed login attempts. |
    SQL Server 2012+ | -|VA1093 |Maximum number of error logs should be 12 or more |Low |Each SQL Server Error log will have all the information related to failures / errors that have occurred since SQL Server was last restarted or since the last time you have recycled the error logs. This rule checks that the maximum number of error logs is 12 or more. |SQL Server 2012+ | -|VA1258 |Database owners are as expected |High |Database owners can perform all configuration and maintenance activities on the database and can also drop databases in SQL Server. Tracking database owners is important to avoid having excessive permission for some principals. Create a baseline that defines the expected database owners for the database. This rule checks whether the database owners are as defined in the baseline. |SQL Server 2016+3

    SQL Database

    Azure Synapse | -|VA1264 |Auditing of both successful and failed login attempts should be enabled |Low |SQL Server auditing configuration enables administrators to track the users logging into SQL Server instances that they're responsible for. This rule checks that auditing is enabled for both successful and failed login attempts. |
    SQL Server 2012+

    SQL Managed Instance | -|VA1265 |Auditing of both successful and failed login attempts for contained DB authentication should be enabled |Medium |SQL Server auditing configuration enables administrators to track users logging to SQL Server instances that they're responsible for. This rule checks that auditing is enabled for both successful and failed login attempts for contained DB authentication. |
    SQL Server 2012+

    SQL Managed Instance | -|VA1281 |All memberships for user-defined roles should be intended |Medium |User-defined roles are security principals defined by the user to group principals to easily manage permissions. Monitoring these roles is important to avoid having excessive permissions. Create a baseline that defines expected membership for each user-defined role. This rule checks whether all memberships for user-defined roles are as defined in the baseline. |
    SQL Server 2012+

    SQL Managed Instance

    SQL Database

    Azure Synapse | -|VA1283 |There should be at least 1 active audit in the system |Low |Auditing an instance of the SQL Server Database Engine or an individual database involves tracking and logging events that occur on the Database Engine. The SQL Server Audit object collects a single instance of server or database-level actions and groups of actions to monitor. This rule checks that there is at least one active audit in the system. |
    SQL Server 2012+

    SQL Managed Instance | -|VA2061 |Auditing should be enabled at the server level |High |Azure SQL Database Auditing tracks database events and writes them to an audit log in your Azure storage account. Auditing helps you understand database activity and gain insight into discrepancies and anomalies that could indicate business concerns or suspected security violations as well as helps you meet regulatory compliance. For more information, see [Azure SQL Auditing](./auditing-overview.md). This rule checks that auditing is enabled. |
    SQL Database

    Azure Synapse | - -## Data Protection - -|Rule ID |Rule Title |Rule Severity |Rule Description |Platform | -|---------|---------|---------|---------|---------| -|VA1098 |Any Existing SSB or Mirroring endpoint should require AES connection |High |Service Broker and Mirroring endpoints support different encryption algorithms including no-encryption. This rule checks that any existing endpoint requires AES encryption. |
    SQL Server 2012+ | -|VA1219 |Transparent data encryption should be enabled |Medium |Transparent data encryption (TDE) helps to protect the database files against information disclosure by performing real-time encryption and decryption of the database, associated backups, and transaction log files 'at rest', without requiring changes to the application. This rule checks that TDE is enabled on the database. |SQL Server 2012+

    SQL Managed Instance

    SQL Database

    Azure Synapse | -|VA1220 |Database communication using TDS should be protected through TLS |High |Microsoft SQL Server can use Secure Sockets Layer (SSL) or Transport Layer Security (TLS) to encrypt data that is transmitted across a network between an instance of SQL Server and a client application. This rule checks that all connections to the SQL Server are encrypted through TLS. |
    SQL Server 2012+

    SQL Managed Instance | -|VA1221 |Database Encryption Symmetric Keys should use AES algorithm |High |SQL Server uses encryption keys to help secure data credentials and connection information that is stored in a server database. SQL Server has two kinds of keys: symmetric and asymmetric. This rule checks that Database Encryption Symmetric Keys use AES algorithm. |
    SQL Server 2012+

    SQL Managed Instance

    SQL Database

    Azure Synapse | -|VA1222 |Cell-Level Encryption keys should use AES algorithm |High |Cell-Level Encryption (CLE) allows you to encrypt your data using symmetric and asymmetric keys. This rule checks that Cell-Level Encryption symmetric keys use AES algorithm. |
    SQL Server 2012+

    SQL Managed Instance | -|VA1223 |Certificate keys should use at least 2048 bits |High |Certificate keys are used in RSA and other encryption algorithms to protect data. These keys need to be of enough length to secure the user's data. This rule checks that the key's length is at least 2048 bits for all certificates. |
    SQL Server 2012+

    SQL Managed Instance

    SQL Database

    Azure Synapse | -|VA1224 |Asymmetric keys' length should be at least 2048 bits |High |Database asymmetric keys are used in many encryption algorithms these keys need to be of enough length to secure the encrypted data this rule checks that all asymmetric keys stored in the database are of length of at least 2048 bits |
    SQL Server 2012

    SQL Server 2014

    SQL Database | -|VA1279 |Force encryption should be enabled for TDS |High |When the Force Encryption option for the Database Engine is enabled all communications between client and server is encrypted regardless of whether the 'Encrypt connection' option (such as from SSMS) is checked or not. This rule checks that Force Encryption option is enabled. |
    SQL Server 2012+ | -|VA1288 |Sensitive data columns should be classified |Medium |This rule checks if the scanned database has potentially sensitive data that has not been classified. |SQL Database | -|VA2060 |SQL Threat Detection should be enabled at the server level |Medium |SQL Threat Detection provides a layer of security that detects potential vulnerabilities and anomalous activity in databases such as SQL injection attacks and unusual behavior patterns. When a potential threat is detected Threat Detection sends an actionable real-time alert by email and in Microsoft Defender for Cloud, which includes clear investigation and remediation steps for the specific threat. For more information, please see [Configure threat detection](./threat-detection-configure.md). This check verifies that SQL Threat Detection is enabled |
    SQL Managed Instance

    SQL Database

    Azure Synapse | - -## Installation Updates and Patches - -|Rule ID |Rule Title |Rule Severity |Rule Description |Platform | -|---------|---------|---------|---------|---------| -| VA1018 |Latest updates should be installed |High |Microsoft periodically releases Cumulative Updates (CUs) for each version of SQL Server. This rule checks whether the latest CU has been installed for the particular version of SQL Server being used. |
    SQL Server 2005

    SQL Server 2008

    SQL Server 2008 R2

    SQL Server 2012

    SQL Server 2014

    SQL Server 2016

    SQL Server 2017
    | -|VA2128 |Vulnerability Assessment is not supported for SQL Server versions lower than SQL Server 2012 |High |To run a Vulnerability Assessment scan on your SQL Server the server needs to be upgraded to SQL Server 2012 or higher; SQL Server 2008 R2 and below are no longer supported by Microsoft. |
    SQL Server 2012+

    SQL Managed Instance

    SQL Database

    Azure Synapse | - -## Surface Area Reduction - -|Rule ID |Rule Title |Rule Severity |Rule Description |Platform | -|---------|---------|---------|---------|---------| -|VA1022 |Ad hoc distributed queries should be disabled |Medium |Ad hoc distributed queries use the `OPENROWSET` and `OPENDATASOURCE` functions to connect to remote data sources that use OLE DB. This rule checks that ad hoc distributed queries are disabled. |
    SQL Server 2012+ | -|VA1023 |CLR should be disabled |High |The CLR allows managed code to be hosted by and run in the Microsoft SQL Server environment. This rule checks that CLR is disabled. |SQL Server 2012+ | -|VA1026 |CLR should be disabled |Medium |The CLR allows managed code to be hosted by and run in the Microsoft SQL Server environment. CLR strict security treats SAFE and EXTERNAL_ACCESS assemblies as if they were marked UNSAFE and requires all assemblies be signed by a certificate or asymmetric key with a corresponding login that has been granted UNSAFE ASSEMBLY permission in the master database. This rule checks that CLR is disabled. |SQL Server 2017+2

    SQL Managed Instance | -|VA1027 |Untracked trusted assemblies should be removed |High |Assemblies marked as UNSAFE are required to be signed by a certificate or asymmetric key with a corresponding login that has been granted UNSAFE ASSEMBLY permission in the master database. Trusted assemblies may bypass this requirement. |
    SQL Server 2017+

    SQL Managed Instance | -|VA1044 |Remote Admin Connections should be disabled unless specifically required |Medium |This rule checks that remote dedicated admin connections are disabled if they are not being used for clustering to reduce attack surface area. SQL Server provides a dedicated administrator connection (DAC). The DAC lets an administrator access a running server to execute diagnostic functions or Transact-SQL statements, or to troubleshoot problems on the server and it becomes an attractive target to attack when it is enabled remotely. |
    SQL Server 2012+

    SQL Managed Instance | -|VA1051 |AUTO_CLOSE should be disabled on all databases |Medium |The AUTO_CLOSE option specifies whether the database shuts down gracefully and frees resources after the last user disconnects. Regardless of its benefits it can cause denial of service by aggressively opening and closing the database, thus it is important to keep this feature disabled. This rule checks that this option is disabled on the current database. |
    SQL Server 2012+ | -|VA1066 |Unused service broker endpoints should be removed |Low |Service Broker provides queuing and reliable messaging for SQL Server. Service Broker is used both for applications that use a single SQL Server instance and applications that distribute work across multiple instances. Service Broker endpoints provide options for transport security and message forwarding. This rule enumerates all the service broker endpoints. Remove those that are not used. |SQL Server 2012+ | -|VA1071 |'Scan for startup stored procedures' option should be disabled |Medium |When 'Scan for startup procs' is enabled SQL Server scans for and runs all automatically run stored procedures defined on the server. This rule checks that this option is disabled. |SQL Server 2012+ | -|VA1092 |SQL Server instance shouldn't be advertised by the SQL Server Browser service |Low |SQL Server uses the SQL Server Browser service to enumerate instances of the Database Engine installed on the computer. This enables client applications to browse for a server and helps clients distinguish between multiple instances of the Database Engine on the same computer. This rule checks that the SQL instance is hidden. |SQL Server 2012+ | -|VA1102 |The Trustworthy bit should be disabled on all databases except MSDB |High |The TRUSTWORTHY database property is used to indicate whether the instance of SQL Server trusts the database and the contents within it. If this option is enabled database modules (for example user-defined functions or stored procedures) that use an impersonation context can access resources outside the database. This rule verifies that the TRUSTWORTHY bit is disabled on all databases except MSDB. |SQL Server 2012+

    SQL Managed Instance | -|VA1143 |'dbo' user should not be used for normal service operation |Medium |The 'dbo' or database owner is a user account that has implied permissions to perform all activities in the database. Members of the sysadmin fixed server role are automatically mapped to dbo. This rule checks that dbo is not the only account allowed to access this database. Note that on a newly created clean database this rule will fail until additional roles are created. |
    SQL Server 2012+

    SQL Managed Instance

    SQL Database

    Azure Synapse | -|VA1144 |Model database should only be accessible by 'dbo' |Medium |The Model database is used as the template for all databases created on the instance of SQL Server. Modifications made to the model database such as database size recovery model and other database options are applied to any databases created afterward. This rule checks that dbo is the only account allowed to access the model database. |
    SQL Server 2012+

    SQL Managed Instance | -|VA1230 |Filestream should be disabled |High |FILESTREAM integrates the SQL Server Database Engine with an NTFS file system by storing varbinary (max) binary large object (BLOB) data as files on the file system. Transact-SQL statements can insert, update, query, search, and back up FILESTREAM data. Enabling Filestream on SQL server exposes additional NTFS streaming API, which increases its attack surface and makes it prone to malicious attacks. This rule checks that Filestream is disabled. |
    SQL Server 2012+ | -|VA1235 |Server configuration 'Replication XPs' should be disabled |Medium |Disable the deprecated server configuration 'Replication XPs' to limit the attack surface area. This is an internal only configuration setting. |SQL Server 2012+

    SQL Managed Instance | -|VA1244 |Orphaned users should be removed from SQL server databases |Medium |A database user that exists on a database but has no corresponding login in the master database or as an external resource (for example, a Windows user) is referred to as an orphaned user and it should either be removed or remapped to a valid login. This rule checks that there are no orphaned users. |
    SQL Server 2012+

    SQL Managed Instance | -|VA1245 |The dbo information should be consistent between the target DB and master |High |There is redundant information about the dbo identity for any database: metadata stored in the database itself and metadata stored in master DB. This rule checks that this information is consistent between the target DB and master. |
    SQL Server 2012+

    SQL Managed Instance | -|VA1247 |There should be no SPs marked as auto-start |High |When SQL Server has been configured to 'scan for startup procs' the server will scan master DB for stored procedures marked as auto-start. This rule checks that there are no SPs marked as auto-start. |
    SQL Server 2012+ | -|VA1256 |User CLR assemblies should not be defined in the database |High |CLR assemblies can be used to execute arbitrary code on SQL Server process. This rule checks that there are no user-defined CLR assemblies in the database. |SQL Server 2012+

    SQL Managed Instance | -|VA1277 |Polybase network encryption should be enabled |High |PolyBase is a technology that accesses and combines both non-relational and relational data all from within SQL Server. Polybase network encryption option configures SQL Server to encrypt control and data channels when using Polybase. This rule verifies that this option is enabled. |
    SQL Server 2016+ | -|VA1278 |Create a baseline of External Key Management Providers |Medium |The SQL Server Extensible Key Management (EKM) enables third-party EKM / Hardware Security Modules (HSM) vendors to register their modules in SQL Server. When registered SQL Server users can use the encryption keys stored on EKM modules,this rule displays a list of EKM providers being used in the system. |SQL Server 2012+

    SQL Managed Instance | -|VA2062 |Database-level firewall rules should not grant excessive access |High |The Azure SQL Database-level firewall helps protect your data by preventing all access to your database until you specify which IP addresses have permission. Database-level firewall rules grant access to the specific database based on the originating IP address of each request. Database-level firewall rules for master and user databases can only be created and managed through Transact-SQL (unlike server-level firewall rules, which can also be created and managed using the Azure portal or PowerShell). For more information, see [Azure SQL Database and Azure Synapse Analytics IP firewall rules](./firewall-configure.md). This check verifies that database-level firewall rules do not grant access to more than 255 IP addresses. |
    SQL Database

    Azure Synapse | -|VA2063 |Server-level firewall rules should not grant excessive access |High |The Azure SQL server-level firewall helps protect your server by preventing all access to your databases until you specify which IP addresses have permission. Server-level firewall rules grant access to all databases that belong to the server based on the originating IP address of each request. Server-level firewall rules can only be created and managed through Transact-SQL as well as through the Azure portal or PowerShell. For more information, see [Azure SQL Database and Azure Synapse Analytics IP firewall rules](./firewall-configure.md). This check verifies that server-level firewall rules do not grant access to more than 255 IP addresses. |
    SQL Database

    Azure Synapse | -|VA2064 |Database-level firewall rules should be tracked and maintained at a strict minimum |High |The Azure SQL Database-level firewall helps protect your data by preventing all access to your database until you specify which IP addresses have permission. Database-level firewall rules grant access to the specific database based on the originating IP address of each request. Database-level firewall rules for master and user databases can only be created and managed through Transact-SQL (unlike server-level firewall rules, which can also be created and managed using the Azure portal or PowerShell). For more information, see [Azure SQL Database and Azure Synapse Analytics IP firewall rules](./firewall-configure.md). This check enumerates all the database-level firewall rules so that any changes made to them can be identified and addressed. |
    SQL Database

    Azure Synapse | -|VA2065 |Server-level firewall rules should be tracked and maintained at a strict minimum |High |The Azure SQL server-level firewall helps protect your data by preventing all access to your databases until you specify which IP addresses have permission. Server-level firewall rules grant access to all databases that belong to the server based on the originating IP address of each request. Server-level firewall rules can be created and managed through Transact-SQL as well as through the Azure portal or PowerShell. For more information, see [Azure SQL Database and Azure Synapse Analytics IP firewall rules](./firewall-configure.md). This check enumerates all the server-level firewall rules so that any changes made to them can be identified and addressed. |
    SQL Database

    Azure Synapse | -|VA2111 |Sample databases should be removed |Low |Microsoft SQL Server comes shipped with several sample databases. This rule checks whether the sample databases have been removed. |
    SQL Server 2012+

    SQL Managed Instance | -|VA2120 |Features that may affect security should be disabled |High |SQL Server is capable of providing a wide range of features and services. Some of the features and services provided by default may not be necessary and enabling them could adversely affect the security of the system. This rule checks that these features are disabled. |
    SQL Server 2012+

    SQL Managed Instance | -|VA2121 | 'OLE Automation Procedures' feature should be disabled |High |SQL Server is capable of providing a wide range of features and services. Some of the features and services, provided by default, may not be necessary, and enabling them could adversely affect the security of the system. The OLE Automation Procedures option controls whether OLE Automation objects can be instantiated within Transact-SQL batches. These are extended stored procedures that allow SQL Server users to execute functions external to SQL Server. Regardless of its benefits it can also be used for exploits, and is known as a popular mechanism to plant files on the target machines. It is advised to use PowerShell as a replacement for this tool. This rule checks that 'OLE Automation Procedures' feature is disabled. |
    SQL Server 2012+

    SQL Managed Instance | -|VA2122 |'User Options' feature should be disabled |Medium |SQL Server is capable of providing a wide range of features and services. Some of the features and services provided by default may not be necessary and enabling them could adversely affect the security of the system. The user options specifies global defaults for all users. A list of default query processing options is established for the duration of a user's work session. The user options allows you to change the default values of the SET options (if the server's default settings are not appropriate). This rule checks that 'user options' feature is disabled. |
    SQL Server 2012+

    SQL Managed Instance | -|VA2126 |Extensibility-features that may affect security should be disabled if not needed |Medium |SQL Server provides a wide range of features and services. Some of the features and services, provided by default, may not be necessary, and enabling them could adversely affect the security of the system. This rule checks that configurations that allow extraction of data to an external data source and the execution of scripts with certain remote language extensions are disabled. |
    SQL Server 2016+ | - -## Removed rules - -|Rule ID |Rule Title | -|---------|---------| -|VA1021 |Global temporary stored procedures should be removed | -|VA1024 |C2 Audit Mode should be enabled | -|VA1069 |Permissions to select from system tables and views should be revoked from non-sysadmins | -|VA1090 |Ensure all Government Off The Shelf (GOTS) and Custom Stored Procedures are encrypted | -|VA1103 |Use only CLR with SAFE_ACCESS permission | -|VA1229 |Filestream setting in registry and in SQL Server configuration should match | -|VA1231 |Filestream should be disabled (SQL) | -|VA1234 |Common Criteria setting should be enabled | -|VA1252 |List of events being audited and centrally managed via server audit specifications. | -|VA1253 |List of DB-scoped events being audited and centrally managed via server audit specifications | -|VA1263 |List all the active audits in the system | -|VA1266 |The 'MUST_CHANGE' option should be set on all SQL logins | -|VA1276 |Agent XPs feature should be disabled | -|VA1286 |Database permissions shouldn't be granted directly to principals (OBJECT or COLUMN) | -|VA2000 |Minimal set of principals should be granted high impact database-scoped permissions | -|VA2001 |Minimal set of principals should be granted high impact database-scoped permissions on objects or columns | -|VA2002 |Minimal set of principals should be granted high impact database-scoped permissions on various securables | -|VA2010 |Minimal set of principals should be granted medium impact database-scoped permissions | -|VA2021 |Minimal set of principals should be granted database-scoped ALTER permissions on objects or columns | -|VA2022 |Minimal set of principals should be granted database-scoped ALTER permission on various securables | -|VA2030 |Minimal set of principals should be granted database-scoped SELECT or EXECUTE permissions | -|VA2031 |Minimal set of principals should be granted database-scoped SELECT | -|VA2032 |Minimal set of principals should be granted 
database-scoped SELECT or EXECUTE permissions on schema | -|VA2034 |Minimal set of principals should be granted database-scoped EXECUTE permission on XML Schema Collection | -|VA2040 |Minimal set of principals should be granted low impact database-scoped permissions | -|VA2041 |Minimal set of principals should be granted low impact database-scoped permissions on objects or columns | -|VA2042 |Minimal set of principals should be granted low impact database-scoped permissions on schema | -|VA2050 |Minimal set of principals should be granted database-scoped VIEW DEFINITION permissions | -|VA2051 |Minimal set of principals should be granted database-scoped VIEW DEFINITION permissions on objects or columns | -|VA2052 |Minimal set of principals should be granted database-scoped VIEW DEFINITION permission on various securables | -|VA2100 |Minimal set of principals should be granted high impact server-scoped permissions | -|VA2101 |Minimal set of principals should be granted medium impact server-scoped permissions | -|VA2102 |Minimal set of principals should be granted low impact server-scoped permissions | -|VA2104 |Execute permissions on extended stored procedures should be revoked from PUBLIC | -|VA2105 |Login password should not be easily guessed | -|VA2112 |Permissions from PUBLIC for Data Transformation Services (DTS) should be revoked | -|VA2115 |Minimal set of principals should be members of medium impact fixed server roles | -|VA2123 |'Remote Access' feature should be disabled | -|VA2127 |'External Scripts' feature should be disabled | - -## Next steps - -- [Vulnerability Assessment](sql-vulnerability-assessment.md) -- [SQL Vulnerability Assessment rules changelog](sql-database-vulnerability-assessment-rules-changelog.md) diff --git a/articles/azure-sql/database/sql-database-vulnerability-assessment-storage.md b/articles/azure-sql/database/sql-database-vulnerability-assessment-storage.md deleted file mode 100644 index 0facf3262e6e0..0000000000000 --- 
    a/articles/azure-sql/database/sql-database-vulnerability-assessment-storage.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: Store Vulnerability Assessment scan results in a storage account accessible behind firewalls and VNets -description: "Provides instructions on how to store Vulnerability Assessment (VA) scans in a storage account that can be accessed through a firewall or a VNet" -services: sql-database -ms.service: sql-db-mi -ms.subservice: security -ms.topic: how-to -author: davidtrigano -ms.author: datrigan -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 11/10/2021 ---- - -# Store Vulnerability Assessment scan results in a storage account accessible behind firewalls and VNets -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -If you are limiting access to your storage account in Azure for certain VNets or services, you'll need to enable the appropriate configuration so that Vulnerability Assessment (VA) scanning for SQL Databases or Managed Instances has access to that storage account. - -## Prerequisites - -The SQL Vulnerability Assessment service needs permission to the storage account to save baseline and scan results. There are three methods: - **Use Storage Account key**: Azure creates the SAS key and saves it (though we don't save the account key) - **Use Storage SAS key**: The SAS key must have: Write | List | Read | Delete permissions - **Use SQL Server managed identity**: The SQL Server must have a managed identity. The storage account must have a role assignment for the SQL Managed Identity as [Storage Blob Data Contributor](../../role-based-access-control/built-in-roles.md#storage-blob-data-contributor). When you apply the settings, the VA fields storageContainerSasKey and storageAccountAccessKey must be empty. When storage is behind a firewall or virtual network, then the SQL managed identity is required. 
- -When you use the Azure portal to save SQL VA settings, Azure checks if you have permission to assign a new role assignment for the managed identity as [Storage Blob Data Contributor](../../role-based-access-control/built-in-roles.md#storage-blob-data-contributor) on the storage. If permissions are assigned, Azure uses SQL Server managed identity, otherwise Azure uses the key method. - -## Enable Azure SQL Database VA scanning access to the storage account - -If you have configured your VA storage account to only be accessible by certain networks or services, you'll need to ensure that VA scans for your Azure SQL Database are able to store the scans on the storage account. You can use the existing storage account, or create a new storage account to store VA scan results for all databases on your [logical SQL server](logical-servers.md). - -> [!NOTE] -> The vulnerability assessment service can't access storage accounts protected with firewalls or VNets if they require storage access keys. - -Go to your **Resource group** that contains the storage account and access the **Storage account** pane. Under **Settings**, select **Firewall and virtual networks**. - -Ensure that **Allow trusted Microsoft services access to this storage account** is checked. - -:::image type="content" source="media/sql-database-vulnerability-assessment-storage/storage-allow-microsoft-services.png" alt-text="Screenshot shows Firewall and virtual networks dialog box, with Allow trusted Microsoft services to access this storage account selected."::: - -To find out which storage account is being used, go to your **SQL server** pane in the [Azure portal](https://portal.azure.com), under **Security**, and then select **Defender for Cloud**. - -:::image type="content" source="../database/media/azure-defender-for-sql/va-storage.png" alt-text="set up vulnerability assessment"::: - -> [!NOTE] -> You can set up email alerts to notify users in your organization to view or access the scan reports. 
To do this, ensure that you have SQL Security Manager and Storage Blob Data Reader permissions. - -## Store VA scan results for Azure SQL Managed Instance in a storage account that can be accessed behind a firewall or VNet - -Since Managed Instance is not a trusted Microsoft Service and has a different VNet from the storage account, executing a VA scan will result in an error. - -To support VA scans on Managed Instances, follow the below steps: - -1. In the **SQL managed instance** pane, under the **Overview** heading, click the **Virtual network/subnet** link. This takes you to the **Virtual network** pane. - - :::image type="content" source="../managed-instance/media/public-endpoint-configure/mi-overview.png" alt-text="mi-overview2"::: - -1. Under **Settings**, select **Subnets**. Click **Subnet** in the new pane to add a subnet, and delegate it to *Microsoft.Sql\managedInstance*. For more information, see [Manage subnets](../../virtual-network/virtual-network-manage-subnet.md). - - :::image type="content" source="media/sql-database-vulnerability-assessment-storage/mi-subnets.png" alt-text="Screenshot shows a subnet that has been delegated Microsoft.sql\managedInstance."::: - -1. In your **Virtual network** pane, under **Settings**, select **Service endpoints**. Click **Add** in the new pane, and add the *Microsoft.Storage* Service as a new service endpoint. Make sure the *ManagedInstance* Subnet is selected. Click **Add**. - - :::image type="content" source="media/sql-database-vulnerability-assessment-storage/mi-service-endpoint.png" alt-text="Screenshot shows Add service endpoints, where you add the Microsoft.Storage Service as an endpoint."::: - -1. Go to your **Storage account** that you've selected to store your VA scans. Under **Settings**, select **Firewall and virtual networks**. Click on **Add existing virtual network**. Select your managed instance virtual network and subnet, and click **Add**. 
- - :::image type="content" source="media/sql-database-vulnerability-assessment-storage/storage-firewall.png" alt-text="Screenshot shows the Firewalls and virtual networks pane, which contains the Add existing virtual network link."::: - -You should now be able to store your VA scans for Managed Instances in your storage account. - -## Troubleshoot vulnerability assessment scan-related issues - -Troubleshoot common issues related to vulnerability assessment scans. - -### Failure to save vulnerability assessment settings - -You might not be able to save changes to vulnerability assessment settings if your storage account doesn't meet some prerequisites or if you have insufficient permissions. - -#### Storage account requirements - -The storage account in which vulnerability assessment scan results are saved must meet the following requirements: - -- **Type**: StorageV2 (General Purpose V2) or Storage (General Purpose V1) -- **Performance**: Standard (only) -- **Region**: The storage must be in the same region as the instance of Azure SQL Server. - -If any of these requirements aren't met, saving changes to vulnerability assessment settings fails. - -#### Permissions - -The following permissions are required to save changes to vulnerability assessment settings: - -- SQL Security Manager -- Storage Blob Data Reader -- Owner role on the storage account - -Setting a new role assignment requires owner or user administrator access to the storage account and the following permissions: - -- Storage Blob Data Owner - -### Storage account isn't visible for selection in vulnerability assessment settings - -The storage account might not appear in the storage account picker for several reasons: - -- The storage account you're looking for isn't in the selected subscription. -- The storage account you're looking for isn't in the same region as the instance of Azure SQL Server. -- You don't have Microsoft.Storage/storageAccounts/read permissions on the storage account. 
- -### Failure to open an email link for scan results or can't view scan results - -You might not be able to open a link in a notification email about scan results or to view scan results if you don't have the required permissions or if you use a browser that doesn't support opening or displaying scan results. - -#### Permissions - -The following permissions are required to open links in email notifications about scan results or to view scan results: - -- SQL Security Manager -- Storage Blob Data Reader - -#### Browser requirements - -The Firefox browser doesn't support opening or displaying scan results view. We recommend that you use Chrome or Microsoft Edge to view vulnerability assessment scan results. - -## Next steps - -- [Vulnerability Assessment](sql-vulnerability-assessment.md) -- [Create an Azure Storage account](../../storage/common/storage-account-create.md) -- [Microsoft Defender for SQL](azure-defender-for-sql.md) diff --git a/articles/azure-sql/database/sql-vulnerability-assessment.md b/articles/azure-sql/database/sql-vulnerability-assessment.md deleted file mode 100644 index 320d35ae09bd9..0000000000000 --- a/articles/azure-sql/database/sql-vulnerability-assessment.md +++ /dev/null @@ -1,325 +0,0 @@ ---- -title: SQL vulnerability assessment -titleSuffix: Azure SQL Database & SQL Managed Instance & Azure Synapse Analytics -description: Learn how to configure SQL vulnerability assessment and interpret the assessment reports on Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics. 
-services: sql-database -ms.service: sql-db-mi -ms.subservice: security -ms.custom: sqldbrb=3 -ms.devlang: -ms.topic: how-to -author: davidtrigano -ms.author: datrigan -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 12/01/2021 -tags: azure-synapse ---- -# SQL vulnerability assessment helps you identify database vulnerabilities -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -SQL vulnerability assessment is an easy-to-configure service that can discover, track, and help you remediate potential database vulnerabilities. Use it to proactively improve your database security. - -Vulnerability assessment is part of the [Microsoft Defender for SQL](azure-defender-for-sql.md) offering, which is a unified package for advanced SQL security capabilities. Vulnerability assessment can be accessed and managed via the central Microsoft Defender for SQL portal. - -> [!NOTE] -> Vulnerability assessment is supported for Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics. Databases in Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics are referred to collectively in the remainder of this article as databases, and the server is referring to the [server](logical-servers.md) that hosts databases for Azure SQL Database and Azure Synapse. - -## What is SQL vulnerability assessment? - -SQL vulnerability assessment is a service that provides visibility into your security state. Vulnerability assessment includes actionable steps to resolve security issues and enhance your database security. It can help you to monitor a dynamic database environment where changes are difficult to track and improve your SQL security posture. - -Vulnerability assessment is a scanning service built into Azure SQL Database. The service employs a knowledge base of rules that flag security vulnerabilities. 
It highlights deviations from best practices, such as misconfigurations, excessive permissions, and unprotected sensitive data. - -The rules are based on Microsoft's best practices and focus on the security issues that present the biggest risks to your database and its valuable data. They cover database-level issues and server-level security issues, like server firewall settings and server-level permissions. - -Results of the scan include actionable steps to resolve each issue and provide customized remediation scripts where applicable. You can customize an assessment report for your environment by setting an acceptable baseline for: - -- Permission configurations -- Feature configurations -- Database settings - -## Configure vulnerability assessment - -Take the following steps to configure the vulnerability assessment: - -1. In the [Azure portal](https://portal.azure.com), open the specific resource in Azure SQL Database, SQL Managed Instance Database, or Azure Synapse. - -1. Under the **Security** heading, select **Defender for Cloud**. - -1. Select **Configure** on the link to open the Microsoft Defender for SQL settings pane for either the entire server or managed instance. - - :::image type="content" source="media/sql-vulnerability-assessment/opening-sql-configuration.png" alt-text="Opening the Defender for SQL configuration"::: - - > [!NOTE] - > SQL vulnerability assessment requires **Microsoft Defender for SQL** plan to be able to run scans. For more information about how to enable Microsoft Defender for SQL, see [Microsoft Defender for SQL](azure-defender-for-sql.md). - -1. In the **Server settings** page, define the Microsoft Defender for SQL settings: - - :::image type="content" source="media/sql-vulnerability-assessment/sql-vulnerability-scan-settings.png" alt-text="Configuring the SQL vulnerability assessment scans"::: - - 1. Configure a storage account where your scan results for all databases on the server or managed instance will be stored. 
For information about storage accounts, see [About Azure storage accounts](../../storage/common/storage-account-create.md). - - > [!TIP] - > For more information about storing vulnerability assessment scans behind firewalls and VNets, see [Store vulnerability assessment scan results in a storage account accessible behind firewalls and VNets](sql-database-vulnerability-assessment-storage.md). - - 1. To configure vulnerability assessments to automatically run weekly scans to detect security misconfigurations, set **Periodic recurring scans** to **On**. The results are sent to the email addresses you provide in **Send scan reports to**. You can also send email notification to admins and subscription owners by enabling **Also send email notification to admins and subscription owners**. - -1. SQL vulnerability assessment scans can also be run on-demand: - - 1. From the resource's **Defender for Cloud** page, select **View additional findings in Vulnerability Assessment** to access the scan results from previous scans. - - :::image type="content" source="media/sql-vulnerability-assessment/view-additional-findings-link.png" alt-text="Opening the scan results and manual scan options"::: - - 1. To run an on-demand scan to scan your database for vulnerabilities, select **Scan** from the toolbar: - - :::image type="content" source="media/sql-vulnerability-assessment/on-demand-vulnerability-scan.png" alt-text="Select scan to run an on-demand vulnerability assessment scan of your SQL resource"::: - - - > [!NOTE] - > The scan is lightweight and safe. It takes a few seconds to run and is entirely read-only. It doesn't make any changes to your database. - -## Remediate vulnerabilities - -When a vulnerability scan completes, the report is displayed in the Azure portal. 
The report presents: - -- An overview of your security state -- The number of issues that were found, and -- A summary by severity of the risks -- A list of the findings for further investigations - -:::image type="content" source="media/sql-vulnerability-assessment/sample-sql-vulnerabilities-report.png" alt-text="Sampl scan report from the SQL vulnerability assessment scanner"::: - -To remediate the vulnerabilities discovered: - -1. Review your results and determine which of the report's findings are true security issues for your environment. - -1. Select each failed result to understand its impact and why the security check failed. - - > [!TIP] - > The findings details page includes actionable remediation information explaining how to resolve the issue. - - :::image type="content" source="media/sql-vulnerability-assessment/examining-vulnerability-findings.gif" alt-text="Examining the findings from a vulnerability scan"::: - -1. As you review your assessment results, you can mark specific results as being an acceptable *baseline* in your environment. A baseline is essentially a customization of how the results are reported. In subsequent scans, results that match the baseline are considered as passes. After you've established your baseline security state, vulnerability assessment only reports on deviations from the baseline. In this way, you can focus your attention on the relevant issues. - - :::image type="content" source="media/sql-vulnerability-assessment/baseline-approval.png" alt-text="Approving a finding as a baseline for future scans"::: - -1. If you change the baselines, use the **Scan** button to run an on-demand scan and view the customized report. Any findings you've added to the baseline will now appear in **Passed** with an indication that they've passed because of the baseline changes. 
- - :::image type="content" source="media/sql-vulnerability-assessment/passed-per-custom-baseline.png" alt-text="Passed assessments indicating they've passed per custom baseline"::: - -Your vulnerability assessment scans can now be used to ensure that your database maintains a high level of security, and that your organizational policies are met. - -## Advanced capabilities - -### Export an assessment report - -Select **Export Scan Results** to create a downloadable Excel report of your scan result. This report contains a summary tab that displays a summary of the assessment. The report includes all failed checks. It also includes a **Results** tab that contains the full set of results from the scan. The results include all checks that were run and the result details for each. - -### View scan history - -Select **Scan History** in the vulnerability assessment pane to view a history of all scans previously run on this database. Select a particular scan in the list to view the detailed results of that scan. - -### Disable specific findings from Microsoft Defender for Cloud (preview) - -If you have an organizational need to ignore a finding, rather than remediate it, you can optionally disable it. Disabled findings don't impact your secure score or generate unwanted noise. - -When a finding matches the criteria you've defined in your disable rules, it won't appear in the list of findings. Typical scenarios include: - -- Disable findings with severity below medium -- Disable findings that are non-patchable -- Disable findings from benchmarks that aren't of interest for a defined scope - -> [!IMPORTANT] -> To disable specific findings, you need permissions to edit a policy in Azure Policy. Learn more in [Azure RBAC permissions in Azure Policy](../../governance/policy/overview.md#azure-rbac-permissions-in-azure-policy). - -To create a rule: - -1. 
From the recommendations detail page for **Vulnerability assessment findings on your SQL servers on machines should be remediated**, select **Disable rule**. - -1. Select the relevant scope. - -1. Define your criteria. You can use any of the following criteria: - - Finding ID - - Severity - - Benchmarks - - :::image type="content" source="../../defender-for-cloud/media/defender-for-sql-on-machines-vulnerability-assessment/disable-rule-vulnerability-findings-sql.png" alt-text="Create a disable rule for VA findings on SQL servers on machines"::: - -1. Select **Apply rule**. Changes might take up to 24hrs to take effect. - -1. To view, override, or delete a rule: - - 1. Select **Disable rule**. - - 1. From the scope list, subscriptions with active rules show as **Rule applied**. - - :::image type="content" source="../../defender-for-cloud/media/remediate-vulnerability-findings-vm/modify-rule.png" alt-text="Modify or delete an existing rule"::: - - 1. To view or delete the rule, select the ellipsis menu ("..."). - -## Manage vulnerability assessments programmatically - -# [Azure PowerShell](#tab/azure-powershell) - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. - -You can use Azure PowerShell cmdlets to programmatically manage your vulnerability assessments. The supported cmdlets are: - -| Cmdlet name as a link | Description | -| :-------------------- | :---------- | -| [Clear-AzSqlDatabaseVulnerabilityAssessmentRuleBaseline](/powershell/module/az.sql/Clear-azSqlDatabaseVulnerabilityAssessmentRuleBaseline) | Clears the vulnerability assessment rule baseline.
    First, set the baseline before you use this cmdlet to clear it. | -| [Clear-AzSqlDatabaseVulnerabilityAssessmentSetting](/powershell/module/az.sql/Clear-azSqlDatabaseVulnerabilityAssessmentSetting) | Clears the vulnerability assessment settings of a database. | -| [Clear-AzSqlInstanceDatabaseVulnerabilityAssessmentRuleBaseline](/powershell/module/az.sql/Clear-AzSqlInstanceDatabaseVulnerabilityAssessmentRuleBaseline) | Clears the vulnerability assessment rule baseline of a managed database.
    First, set the baseline before you use this cmdlet to clear it. | -| [Clear-AzSqlInstanceDatabaseVulnerabilityAssessmentSetting](/powershell/module/az.sql/Clear-AzSqlInstanceDatabaseVulnerabilityAssessmentSetting) | Clears the vulnerability assessment settings of a managed database. | -| [Clear-AzSqlInstanceVulnerabilityAssessmentSetting](/powershell/module/az.sql/Clear-AzSqlInstanceVulnerabilityAssessmentSetting) | Clears the vulnerability assessment settings of a managed instance. | -| [Convert-AzSqlDatabaseVulnerabilityAssessmentScan](/powershell/module/az.sql/Convert-azSqlDatabaseVulnerabilityAssessmentScan) | Converts vulnerability assessment scan results of a database to an Excel file. | -| [Convert-AzSqlInstanceDatabaseVulnerabilityAssessmentScan](/powershell/module/az.sql/Convert-AzSqlInstanceDatabaseVulnerabilityAssessmentScan) | Converts vulnerability assessment scan results of a managed database to an Excel file. | -| [Get-AzSqlDatabaseVulnerabilityAssessmentRuleBaseline](/powershell/module/az.sql/Get-azSqlDatabaseVulnerabilityAssessmentRuleBaseline) | Gets the vulnerability assessment rule baseline of a database for a given rule. | -| [Get-AzSqlInstanceDatabaseVulnerabilityAssessmentRuleBaseline](/powershell/module/az.sql/Get-AzSqlInstanceDatabaseVulnerabilityAssessmentRuleBaseline) | Gets the vulnerability assessment rule baseline of a managed database for a given rule. | -| [Get-AzSqlDatabaseVulnerabilityAssessmentScanRecord](/powershell/module/az.sql/Get-azSqlDatabaseVulnerabilityAssessmentScanRecord) | Gets all vulnerability assessment scan records associated with a given database. | -| [Get-AzSqlInstanceDatabaseVulnerabilityAssessmentScanRecord](/powershell/module/az.sql/Get-AzSqlInstanceDatabaseVulnerabilityAssessmentScanRecord) | Gets all vulnerability assessment scan records associated with a given managed database. 
| -| [Get-AzSqlDatabaseVulnerabilityAssessmentSetting](/powershell/module/az.sql/Get-azSqlDatabaseVulnerabilityAssessmentSetting) | Returns the vulnerability assessment settings of a database. | -| [Get-AzSqlInstanceDatabaseVulnerabilityAssessmentSetting](/powershell/module/az.sql/Get-AzSqlInstanceDatabaseVulnerabilityAssessmentSetting) | Returns the vulnerability assessment settings of a managed database. | -| [Set-AzSqlDatabaseVulnerabilityAssessmentRuleBaseline](/powershell/module/az.sql/Set-azSqlDatabaseVulnerabilityAssessmentRuleBaseline) | Sets the vulnerability assessment rule baseline. | -| [Set-AzSqlInstanceDatabaseVulnerabilityAssessmentRuleBaseline](/powershell/module/az.sql/Set-AzSqlInstanceDatabaseVulnerabilityAssessmentRuleBaseline) | Sets the vulnerability assessment rule baseline for a managed database. | -| [Start-AzSqlDatabaseVulnerabilityAssessmentScan](/powershell/module/az.sql/Start-azSqlDatabaseVulnerabilityAssessmentScan) | Triggers the start of a vulnerability assessment scan on a database. | -| [Start-AzSqlInstanceDatabaseVulnerabilityAssessmentScan](/powershell/module/az.sql/Start-AzSqlInstanceDatabaseVulnerabilityAssessmentScan) | Triggers the start of a vulnerability assessment scan on a managed database. | -| [Update-AzSqlDatabaseVulnerabilityAssessmentSetting](/powershell/module/az.sql/Update-azSqlDatabaseVulnerabilityAssessmentSetting) | Updates the vulnerability assessment settings of a database. | -| [Update-AzSqlInstanceDatabaseVulnerabilityAssessmentSetting](/powershell/module/az.sql/Update-AzSqlInstanceDatabaseVulnerabilityAssessmentSetting) | Updates the vulnerability assessment settings of a managed database. | -| [Update-AzSqlInstanceVulnerabilityAssessmentSetting](/powershell/module/az.sql/Update-AzSqlInstanceVulnerabilityAssessmentSetting) | Updates the vulnerability assessment settings of a managed instance. 
| - - -For a script example, see [Azure SQL vulnerability assessment PowerShell support](/archive/blogs/sqlsecurity/azure-sql-vulnerability-assessment-now-with-powershell-support). - -# [Azure CLI](#tab/azure-cli) - -> [!IMPORTANT] -> The following Azure CLI commands are for SQL databases hosted on VMs or On-Premise machines. For vunerability assessments regarding Azure SQL Databases, refer to the Azure portal or PowerShell section. - -You can use Azure CLI commands to programmatically manage your vulnerability assessments. The supported commands are: - -| Command name as a link | Description | -| :-------------------- | :---------- | -| [az security va sql baseline delete](/cli/azure/security/va/sql/baseline#az-security-va-sql-baseline-delete) | Delete Sql Vulnerability Assessment rule baseline. | -| [az security va sql baseline list](/cli/azure/security/va/sql/baseline#az-security-va-sql-baseline-list) | View Sql Vulnerability Assessment baseline for all rules. | -| [az security va sql baseline set](/cli/azure/security/va/sql/baseline#az-security-va-sql-baseline-set) | Sets Sql Vulnerability Assessment baseline. Replaces the current baseline. | -| [az security va sql baseline show](/cli/azure/security/va/sql/baseline#az-security-va-sql-baseline-show) | View Sql Vulnerability Assessment rule baseline. | -| [az security va sql baseline update](/cli/azure/security/va/sql/baseline#az-security-va-sql-baseline-update) | Update Sql Vulnerability Assessment rule baseline. Replaces the current rule baseline. | -| [az security va sql results list](/cli/azure/security/va/sql/results#az-security-va-sql-results-list) | View all Sql Vulnerability Assessment scan results. | -| [az security va sql results show](/cli/azure/security/va/sql/results#az-security-va-sql-results-show) | View Sql Vulnerability Assessment scan results. 
| -| [az security va sql scans list](/cli/azure/security/va/sql/scans#az-security-va-sql-scans-list) | List all Sql Vulnerability Assessment scan summaries. | -| [az security va sql scans show](/cli/azure/security/va/sql/scans#az-security-va-sql-scans-show) | View Sql Vulnerability Assessment scan summaries. | - ---- - -### Using Resource Manager templates - -To configure vulnerability assessment baselines by using Azure Resource Manager templates, use the `Microsoft.Sql/servers/databases/vulnerabilityAssessments/rules/baselines` type. - -Ensure that you have enabled `vulnerabilityAssessments` before you add baselines. - -Here's an example for defining Baseline Rule VA2065 to `master` database and VA1143 to `user` database as resources in a Resource Manager template: - -```json - "resources": [ - { - "type": "Microsoft.Sql/servers/databases/vulnerabilityAssessments/rules/baselines", - "apiVersion": "2018-06-01-preview", - "name": "[concat(parameters('server_name'),'/', parameters('database_name') , '/default/VA2065/master')]", - "properties": { - "baselineResults": [ - { - "result": [ - "FirewallRuleName3", - "StartIpAddress", - "EndIpAddress" - ] - }, - { - "result": [ - "FirewallRuleName4", - "62.92.15.68", - "62.92.15.68" - ] - } - ] - }, - "type": "Microsoft.Sql/servers/databases/vulnerabilityAssessments/rules/baselines", - "apiVersion": "2018-06-01-preview", - "name": "[concat(parameters('server_name'),'/', parameters('database_name'), '/default/VA2130/Default')]", - "dependsOn": [ - "[resourceId('Microsoft.Sql/servers/vulnerabilityAssessments', parameters('server_name'), 'Default')]" - ], - "properties": { - "baselineResults": [ - { - "result": [ - "dbo" - ] - } - ] - } - } - ] -``` - -For `master` database and `user` database, the resource names are defined differently: - -- Master database - "name": "[concat(parameters('server_name'),'/', parameters('database_name') , '/default/VA2065/master')]", -- User database - "name": 
"[concat(parameters('server_name'),'/', parameters('database_name') , '/default/VA2065/default')]", - -To handle Boolean types as true/false, set the baseline result with binary input like "1"/"0". - -```json - { - "type": "Microsoft.Sql/servers/databases/vulnerabilityAssessments/rules/baselines", - "apiVersion": "2018-06-01-preview", - "name": "[concat(parameters('server_name'),'/', parameters('database_name'), '/default/VA1143/Default')]", - - "dependsOn": [ - "[resourceId('Microsoft.Sql/servers/vulnerabilityAssessments', parameters('server_name'), 'Default')]" - ], - - "properties": { - "baselineResults": [ - { - "result": [ - "1" - ] - } - ] - } - - } -``` -## Permissions - -One of the following permissions is required to see vulnerability assessment results in the Microsoft Defender for Cloud recommendation **SQL databases should have vulnerability findings resolved**: -- Security Admin -- Security Reader - -The following permissions are required to changes vulnerability assessment settings: - -- SQL Security Manager -- Storage Blob Data Reader -- Owner role on the storage account - -The following permissions are required to open links in email notifications about scan results or to view scan results at the resource-level: - -- SQL Security Manager -- Storage Blob Data Reader - -## Data residency - -SQL Vulnerability Assessment queries the SQL server using publicly available queries under Defender for Cloud recommendations for SQL Vulnerability Assessment, and stores the query results. The data is stored in the configured user-owned storage account. - -SQL Vulnerability Assessment allows you to specify the region where your data will be stored by choosing the location of the storage account. The user is responsible for the security and data resiliency of the storage account. - -## Next steps - -- Learn more about [Microsoft Defender for SQL](azure-defender-for-sql.md). 
-- Learn more about [data discovery and classification](data-discovery-and-classification-overview.md). -- Learn more about [Storing vulnerability assessment scan results in a storage account accessible behind firewalls and VNets](sql-database-vulnerability-assessment-storage.md). diff --git a/articles/azure-sql/database/stream-data-stream-analytics-integration.md b/articles/azure-sql/database/stream-data-stream-analytics-integration.md deleted file mode 100644 index a79f1de203622..0000000000000 --- a/articles/azure-sql/database/stream-data-stream-analytics-integration.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: Stream data using Azure Stream Analytics integration (preview) -description: Use Azure Stream Analytics integration to stream data into Azure SQL Database. -services: sql-database -ms.service: sql-database -ms.subservice: development -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: ajetasin -ms.author: ajetasi -ms.reviewer: kendralittle, mathoma -ms.date: 11/04/2019 ---- - -# Stream data into Azure SQL Database using Azure Stream Analytics integration (preview) - -Users can now ingest, process, view, and analyze real-time streaming data into a table directly from a database in Azure SQL Database. They do so in the Azure portal using [Azure Stream Analytics](../../stream-analytics/stream-analytics-introduction.md). This experience enables a wide variety of scenarios such as connected car, remote monitoring, fraud detection, and many more. In the Azure portal, you can select an events source (Event Hub/IoT Hub), view incoming real-time events, and select a table to store events. You can also write Azure Stream Analytics Query Language queries in the portal to transform incoming events and store them in the selected table. This new entry point is in addition to the creation and configuration experiences that already exist in Stream Analytics. 
This experience starts from the context of your database, enabling you to quickly set up a Stream Analytics job and navigate seamlessly between the database in Azure SQL Database and Stream Analytics experiences. - -![Stream Analytics flow](./media/stream-data-stream-analytics-integration/stream-analytics-flow.png) - -## Key benefits - -- Minimum context switching: You can start from a database in Azure SQL Database in the portal and start ingesting real-time data into a table without switching to any other service. -- Reduced number of steps: The context of your database and table is used to pre-configure a Stream Analytics job. -- Additional ease of use with preview data: Preview incoming data from the events source (Event Hub/IoT Hub) in the context of selected table - -> [!IMPORTANT] -> An Azure Stream Analytics job can output to Azure SQL Database, Azure SQL Managed Instance, or Azure Synapse Analytics. For more information, see [Outputs](../../stream-analytics/stream-analytics-define-outputs.md). - -## Prerequisites - -To complete the steps in this article, you need the following resources: - -- An Azure subscription. If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/). -- A database in Azure SQL Database. For details, see [Create a single database in Azure SQL Database](single-database-create-quickstart.md). -- A firewall rule allowing your computer to connect to the server. For details, see [Create a server-level firewall rule](firewall-create-server-level-portal-quickstart.md). - -## Configure Stream analytics integration - -1. Sign in to the Azure portal. -2. Navigate to the database where you want to ingest your streaming data. Select **Stream analytics (preview)**. - - ![Stream Analytics](./media/stream-data-stream-analytics-integration/stream-analytics.png) - -3. 
To start ingesting your streaming data into this database, select **Create** and give a name to your streaming job, and then select **Next: Input**. - - ![configure Stream Analytics job basics](./media/stream-data-stream-analytics-integration/create-job.png) - -4. Enter your events source details, and then select **Next: Output**. - - - **Input type**: Event Hub/IoT Hub - - **Input alias**: Enter a name to identify your events source - - **Subscription**: Same as Azure SQL Database subscription - - **Event Hub namespace**: Name for namespace - - **Event Hub name**: Name of event hub within selected namespace - - **Event Hub policy name** (Default to create new): Give a policy name - - **Event Hub consumer group** (Default to create new): Give a consumer group name - - We recommend that you create a consumer group and a policy for each new Azure Stream Analytics job that you create from here. Consumer groups allow only five concurrent readers, so providing a dedicated consumer group for each job will avoid any errors that might arise from exceeding that limit. A dedicated policy allows you to rotate your key or revoke permissions without impacting other resources. - - ![configure Stream Analytics job output](./media/stream-data-stream-analytics-integration/create-job-output.png) - -5. Select which table you want to ingest your streaming data into. Once done, select **Create**. - - - **Username**, **Password**: Enter your credentials for SQL server authentication. Select **Validate**. - - **Table**: Select **Create new** or **Use existing**. In this flow, let’s select **Create**. This will create a new table when you start the stream Analytics job. - - ![create Stream Analytics job](./media/stream-data-stream-analytics-integration/create.png) - -6. 
A query page opens with following details: - - - Your **Input** (input events source) from which you'll ingest data - - Your **Output** (output table) which will store transformed data - - Sample [SAQL query](../../stream-analytics/stream-analytics-stream-analytics-query-patterns.md) with SELECT statement. - - **Input preview**: Shows snapshot of latest incoming data from input events source. - - The serialization type in your data is automatically detected (JSON/CSV). You can manually change it as well to JSON/CSV/AVRO. - - You can preview incoming data in the Table format or Raw format. - - If your data shown isn't current, select **Refresh** to see the latest events. - - Select **Select time range** to test your query against a specific time range of incoming events. - - Select **Upload sample input** to test your query by uploading a sample JSON/CSV file. For more information about testing a SAQL query, see [Test an Azure Stream Analytics job with sample data](../../stream-analytics/stream-analytics-test-query.md). - - ![test query](./media/stream-data-stream-analytics-integration/test-query.png) - - - **Test results**: Select **Test query** and you can see the results of your streaming query - - ![test results](./media/stream-data-stream-analytics-integration/test-results.png) - - - **Test results schema**: Shows the schema of the results of your streaming query after testing. Make sure the test results schema matches with your output schema. - - ![test results schema](./media/stream-data-stream-analytics-integration/test-results-schema.png) - - - **Output schema**: This contains schema of the table you selected in step 5 (new or existing). - - - Create new: If you selected this option in step 5, you won’t see the schema yet until you start the streaming job. When creating a new table, select the appropriate table index. 
For more information about table indexing, see [Clustered and Nonclustered Indexes Described](/sql/relational-databases/indexes/clustered-and-nonclustered-indexes-described/). - - Use existing: If you selected this option in step 5, you'll see the schema of selected table. - -7. After you're done authoring & testing the query, select **Save query**. Select **Start Stream Analytics job** to start ingesting transformed data into the SQL table. Once you finalize the following fields, **start** the job. - - **Output start time**: This defines the time of the first output of the job. - - Now: The job will start now and process new incoming data. - - Custom: The job will start now but will process data from a specific point in time (that can be in the past or the future). For more information, see [How to start an Azure Stream Analytics job](../../stream-analytics/start-job.md). - - **Streaming units**: Azure Stream Analytics is priced by the number of streaming units required to process the data into the service. For more information, see [Azure Stream Analytics pricing](https://azure.microsoft.com/pricing/details/stream-analytics/). - - **Output data error handling**: - - Retry: When an error occurs, Azure Stream Analytics retries writing the event indefinitely until the write succeeds. There's no timeout for retries. Eventually all subsequent events are blocked from processing by the event that is retrying. This option is the default output error handling policy. - - Drop: Azure Stream Analytics will drop any output event that results in a data conversion error. The dropped events can't be recovered for reprocessing later. All transient errors (for example, network errors) are retried regardless of the output error handling policy configuration. - - **SQL Database output settings**: An option for inheriting the partitioning scheme of your previous query step, to enable fully parallel topology with multiple writers to the table. 
For more information, see [Azure Stream Analytics output to Azure SQL Database](../../stream-analytics/stream-analytics-sql-output-perf.md). - - **Max batch count**: The recommended upper limit on the number of records sent with every bulk insert transaction. - For more information about output error handling, see [Output error policies in Azure Stream Analytics](../../stream-analytics/stream-analytics-output-error-policy.md). - - ![start job](./media/stream-data-stream-analytics-integration/start-job.png) - -8. Once you start the job, you'll see the Running job in the list, and you can take following actions: - - **Start/stop the job**: If the job is running, you can stop the job. If the job is stopped, you can start the job. - - **Edit job**: You can edit the query. If you want to do more changes to the job ex, add more inputs/outputs, then open the job in Stream Analytics. Edit option is disabled when the job is running. - - **Preview output table**: You can preview the table in the SQL query editor. - - **Open in Stream Analytics**: Open the job in Stream Analytics to view monitoring, debugging details of the job. - - ![stream analytics jobs](./media/stream-data-stream-analytics-integration/jobs.png) - -## Next steps - -- [Azure Stream Analytics documentation](../../stream-analytics/index.yml) -- [Azure Stream Analytics solution patterns](../../stream-analytics/stream-analytics-solution-patterns.md) diff --git a/articles/azure-sql/database/temporal-tables-retention-policy.md b/articles/azure-sql/database/temporal-tables-retention-policy.md deleted file mode 100644 index c1da5573e3708..0000000000000 --- a/articles/azure-sql/database/temporal-tables-retention-policy.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: Manage historical data in temporal tables -description: Learn how to use temporal retention policy to keep historical data under your control. 
-services: sql-database -ms.service: sql-db-mi -ms.subservice: performance -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: how-to -author: MladjoA -ms.author: mlandzic -ms.reviewer: kendralittle, mathoma -ms.date: 10/18/2021 ---- -# Manage historical data in Temporal tables with retention policy -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -Temporal tables may increase database size more than regular tables, especially if you retain historical data for a longer period of time. Hence, retention policy for historical data is an important aspect of planning and managing the lifecycle of every temporal table. Temporal tables in Azure SQL Database and Azure SQL Managed Instance come with easy-to-use retention mechanism that helps you accomplish this task. - -Temporal history retention can be configured at the individual table level, which allows users to create flexible aging policies. Applying temporal retention is simple: it requires only one parameter to be set during table creation or schema change. - -After you define retention policy, Azure SQL Database and Azure SQL Managed Instance starts checking regularly if there are historical rows that are eligible for automatic data cleanup. Identification of matching rows and their removal from the history table occur transparently, in the background task that is scheduled and run by the system. Age condition for the history table rows is checked based on the column representing end of SYSTEM_TIME period. If retention period, for example, is set to six months, table rows eligible for cleanup satisfy the following condition: - -```sql -ValidTo < DATEADD (MONTH, -6, SYSUTCDATETIME()) -``` - -In the preceding example, we assumed that **ValidTo** column corresponds to the end of SYSTEM_TIME period. - -## How to configure retention policy - -Before you configure retention policy for a temporal table, check first whether temporal historical retention is enabled *at the database level*. 
- -```sql -SELECT is_temporal_history_retention_enabled, name -FROM sys.databases -``` - -Database flag **is_temporal_history_retention_enabled** is set to ON by default, but users can change it with ALTER DATABASE statement. It is also automatically set to OFF after [point in time restore](recovery-using-backups.md) operation. To enable temporal history retention cleanup for your database, execute the following statement: - -```sql -ALTER DATABASE [] -SET TEMPORAL_HISTORY_RETENTION ON -``` - -> [!IMPORTANT] -> You can configure retention for temporal tables even if **is_temporal_history_retention_enabled** is OFF, but automatic cleanup for aged rows is not triggered in that case. - -Retention policy is configured during table creation by specifying value for the HISTORY_RETENTION_PERIOD parameter: - -```sql -CREATE TABLE dbo.WebsiteUserInfo -( - [UserID] int NOT NULL PRIMARY KEY CLUSTERED - , [UserName] nvarchar(100) NOT NULL - , [PagesVisited] int NOT NULL - , [ValidFrom] datetime2 (0) GENERATED ALWAYS AS ROW START - , [ValidTo] datetime2 (0) GENERATED ALWAYS AS ROW END - , PERIOD FOR SYSTEM_TIME (ValidFrom, ValidTo) - ) - WITH - ( - SYSTEM_VERSIONING = ON - ( - HISTORY_TABLE = dbo.WebsiteUserInfoHistory, - HISTORY_RETENTION_PERIOD = 6 MONTHS - ) - ); -``` - -Azure SQL Database and Azure SQL Managed Instance allow you to specify retention period by using different time units: DAYS, WEEKS, MONTHS, and YEARS. If HISTORY_RETENTION_PERIOD is omitted, INFINITE retention is assumed. You can also use INFINITE keyword explicitly. - -In some scenarios, you may want to configure retention after table creation, or to change previously configured value. In that case use ALTER TABLE statement: - -```sql -ALTER TABLE dbo.WebsiteUserInfo -SET (SYSTEM_VERSIONING = ON (HISTORY_RETENTION_PERIOD = 9 MONTHS)); -``` - -> [!IMPORTANT] -> Setting SYSTEM_VERSIONING to OFF *does not preserve* retention period value. 
Setting SYSTEM_VERSIONING to ON without HISTORY_RETENTION_PERIOD specified explicitly results in the INFINITE retention period. - -To review current state of the retention policy, use the following query that joins temporal retention enablement flag at the database level with retention periods for individual tables: - -```sql -SELECT DB.is_temporal_history_retention_enabled, -SCHEMA_NAME(T1.schema_id) AS TemporalTableSchema, -T1.name as TemporalTableName, SCHEMA_NAME(T2.schema_id) AS HistoryTableSchema, -T2.name as HistoryTableName,T1.history_retention_period, -T1.history_retention_period_unit_desc -FROM sys.tables T1 -OUTER APPLY (select is_temporal_history_retention_enabled from sys.databases -where name = DB_NAME()) AS DB -LEFT JOIN sys.tables T2 -ON T1.history_table_id = T2.object_id WHERE T1.temporal_type = 2 -``` - -## How ages rows are deleted - -The cleanup process depends on the index layout of the history table. It is important to notice that *only history tables with a clustered index (B-tree or columnstore) can have finite retention policy configured*. A background task is created to perform aged data cleanup for all temporal tables with finite retention period. -Cleanup logic for the rowstore (B-tree) clustered index deletes aged row in smaller chunks (up to 10K) minimizing pressure on database log and IO subsystem. Although cleanup logic utilizes required B-tree index, order of deletions for the rows older than retention period cannot be firmly guaranteed. Hence, *do not take any dependency on the cleanup order in your applications*. - -The cleanup task for the clustered columnstore removes entire [row groups](/sql/relational-databases/indexes/columnstore-indexes-overview) at once (typically contain 1 million of rows each), which is very efficient, especially when historical data is generated at a high pace. 
- -![Clustered columnstore retention](./media/temporal-tables-retention-policy/cciretention.png) - -Excellent data compression and efficient retention cleanup makes clustered columnstore index a perfect choice for scenarios when your workload rapidly generates high amount of historical data. That pattern is typical for intensive [transactional processing workloads that use temporal tables](/sql/relational-databases/tables/temporal-table-usage-scenarios) for change tracking and auditing, trend analysis, or IoT data ingestion. - -## Index considerations - -The cleanup task for tables with rowstore clustered index requires index to start with the column corresponding the end of SYSTEM_TIME period. If such index doesn't exist, you cannot configure a finite retention period: - -*Msg 13765, Level 16, State 1

    -Setting finite retention period failed on system-versioned temporal table 'temporalstagetestdb.dbo.WebsiteUserInfo' because the history table 'temporalstagetestdb.dbo.WebsiteUserInfoHistory' does not contain required clustered index. Consider creating a clustered columnstore or B-tree index starting with the column that matches end of SYSTEM_TIME period, on the history table.* - -It is important to notice that the default history table created by Azure SQL Database and Azure SQL Managed Instance already has clustered index, which is compliant for retention policy. If you try to remove that index on a table with finite retention period, operation fails with the following error: - -*Msg 13766, Level 16, State 1

    -Cannot drop the clustered index 'WebsiteUserInfoHistory.IX_WebsiteUserInfoHistory' because it is being used for automatic cleanup of aged data. Consider setting HISTORY_RETENTION_PERIOD to INFINITE on the corresponding system-versioned temporal table if you need to drop this index.* - -Cleanup on the clustered columnstore index works optimally if historical rows are inserted in the ascending order (ordered by the end of period column), which is always the case when the history table is populated exclusively by the SYSTEM_VERSIONING mechanism. If rows in the history table are not ordered by end of period column (which may be the case if you migrated existing historical data), you should re-create clustered columnstore index on top of B-tree rowstore index that is properly ordered, to achieve optimal performance. - -Avoid rebuilding clustered columnstore index on the history table with the finite retention period, because it may change ordering in the row groups naturally imposed by the system-versioning operation. If you need to rebuild clustered columnstore index on the history table, do that by re-creating it on top of compliant B-tree index, preserving ordering in the rowgroups necessary for regular data cleanup. 
The same approach should be taken if you create temporal table with existing history table that has clustered columnstore index without guaranteed data order: - -```sql -/*Create B-tree ordered by the end of period column*/ -CREATE CLUSTERED INDEX IX_WebsiteUserInfoHistory ON WebsiteUserInfoHistory (ValidTo) -WITH (DROP_EXISTING = ON); -GO -/*Re-create clustered columnstore index*/ -CREATE CLUSTERED COLUMNSTORE INDEX IX_WebsiteUserInfoHistory ON WebsiteUserInfoHistory -WITH (DROP_EXISTING = ON); -``` - -When finite retention period is configured for the history table with the clustered columnstore index, you cannot create additional non-clustered B-tree indexes on that table: - -```sql -CREATE NONCLUSTERED INDEX IX_WebHistNCI ON WebsiteUserInfoHistory ([UserName]) -``` - -An attempt to execute above statement fails with the following error: - -*Msg 13772, Level 16, State 1

    -Cannot create non-clustered index on a temporal history table 'WebsiteUserInfoHistory' since it has finite retention period and clustered columnstore index defined.* - -## Querying tables with retention policy - -All queries on the temporal table automatically filter out historical rows matching finite retention policy, to avoid unpredictable and inconsistent results, since aged rows can be deleted by the cleanup task, *at any point in time and in arbitrary order*. - -The following picture shows the query plan for a simple query: - -```sql -SELECT * FROM dbo.WebsiteUserInfo FOR SYSTEM_TIME ALL; -``` - -The query plan includes additional filter applied to end of period column (ValidTo) in the Clustered Index Scan operator on the history table (highlighted). This example assumes that one MONTH retention period was set on WebsiteUserInfo table. - -![Retention query filter](./media/temporal-tables-retention-policy/queryexecplanwithretention.png) - -However, if you query history table directly, you may see rows that are older than specified retention period, but without any guarantee for repeatable query results. The following picture shows query execution plan for the query on the history table without additional filters applied: - -![Querying history without retention filter](./media/temporal-tables-retention-policy/queryexecplanhistorytable.png) - -Do not rely your business logic on reading history table beyond retention period as you may get inconsistent or unexpected results. We recommend that you use temporal queries with FOR SYSTEM_TIME clause for analyzing data in temporal tables. - -## Point in time restore considerations - -When you create new database by [restoring existing database to a specific point in time](recovery-using-backups.md), it has temporal retention disabled at the database level. (**is_temporal_history_retention_enabled** flag set to OFF). 
This functionality allows you to examine all historical rows upon restore, without worrying that aged rows are removed before you get to query them. You can use it to *inspect historical data beyond configured retention period*. - -Say that a temporal table has one MONTH retention period specified. If your database was created in Premium Service tier, you would be able to create database copy with the database state up to 35 days back in the past. That effectively would allow you to analyze historical rows that are up to 65 days old by querying the history table directly. - -If you want to activate temporal retention cleanup, run the following Transact-SQL statement after point in time restore: - -```sql -ALTER DATABASE [] -SET TEMPORAL_HISTORY_RETENTION ON -``` - -## Next steps - -To learn how to use temporal tables in your applications, check out [Getting Started with Temporal Tables](../temporal-tables.md). - -For detailed information about temporal tables, review [Temporal tables](/sql/relational-databases/tables/temporal-tables). 
diff --git a/articles/azure-sql/database/threat-detection-configure.md b/articles/azure-sql/database/threat-detection-configure.md deleted file mode 100644 index 53119869c65f7..0000000000000 --- a/articles/azure-sql/database/threat-detection-configure.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: Configure Advanced Threat Protection -description: Advanced Threat Protection detects anomalous database activities indicating potential security threats to the database in Azure SQL Database -services: sql-database -ms.service: sql-database -ms.subservice: security -ms.custom: seo-dt-2019, sqldbrb=1 -ms.topic: how-to -author: rmatchoro -ms.author: ronmat -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 02/16/2022 ---- -# Configure Advanced Threat Protection for Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -[Advanced Threat Protection](threat-detection-overview.md) for Azure SQL Database detects anomalous activities indicating unusual and potentially harmful attempts to access or exploit databases. Advanced Threat Protection can identify **Potential SQL injection**, **Access from unusual location or data center**, **Access from unfamiliar principal or potentially harmful application**, and **Brute force SQL credentials** - see more details in [Advanced Threat Protection alerts](threat-detection-overview.md#alerts). - -You can receive notifications about the detected threats via [email notifications](threat-detection-overview.md#explore-detection-of-a-suspicious-event) or [Azure portal](threat-detection-overview.md#explore-alerts-in-the-azure-portal) - -[Advanced Threat Protection](threat-detection-overview.md) is part of the [Microsoft Defender for SQL](azure-defender-for-sql.md) offering, which is a unified package for advanced SQL security capabilities. Advanced Threat Protection can be accessed and managed via the central Microsoft Defender for SQL portal. - -## Set up Advanced Threat Protection in the Azure portal - -1. 
Sign into the [Azure portal](https://portal.azure.com). -2. Navigate to the configuration page of the [server](logical-servers.md) you want to protect. In the security settings, select **Microsoft Defender for Cloud**. -3. On the **Microsoft Defender for Cloud** configuration page: - - 1. If Microsoft Defender for SQL hasn't yet been enabled, select **Enable Microsoft Defender for SQL**. - - 1. Select **Configure**. - - :::image type="content" source="media/azure-defender-for-sql/enable-microsoft-defender-sql.png" alt-text="Enable Microsoft Defender for SQL." lightbox="media/azure-defender-for-sql/enable-microsoft-defender-sql.png"::: - - 1. Under **ADVANCED THREAT PROTECTION SETTINGS**, select **Add your contact details to the subscription's email settings in Defender for Cloud**. - - :::image type="content" source="media/azure-defender-for-sql/advanced-threat-protection-add-contact-details.png" alt-text="Select link to proceed to advanced threat protection settings." lightbox="media/azure-defender-for-sql/advanced-threat-protection-add-contact-details.png"::: - - 1. Provide the list of emails to receive notifications upon detection of anomalous database activities in the **Additional email addresses (separated by commas)** text box. - 1. Optionally customize the severity of alerts that will trigger notifications to be sent under **Notification types**. - 1. Select **Save**. - - :::image type="content" source="media/azure-defender-for-sql/advanced-threat-protection-configure-emails.png" alt-text="Enter emails for Advanced Threat Protection notifications." lightbox="media/azure-defender-for-sql/advanced-threat-protection-configure-emails.png"::: - -## Set up Advanced Threat Protection using PowerShell - -For a script example, see [Configure auditing and Advanced Threat Protection using PowerShell](scripts/auditing-threat-detection-powershell-configure.md). 
- -## Next steps - -Learn more about Advanced Threat Protection and Microsoft Defender for SQL in the following articles: - -- [Advanced Threat Protection](threat-detection-overview.md) -- [Advanced Threat Protection in SQL Managed Instance](../managed-instance/threat-detection-configure.md) -- [Microsoft Defender for SQL](azure-defender-for-sql.md) -- [Auditing for Azure SQL Database and Azure Synapse Analytics](auditing-overview.md) -- [Microsoft Defender for Cloud](../../security-center/security-center-introduction.md) -- For more information on pricing, see the [SQL Database pricing page](https://azure.microsoft.com/pricing/details/sql-database/) diff --git a/articles/azure-sql/database/threat-detection-overview.md b/articles/azure-sql/database/threat-detection-overview.md deleted file mode 100644 index 8483bc67482d8..0000000000000 --- a/articles/azure-sql/database/threat-detection-overview.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Advanced Threat Protection -titleSuffix: Azure SQL Database, SQL Managed Instance, & Azure Synapse Analytics -description: Advanced Threat Protection detects anomalous database activities indicating potential security threats in Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics. 
-services: sql-database -ms.service: sql-db-mi -ms.subservice: security -ms.custom: sqldbrb=2 -ms.topic: conceptual -author: davidtrigano -ms.author: datrigan -ms.reviewer: kendralittle, vanto, sstein, mathoma -ms.date: 06/09/2021 -tags: azure-synapse ---- - -# SQL Advanced Threat Protection -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] :::image type="icon" source="../media/applies-to/yes.png" border="false":::SQL Server on Azure VM :::image type="icon" source="../media/applies-to/yes.png" border="false":::Azure Arc-enabled SQL Server - -Advanced Threat Protection for [Azure SQL Database](sql-database-paas-overview.md), [Azure SQL Managed Instance](../managed-instance/sql-managed-instance-paas-overview.md), [Azure Synapse Analytics](../../synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-what-is.md), [SQL Server on Azure Virtual Machines](../virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md) and [Azure Arc-enabled SQL Server](/sql/sql-server/azure-arc/overview) detects anomalous activities indicating unusual and potentially harmful attempts to access or exploit databases. - -Advanced Threat Protection is part of the [Microsoft Defender for SQL](../../security-center/defender-for-sql-introduction.md) offering, which is a unified package for advanced SQL security capabilities. Advanced Threat Protection can be accessed and managed via the central Microsoft Defender for SQL portal. - -## Overview - -Advanced Threat Protection provides a new layer of security, which enables customers to detect and respond to potential threats as they occur by providing security alerts on anomalous activities. Users receive an alert upon suspicious database activities, potential vulnerabilities, and SQL injection attacks, as well as anomalous database access and queries patterns. 
Advanced Threat Protection integrates alerts with [Microsoft Defender for Cloud](https://azure.microsoft.com/services/security-center/), which include details of suspicious activity and recommend action on how to investigate and mitigate the threat. Advanced Threat Protection makes it simple to address potential threats to the database without the need to be a security expert or manage advanced security monitoring systems. - -For a full investigation experience, it is recommended to enable auditing, which writes database events to an audit log in your Azure storage account. To enable auditing, see [Auditing for Azure SQL Database and Azure Synapse](/azure/azure-sql/database/auditing-overview) or [Auditing for Azure SQL Managed Instance](../managed-instance/auditing-configure.md). - -## Alerts - -Advanced Threat Protection detects anomalous activities indicating unusual and potentially harmful attempts to access or exploit databases. For a list of alerts, see the [Alerts for SQL Database and Azure Synapse Analytics in Microsoft Defender for Cloud](../../security-center/alerts-reference.md#alerts-sql-db-and-warehouse). - -## Explore detection of a suspicious event - -You receive an email notification upon detection of anomalous database activities. The email provides information on the suspicious security event including the nature of the anomalous activities, database name, server name, application name, and the event time. In addition, the email provides information on possible causes and recommended actions to investigate and mitigate the potential threat to the database. - -![Anomalous activity report](./media/threat-detection-overview/anomalous_activity_report.png) - -1. Click the **View recent SQL alerts** link in the email to launch the Azure portal and show the Microsoft Defender for Cloud alerts page, which provides an overview of active threats detected on the database. - - ![Activity threats](./media/threat-detection-overview/active_threats.png) - -1. 
Click a specific alert to get additional details and actions for investigating this threat and remediating future threats. - - For example, SQL injection is one of the most common Web application security issues on the Internet that is used to attack data-driven applications. Attackers take advantage of application vulnerabilities to inject malicious SQL statements into application entry fields, breaching or modifying data in the database. For SQL Injection alerts, the alert’s details include the vulnerable SQL statement that was exploited. - - ![Specific alert](./media/threat-detection-overview/specific_alert.png) - -## Explore alerts in the Azure portal - -Advanced Threat Protection integrates its alerts with [Microsoft Defender for Cloud](https://azure.microsoft.com/services/security-center/). Live SQL Advanced Threat Protection tiles within the database and SQL Microsoft Defender for Cloud blades in the Azure portal track the status of active threats. - -Click **Advanced Threat Protection alert** to launch the Microsoft Defender for Cloud alerts page and get an overview of active SQL threats detected on the database. - -:::image type="content" source="media/azure-defender-for-sql/advanced-threat-protection-alerts.png" alt-text="advanced threat protection alerts in database overview"::: - -:::image type="content" source="media/azure-defender-for-sql/advanced-threat-protection.png" alt-text="advanced threat protection in Defender for SQL"::: - -## Next steps - -- Learn more about [Advanced Threat Protection in Azure SQL Database & Azure Synapse](threat-detection-configure.md). -- Learn more about [Advanced Threat Protection in Azure SQL Managed Instance](../managed-instance/threat-detection-configure.md). -- Learn more about [Microsoft Defender for SQL](azure-defender-for-sql.md). 
-- Learn more about [Azure SQL Database auditing](/azure/azure-sql/database/auditing-overview) -- Learn more about [Microsoft Defender for Cloud](../../security-center/security-center-introduction.md) - For more information on pricing, see the [Azure SQL Database pricing page](https://azure.microsoft.com/pricing/details/sql-database/) diff --git a/articles/azure-sql/database/transact-sql-tsql-differences-sql-server.md b/articles/azure-sql/database/transact-sql-tsql-differences-sql-server.md deleted file mode 100644 index 19b6c9bc8b673..0000000000000 --- a/articles/azure-sql/database/transact-sql-tsql-differences-sql-server.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: Resolving T-SQL differences-migration -description: T-SQL statements that are less than fully supported in Azure SQL Database. -services: sql-database -ms.service: sql-database -ms.subservice: migration -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: reference -author: mokabiru -ms.author: mokabiru -ms.reviewer: kendralittle, mathoma -ms.date: 06/17/2021 ---- -# T-SQL differences between SQL Server and Azure SQL Database - -When [migrating your database](migrate-to-database-from-sql-server.md) from SQL Server to Azure SQL Database, you may discover that your SQL Server databases require some re-engineering before they can be migrated. This article provides guidance to assist you in both performing this re-engineering and understanding the underlying reasons why the re-engineering is necessary. To detect incompatibilities and migrate databases to Azure SQL Database, use [Data Migration Assistant (DMA)](/sql/dma/dma-overview). - -## Overview - -Most T-SQL features that applications use are fully supported in both Microsoft SQL Server and Azure SQL Database. For example, the core SQL components such as data types, operators, string, arithmetic, logical, and cursor functions work identically in SQL Server and SQL Database. 
There are, however, a few T-SQL differences in DDL (data definition language) and DML (data manipulation language) elements resulting in T-SQL statements and queries that are only partially supported (which we discuss later in this article). - -In addition, there are some features and syntax that isn't supported at all because Azure SQL Database is designed to isolate features from dependencies on the system databases and the operating system. As such, most instance-level features are not supported in SQL Database. T-SQL statements and options aren't available if they configure instance-level options, operating system components, or specify file system configuration. When such capabilities are required, an appropriate alternative is often available in some other way from SQL Database or from another Azure feature or service. - -For example, high availability is built into Azure SQL Database. T-SQL statements related to availability groups are not supported by SQL Database, and the dynamic management views related to Always On Availability Groups are also not supported. - -For a list of the features that are supported and unsupported by SQL Database, see [Azure SQL Database feature comparison](features-comparison.md). This page supplements that article, and focuses on T-SQL statements. - -## T-SQL syntax statements with partial differences - -The core DDL statements are available, but DDL statement extensions related to unsupported features, such as file placement on disk, are not supported. - -- In SQL Server, `CREATE DATABASE` and `ALTER DATABASE` statements have over three dozen options. The statements include file placement, FILESTREAM, and service broker options that only apply to SQL Server. 
This may not matter if you create databases in SQL Database before you migrate, but if you're migrating T-SQL code that creates databases you should compare [CREATE DATABASE (Azure SQL Database)](/sql/t-sql/statements/create-database-transact-sql?view=azuresqldb-current&preserve-view=true) with the SQL Server syntax at [CREATE DATABASE (SQL Server T-SQL)](/sql/t-sql/statements/create-database-transact-sql?view=sql-server-ver15&preserve-view=true) to make sure all the options you use are supported. `CREATE DATABASE` for Azure SQL Database also has service objective and elastic pool options that apply only to SQL Database. -- The `CREATE TABLE` and `ALTER TABLE` statements have `FILETABLE` and `FILESTREAM` options that can't be used on SQL Database because these features aren't supported. -- `CREATE LOGIN` and `ALTER LOGIN` statements are supported, but do not offer all options available in SQL Server. To make your database more portable, SQL Database encourages using contained database users instead of logins whenever possible. For more information, see [CREATE LOGIN](/sql/t-sql/statements/create-login-transact-sql?view=azuresqldb-current&preserve-view=true) and [ALTER LOGIN](/sql/t-sql/statements/alter-login-transact-sql?view=azuresqldb-current&preserve-view=true) and [Manage logins and users](logins-create-manage.md). - -## T-SQL syntax not supported in Azure SQL Database - -In addition to T-SQL statements related to the unsupported features described in [Azure SQL Database feature comparison](features-comparison.md), the following statements and groups of statements aren't supported. As such, if your database to be migrated is using any of the following features, re-engineer your application to eliminate these T-SQL features and statements. - -- Collation of system objects. -- Connection related: Endpoint statements. SQL Database doesn't support Windows authentication, but does support Azure Active Directory authentication. 
This includes authentication of Active Directory principals federated with Azure Active Directory. For more information, see [Connecting to SQL Database or Azure Synapse Analytics By Using Azure Active Directory Authentication](authentication-aad-overview.md). -- Cross-database and cross-instance queries using three or four part names. Three part names referencing the `tempdb` database and the current database are supported. [Elastic query](elastic-query-overview.md) supports read-only references to tables in other MSSQL databases. -- Cross database ownership chaining and the `TRUSTWORTHY` database property. -- `EXECUTE AS LOGIN`. Use `EXECUTE AS USER` instead. -- Extensible key management (EKM) for encryption keys. Transparent Data Encryption (TDE) [customer-managed keys](transparent-data-encryption-byok-overview.md) and Always Encrypted [column master keys](always-encrypted-azure-key-vault-configure.md) may be stored in Azure Key Vault. -- Eventing: event notifications, query notifications. -- File properties: Syntax related to database file name, placement, size, and other file properties automatically managed by SQL Database. -- High availability: Syntax related to high availability and database recovery, which are managed by SQL Database. This includes syntax for backup, restore, Always On, database mirroring, log shipping, recovery models. -- Syntax related to snapshot, transactional, and merge replication, which is not available in SQL Database. [Replication subscriptions](replication-to-sql-database.md) are supported. -- Functions: `fn_get_sql`, `fn_virtualfilestats`, `fn_virtualservernodes`. -- Instance configuration: Syntax related to server memory, worker threads, CPU affinity, trace flags. Use service tiers and compute sizes instead. -- `KILL STATS JOB`. -- `OPENQUERY`, `OPENDATASOURCE`, and four-part names. 
-- .NET Framework: CLR integration -- Semantic search -- Server credentials: Use [database scoped credentials](/sql/t-sql/statements/create-database-scoped-credential-transact-SQL) instead. -- Server-level permissions: `GRANT`, `REVOKE`, and `DENY` of server level permissions are not supported. Some server-level permissions are replaced by database-level permissions, or granted implicitly by built-in server roles. Some server-level DMVs and catalog views have similar database-level views. -- `SET REMOTE_PROC_TRANSACTIONS` -- `SHUTDOWN` -- `sp_addmessage` -- `sp_configure` and `RECONFIGURE`. [ALTER DATABASE SCOPED CONFIGURATION](/sql/t-sql/statements/alter-database-scoped-configuration-transact-sql) is supported. -- `sp_helpuser` -- `sp_migrate_user_to_contained` -- SQL Server Agent: Syntax that relies upon the SQL Server Agent or the MSDB database: alerts, operators, central management servers. Use scripting, such as PowerShell, instead. -- SQL Server audit: Use SQL Database [auditing](auditing-overview.md) instead. -- SQL Server trace. -- Trace flags. -- T-SQL debugging. -- Server-scoped or logon triggers. -- `USE` statement: To change database context to a different database, you must create a new connection to that database. - -## Full T-SQL reference - -For more information about T-SQL grammar, usage, and examples, see [T-SQL Reference (Database Engine)](/sql/t-sql/language-reference). - -### About the "Applies to" tags - -The T-SQL reference includes articles related to all recent SQL Server versions. Below the article title there's an icon bar, listing MSSQL platforms, and indicating applicability. For example, availability groups were introduced in SQL Server 2012. The [CREATE AVAILABILITY GROUP](/sql/t-sql/statements/create-availability-group-transact-sql) article indicates that the statement applies to **SQL Server (starting with 2012)**. 
The statement doesn't apply to SQL Server 2008, SQL Server 2008 R2, Azure SQL Database, Azure Azure Synapse Analytics, or Parallel Data Warehouse. - -In some cases, the general subject of an article can be used in a product, but there are minor differences between products. The differences are indicated at midpoints in the article as appropriate. For example, the `CREATE TRIGGER` article is available in SQL Database. But the `ALL SERVER` option for server-level triggers, indicates that server-level triggers can't be used in SQL Database. Use database-level triggers instead. - -## Next steps - -For a list of the features that are supported and unsupported by SQL Database, see [Azure SQL Database feature comparison](features-comparison.md). - -To detect compatibility issues in your SQL Server databases before migrating to Azure SQL Database, and to migrate your databases, use [Data Migration Assistant (DMA)](/sql/dma/dma-overview). \ No newline at end of file diff --git a/articles/azure-sql/database/transparent-data-encryption-byok-configure.md b/articles/azure-sql/database/transparent-data-encryption-byok-configure.md deleted file mode 100644 index 4e2b6df893343..0000000000000 --- a/articles/azure-sql/database/transparent-data-encryption-byok-configure.md +++ /dev/null @@ -1,257 +0,0 @@ ---- -title: Enable SQL TDE with Azure Key Vault -titleSuffix: Azure SQL Database & SQL Managed Instance & Azure Synapse Analytics -description: "Learn how to configure an Azure SQL Database and Azure Synapse Analytics to start using Transparent Data Encryption (TDE) for encryption-at-rest using PowerShell or Azure CLI." 
-services: sql-database -ms.service: sql-db-mi -ms.subservice: security -ms.custom: seo-lt-2019 sqldbrb=1, devx-track-azurecli, devx-track-azurepowershell -ms.topic: how-to -author: shohamMSFT -ms.author: shohamd -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 06/23/2021 ---- - -# PowerShell and Azure CLI: Enable Transparent Data Encryption with customer-managed key from Azure Key Vault - -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -This article walks through how to use a key from Azure Key Vault for Transparent Data Encryption (TDE) on Azure SQL Database or Azure Synapse Analytics. To learn more about the TDE with Azure Key Vault integration - Bring Your Own Key (BYOK) Support, visit [TDE with customer-managed keys in Azure Key Vault](transparent-data-encryption-byok-overview.md). - -> [!NOTE] -> Azure SQL now supports using a RSA key stored in a Managed HSM as TDE Protector. Azure Key Vault Managed HSM is a fully managed, highly available, single-tenant, standards-compliant cloud service that enables you to safeguard cryptographic keys for your cloud applications, using FIPS 140-2 Level 3 validated HSMs. Learn more about [Managed HSMs](../../key-vault/managed-hsm/index.yml). - -> [!NOTE] -> This article applies to Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics (dedicated SQL pools (formerly SQL DW)). For documentation on Transparent Data Encryption for dedicated SQL pools inside Synapse workspaces, see [Azure Synapse Analytics encryption](../../synapse-analytics/security/workspaces-encryption.md). - -## Prerequisites for PowerShell - -- You must have an Azure subscription and be an administrator on that subscription. -- [Recommended but Optional] Have a hardware security module (HSM) or local key store for creating a local copy of the TDE Protector key material. -- You must have Azure PowerShell installed and running. -- Create an Azure Key Vault and Key to use for TDE. 
- - [Instructions for using a hardware security module (HSM) and Key Vault](../../key-vault/keys/hsm-protected-keys.md) - - The key vault must have the following property to be used for TDE: - - [soft-delete](../../key-vault/general/soft-delete-overview.md) and purge protection -- The key must have the following attributes to be used for TDE: - - No expiration date - - Not disabled - - Able to perform *get*, *wrap key*, *unwrap key* operations -- To use a Managed HSM key, follow instructions to [create and activate a Managed HSM using Azure CLI](../../key-vault/managed-hsm/quick-create-cli.md) - -# [PowerShell](#tab/azure-powershell) - -For Az module installation instructions, see [Install Azure PowerShell](/powershell/azure/install-az-ps). For specific cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). - -For specifics on Key Vault, see [PowerShell instructions from Key Vault](../../key-vault/secrets/quick-create-powershell.md) and [How to use Key Vault soft-delete with PowerShell](../../key-vault/general/key-vault-recovery.md). - -> [!IMPORTANT] -> The PowerShell Azure Resource Manager (RM) module is still supported, but all future development is for the Az.Sql module. The AzureRM module will continue to receive bug fixes until at least December 2020. The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. For more about their compatibility, see [Introducing the new Azure PowerShell Az module](/powershell/azure/new-azureps-module-az). 
- -## Assign an Azure Active Directory (Azure AD) identity to your server - -If you have an existing [server](logical-servers.md), use the following to add an Azure Active Directory (Azure AD) identity to your server: - - ```powershell - $server = Set-AzSqlServer -ResourceGroupName -ServerName -AssignIdentity - ``` - -If you are creating a server, use the [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) cmdlet with the tag -Identity to add an Azure AD identity during server creation: - - ```powershell - $server = New-AzSqlServer -ResourceGroupName -Location ` - -ServerName -ServerVersion "12.0" -SqlAdministratorCredentials -AssignIdentity - ``` - -## Grant Key Vault permissions to your server - -Use the [Set-AzKeyVaultAccessPolicy](/powershell/module/az.keyvault/set-azkeyvaultaccesspolicy) cmdlet to grant your server access to the key vault before using a key from it for TDE. - - ```powershell - Set-AzKeyVaultAccessPolicy -VaultName ` - -ObjectId $server.Identity.PrincipalId -PermissionsToKeys get, wrapKey, unwrapKey - ``` - -For adding permissions to your server on a Managed HSM, add the 'Managed HSM Crypto Service Encryption User' local RBAC role to the server. This will enable the server to perform get, wrap key, unwrap key operations on the keys in the Managed HSM. -[Instructions for provisioning server access on Managed HSM](../../key-vault/managed-hsm/role-management.md) - -## Add the Key Vault key to the server and set the TDE Protector - -- Use the [Get-AzKeyVaultKey](/powershell/module/az.keyvault/get-azkeyvaultkey) cmdlet to retrieve the key ID from key vault -- Use the [Add-AzSqlServerKeyVaultKey](/powershell/module/az.sql/add-azsqlserverkeyvaultkey) cmdlet to add the key from the Key Vault to the server. -- Use the [Set-AzSqlServerTransparentDataEncryptionProtector](/powershell/module/az.sql/set-azsqlservertransparentdataencryptionprotector) cmdlet to set the key as the TDE protector for all server resources. 
-- Use the [Get-AzSqlServerTransparentDataEncryptionProtector](/powershell/module/az.sql/get-azsqlservertransparentdataencryptionprotector) cmdlet to confirm that the TDE protector was configured as intended. - -> [!NOTE] -> For Managed HSM keys, use Az.Sql 2.11.1 version of PowerShell. - -> [!NOTE] -> The combined length for the key vault name and key name cannot exceed 94 characters. - -> [!TIP] -> An example KeyId from Key Vault: `https://contosokeyvault.vault.azure.net/keys/Key1/1a1a2b2b3c3c4d4d5e5e6f6f7g7g8h8h` -> -> An example KeyId from Managed HSM:
    https://contosoMHSM.managedhsm.azure.net/keys/myrsakey - -```powershell -# add the key from Key Vault to the server -Add-AzSqlServerKeyVaultKey -ResourceGroupName -ServerName -KeyId - -# set the key as the TDE protector for all resources under the server -Set-AzSqlServerTransparentDataEncryptionProtector -ResourceGroupName -ServerName ` - -Type AzureKeyVault -KeyId - -# confirm the TDE protector was configured as intended -Get-AzSqlServerTransparentDataEncryptionProtector -ResourceGroupName -ServerName -``` - -## Turn on TDE - -Use the [Set-AzSqlDatabaseTransparentDataEncryption](/powershell/module/az.sql/set-azsqldatabasetransparentdataencryption) cmdlet to turn on TDE. - -```powershell -Set-AzSqlDatabaseTransparentDataEncryption -ResourceGroupName ` - -ServerName -DatabaseName -State "Enabled" -``` - -Now the database or data warehouse has TDE enabled with an encryption key in Key Vault. - -## Check the encryption state and encryption activity - -Use the [Get-AzSqlDatabaseTransparentDataEncryption](/powershell/module/az.sql/get-azsqldatabasetransparentdataencryption) to get the encryption state and the [Get-AzSqlDatabaseTransparentDataEncryptionActivity](/powershell/module/az.sql/get-azsqldatabasetransparentdataencryptionactivity) to check the encryption progress for a database or data warehouse. - -```powershell -# get the encryption state -Get-AzSqlDatabaseTransparentDataEncryption -ResourceGroupName ` - -ServerName -DatabaseName ` - -# check the encryption progress for a database or data warehouse -Get-AzSqlDatabaseTransparentDataEncryptionActivity -ResourceGroupName ` - -ServerName -DatabaseName -``` - -# [The Azure CLI](#tab/azure-cli) - -To install the required version of Azure CLI (version 2.0 or later) and connect to your Azure subscription, see [Install and Configure the Azure Cross-Platform Command-Line Interface 2.0](/cli/azure/install-azure-cli). 
- -For specifics on Key Vault, see [Manage Key Vault using Azure CLI 2.0](../../key-vault/general/manage-with-cli2.md) and [How to use Key Vault soft-delete with the CLI](../../key-vault/general/key-vault-recovery.md). - -## Assign an Azure AD identity to your server - -```azurecli -# create server (with identity) and database -az sql server create --name --resource-group --location --admin-user --admin-password --assign-identity -az sql db create --name --server --resource-group -``` - -> [!TIP] -> Keep the "principalID" from creating the server, it is the object id used to assign key vault permissions in the next step - -## Grant Key Vault permissions to your server - -```azurecli -# create key vault, key and grant permission -az keyvault create --name --resource-group --location --enable-soft-delete true -az keyvault key create --name --vault-name --protection software -az keyvault set-policy --name --object-id --resource-group --key-permissions wrapKey unwrapKey get -``` - -> [!TIP] -> Keep the key URI or keyID of the new key for the next step, for example: `https://contosokeyvault.vault.azure.net/keys/Key1/1a1a2b2b3c3c4d4d5e5e6f6f7g7g8h8h` - -## Add the Key Vault key to the server and set the TDE Protector - -```azurecli -# add server key and update encryption protector -az sql server key create --server --resource-group --kid -az sql server tde-key set --server --server-key-type AzureKeyVault --resource-group --kid -``` - -> [!NOTE] -> The combined length for the key vault name and key name cannot exceed 94 characters. - -## Turn on TDE - -```azurecli -# enable encryption -az sql db tde set --database --server --resource-group --status Enabled -``` - -Now the database or data warehouse has TDE enabled with a customer-managed encryption key in Azure Key Vault. 
- -## Check the encryption state and encryption activity - -```azurecli -# get encryption scan progress -az sql db tde list-activity --database --server --resource-group - -# get whether encryption is on or off -az sql db tde show --database --server --resource-group -``` - -* * * - -## Useful PowerShell cmdlets - -# [PowerShell](#tab/azure-powershell) - -- Use the [Set-AzSqlDatabaseTransparentDataEncryption](/powershell/module/az.sql/set-azsqldatabasetransparentdataencryption) cmdlet to turn off TDE. - - ```powershell - Set-AzSqlDatabaseTransparentDataEncryption -ServerName -ResourceGroupName ` - -DatabaseName -State "Disabled" - ``` - -- Use the [Get-AzSqlServerKeyVaultKey](/powershell/module/az.sql/get-azsqlserverkeyvaultkey) cmdlet to return the list of Key Vault keys added to the server. - - ```powershell - # KeyId is an optional parameter, to return a specific key version - Get-AzSqlServerKeyVaultKey -ServerName -ResourceGroupName - ``` - -- Use the [Remove-AzSqlServerKeyVaultKey](/powershell/module/az.sql/remove-azsqlserverkeyvaultkey) to remove a Key Vault key from the server. - - ```powershell - # the key set as the TDE Protector cannot be removed - Remove-AzSqlServerKeyVaultKey -KeyId -ServerName -ResourceGroupName - ``` - -# [Azure CLI](#tab/azure-cli) - -- For general database settings, see [az sql](/cli/azure/sql). - -- For vault key settings, see [az sql server key](/cli/azure/sql/server/key). - -- For TDE settings, see [az sql server tde-key](/cli/azure/sql/server/tde-key) and [az sql db tde](/cli/azure/sql/db/tde). - -* * * - -## Troubleshooting - -Check the following if an issue occurs: - -- If the key vault cannot be found, make sure you're in the right subscription. 
- - # [PowerShell](#tab/azure-powershell) - - ```powershell - Get-AzSubscription -SubscriptionId - ``` - - # [Azure CLI](#tab/azure-cli) - - ```azurecli - az account show -s - ``` - - * * * - -- If the new key cannot be added to the server, or the new key cannot be updated as the TDE Protector, check the following: - - The key should not have an expiration date - - The key must have the *get*, *wrap key*, and *unwrap key* operations enabled. - -## Next steps - -- Learn how to rotate the TDE Protector of a server to comply with security requirements: [Rotate the Transparent Data Encryption protector Using PowerShell](transparent-data-encryption-byok-key-rotation.md). -- In case of a security risk, learn how to remove a potentially compromised TDE Protector: [Remove a potentially compromised key](transparent-data-encryption-byok-remove-tde-protector.md). diff --git a/articles/azure-sql/database/transparent-data-encryption-byok-create-server.md b/articles/azure-sql/database/transparent-data-encryption-byok-create-server.md deleted file mode 100644 index efeeffe8e567c..0000000000000 --- a/articles/azure-sql/database/transparent-data-encryption-byok-create-server.md +++ /dev/null @@ -1,268 +0,0 @@ ---- -title: Create server configured with user-assigned managed identity and customer-managed TDE -titleSuffix: Azure SQL Database & Azure Synapse Analytics -description: Learn how to configure user-assigned managed identity and customer-managed transparent data encryption (TDE) while creating an Azure SQL Database logical server using the Azure portal, PowerShell, or Azure CLI. -ms.service: sql-database -ms.subservice: security -ms.topic: how-to -author: shohamMSFT -ms.author: shohamd -ms.reviewer: vanto -ms.date: 12/16/2021 ---- -# Create server configured with user-assigned managed identity and customer-managed TDE - -> [!NOTE] -> Assigning a user-assigned managed identity for Azure SQL logical servers and Managed Instances is in **public preview**. 
- -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -This how-to guide outlines the steps to create an Azure SQL logical [server](logical-servers.md) configured with transparent data encryption (TDE) with customer-managed keys (CMK) using a [user-assigned managed identity](../../active-directory/managed-identities-azure-resources/overview.md#managed-identity-types) to access [Azure Key Vault](../../key-vault/general/quick-create-portal.md). - -## Prerequisites - -- This how-to guide assumes that you've already created an [Azure Key Vault](../../key-vault/general/quick-create-portal.md) and imported a key into it to use as the TDE protector for Azure SQL Database. For more information, see [transparent data encryption with BYOK support](transparent-data-encryption-byok-overview.md). -- Soft-delete and Purge protection must be enabled on the key vault -- You must have created a [user-assigned managed identity](../../active-directory/managed-identities-azure-resources/overview.md#managed-identity-types) and provided it the required TDE permissions (*Get, Wrap Key, Unwrap Key*) on the above key vault. For creating a user-assigned managed identity, see [Create a user-assigned managed identity](/azure/active-directory/managed-identities-azure-resources/how-to-manage-ua-identity-portal). -- You must have Azure PowerShell installed and running. -- [Recommended but optional] Create the key material for the TDE protector in a hardware security module (HSM) or local key store first, and import the key material to Azure Key Vault. Follow the [instructions for using a hardware security module (HSM) and Key Vault](../../key-vault/general/overview.md) to learn more. - -## Create server configured with TDE with customer-managed key (CMK) - - The following steps outline the process of creating a new Azure SQL Database logical server and a new database with a user-assigned managed identity assigned. 
The user-assigned managed identity is required for configuring a customer-managed key for TDE at server creation time. - -# [Portal](#tab/azure-portal) - -1. Browse to the [Select SQL deployment](https://portal.azure.com/#create/Microsoft.AzureSQL) option page in the Azure portal. - -2. If you aren't already signed in to Azure portal, sign in when prompted. - -3. Under **SQL databases**, leave **Resource type** set to **Single database**, and select **Create**. - -4. On the **Basics** tab of the **Create SQL Database** form, under **Project details**, select the desired Azure **Subscription**. - -5. For **Resource group**, select **Create new**, enter a name for your resource group, and select **OK**. - -6. For **Database name** enter `ContosoHR`. - -7. For **Server**, select **Create new**, and fill out the **New server** form with the following values: - - - **Server name**: Enter a unique server name. Server names must be globally unique for all servers in Azure, not just unique within a subscription. Enter something like `mysqlserver135`, and the Azure portal will let you know if it's available or not. - - **Server admin login**: Enter an admin login name, for example: `azureuser`. - - **Password**: Enter a password that meets the password requirements, and enter it again in the **Confirm password** field. - - **Location**: Select a location from the dropdown list - - :::image type="content" source="media/transparent-data-encryption-byok-create-server/create-server.png" alt-text="Create sql server menu in Azure portal"::: - -8. Select **Next: Networking** at the bottom of the page. - -9. On the **Networking** tab, for **Connectivity method**, select **Public endpoint**. - -10. For **Firewall rules**, set **Add current client IP address** to **Yes**. Leave **Allow Azure services and resources to access this server** set to **No**. 
- - :::image type="content" source="media/transparent-data-encryption-byok-create-server/networking-settings.png" alt-text="screenshot of networking settings when creating a SQL server in the Azure portal"::: - -11. Select **Next: Security** at the bottom of the page. - -12. On the Security tab, under **Identity (preview)**, select **Configure Identities**. - - :::image type="content" source="media/transparent-data-encryption-byok-create-server/configure-identity.png" alt-text="screenshot of security settings and configuring identities in the Azure portal"::: - -13. On the **Identity (preview)** blade, select **User assigned managed identity** and then select **Add**. Select the desired **Subscription** and then under **User assigned managed identities** select the desired user-assigned managed identity from the selected subscription. Then select the **Select** button. - - :::image type="content" source="media/transparent-data-encryption-byok-create-server/identity-configuration-managed-identity.png" alt-text="screenshot of adding user assigned managed identity when configuring server identity"::: - - :::image type="content" source="media/transparent-data-encryption-byok-create-server/selecting-user-assigned-managed-identity.png" alt-text="screenshot of user assigned managed identity when configuring server identity"::: - -14. Under **Primary identity**, select the same user-assigned managed identity selected in the previous step. - - :::image type="content" source="media/transparent-data-encryption-byok-create-server/selecting-primary-identity-for-server.png" alt-text="screenshot of selecting primary identity for server"::: - -15. Select **Apply** - -16. On the Security tab, under **Transparent data encryption**, select **Configure Transparent data encryption**. Then select **Select a key** and select **Change key**. Select the desired **Subscription**, **Key vault**, **Key**, and **Version** for the customer-managed key to be used for TDE. 
Select the **Select** button. - - :::image type="content" source="media/transparent-data-encryption-byok-create-server/configure-tde-for-server.png" alt-text="screenshot configuring TDE for server"::: - - :::image type="content" source="media/transparent-data-encryption-byok-create-server/select-key-for-tde.png" alt-text="screenshot selecting key for use with TDE"::: - -17. Select **Apply** - -18. Select **Review + create** at the bottom of the page - -19. On the **Review + create** page, after reviewing, select **Create**. - - -# [The Azure CLI](#tab/azure-cli) - -For information on installing the current release of Azure CLI, see [Install the Azure CLI](/cli/azure/install-azure-cli) article. - -Create a server configured with user-assigned managed identity and customer-managed TDE using the [az sql server create](/cli/azure/sql/server) command. - -```azurecli -az sql server create \ - --name $serverName \ - --resource-group $resourceGroupName \ - --location $location \ - --admin-user $adminlogin \ - --admin-password $password - --assign-identity - --identity-type $identitytype - --user-assigned-identity-id $identityid - --primary-user-assigned-identity-id $primaryidentityid - --key-id $keyid - -``` -Create a database with the [az sql db create](/cli/azure/sql/db) command. - -```azurecli -az sql db create \ - --resource-group $resourceGroupName \ - --server $serverName \ - --name mySampleDatabase \ - --sample-name AdventureWorksLT \ - --edition GeneralPurpose \ - --compute-model Serverless \ - --family Gen5 \ - --capacity 2 -``` - -# [PowerShell](#tab/azure-powershell) - -Create a server configured with user-assigned managed identity and customer-managed TDE using PowerShell. - -For Az module installation instructions, see [Install Azure PowerShell](/powershell/azure/install-az-ps). For specific cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). - -Use the [New-AzSqlServer](/powershell/module/az.sql/New-AzSqlServer) cmdlet. 
- -Replace the following values in the example: - -- ``: Name of the resource group for your Azure SQL logical server -- ``: Location of the server, such as `West US`, or `Central US` -- ``: Use a unique Azure SQL logical server name -- ``: The SQL Administrator login -- ``: The SQL Administrator password -- ``: Type of identity to be assigned to the server. Possible values are `SystemAssigned`, `UserAssigned`, `SystemAssigned,UserAssigned` and None -- ``: The list of user-assigned managed identities to be assigned to the server (can be one or multiple) -- ``: The user-assigned managed identity that should be used as the primary or default on this server -- ``: The Azure Key Vault URI that is used for encryption - -To get your user-assigned managed identity **Resource ID**, search for **Managed Identities** in the [Azure portal](https://portal.azure.com). Find your managed identity, and go to **Properties**. An example of your UMI **Resource ID** will look like `/subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/` - -```powershell -# create a server with user-assigned managed identity and customer-managed TDE -New-AzSqlServer -ResourceGroupName -Location -ServerName -ServerVersion "12.0" -SqlAdministratorCredentials (Get-Credential) -SqlAdministratorLogin -SqlAdministratorPassword -AssignIdentity -IdentityType -UserAssignedIdentityId -PrimaryUserAssignedIdentityId -KeyId - -``` - -# [ARM Template](#tab/arm-template) - -Here's an example of an ARM template that creates an Azure SQL logical server with a user-assigned managed identity and customer-managed TDE. The template also adds an Azure AD admin set for the server and enables [Azure AD-only authentication](authentication-azure-ad-only-authentication.md), but this can be removed from the template example. - -For more information and ARM templates, see [Azure Resource Manager templates for Azure SQL Database & SQL Managed Instance](arm-templates-content-guide.md). 
- -Use a [Custom deployment in the Azure portal](https://portal.azure.com/#create/Microsoft.Template), and **Build your own template in the editor**. Next, **Save** the configuration once you pasted in the example. - -To get your user-assigned managed identity **Resource ID**, search for **Managed Identities** in the [Azure portal](https://portal.azure.com). Find your managed identity, and go to **Properties**. An example of your UMI **Resource ID** will look like `/subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/`. - -```json -{ - "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.1", - "parameters": { - "server": { - "type": "String" - }, - "location": { - "type": "String" - }, - "aad_admin_name": { - "type": "String", - "metadata": { - "description": "The name of the Azure AD admin for the SQL server." - } - }, - "aad_admin_objectid": { - "type": "String", - "metadata": { - "description": "The Object ID of the Azure AD admin." - } - }, - "aad_admin_tenantid": { - "type": "String", - "defaultValue": "[subscription().tenantId]", - "metadata": { - "description": "The Tenant ID of the Azure Active Directory" - } - }, - "aad_admin_type": { - "defaultValue": "User", - "allowedValues": [ - "User", - "Group", - "Application" - ], - "type": "String" - }, - "aad_only_auth": { - "defaultValue": true, - "type": "Bool" - }, - "user_identity_resource_id": { - "defaultValue": "", - "type": "String", - "metadata": { - "description": "The Resource ID of the user-assigned managed identity." - } - }, - "keyvault_url": { - "defaultValue": "", - "type": "String", - "metadata": { - "description": "The key vault URI." 
- } - }, - "AdminLogin": { - "minLength": 1, - "type": "String" - }, - "AdminLoginPassword": { - "type": "SecureString" - } - }, - "resources": [ - { - "type": "Microsoft.Sql/servers", - "apiVersion": "2020-11-01-preview", - "name": "[parameters('server')]", - "location": "[parameters('location')]", - "identity": { - "type": "UserAssigned", - "UserAssignedIdentities": { - "[parameters('user_identity_resource_id')]": {} - } - }, - "properties": { - "administratorLogin": "[parameters('AdminLogin')]", - "administratorLoginPassword": "[parameters('AdminLoginPassword')]", - "PrimaryUserAssignedIdentityId": "[parameters('user_identity_resource_id')]", - "KeyId": "[parameters('keyvault_url')]", - "administrators": { - "login": "[parameters('aad_admin_name')]", - "sid": "[parameters('aad_admin_objectid')]", - "tenantId": "[parameters('aad_admin_tenantid')]", - "principalType": "[parameters('aad_admin_type')]", - "azureADOnlyAuthentication": "[parameters('aad_only_auth')]" - } - } - } - ] -} - -``` - ---- - -## Next steps - -- Get started with Azure Key Vault integration and Bring Your Own Key support for TDE: [Turn on TDE using your own key from Key Vault](transparent-data-encryption-byok-configure.md). 
diff --git a/articles/azure-sql/database/transparent-data-encryption-byok-identity.md b/articles/azure-sql/database/transparent-data-encryption-byok-identity.md deleted file mode 100644 index 9d4b027964545..0000000000000 --- a/articles/azure-sql/database/transparent-data-encryption-byok-identity.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Customer-managed transparent data encryption using user-assigned managed identity -description: "Bring Your Own Key (BYOK) support for transparent data encryption (TDE) using user-assigned managed identity (UMI)" -ms.service: sql-db-mi -ms.subservice: security -ms.topic: conceptual -author: shohamMSFT -ms.author: shohamd -ms.reviewer: vanto -ms.date: 12/16/2021 ---- - -# Managed identities for transparent data encryption with BYOK -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -> [!NOTE] -> Assigning a user-assigned managed identity for Azure SQL logical servers and Managed Instances is in **public preview**. - -Managed identities in Azure Active Directory (Azure AD) provide Azure services with an automatically managed identity in Azure AD. This identity can be used to authenticate to any service that supports Azure AD authentication, such as [Azure Key Vault](../../key-vault/general/overview.md), without any credentials in the code. For more information, see [Managed identity types](../../active-directory/managed-identities-azure-resources/overview.md#managed-identity-types) in Azure. - -Managed Identities can be of two types: - -- **System-assigned** -- **User-assigned** - -Enabling system-assigned managed identity for Azure SQL logical servers and Managed Instances are already supported today. [Assigning user-assigned managed identity](authentication-azure-ad-user-assigned-managed-identity.md) to the server is now in public preview. 
- -For [TDE with customer-managed key (CMK)](transparent-data-encryption-byok-overview.md) in Azure SQL, a managed identity on the server is used for providing access rights to the server on the key vault. For instance, the system-assigned managed identity of the server should be provided with [key vault permissions](transparent-data-encryption-byok-overview.md#how-customer-managed-tde-works) prior to enabling TDE with CMK on the server. - -In addition to the system-assigned managed identity that is already supported for TDE with CMK, a user-assigned managed identity (UMI) that is assigned to the server can be used to allow the server to access the key vault. A prerequisite to enable key vault access is to ensure the user-assigned managed identity has been provided the *Get*, *wrapKey* and *unwrapKey* permissions on the key vault. Since the user-assigned managed identity is a standalone resource that can be created and granted access to the key vault, [TDE with a customer-managed key can now be enabled at creation time for the server or database](transparent-data-encryption-byok-create-server.md). - -> [!NOTE] -> For assigning a user-assigned managed identity to the logical server or managed instance, a user must have the [SQL Server Contributor](../../role-based-access-control/built-in-roles.md#sql-server-contributor) or [SQL Managed Instance Contributor](../../role-based-access-control/built-in-roles.md#sql-managed-instance-contributor) Azure RBAC role along with any other Azure RBAC role containing the **Microsoft.ManagedIdentity/userAssignedIdentities/*/assign/action** action. 
- -## Benefits of using UMI for customer-managed TDE - -- Enables the ability to pre-authorize key vault access for Azure SQL logical servers or managed instances by creating a user-assigned managed identity, and granting it access to key vault, even before the server or database has been created - -- Allows creation of an Azure SQL logical server with TDE and CMK enabled - -- Enables the same user-assigned managed identity to be assigned to multiple servers, eliminating the need to individually turn on system-assigned managed identity for each Azure SQL logical server or managed instance, and providing it access to key vault - -- Provides the capability to enforce CMK at server or database creation time with an available built-in Azure policy - -## Considerations while using UMI for customer-managed TDE - -- By default, TDE in Azure SQL uses the primary user-assigned managed identity set on the server for key vault access. If no user-assigned identities have been assigned to the server, then the system-assigned managed identity of the server is used for key vault access. -- When using the system-assigned managed identity for TDE with CMK, no user-assigned managed identities should be assigned to the server -- When using a user-assigned managed identity for TDE with CMK, assign the identity to the server and set it as the primary identity for the server -- The primary user-assigned managed identity requires continuous key vault access (*get, wrapKey, unwrapKey* permissions). 
If the identity's access to key vault is revoked or sufficient permissions are not provided, the database will move to *Inaccessible* state -- If the primary user-assigned managed identity is being updated to a different user-assigned managed identity, the new identity must be given required permissions to the key vault prior to updating the primary -- To switch the server from user-assigned to system-assigned managed identity for key vault access, provide the system-assigned managed identity with the required key vault permissions, then remove all user-assigned managed identities from the server - -> [!Important] -> The primary user-assigned managed identity being used for TDE with CMK should not be deleted from Azure. Deleting this identity will lead to the server losing access to key vault and databases becoming *inaccessible*. - -## Limitations and known issues - -- If the key vault is behind a VNet, a user-assigned managed identity cannot be used with customer-managed TDE. A system-assigned managed identity must be used in this case. A user-assigned managed identity can only be used when the key vault is not behind a VNet. -- When multiple user-assigned managed identities are assigned to the server or managed instance, if a single identity is removed from the server using the *Identity* blade of the Azure Portal, the operation succeeds but the identity does not get removed from the server. Removing all user-assigned managed identities together from the Azure portal works successfully. -- When the server or managed instance is configured with customer-managed TDE and both system-assigned and user-assigned managed identities are enabled on the server, removing the user-assigned managed identities from the server without first giving the system-assigned managed identity access to the key vault results in an *Unexpected error occurred* message. 
Ensure the system-assigned managed identity has been provided key vault access prior to removing the primary user-assigned managed identity (and any other user-assigned managed identities) from the server. -- User Assigned Managed Identity for SQL Managed Instance is currently not supported when AKV firewall is enabled. - - -## Next steps - -> [!div class="nextstepaction"] -> [Create Azure SQL database configured with user-assigned managed identity and customer-managed TDE](transparent-data-encryption-byok-create-server.md) \ No newline at end of file diff --git a/articles/azure-sql/database/transparent-data-encryption-byok-key-rotation.md b/articles/azure-sql/database/transparent-data-encryption-byok-key-rotation.md deleted file mode 100644 index 0433d2ebe57c3..0000000000000 --- a/articles/azure-sql/database/transparent-data-encryption-byok-key-rotation.md +++ /dev/null @@ -1,136 +0,0 @@ ---- -title: Rotate TDE protector (PowerShell & the Azure CLI) -titleSuffix: Azure SQL Database & Azure Synapse Analytics -description: Learn how to rotate the Transparent Data Encryption (TDE) protector for a server in Azure used by Azure SQL Database and Azure Synapse Analytics using PowerShell and the Azure CLI. -services: sql-database -ms.service: sql-database -ms.subservice: security -ms.custom: seo-lt-2019 sqldbrb=1, devx-track-azurecli, devx-track-azurepowershell -ms.topic: how-to -author: shohamMSFT -ms.author: shohamd -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 12/15/2021 ---- -# Rotate the Transparent Data Encryption (TDE) protector -[!INCLUDE[appliesto-sqldb-asa](../includes/appliesto-sqldb-asa.md)] - - -This article describes key rotation for a [server](logical-servers.md) using a TDE protector from Azure Key Vault. Rotating the logical TDE Protector for a server means switching to a new asymmetric key that protects the databases on the server. 
Key rotation is an online operation and should only take a few seconds to complete, because this only decrypts and re-encrypts the database's data encryption key, not the entire database. - -## Important considerations when rotating the TDE Protector -- When the TDE protector is changed/rotated, old backups of the database, including backed-up log files, are not updated to use the latest TDE protector. To restore a backup encrypted with a TDE protector from Key Vault, make sure that the key material is available to the target server. Therefore, we recommend that you keep all the old versions of the TDE protector in Azure Key Vault (AKV), so database backups can be restored. -- Even when switching from customer managed key (CMK) to service-managed key, keep all previously used keys in AKV. This ensures database backups, including backed-up log files, can be restored with the TDE protectors stored in AKV. -- Apart from old backups, transaction log files might also require access to the older TDE Protector. To determine if there are any remaining logs that still require the older key, after performing key rotation, use the [sys.dm_db_log_info](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-log-info-transact-sql) dynamic management view (DMV). This DMV returns information on each virtual log file (VLF) of the transaction log along with the encryption key thumbprint of the VLF. -- Older keys need to be kept in AKV and available to the server based on the backup retention period configured as part of the backup retention policies on the database. This helps ensure any Long Term Retention (LTR) backups on the server can still be restored using the older keys. - - -> [!NOTE] -> A paused dedicated SQL pool in Azure Synapse Analytics must be resumed before key rotations. - -> [!IMPORTANT] -> Do not delete previous versions of the key after a rollover. 
When keys are rolled over, some data is still encrypted with the previous keys, such as older database backups, backed-up log files and transaction log files. - -> [!NOTE] -> This article applies to Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics (dedicated SQL pools (formerly SQL DW)). For documentation on Transparent Data Encryption for dedicated SQL pools inside Synapse workspaces, see [Azure Synapse Analytics encryption](../../synapse-analytics/security/workspaces-encryption.md). - -## Prerequisites - -- This how-to guide assumes that you are already using a key from Azure Key Vault as the TDE protector for Azure SQL Database or Azure Synapse Analytics. See [Transparent Data Encryption with BYOK Support](transparent-data-encryption-byok-overview.md). -- You must have Azure PowerShell installed and running. -- [Recommended but optional] Create the key material for the TDE protector in a hardware security module (HSM) or local key store first, and import the key material to Azure Key Vault. Follow the [instructions for using a hardware security module (HSM) and Key Vault](../../key-vault/general/overview.md) to learn more. - -# [PowerShell](#tab/azure-powershell) - -For Az module installation instructions, see [Install Azure PowerShell](/powershell/azure/install-az-ps). For specific cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). - -> [!IMPORTANT] -> The PowerShell Azure Resource Manager (RM) module is still supported, but all future development is for the Az.Sql module. The AzureRM module will continue to receive bug fixes until at least December 2020. The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. For more about their compatibility, see [Introducing the new Azure PowerShell Az module](/powershell/azure/new-azureps-module-az). - -# [The Azure CLI](#tab/azure-cli) - -For installation, see [Install the Azure CLI](/cli/azure/install-azure-cli). 
- -* * * - -## Manual key rotation - -Manual key rotation uses the following commands to add a completely new key, which could be under a new key name or even another key vault. Using this approach supports adding the same key to different key vaults to support high-availability and geo-dr scenarios. - -> [!NOTE] -> The combined length for the key vault name and key name cannot exceed 94 characters. - -# [PowerShell](#tab/azure-powershell) - -Use the [Add-AzKeyVaultKey](/powershell/module/az.keyvault/Add-AzKeyVaultKey), [Add-AzSqlServerKeyVaultKey](/powershell/module/az.sql/add-azsqlserverkeyvaultkey), and [Set-AzSqlServerTransparentDataEncryptionProtector](/powershell/module/az.sql/set-azsqlservertransparentdataencryptionprotector) cmdlets. - -```powershell -# add a new key to Key Vault -Add-AzKeyVaultKey -VaultName -Name -Destination - -# add the new key from Key Vault to the server -Add-AzSqlServerKeyVaultKey -KeyId -ServerName -ResourceGroup - -# set the key as the TDE protector for all resources under the server -Set-AzSqlServerTransparentDataEncryptionProtector -Type AzureKeyVault -KeyId ` - -ServerName -ResourceGroup -``` - -# [The Azure CLI](#tab/azure-cli) - -Use the [az keyvault key create](/cli/azure/keyvault/key#az-keyvault-key-create), [az sql server key create](/cli/azure/sql/server/key#az-sql-server-key-create), and [az sql server tde-key set](/cli/azure/sql/server/tde-key#az-sql-server-tde-key-set) commands. 
- -```azurecli -# add a new key to Key Vault -az keyvault key create --name --vault-name --protection - -# add the new key from Key Vault to the server -az sql server key create --kid --resource-group --server - -# set the key as the TDE protector for all resources under the server -az sql server tde-key set --server-key-type AzureKeyVault --kid --resource-group --server -``` - -* * * - -## Switch TDE protector mode - -# [PowerShell](#tab/azure-powershell) - -- To switch the TDE protector from Microsoft-managed to BYOK mode, use the [Set-AzSqlServerTransparentDataEncryptionProtector](/powershell/module/az.sql/set-azsqlservertransparentdataencryptionprotector) cmdlet. - - ```powershell - Set-AzSqlServerTransparentDataEncryptionProtector -Type AzureKeyVault ` - -KeyId -ServerName -ResourceGroup - ``` - -- To switch the TDE protector from BYOK mode to Microsoft-managed, use the [Set-AzSqlServerTransparentDataEncryptionProtector](/powershell/module/az.sql/set-azsqlservertransparentdataencryptionprotector) cmdlet. - - ```powershell - Set-AzSqlServerTransparentDataEncryptionProtector -Type ServiceManaged ` - -ServerName -ResourceGroup - ``` - -# [The Azure CLI](#tab/azure-cli) - -The following examples use [az sql server tde-key set](/powershell/module/az.sql/set-azsqlservertransparentdataencryptionprotector). - -- To switch the TDE protector from Microsoft-managed to BYOK mode, - - ```azurecli - az sql server tde-key set --server-key-type AzureKeyVault --kid --resource-group --server - ``` - -- To switch the TDE protector from BYOK mode to Microsoft-managed, - - ```azurecli - az sql server tde-key set --server-key-type ServiceManaged --resource-group --server - ``` - -* * * - -## Next steps - -- In case of a security risk, learn how to remove a potentially compromised TDE protector: [Remove a potentially compromised key](transparent-data-encryption-byok-remove-tde-protector.md). 
- -- Get started with Azure Key Vault integration and Bring Your Own Key support for TDE: [Turn on TDE using your own key from Key Vault using PowerShell](transparent-data-encryption-byok-configure.md). \ No newline at end of file diff --git a/articles/azure-sql/database/transparent-data-encryption-byok-overview.md b/articles/azure-sql/database/transparent-data-encryption-byok-overview.md deleted file mode 100644 index c9290b7e8b81b..0000000000000 --- a/articles/azure-sql/database/transparent-data-encryption-byok-overview.md +++ /dev/null @@ -1,262 +0,0 @@ ---- -title: Customer-managed transparent data encryption (TDE) -description: "Bring Your Own Key (BYOK) support for transparent data encryption (TDE) with Azure Key Vault for SQL Database and Azure Synapse Analytics. TDE with BYOK overview, benefits, how it works, considerations, and recommendations." -titleSuffix: Azure SQL Database & SQL Managed Instance & Azure Synapse Analytics -services: sql-database -ms.service: sql-db-mi -ms.subservice: security -ms.custom: seo-lt-2019, azure-synapse -ms.devlang: -ms.topic: conceptual -author: shohamMSFT -ms.author: shohamd -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 12/16/2021 ---- -# Azure SQL transparent data encryption with customer-managed key -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -Azure SQL [transparent data encryption (TDE)](/sql/relational-databases/security/encryption/transparent-data-encryption) with customer-managed key enables Bring Your Own Key (BYOK) scenario for data protection at rest, and allows organizations to implement separation of duties in the management of keys and data. With customer-managed TDE, customer is responsible for and in a full control of a key lifecycle management (key creation, upload, rotation, deletion), key usage permissions, and auditing of operations on keys. 
- -In this scenario, the key used for encryption of the Database Encryption Key (DEK), called TDE protector, is a customer-managed asymmetric key stored in a customer-owned and customer-managed [Azure Key Vault (AKV)](../../key-vault/general/security-features.md), a cloud-based external key management system. Key Vault is highly available and scalable secure storage for RSA cryptographic keys, optionally backed by FIPS 140-2 Level 2 validated hardware security modules (HSMs). It doesn't allow direct access to a stored key, but provides services of encryption/decryption using the key to the authorized entities. The key can be generated by the key vault, imported, or [transferred to the key vault from an on-prem HSM device](../../key-vault/keys/hsm-protected-keys.md). - -For Azure SQL Database and Azure Synapse Analytics, the TDE protector is set at the server level and is inherited by all encrypted databases associated with that server. For Azure SQL Managed Instance, the TDE protector is set at the instance level and is inherited by all encrypted databases on that instance. The term *server* refers both to a server in SQL Database and Azure Synapse and to a managed instance in SQL Managed Instance throughout this document, unless stated differently. - -> [!NOTE] -> This article applies to Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics (dedicated SQL pools (formerly SQL DW)). For documentation on transparent data encryption for dedicated SQL pools inside Synapse workspaces, see [Azure Synapse Analytics encryption](../../synapse-analytics/security/workspaces-encryption.md). - -> [!IMPORTANT] -> For those using service-managed TDE who would like to start using customer-managed TDE, data remains encrypted during the process of switching over, and there is no downtime nor re-encryption of the database files. 
Switching from a service-managed key to a customer-managed key only requires re-encryption of the DEK, which is a fast and online operation. - -> [!NOTE] -> To provide Azure SQL customers with two layers of encryption of data at rest, infrastructure encryption (using AES-256 encryption algorithm) with platform managed keys is being rolled out. This provides an additional layer of encryption at rest along with TDE with customer-managed keys, which is already available. For Azure SQL Database and Managed Instance, all databases, including the master database and other system databases, will be encrypted when infrastructure encryption is turned on. At this time, customers must request access to this capability. If you are interested in this capability, contact AzureSQLDoubleEncryptionAtRest@service.microsoft.com. - - -## Benefits of the customer-managed TDE - -Customer-managed TDE provides the following benefits to the customer: - -- Full and granular control over usage and management of the TDE protector; - -- Transparency of the TDE protector usage; - -- Ability to implement separation of duties in the management of keys and data within the organization; - -- Key Vault administrator can revoke key access permissions to make encrypted database inaccessible; - -- Central management of keys in AKV; - -- Greater trust from your end customers, since AKV is designed such that Microsoft can't see nor extract encryption keys; - -## How customer-managed TDE works - -![Setup and functioning of the customer-managed TDE](./media/transparent-data-encryption-byok-overview/customer-managed-tde-with-roles.PNG) - -In order for the Azure SQL server to use TDE protector stored in AKV for encryption of the DEK, the key vault administrator needs to give the following access rights to the server using its unique Azure Active Directory (Azure AD) identity: - -- **get** - for retrieving the public part and properties of the key in the Key Vault - -- **wrapKey** - to be able to protect 
(encrypt) DEK - -- **unwrapKey** - to be able to unprotect (decrypt) DEK - -Key vault administrator can also [enable logging of key vault audit events](../../azure-monitor/insights/key-vault-insights-overview.md), so they can be audited later. - -When server is configured to use a TDE protector from AKV, the server sends the DEK of each TDE-enabled database to the key vault for encryption. Key vault returns the encrypted DEK, which is then stored in the user database. - -When needed, server sends protected DEK to the key vault for decryption. - -Auditors can use Azure Monitor to review key vault AuditEvent logs, if logging is enabled. - -[!INCLUDE [sql-database-akv-permission-delay](../includes/sql-database-akv-permission-delay.md)] - -## Requirements for configuring customer-managed TDE - -### Requirements for configuring AKV - -- Key vault and SQL Database/managed instance must belong to the same Azure Active Directory tenant. Cross-tenant key vault and server interactions aren't supported. To move resources afterwards, TDE with AKV will have to be reconfigured. Learn more about [moving resources](../../azure-resource-manager/management/move-resource-group-and-subscription.md). -- [Soft-delete](../../key-vault/general/soft-delete-overview.md) and [purge protection](../../key-vault/general/soft-delete-overview.md#purge-protection) features must be enabled on the key vault to protect from data loss due to accidental key (or key vault) deletion. -- Grant the server or managed instance access to the key vault (*get*, *wrapKey*, *unwrapKey*) using its Azure Active Directory identity. The server identity can be a system-assigned managed identity or a user-assigned managed identity assigned to the server. When using the Azure portal, the Azure AD identity gets automatically created when the server is created. When using PowerShell or Azure CLI, the Azure AD identity must be explicitly created and should be verified. 
See [Configure TDE with BYOK](transparent-data-encryption-byok-configure.md) and [Configure TDE with BYOK for SQL Managed Instance](../managed-instance/scripts/transparent-data-encryption-byok-powershell.md) for detailed step-by-step instructions when using PowerShell. - - Depending on the permission model of the key vault (access policy or Azure RBAC), key vault access can be granted either by creating an access policy on the key vault, or by creating a new Azure RBAC role assignment with the role [Key Vault Crypto Service Encryption User](../../key-vault/general/rbac-guide.md#azure-built-in-roles-for-key-vault-data-plane-operations). - -- When using firewall with AKV, you must enable option *Allow trusted Microsoft services to bypass the firewall*. - -### Enable soft-delete and purge protection for AKV - -> [!IMPORTANT] -> Both **soft-delete** and **purge protection** must be enabled on the key vault when configuring customer-managed TDE on a new or existing server or managed instance. - -[Soft-delete](../../key-vault/general/soft-delete-overview.md) and [purge protection](../../key-vault/general/soft-delete-overview.md#purge-protection) are important features of Azure Key Vault that allow recovery of deleted vaults and deleted key vault objects, reducing the risk of a user accidentally or maliciously deleting a key or a key vault. - -- Soft-deleted resources are retained for 90 days, unless recovered or purged by the customer. The *recover* and *purge* actions have their own permissions associated in a key vault access policy. The soft-delete feature is on by default for new key vaults and can also be enabled using the Azure portal, [PowerShell](../../key-vault/general/key-vault-recovery.md?tabs=azure-powershell) or [Azure CLI](../../key-vault/general/key-vault-recovery.md?tabs=azure-cli). 
- -- Purge protection can be turned on using [Azure CLI](../../key-vault/general/key-vault-recovery.md?tabs=azure-cli) or [PowerShell](../../key-vault/general/key-vault-recovery.md?tabs=azure-powershell). When purge protection is enabled, a vault or an object in the deleted state cannot be purged until the retention period has passed. The default retention period is 90 days, but is configurable from 7 to 90 days through the Azure portal. - -- Azure SQL requires soft-delete and purge protection to be enabled on the key vault containing the encryption key being used as the TDE Protector for the server or managed instance. This helps prevent the scenario of accidental or malicious key vault or key deletion that can lead to the database going into *Inaccessible* state. - -- When configuring the TDE Protector on an existing server or during server creation, Azure SQL validates that the key vault being used has soft-delete and purge protection turned on. If soft-delete and purge protection are not enabled on the key vault, the TDE Protector setup fails with an error. In this case, soft-delete and purge protection must first be enabled on the key vault and then the TDE Protector setup should be performed. - - -### Requirements for configuring TDE protector - -- TDE protector can only be an asymmetric, RSA, or RSA HSM key. The supported key lengths are 2048 bytes and 3072 bytes. - -- The key activation date (if set) must be a date and time in the past. Expiration date (if set) must be a future date and time. - -- The key must be in the *Enabled* state. - -- If you're importing existing key into the key vault, make sure to provide it in the supported file formats (`.pfx`, `.byok`, or `.backup`). - -> [!NOTE] -> Azure SQL now supports using a RSA key stored in a Managed HSM as TDE Protector. 
-Azure Key Vault Managed HSM is a fully managed, highly available, single-tenant, standards-compliant cloud service that enables you to safeguard cryptographic keys for your cloud applications, using FIPS 140-2 Level 3 validated HSMs. Learn more about [Managed HSMs](../../key-vault/managed-hsm/index.yml). - - -## Recommendations when configuring customer-managed TDE - -### Recommendations when configuring AKV - -- Associate at most 500 General Purpose or 200 Business Critical databases in total with a key vault in a single subscription to ensure high availability when server accesses the TDE protector in the key vault. These figures are based on the experience and documented in the [key vault service limits](../../key-vault/general/service-limits.md). The intention here is to prevent issues after server failover, as it will trigger as many key operations against the vault as there are databases in that server. - -- Set a resource lock on the key vault to control who can delete this critical resource and prevent accidental or unauthorized deletion. Learn more about [resource locks](../../azure-resource-manager/management/lock-resources.md). - -- Enable auditing and reporting on all encryption keys: Key vault provides logs that are easy to inject into other security information and event management tools. Operations Management Suite [Log Analytics](../../azure-monitor/insights/key-vault-insights-overview.md) is one example of a service that is already integrated. - -- Link each server with two key vaults that reside in different regions and hold the same key material, to ensure high availability of encrypted databases. Mark the key from one of the key vaults as the TDE protector. System will automatically switch to the key vault in the second region with the same key material, if there's an outage affecting the key vault in the first region. 
- -> [!NOTE] -> To allow greater flexibility in configuring customer-managed TDE, Azure SQL Database server and Managed Instance in one region can now be linked to key vault in any other region. The server and key vault do not have to be co-located in the same region. - -### Recommendations when configuring TDE protector - -- Keep a copy of the TDE protector on a secure place or escrow it to the escrow service. - -- If the key is generated in the key vault, create a key backup before using the key in AKV for the first time. Backup can be restored to an Azure Key Vault only. Learn more about the [Backup-AzKeyVaultKey](/powershell/module/az.keyvault/backup-azkeyvaultkey) command. - -- Create a new backup whenever any changes are made to the key (for example, key attributes, tags, ACLs). - -- **Keep previous versions** of the key in the key vault when rotating keys, so older database backups can be restored. When the TDE protector is changed for a database, old backups of the database **are not updated** to use the latest TDE protector. At restore time, each backup needs the TDE protector it was encrypted with at creation time. Key rotations can be performed following the instructions at [Rotate the transparent data encryption Protector Using PowerShell](transparent-data-encryption-byok-key-rotation.md). - -- Keep all previously used keys in AKV even after switching to service-managed keys. It ensures database backups can be restored with the TDE protectors stored in AKV. TDE protectors created with Azure Key Vault have to be maintained until all remaining stored backups have been created with service-managed keys. Make recoverable backup copies of these keys using [Backup-AzKeyVaultKey](/powershell/module/az.keyvault/backup-azkeyvaultkey). - -- To remove a potentially compromised key during a security incident without the risk of data loss, follow the steps from the [Remove a potentially compromised key](transparent-data-encryption-byok-remove-tde-protector.md). 
- - -## Inaccessible TDE protector - -When TDE is configured to use a customer-managed key, continuous access to the TDE protector is required for the database to stay online. If the server loses access to the customer-managed TDE protector in AKV, in up to 10 minutes a database will start denying all connections with the corresponding error message and change its state to *Inaccessible*. The only action allowed on a database in the Inaccessible state is deleting it. - -> [!NOTE] -> If the database is inaccessible due to an intermittent networking outage, there is no action required and the databases will come back online automatically. - -After access to the key is restored, taking database back online requires extra time and steps, which may vary based on the time elapsed without access to the key and the size of the data in the database: - -- If key access is restored within 30 minutes, the database will autoheal within next hour. - -- If key access is restored after more than 30 minutes, autoheal is not possible and bringing back the database requires extra steps on the portal and can take a significant amount of time depending on the size of the database. Once the database is back online, previously configured server-level settings such as [failover group](auto-failover-group-overview.md) configuration, point-in-time-restore history, and tags **will be lost**. Therefore, it's recommended implementing a notification system that allows you to identify and address the underlying key access issues within 30 minutes. - -Below is a view of the extra steps required on the portal to bring an inaccessible database back online. 
- -![TDE BYOK Inaccessible Database](./media/transparent-data-encryption-byok-overview/customer-managed-tde-inaccessible-database.jpg) - - -### Accidental TDE protector access revocation - -It may happen that someone with sufficient access rights to the key vault accidentally disables server access to the key by: - -- revoking the key vault's *get*, *wrapKey*, *unwrapKey* permissions from the server - -- deleting the key - -- deleting the key vault - -- changing the key vault's firewall rules - -- deleting the managed identity of the server in Azure Active Directory - -Learn more about [the common causes for database to become inaccessible](/sql/relational-databases/security/encryption/troubleshoot-tde?view=azuresqldb-current&preserve-view=true#common-errors-causing-databases-to-become-inaccessible). - -## Monitoring of the customer-managed TDE - -To monitor database state and to enable alerting for loss of TDE protector access, configure the following Azure features: - -- [Azure Resource Health](../../service-health/resource-health-overview.md). An inaccessible database that has lost access to the TDE protector will show as "Unavailable" after the first connection to the database has been denied. -- [Activity Log](../../service-health/alerts-activity-log-service-notifications-portal.md) when access to the TDE protector in the customer-managed key vault fails, entries are added to the activity log. Creating alerts for these events will enable you to reinstate access as soon as possible. -- [Action Groups](../../azure-monitor/alerts/action-groups.md) can be defined to send you notifications and alerts based on your preferences, for example, Email/SMS/Push/Voice, Logic App, Webhook, ITSM, or Automation Runbook. - -## Database backup and restore with customer-managed TDE - -Once a database is encrypted with TDE using a key from Key Vault, any newly generated backups are also encrypted with the same TDE protector. 
When the TDE protector is changed, old backups of the database **are not updated** to use the latest TDE protector. - -To restore a backup encrypted with a TDE protector from Key Vault, make sure that the key material is available to the target server. Therefore, we recommend that you keep all the old versions of the TDE protector in key vault, so database backups can be restored. - -> [!IMPORTANT] -> At any moment there can be not more than one TDE protector set for a server. It's the key marked with "Make the key the default TDE protector" in the Azure portal blade. However, multiple additional keys can be linked to a server without marking them as a TDE protector. These keys are not used for protecting DEK, but can be used during restore from a backup, if backup file is encrypted with the key with the corresponding thumbprint. - -If the key that is needed for restoring a backup is no longer available to the target server, the following error message is returned on the restore try: -"Target server `` does not have access to all AKV URIs created between \ and \. Retry operation after restoring all AKV URIs." - -To mitigate it, run the [Get-AzSqlServerKeyVaultKey](/powershell/module/az.sql/get-azsqlserverkeyvaultkey) cmdlet for the target server or [Get-AzSqlInstanceKeyVaultKey](/powershell/module/az.sql/get-azsqlinstancekeyvaultkey) for the target managed instance to return the list of available keys and identify the missing ones. To ensure all backups can be restored, make sure the target server for the restore has access to all of keys needed. These keys don't need to be marked as TDE protector. - -To learn more about backup recovery for SQL Database, see [Recover a database in SQL Database](recovery-using-backups.md). To learn more about backup recovery for dedicated SQL pool in Azure Synapse Analytics, see [Recover a dedicated SQL pool](../../synapse-analytics/sql-data-warehouse/backup-and-restore.md). 
For SQL Server's native backup/restore with SQL Managed Instance, see [Quickstart: Restore a database to SQL Managed Instance](../managed-instance/restore-sample-database-quickstart.md) - -Another consideration for log files: Backed up log files remain encrypted with the original TDE protector, even if it was rotated and the database is now using a new TDE protector. At restore time, both keys will be needed to restore the database. If the log file is using a TDE protector stored in Azure Key Vault, this key will be needed at restore time, even if the database has been changed to use service-managed TDE in the meantime. - -## High availability with customer-managed TDE - -Even in cases when there's no configured geo-redundancy for server, it's highly recommended to configure the server to use two different key vaults in two different regions with the same key material. The key in the secondary key vault in the other region shouldn't be marked as TDE protector, and it's not even allowed. If there's an outage affecting the primary key vault, and only then, the system will automatically switch to the other linked key with the same thumbprint in the secondary key vault, if it exists. Note though that switch won't happen if TDE protector is inaccessible because of revoked access rights, or because key or key vault is deleted, as it may indicate that customer intentionally wanted to restrict server from accessing the key. Providing the same key material to two key vaults in different regions can be done by creating the key outside of the key vault, and importing them into both key vaults. - -Alternatively, it can be accomplished by generating key using the primary key vault in one region and cloning the key into a key vault in a different Azure region. 
Use the [Backup-AzKeyVaultKey](/powershell/module/az.keyvault/Backup-AzKeyVaultKey) cmdlet to retrieve the key in encrypted format from the primary key vault and then use the [Restore-AzKeyVaultKey](/powershell/module/az.keyvault/restore-azkeyvaultkey) cmdlet and specify a key vault in the second region to clone the key. Alternatively, use the Azure portal to back up and restore the key. Key backup/restore operation is only allowed between key vaults within the same Azure subscription and [Azure geography](https://azure.microsoft.com/global-infrastructure/geographies/). - -![Single-Server HA](./media/transparent-data-encryption-byok-overview/customer-managed-tde-with-ha.png) - -## Geo-DR and customer-managed TDE - -In both [active geo-replication](active-geo-replication-overview.md) and [failover groups](auto-failover-group-overview.md) scenarios, the primary and secondary servers involved can be linked either to the same key vault (in any region) or to separate key vaults. If separate key vaults are linked to the primary and secondary servers, customer is responsible for keeping the key material across the key vaults consistent, so that geo-secondary is in sync and can take over using the same key from its linked key vault if primary becomes inaccessible due to an outage in the region and a failover is triggered. Up to four secondaries can be configured, and chaining (secondaries of secondaries) isn't supported. - -To avoid issues while establishing or during geo-replication due to incomplete key material, it's important to follow these rules when configuring customer-managed TDE (if separate key vaults are used for the primary and secondary servers): - -- All key vaults involved must have same properties, and same access rights for respective servers. - -- All key vaults involved must contain identical key material. It applies not just to the current TDE protector, but to the all previous TDE protectors that may be used in the backup files. 
- -- Both initial setup and rotation of the TDE protector must be done on the secondary first, and then on primary. - -![Failover groups and geo-dr](./media/transparent-data-encryption-byok-overview/customer-managed-tde-with-bcdr.png) - -To test a failover, follow the steps in [Active geo-replication overview](active-geo-replication-overview.md). Testing failover should be done regularly to validate that SQL Database has maintained access permission to both key vaults. - -**Azure SQL Database server and Managed Instance in one region can now be linked to key vault in any other region.** The server and key vault do not have to be co-located in the same region. With this, for simplicity, the primary and secondary servers can be connected to the same key vault (in any region). This will help avoid scenarios where key material may be out of sync if separate key vaults are used for both the servers. Azure Key Vault has multiple layers of redundancy in place to make sure that your keys and key vaults remain available in case of service or region failures. [Azure Key Vault availability and redundancy](../../key-vault/general/disaster-recovery-guidance.md) - -## Azure Policy for customer-managed TDE - -Azure Policy can be used to enforce customer-managed TDE during the creation or update of an Azure SQL Database server or Azure SQL Managed Instance. With this policy in place, any attempts to create or update a [logical server in Azure](logical-servers.md) or managed instance will fail if it isn't configured with a customer-managed key. -The Azure Policy can be applied to the whole Azure subscription, or just within a resource group. - -For more information on Azure Policy, see [What is Azure Policy?](../../governance/policy/overview.md) and [Azure Policy definition structure](../../governance/policy/concepts/definition-structure.md). 
- -The following two built-in policies are supported for customer-managed TDE in Azure Policy: -- SQL servers should use customer-managed keys to encrypt data at rest -- SQL managed instances should use customer-managed keys to encrypt data at rest - -The customer-managed TDE policy can be managed by going to the [Azure portal](https://portal.azure.com), and searching for the **Policy** service. Under **Definitions**, search for customer-managed key. - -There are three effects for these policies: -- **Audit** - The default setting, and will only capture an audit report in the Azure Policy activity logs -- **Deny** - Prevents logical server or managed instance creation or update without a customer-managed key configured -- **Disabled** - Will disable the policy, and won't restrict users from creating or updating a logical server or managed instance without customer-managed TDE enabled - -If the Azure Policy for customer-managed TDE is set to **Deny**, Azure SQL logical server or managed instance creation will fail. The details of this failure will be recorded in the **Activity log** of the resource group. - -> [!IMPORTANT] -> Earlier versions of built-in policies for customer-managed TDE containing the `AuditIfNotExist` effect have been deprecated. Existing policy assignments using the deprecated policies are not impacted and will continue to work as before. 
- -## Next steps - -You may also want to check the following PowerShell sample scripts for the common operations with customer-managed TDE: - -- [Rotate the transparent data encryption Protector for SQL Database](transparent-data-encryption-byok-key-rotation.md) - -- [Remove a transparent data encryption (TDE) protector for SQL Database](transparent-data-encryption-byok-remove-tde-protector.md) - -- [Manage transparent data encryption in SQL Managed Instance with your own key using PowerShell](../managed-instance/scripts/transparent-data-encryption-byok-powershell.md?toc=%2fpowershell%2fmodule%2ftoc.json) diff --git a/articles/azure-sql/database/transparent-data-encryption-byok-remove-tde-protector.md b/articles/azure-sql/database/transparent-data-encryption-byok-remove-tde-protector.md deleted file mode 100644 index 0a65d4d86b72a..0000000000000 --- a/articles/azure-sql/database/transparent-data-encryption-byok-remove-tde-protector.md +++ /dev/null @@ -1,188 +0,0 @@ ---- -title: Remove TDE protector (PowerShell & the Azure CLI) -titleSuffix: Azure SQL Database & Azure Synapse Analytics -description: "Learn how to respond to a potentially compromised TDE protector for Azure SQL Database or Azure Synapse Analytics using TDE with Bring Your Own Key (BYOK) support." -services: sql-database -ms.service: sql-database -ms.subservice: security -ms.custom: seo-lt-2019 sqldbrb=1, devx-track-azurecli -ms.topic: how-to -author: shohamMSFT -ms.author: shohamd -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 06/23/2021 ---- -# Remove a Transparent Data Encryption (TDE) protector using PowerShell -[!INCLUDE[appliesto-sqldb-asa](../includes/appliesto-sqldb-asa.md)] - - -This topic describes how to respond to a potentially compromised TDE protector for Azure SQL Database or Azure Synapse Analytics that is using TDE with customer-managed keys in Azure Key Vault - Bring Your Own Key (BYOK) support. 
To learn more about BYOK support for TDE, see the [overview page](transparent-data-encryption-byok-overview.md). - -> [!CAUTION] -> The procedures outlined in this article should only be done in extreme cases or in test environments. Review the steps carefully, as deleting actively used TDE protectors from Azure Key Vault will result in **databases becoming unavailable**. - -If a key is ever suspected to be compromised, such that a service or user had unauthorized access to the key, it's best to delete the key. - -Keep in mind that once the TDE protector is deleted in Key Vault, in up to 10 minutes, all encrypted databases will start denying all connections with the corresponding error message and change their state to [Inaccessible](./transparent-data-encryption-byok-overview.md#inaccessible-tde-protector). - -This how-to guide goes over the approach to render databases **inaccessible** in response to a compromised-key incident. - -> [!NOTE] -> This article applies to Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics (dedicated SQL pools (formerly SQL DW)). For documentation on Transparent Data Encryption for dedicated SQL pools inside Synapse workspaces, see [Azure Synapse Analytics encryption](../../synapse-analytics/security/workspaces-encryption.md). - -## Prerequisites - -- You must have an Azure subscription and be an administrator on that subscription -- You must have Azure PowerShell installed and running. -- This how-to guide assumes that you are already using a key from Azure Key Vault as the TDE protector for an Azure SQL Database or Azure Synapse. See [Transparent Data Encryption with BYOK Support](transparent-data-encryption-byok-overview.md) to learn more. - -# [PowerShell](#tab/azure-powershell) - - For Az module installation instructions, see [Install Azure PowerShell](/powershell/azure/install-az-ps). For specific cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). 
- -> [!IMPORTANT] -> The PowerShell Azure Resource Manager (RM) module is still supported but all future development is for the Az.Sql module. The AzureRM module will continue to receive bug fixes until at least December 2020. The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. For more about their compatibility, see [Introducing the new Azure PowerShell Az module](/powershell/azure/new-azureps-module-az). - -# [The Azure CLI](#tab/azure-cli) - -For installation, see [Install the Azure CLI](/cli/azure/install-azure-cli). - -* * * - -## Check TDE Protector thumbprints - -The following steps outline how to check the TDE Protector thumbprints still in use by Virtual Log Files (VLF) of a given database. -The thumbprint of the current TDE protector of the database, and the database ID can be found by running: - -```sql -SELECT [database_id], -      [encryption_state], - [encryptor_type], /*asymmetric key means AKV, certificate means service-managed keys*/ - [encryptor_thumbprint] - FROM [sys].[dm_database_encryption_keys] -``` - -The following query returns the VLFs and the TDE Protector respective thumbprints in use. Each different thumbprint refers to different key in Azure Key Vault (AKV): - -```sql -SELECT * FROM sys.dm_db_log_info (database_id) -``` - -Alternatively, you can use PowerShell or the Azure CLI: - -# [PowerShell](#tab/azure-powershell) - -The PowerShell command **Get-AzureRmSqlServerKeyVaultKey** provides the thumbprint of the TDE Protector used in the query, so you can see which keys to keep and which keys to delete in AKV. Only keys no longer used by the database can be safely deleted from Azure Key Vault. - -# [The Azure CLI](#tab/azure-cli) - -The Azure CLI command **az sql server key show** provides the thumbprint of the TDE Protector used in the query, so you can see which keys to keep and which keys to delete in AKV. 
Only keys no longer used by the database can be safely deleted from Azure Key Vault. - -* * * - -## Keep encrypted resources accessible - -# [PowerShell](#tab/azure-powershell) - -1. Create a [new key in Key Vault](/powershell/module/az.keyvault/add-azkeyvaultkey). Make sure this new key is created in a separate key vault from the potentially compromised TDE protector, since access control is provisioned on a vault level. - -2. Add the new key to the server using the [Add-AzSqlServerKeyVaultKey](/powershell/module/az.sql/add-azsqlserverkeyvaultkey) and [Set-AzSqlServerTransparentDataEncryptionProtector](/powershell/module/az.sql/set-azsqlservertransparentdataencryptionprotector) cmdlets and update it as the server's new TDE protector. - - ```powershell - # add the key from Key Vault to the server - Add-AzSqlServerKeyVaultKey -ResourceGroupName -ServerName -KeyId - - # set the key as the TDE protector for all resources under the server - Set-AzSqlServerTransparentDataEncryptionProtector -ResourceGroupName ` - -ServerName -Type AzureKeyVault -KeyId - ``` - -3. Make sure the server and any replicas have updated to the new TDE protector using the [Get-AzSqlServerTransparentDataEncryptionProtector](/powershell/module/az.sql/get-azsqlservertransparentdataencryptionprotector) cmdlet. - - > [!NOTE] - > It may take a few minutes for the new TDE protector to propagate to all databases and secondary databases under the server. - - ```powershell - Get-AzSqlServerTransparentDataEncryptionProtector -ServerName -ResourceGroupName - ``` - -4. Take a [backup of the new key](/powershell/module/az.keyvault/backup-azkeyvaultkey) in Key Vault. - - ```powershell - # -OutputFile parameter is optional; if removed, a file name is automatically generated. - Backup-AzKeyVaultKey -VaultName -Name -OutputFile - ``` - -5. Delete the compromised key from Key Vault using the [Remove-AzKeyVaultKey](/powershell/module/az.keyvault/remove-azkeyvaultkey) cmdlet. 
- - ```powershell - Remove-AzKeyVaultKey -VaultName -Name - ``` - -6. To restore a key to Key Vault in the future using the [Restore-AzKeyVaultKey](/powershell/module/az.keyvault/restore-azkeyvaultkey) cmdlet: - - ```powershell - Restore-AzKeyVaultKey -VaultName -InputFile - ``` - -# [The Azure CLI](#tab/azure-cli) - -For command reference, see the [Azure CLI keyvault](/cli/azure/keyvault/key). - -1. Create a [new key in Key Vault](/cli/azure/keyvault/key#az-keyvault-key-create). Make sure this new key is created in a separate key vault from the potentially compromised TDE protector, since access control is provisioned on a vault level. - -2. Add the new key to the server and update it as the new TDE protector of the server. - - ```azurecli - # add the key from Key Vault to the server - az sql server key create --kid --resource-group --server - - # set the key as the TDE protector for all resources under the server - az sql server tde-key set --server-key-type AzureKeyVault --kid --resource-group --server - ``` - -3. Make sure the server and any replicas have updated to the new TDE protector. - - > [!NOTE] - > It may take a few minutes for the new TDE protector to propagate to all databases and secondary databases under the server. - - ```azurecli - az sql server tde-key show --resource-group --server - ``` - -4. Take a backup of the new key in Key Vault. - - ```azurecli - # --file parameter is optional; if removed, a file name is automatically generated. - az keyvault key backup --file --name --vault-name - ``` - -5. Delete the compromised key from Key Vault. - - ```azurecli - az keyvault key delete --name --vault-name - ``` - -6. To restore a key to Key Vault in the future. - - ```azurecli - az keyvault key restore --file --vault-name - ``` - -* * * - -## Make encrypted resources inaccessible - -1. Drop the databases that are being encrypted by the potentially compromised key. 
- - The database and log files are automatically backed up, so a point-in-time restore of the database can be done at any point (as long as you provide the key). The databases must be dropped before deletion of an active TDE protector to prevent potential data loss of up to 10 minutes of the most recent transactions. - -2. Back up the key material of the TDE protector in Key Vault. -3. Remove the potentially compromised key from Key Vault - -[!INCLUDE [sql-database-akv-permission-delay](../includes/sql-database-akv-permission-delay.md)] - -## Next steps - -- Learn how to rotate the TDE protector of a server to comply with security requirements: [Rotate the Transparent Data Encryption protector Using PowerShell](transparent-data-encryption-byok-key-rotation.md) -- Get started with Bring Your Own Key support for TDE: [Turn on TDE using your own key from Key Vault using PowerShell](transparent-data-encryption-byok-configure.md) \ No newline at end of file diff --git a/articles/azure-sql/database/transparent-data-encryption-tde-overview.md b/articles/azure-sql/database/transparent-data-encryption-tde-overview.md deleted file mode 100644 index 3fb70ceee81a0..0000000000000 --- a/articles/azure-sql/database/transparent-data-encryption-tde-overview.md +++ /dev/null @@ -1,157 +0,0 @@ ---- -title: Transparent data encryption -titleSuffix: Azure SQL Database & SQL Managed Instance & Azure Synapse Analytics -description: "An overview of transparent data encryption for Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics. The document covers its benefits and the options for configuration, which includes service-managed transparent data encryption and Bring Your Own Key." 
-services: sql-database -ms.service: sql-database -ms.subservice: security -ms.custom: seo-lt-2019 sqldbrb=3 -ms.devlang: -ms.topic: conceptual -author: shohamMSFT -ms.author: shohamd -ms.reviewer: kendralittle, vanto, mathoma -ms.date: 06/23/2021 ---- -# Transparent data encryption for SQL Database, SQL Managed Instance, and Azure Synapse Analytics -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -[Transparent data encryption (TDE)](/sql/relational-databases/security/encryption/transparent-data-encryption) helps protect Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics against the threat of malicious offline activity by encrypting data at rest. It performs real-time encryption and decryption of the database, associated backups, and transaction log files at rest without requiring changes to the application. By default, TDE is enabled for all newly deployed Azure SQL Databases and must be manually enabled for older databases of Azure SQL Database. For Azure SQL Managed Instance, TDE is enabled at the instance level and newly created databases. TDE must be manually enabled for Azure Synapse Analytics. - -> [!NOTE] -> This article applies to Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics (dedicated SQL pools (formerly SQL DW)). For documentation on Transparent Data Encryption for dedicated SQL pools inside Synapse workspaces, see [Azure Synapse Analytics encryption](../../synapse-analytics/security/workspaces-encryption.md). - -TDE performs real-time I/O encryption and decryption of the data at the page level. Each page is decrypted when it's read into memory and then encrypted before being written to disk. TDE encrypts the storage of an entire database by using a symmetric key called the Database Encryption Key (DEK). On database startup, the encrypted DEK is decrypted and then used for decryption and re-encryption of the database files in the SQL Server database engine process. 
DEK is protected by the TDE protector. TDE protector is either a service-managed certificate (service-managed transparent data encryption) or an asymmetric key stored in [Azure Key Vault](../../key-vault/general/security-features.md) (customer-managed transparent data encryption). - -For Azure SQL Database and Azure Synapse, the TDE protector is set at the [server](logical-servers.md) level and is inherited by all databases associated with that server. For Azure SQL Managed Instance, the TDE protector is set at the instance level and it is inherited by all encrypted databases on that instance. The term *server* refers both to server and instance throughout this document, unless stated differently. - -> [!IMPORTANT] -> All newly created databases in SQL Database are encrypted by default by using service-managed transparent data encryption. Existing SQL databases created before May 2017 and SQL databases created through restore, geo-replication, and database copy are not encrypted by default. Existing SQL Managed Instance databases created before February 2019 are not encrypted by default. SQL Managed Instance databases created through restore inherit encryption status from the source. To restore an existing TDE-encrypted database, the required TDE certificate must first be [imported](../managed-instance/tde-certificate-migrate.md) into the SQL Managed Instance. - -> [!NOTE] -> TDE cannot be used to encrypt system databases, such as the **master** database, in Azure SQL Database and Azure SQL Managed Instance. The **master** database contains objects that are needed to perform the TDE operations on the user databases. It is recommended to not store any sensitive data in the system databases. [Infrastructure encryption](transparent-data-encryption-byok-overview.md#doubleencryption) is now being rolled out which encrypts the system databases including master. 
- - -## Service-managed transparent data encryption - -In Azure, the default setting for TDE is that the DEK is protected by a built-in server certificate. The built-in server certificate is unique for each server and the encryption algorithm used is AES 256. If a database is in a geo-replication relationship, both the primary and geo-secondary databases are protected by the primary database's parent server key. If two databases are connected to the same server, they also share the same built-in certificate. Microsoft automatically rotates these certificates in compliance with the internal security policy and the root key is protected by a Microsoft internal secret store. Customers can verify SQL Database and SQL Managed Instance compliance with internal security policies in independent third-party audit reports available on the [Microsoft Trust Center](https://servicetrust.microsoft.com/). - -Microsoft also seamlessly moves and manages the keys as needed for geo-replication and restores. - -## Customer-managed transparent data encryption - Bring Your Own Key - -Customer-managed TDE is also referred to as Bring Your Own Key (BYOK) support for TDE. In this scenario, the TDE Protector that encrypts the DEK is a customer-managed asymmetric key, which is stored in a customer-owned and managed Azure Key Vault (Azure's cloud-based external key management system) and never leaves the key vault. The TDE Protector can be [generated by the key vault or transferred to the key vault](../../key-vault/keys/hsm-protected-keys.md) from an on-premises hardware security module (HSM) device. SQL Database, SQL Managed Instance, and Azure Synapse need to be granted permissions to the customer-owned key vault to decrypt and encrypt the DEK. If permissions of the server to the key vault are revoked, a database will be inaccessible, and all data is encrypted. 
- -With TDE with Azure Key Vault integration, users can control key management tasks including key rotations, key vault permissions, key backups, and enable auditing/reporting on all TDE protectors using Azure Key Vault functionality. Key Vault provides central key management, leverages tightly monitored HSMs, and enables separation of duties between management of keys and data to help meet compliance with security policies. -To learn more about BYOK for Azure SQL Database and Azure Synapse, see [Transparent data encryption with Azure Key Vault integration](transparent-data-encryption-byok-overview.md). - -To start using TDE with Azure Key Vault integration, see the how-to guide [Turn on transparent data encryption by using your own key from Key Vault](transparent-data-encryption-byok-configure.md). - -## Move a transparent data encryption-protected database - -You don't need to decrypt databases for operations within Azure. The TDE settings on the source database or primary database are transparently inherited on the target. Operations that are included involve: - -- Geo-restore -- Self-service point-in-time restore -- Restoration of a deleted database -- Active geo-replication -- Creation of a database copy -- Restore of backup file to Azure SQL Managed Instance - -> [!IMPORTANT] -> Taking manual COPY-ONLY backup of a database encrypted by service-managed TDE is not supported in Azure SQL Managed Instance, since the certificate used for encryption is not accessible. Use point-in-time-restore feature to move this type of database to another SQL Managed Instance, or switch to customer-managed key. - -When you export a TDE-protected database, the exported content of the database isn't encrypted. This exported content is stored in unencrypted BACPAC files. Be sure to protect the BACPAC files appropriately and enable TDE after import of the new database is finished. 
- -For example, if the BACPAC file is exported from a SQL Server instance, the imported content of the new database isn't automatically encrypted. Likewise, if the BACPAC file is imported to a SQL Server instance, the new database also isn't automatically encrypted. - -The one exception is when you export a database to and from SQL Database. TDE is enabled on the new database, but the BACPAC file itself still isn't encrypted. - -## Manage transparent data encryption - -# [The Azure portal](#tab/azure-portal) - -Manage TDE in the Azure portal. - -To configure TDE through the Azure portal, you must be connected as the Azure Owner, Contributor, or SQL Security Manager. - -Enable and disable TDE on the database level. For Azure SQL Managed Instance use Transact-SQL (T-SQL) to turn TDE on and off on a database. For Azure SQL Database and Azure Synapse, you can manage TDE for the database in the [Azure portal](https://portal.azure.com) after you've signed in with the Azure Administrator or Contributor account. Find the TDE settings under your user database. By default, service-managed transparent data encryption is used. A TDE certificate is automatically generated for the server that contains the database. - -![Service-managed transparent data encryption](./media/transparent-data-encryption-tde-overview/service-managed-transparent-data-encryption.png) - -You set the TDE master key, known as the TDE protector, at the server or instance level. To use TDE with BYOK support and protect your databases with a key from Key Vault, open the TDE settings under your server. - -![Transparent data encryption with Bring Your Own Key support](./media/transparent-data-encryption-tde-overview/tde-byok-support.png) - -# [PowerShell](#tab/azure-powershell) - -Manage TDE by using PowerShell. 
- -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. - -To configure TDE through PowerShell, you must be connected as the Azure Owner, Contributor, or SQL Security Manager. - -### Cmdlets for Azure SQL Database and Azure Synapse - -Use the following cmdlets for Azure SQL Database and Azure Synapse: - -| Cmdlet | Description | -| --- | --- | -| [Set-AzSqlDatabaseTransparentDataEncryption](/powershell/module/az.sql/set-azsqldatabasetransparentdataencryption) |Enables or disables transparent data encryption for a database.| -| [Get-AzSqlDatabaseTransparentDataEncryption](/powershell/module/az.sql/get-azsqldatabasetransparentdataencryption) |Gets the transparent data encryption state for a database. | -| [Get-AzSqlDatabaseTransparentDataEncryptionActivity](/powershell/module/az.sql/get-azsqldatabasetransparentdataencryptionactivity) |Checks the encryption progress for a database. | -| [Add-AzSqlServerKeyVaultKey](/powershell/module/az.sql/add-azsqlserverkeyvaultkey) |Adds a Key Vault key to a server. | -| [Get-AzSqlServerKeyVaultKey](/powershell/module/az.sql/get-azsqlserverkeyvaultkey) |Gets the Key Vault keys for a server | -| [Set-AzSqlServerTransparentDataEncryptionProtector](/powershell/module/az.sql/set-azsqlservertransparentdataencryptionprotector) |Sets the transparent data encryption protector for a server. | -| [Get-AzSqlServerTransparentDataEncryptionProtector](/powershell/module/az.sql/get-azsqlservertransparentdataencryptionprotector) |Gets the transparent data encryption protector | -| [Remove-AzSqlServerKeyVaultKey](/powershell/module/az.sql/remove-azsqlserverkeyvaultkey) |Removes a Key Vault key from a server. 
| - - -> [!IMPORTANT] -> For Azure SQL Managed Instance, use the T-SQL [ALTER DATABASE](/sql/t-sql/statements/alter-database-azure-sql-database) command to turn TDE on and off on a database level, and check [sample PowerShell script](transparent-data-encryption-byok-configure.md) to manage TDE on an instance level. - -# [Transact-SQL](#tab/azure-TransactSQL) - -Manage TDE by using Transact-SQL. - -Connect to the database by using a login that is an administrator or member of the **dbmanager** role in the master database. - -| Command | Description | -| --- | --- | -| [ALTER DATABASE (Azure SQL Database)](/sql/t-sql/statements/alter-database-azure-sql-database) | SET ENCRYPTION ON/OFF encrypts or decrypts a database | -| [sys.dm_database_encryption_keys](/sql/relational-databases/system-dynamic-management-views/sys-dm-database-encryption-keys-transact-sql) |Returns information about the encryption state of a database and its associated database encryption keys | -| [sys.dm_pdw_nodes_database_encryption_keys](/sql/relational-databases/system-dynamic-management-views/sys-dm-pdw-nodes-database-encryption-keys-transact-sql) |Returns information about the encryption state of each Azure Synapse node and its associated database encryption keys | - - -You can't switch the TDE protector to a key from Key Vault by using Transact-SQL. Use PowerShell or the Azure portal. - -# [REST API](#tab/azure-RESTAPI) - -Manage TDE by using the REST API. - -To configure TDE through the REST API, you must be connected as the Azure Owner, Contributor, or SQL Security Manager. -Use the following set of commands for Azure SQL Database and Azure Synapse: - -| Command | Description | -| --- | --- | -|[Create Or Update Server](/rest/api/sql/servers/createorupdate)|Adds an Azure Active Directory identity to a server. 
(used to grant access to Key Vault)| -|[Create Or Update Server Key](/rest/api/sql/serverkeys/createorupdate)|Adds a Key Vault key to a server.| -|[Delete Server Key](/rest/api/sql/serverkeys/delete)|Removes a Key Vault key from a server. | -|[Get Server Keys](/rest/api/sql/serverkeys/get)|Gets a specific Key Vault key from a server.| -|[List Server Keys By Server](/rest/api/sql/serverkeys/listbyserver)|Gets the Key Vault keys for a server. | -|[Create Or Update Encryption Protector](/rest/api/sql/encryptionprotectors/createorupdate)|Sets the TDE protector for a server.| -|[Get Encryption Protector](/rest/api/sql/encryptionprotectors/get)|Gets the TDE protector for a server.| -|[List Encryption Protectors By Server](/rest/api/sql/encryptionprotectors/listbyserver)|Gets the TDE protectors for a server. | -|[Create Or Update Transparent Data Encryption Configuration](/rest/api/sql/transparentdataencryptions/createorupdate)|Enables or disables TDE for a database.| -|[Get Transparent Data Encryption Configuration](/rest/api/sql/transparentdataencryptions/get)|Gets the TDE configuration for a database.| -|[List Transparent Data Encryption Configuration Results](/rest/api/sql/transparentdataencryptionactivities/listbyconfiguration)|Gets the encryption result for a database.| - -## See Also - -- SQL Server running on an Azure virtual machine also can use an asymmetric key from Key Vault. The configuration steps are different from using an asymmetric key in SQL Database and SQL Managed Instance. For more information, see [Extensible key management by using Azure Key Vault (SQL Server)](/sql/relational-databases/security/encryption/extensible-key-management-using-azure-key-vault-sql-server). -- For a general description of TDE, see [Transparent data encryption](/sql/relational-databases/security/encryption/transparent-data-encryption). 
-- To learn more about TDE with BYOK support for Azure SQL Database, Azure SQL Managed Instance and Azure Synapse, see [Transparent data encryption with Bring Your Own Key support](transparent-data-encryption-byok-overview.md). -- To start using TDE with Bring Your Own Key support, see the how-to guide, [Turn on transparent data encryption by using your own key from Key Vault](transparent-data-encryption-byok-configure.md). -- For more information about Key Vault, see [Secure access to a key vault](../../key-vault/general/security-features.md). diff --git a/articles/azure-sql/database/troubleshoot-common-connectivity-issues.md b/articles/azure-sql/database/troubleshoot-common-connectivity-issues.md deleted file mode 100644 index 00b107912dd18..0000000000000 --- a/articles/azure-sql/database/troubleshoot-common-connectivity-issues.md +++ /dev/null @@ -1,458 +0,0 @@ ---- -title: Working with transient errors -description: Learn how to troubleshoot, diagnose, and prevent a SQL connection error or transient error when connecting to Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics. -keywords: sql connection,connection string,connectivity issues,transient error,connection error -services: sql-database -ms.service: sql-database -ms.subservice: development -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: troubleshooting -author: ramakoni1 -ms.author: ramakoni -ms.reviewer: kendralittle, mathoma, vanto -ms.date: 01/14/2020 ---- - -# Troubleshoot transient connection errors in SQL Database and SQL Managed Instance - -[!INCLUDE[appliesto-sqldb-sqlmi-asa](../includes/appliesto-sqldb-sqlmi-asa.md)] - -This article describes how to prevent, troubleshoot, diagnose, and mitigate connection errors and transient errors that your client application encounters when it interacts with Azure SQL Database, Azure SQL Managed Instance, and Azure Synapse Analytics. Learn how to configure retry logic, build the connection string, and adjust other connection settings. 
- - - -## Transient errors (transient faults) - -A transient error, also known as a transient fault, has an underlying cause that soon resolves itself. An occasional cause of transient errors is when the Azure system quickly shifts hardware resources to better load-balance various workloads. Most of these reconfiguration events finish in less than 60 seconds. During this reconfiguration time span, you might have issues with connecting to your database in SQL Database. Applications that connect to your database should be built to expect these transient errors. To handle them, implement retry logic in their code instead of surfacing them to users as application errors. - -If your client program uses ADO.NET, your program is told about the transient error by the throw of **SqlException**. - - - -### Connection vs. command - -Retry the SQL Database and SQL Managed Instance connection or establish it again, depending on the following: - -- **A transient error occurs during a connection try** - -After a delay of several seconds, retry the connection. - -- **A transient error occurs during a SQL Database and SQL Managed Instance query command** - -Do not immediately retry the command. Instead, after a delay, freshly establish the connection. Then retry the command. - - - -## Retry logic for transient errors - -Client programs that occasionally encounter a transient error are more robust when they contain retry logic. When your program communicates with your database in SQL Database through third-party middleware, ask the vendor whether the middleware contains retry logic for transient errors. - - - -### Principles for retry - -- If the error is transient, retry to open a connection. -- Do not directly retry a SQL Database or SQL Managed Instance `SELECT` statement that failed with a transient error. Instead, establish a fresh connection, and then retry the `SELECT`. 
-- When a SQL Database or SQL Managed Instance `UPDATE` statement fails with a transient error, establish a fresh connection before you retry the UPDATE. The retry logic must ensure that either the entire database transaction finished or that the entire transaction is rolled back. - -### Other considerations for retry - -- A batch program that automatically starts after work hours and finishes before morning can afford to be very patient with long time intervals between its retry attempts. -- A user interface program should account for the human tendency to give up after too long a wait. The solution must not retry every few seconds, because that policy can flood the system with requests. - -### Interval increase between retries - -We recommend that you wait for 5 seconds before your first retry. Retrying after a delay shorter than 5 seconds risks overwhelming the cloud service. For each subsequent retry, the delay should grow exponentially, up to a maximum of 60 seconds. - -For a discussion of the blocking period for clients that use ADO.NET, see [Connection pooling (ADO.NET)](/dotnet/framework/data/adonet/sql-server-connection-pooling). - -You also might want to set a maximum number of retries before the program self-terminates. - -### Code samples with retry logic - -Code examples with retry logic are available at: - -- [Connect resiliently to Azure SQL with ADO.NET][step-4-connect-resiliently-to-sql-with-ado-net-a78n] -- [Connect resiliently to Azure SQL with PHP][step-4-connect-resiliently-to-sql-with-php-p42h] - - - -### Test your retry logic - -To test your retry logic, you must simulate or cause an error that can be corrected while your program is still running. - -#### Test by disconnecting from the network - -One way you can test your retry logic is to disconnect your client computer from the network while the program is running. 
The error is: - -- **SqlException.Number** = 11001 -- Message: "No such host is known" - -As part of the first retry attempt, you can reconnect your client computer to the network and then attempt to connect. - -To make this test practical, unplug your computer from the network before you start your program. Then your program recognizes a runtime parameter that causes the program to: - -- Temporarily add 11001 to its list of errors to consider as transient. -- Attempt its first connection as usual. -- After the error is caught, remove 11001 from the list. -- Display a message that tells the user to plug the computer into the network. -- Pause further execution by using either the **Console.ReadLine** method or a dialog with an OK button. The user presses the Enter key after the computer is plugged into the network. -- Attempt again to connect, expecting success. - -#### Test by misspelling the user name when connecting - -Your program can purposely misspell the user name before the first connection attempt. The error is: - -- **SqlException.Number** = 18456 -- Message: "Login failed for user 'WRONG_MyUserName'." - -As part of the first retry attempt, your program can correct the misspelling and then attempt to connect. - -To make this test practical, your program recognizes a runtime parameter that causes the program to: - -- Temporarily add 18456 to its list of errors to consider as transient. -- Purposely add 'WRONG_' to the user name. -- After the error is caught, remove 18456 from the list. -- Remove 'WRONG_' from the user name. -- Attempt again to connect, expecting success. - - - -## .NET SqlConnection parameters for connection retry - -If your client program connects to your database in SQL Database by using the .NET Framework class **System.Data.SqlClient.SqlConnection**, use .NET 4.6.1 or later (or .NET Core) so that you can use its connection retry feature. 
For more information on the feature, see [SqlConnection.ConnectionString Property](/dotnet/api/system.data.sqlclient.sqlconnection.connectionstring?view=netframework-4.8&preserve-view=true). - - - -When you build the [connection string](/dotnet/api/system.data.sqlclient.sqlconnection.connectionstring) for your **SqlConnection** object, coordinate the values among the following parameters: - -- **ConnectRetryCount**:  Default is 1. Range is 0 through 255. -- **ConnectRetryInterval**:  Default is 10 seconds. Range is 1 through 60. -- **Connection Timeout**:  Default is 15 seconds. Range is 0 through 2147483647. - -Specifically, your chosen values should make the following equality true: Connection Timeout = ConnectRetryCount * ConnectRetryInterval - -For example, if the count equals 3 and the interval equals 10 seconds, a timeout of only 29 seconds doesn't give the system enough time for its third and final retry to connect: 29 < 3 * 10.
If you add **ConnectRetryInterval** and **ConnectRetryCount** =3 to your connection string, you will increase the retry count to 4 * 3 = 12 retries. You might not intend such a high number of retries. - - - -## Connections to your database in SQL Database - - - -### Connection: Connection string - -The connection string that's necessary to connect to your database is slightly different from the string used to connect to SQL Server. You can copy the connection string for your database from the [Azure portal](https://portal.azure.com/). - -[!INCLUDE [sql-database-include-connection-string-20-portalshots](../../../includes/sql-database-include-connection-string-20-portalshots.md)] - - - -### Connection: IP address - -You must configure SQL Database to accept communication from the IP address of the computer that hosts your client program. To set up this configuration, edit the firewall settings through the [Azure portal](https://portal.azure.com/). - -If you forget to configure the IP address, your program fails with a handy error message that states the necessary IP address. - -[!INCLUDE [sql-database-include-ip-address-22-portal](../../../includes/sql-database-include-ip-address-22-v12portal.md)] - -For more information, see -[Configure firewall settings in SQL Database](firewall-configure.md). - - -### Connection: Ports - -Typically, you need to ensure that only port 1433 is open for outbound communication on the computer that hosts your client program. - -For example, when your client program is hosted on a Windows computer, you can use Windows Firewall on the host to open port 1433. - -1. Open Control Panel. -2. Select **All Control Panel Items** > **Windows Firewall** > **Advanced Settings** > **Outbound Rules** > **Actions** > **New Rule**. - -If your client program is hosted on an Azure virtual machine (VM), read [Ports beyond 1433 for ADO.NET 4.5 and SQL Database](adonet-v12-develop-direct-route-ports.md). 
- -For background information about configuration of ports and IP addresses in your database, see [Azure SQL Database firewall](firewall-configure.md). - - - -### Connection: ADO.NET 4.6.2 or later - -If your program uses ADO.NET classes like **System.Data.SqlClient.SqlConnection** to connect to SQL Database, we recommend that you use .NET Framework version 4.6.2 or later. - -#### Starting with ADO.NET 4.6.2 - -- The connection open attempt is retried immediately for Azure SQL, thereby improving the performance of cloud-enabled apps. - -#### Starting with ADO.NET 4.6.1 - -- For SQL Database, reliability is improved when you open a connection by using the **SqlConnection.Open** method. The **Open** method now incorporates best-effort retry mechanisms in response to transient faults for certain errors within the connection timeout period. -- Connection pooling is supported, which includes an efficient verification that the connection object it gives your program is functioning. - -When you use a connection object from a connection pool, we recommend that your program temporarily closes the connection when it's not immediately in use. It's not expensive to reopen a connection, but it is expensive to create a new one. - -If you use ADO.NET 4.0 or earlier, we recommend that you upgrade to the latest ADO.NET. As of August 2018, you can [download ADO.NET 4.6.2](https://blogs.msdn.microsoft.com/dotnet/2018/04/30/announcing-the-net-framework-4-7-2/). - - - -## Diagnostics - - - -### Diagnostics: Test whether utilities can connect - -If your program fails to connect to your database in SQL Database, one diagnostic option is to try to connect with a utility program. Ideally, the utility connects by using the same library that your program uses.
- -On any Windows computer, you can try these utilities: - -- SQL Server Management Studio (ssms.exe), which connects by using ADO.NET -- `sqlcmd.exe`, which connects by using [ODBC](/sql/connect/odbc/microsoft-odbc-driver-for-sql-server) - -After your program is connected, test whether a short SQL SELECT query works. - - - -### Diagnostics: Check the open ports - -If you suspect that connection attempts fail due to port issues, you can run a utility on your computer that reports on the port configurations. - -On Linux, the following utilities might be helpful: - -- `netstat -nap` -- `nmap -sS -O 127.0.0.1`: Change the example value to be your IP address. - -On Windows, the [PortQry.exe](https://www.microsoft.com/download/details.aspx?id=17148) utility might be helpful. Here's an example execution that queried the port situation on a database in SQL Database and that was run on a laptop computer: - -```cmd -[C:\Users\johndoe\] ->> portqry.exe -n johndoesvr9.database.windows.net -p tcp -e 1433 - -Querying target system called: johndoesvr9.database.windows.net - -Attempting to resolve name to IP address... -Name resolved to 23.100.117.95 - -querying... -TCP port 1433 (ms-sql-s service): LISTENING - -[C:\Users\johndoe\] ->> -``` - - - -### Diagnostics: Log your errors - -An intermittent problem is sometimes best diagnosed by detection of a general pattern over days or weeks. - -Your client can assist in a diagnosis by logging all errors it encounters. You might be able to correlate the log entries with error data that SQL Database logs itself internally. - -Enterprise Library 6 (EntLib60) offers .NET managed classes to assist with logging. For more information, see [5 - As easy as falling off a log: Use the Logging Application Block](/previous-versions/msp-n-p/dn440731(v=pandp.60)). - - - -### Diagnostics: Examine system logs for errors - -Here are some Transact-SQL SELECT statements that query error logs and other information. 
- -| Query of log | Description | -|:--- |:--- | -| `SELECT e.*`
    `FROM sys.event_log AS e`
    `WHERE e.database_name = 'myDbName'`
    `AND e.event_category = 'connectivity'`
    `AND 2 >= DateDiff`
      `(hour, e.end_time, GetUtcDate())`
    `ORDER BY e.event_category,`
      `e.event_type, e.end_time;` |The [sys.event_log](/sql/relational-databases/system-catalog-views/sys-event-log-azure-sql-database) view offers information about individual events, which includes some that can cause transient errors or connectivity failures.

    Ideally, you can correlate the **start_time** or **end_time** values with information about when your client program experienced problems.

    You must connect to the *master* database to run this query. | -| `SELECT c.*`
    `FROM sys.database_connection_stats AS c`
    `WHERE c.database_name = 'myDbName'`
    `AND 24 >= DateDiff`
      `(hour, c.end_time, GetUtcDate())`
    `ORDER BY c.end_time;` |The [sys.database_connection_stats](/sql/relational-databases/system-catalog-views/sys-database-connection-stats-azure-sql-database) view offers aggregated counts of event types for additional diagnostics.

    You must connect to the *master* database to run this query. | - - - -### Diagnostics: Search for problem events in the SQL Database log - -You can search for entries about problem events in the SQL Database log. Try the following Transact-SQL SELECT statement in the *master* database: - -```sql -SELECT - object_name - ,CAST(f.event_data as XML).value - ('(/event/@timestamp)[1]', 'datetime2') AS [timestamp] - ,CAST(f.event_data as XML).value - ('(/event/data[@name="error"]/value)[1]', 'int') AS [error] - ,CAST(f.event_data as XML).value - ('(/event/data[@name="state"]/value)[1]', 'int') AS [state] - ,CAST(f.event_data as XML).value - ('(/event/data[@name="is_success"]/value)[1]', 'bit') AS [is_success] - ,CAST(f.event_data as XML).value - ('(/event/data[@name="database_name"]/value)[1]', 'sysname') AS [database_name] -FROM - sys.fn_xe_telemetry_blob_target_read_file('el', null, null, null) AS f -WHERE - object_name != 'login_event' -- Login events are numerous. - and - '2015-06-21' < CAST(f.event_data as XML).value - ('(/event/@timestamp)[1]', 'datetime2') -ORDER BY - [timestamp] DESC -; -``` - -#### A few returned rows from sys.fn_xe_telemetry_blob_target_read_file - -The following example shows what a returned row might look like. The null values shown are often not null in other rows. - -``` -object_name timestamp error state is_success database_name - -database_xml_deadlock_report 2015-10-16 20:28:01.0090000 NULL NULL NULL AdventureWorks -``` - - - -## Enterprise Library 6 - -Enterprise Library 6 (EntLib60) is a framework of .NET classes that helps you implement robust clients of cloud services, one of which is SQL Database. To locate topics dedicated to each area in which EntLib60 can assist, see [Enterprise Library 6 - April 2013](/previous-versions/msp-n-p/dn169621(v=pandp.10)). - -Retry logic for handling transient errors is one area in which EntLib60 can assist. 
For more information, see [4 - Perseverance, secret of all triumphs: Use the Transient Fault Handling Application Block](/previous-versions/msp-n-p/dn440719(v=pandp.60)). - -> [!NOTE] -> The source code for EntLib60 is available for public download from the [Download Center](https://github.com/MicrosoftArchive/enterprise-library). Microsoft has no plans to make further feature updates or maintenance updates to EntLib. - - - -### EntLib60 classes for transient errors and retry - -The following EntLib60 classes are particularly useful for retry logic. All these classes are found in or under the namespace **Microsoft.Practices.EnterpriseLibrary.TransientFaultHandling**. - -In the namespace **Microsoft.Practices.EnterpriseLibrary.TransientFaultHandling**: - -- **RetryPolicy** class - - **ExecuteAction** method -- **ExponentialBackoff** class -- **SqlDatabaseTransientErrorDetectionStrategy** class -- **ReliableSqlConnection** class - - **ExecuteCommand** method - -In the namespace **Microsoft.Practices.EnterpriseLibrary.TransientFaultHandling.TestSupport**: - -- **AlwaysTransientErrorDetectionStrategy** class -- **NeverTransientErrorDetectionStrategy** class - -Here are some links to information about EntLib60: - -- Free book download: [Developer's Guide to Microsoft Enterprise Library, 2nd edition](https://www.microsoft.com/download/details.aspx?id=41145). -- Best practices: [Retry general guidance](/azure/architecture/best-practices/transient-faults) has an excellent in-depth discussion of retry logic. -- NuGet download: [Enterprise Library - Transient Fault Handling Application Block 6.0](https://www.nuget.org/packages/EnterpriseLibrary.TransientFaultHandling/). - - - -### EntLib60: The logging block - -- The logging block is a highly flexible and configurable solution that you can use to: - - Create and store log messages in a wide variety of locations. - - Categorize and filter messages. 
- - Collect contextual information that is useful for debugging and tracing, as well as for auditing and general logging requirements. -- The logging block abstracts the logging functionality from the log destination so that the application code is consistent, irrespective of the location and type of the target logging store. - -For more information, see -[5 - As easy as falling off a log: Use the Logging Application Block](/previous-versions/msp-n-p/dn440731(v=pandp.60)). - - - -### EntLib60 IsTransient method source code - -Next, from the **SqlDatabaseTransientErrorDetectionStrategy** class, is the C# source code for the **IsTransient** method. The source code clarifies which errors were considered transient and worthy of retry, as of April 2013. - -```csharp -public bool IsTransient(Exception ex) -{ - if (ex != null) - { - SqlException sqlException; - if ((sqlException = ex as SqlException) != null) - { - // Enumerate through all errors found in the exception. - foreach (SqlError err in sqlException.Errors) - { - switch (err.Number) - { - // SQL Error Code: 40501 - // The service is currently busy. Retry the request after 10 seconds. - // Code: (reason code to be decoded). - case ThrottlingCondition.ThrottlingErrorNumber: - // Decode the reason code from the error message to - // determine the grounds for throttling. - var condition = ThrottlingCondition.FromError(err); - - // Attach the decoded values as additional attributes to - // the original SQL exception. - sqlException.Data[condition.ThrottlingMode.GetType().Name] = - condition.ThrottlingMode.ToString(); - sqlException.Data[condition.GetType().Name] = condition; - - return true; - - case 10928: - case 10929: - case 10053: - case 10054: - case 10060: - case 40197: - case 40540: - case 40613: - case 40143: - case 233: - case 64: - // DBNETLIB Error Code: 20 - // The instance of SQL Server you attempted to connect to - // does not support encryption. 
- case (int)ProcessNetLibErrorCode.EncryptionNotSupported: - return true; - } - } - } - else if (ex is TimeoutException) - { - return true; - } - else - { - EntityException entityException; - if ((entityException = ex as EntityException) != null) - { - return this.IsTransient(entityException.InnerException); - } - } - } - - return false; -} -``` - -## Next steps - -- [Connection libraries for SQL Database and SQL Server](connect-query-content-reference-guide.md#libraries) -- [Connection pooling (ADO.NET)](/dotnet/framework/data/adonet/sql-server-connection-pooling) -- [*Retrying* is an Apache 2.0 licensed general-purpose retrying library, written in Python,](https://pypi.python.org/pypi/retrying) to simplify the task of adding retry behavior to just about anything. - - - -[step-4-connect-resiliently-to-sql-with-ado-net-a78n]: /sql/connect/ado-net/step-4-connect-resiliently-sql-ado-net - -[step-4-connect-resiliently-to-sql-with-php-p42h]: /sql/connect/php/step-4-connect-resiliently-to-sql-with-php - -## See also - -- [Troubleshooting connectivity issues and other errors with Azure SQL Database and Azure SQL Managed Instance](troubleshoot-common-errors-issues.md) -- [Troubleshooting transaction log errors with Azure SQL Database and Azure SQL Managed Instance](troubleshoot-transaction-log-errors-issues.md) \ No newline at end of file diff --git a/articles/azure-sql/database/troubleshoot-common-errors-issues.md b/articles/azure-sql/database/troubleshoot-common-errors-issues.md deleted file mode 100644 index 175258ba29179..0000000000000 --- a/articles/azure-sql/database/troubleshoot-common-errors-issues.md +++ /dev/null @@ -1,518 +0,0 @@ ---- -title: Troubleshoot common connection issues to Azure SQL Database -description: Provides steps to troubleshoot Azure SQL Database connection issues and resolve other Azure SQL Database or Azure SQL Managed Instance specific issues -services: sql-database -ms.service: sql-db-mi -ms.subservice: connect -ms.topic: troubleshooting 
-ms.custom: seo-lt-2019, OKR 11/2019, sqldbrb=1 -author: ramakoni1 -ms.author: ramakoni -ms.reviewer: kendralittle, mathoma, vanto -ms.date: 01/18/2022 ---- - -# Troubleshooting connectivity issues and other errors with Azure SQL Database and Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -You receive error messages when the connection to Azure SQL Database or Azure SQL Managed Instance fails. These connection problems can be caused by reconfiguration, firewall settings, a connection timeout, incorrect login information, or failure to apply best practices and design guidelines during the [application design](develop-overview.md) process. Additionally, if the maximum limit on some Azure SQL Database or SQL Managed Instance resources is reached, you can no longer connect. - -## Transient fault error messages (40197, 40613 and others) - -The Azure infrastructure has the ability to dynamically reconfigure servers when heavy workloads arise in the SQL Database service. This dynamic behavior might cause your client program to lose its connection to the database or instance. This kind of error condition is called a *transient fault*. Database reconfiguration events occur because of a planned event (for example, a software upgrade) or an unplanned event (for example, a process crash, or load balancing). Most reconfiguration events are generally short-lived and should be completed in less than 60 seconds at most. However, these events can occasionally take longer to finish, such as when a large transaction causes a long-running recovery. The following table lists various transient errors that applications can receive when connecting to Azure SQL Database. - -### List of transient fault error codes - -| Error code | Severity | Description | -| ---:| ---:|:--- | -| 926 |14 |Database 'replicatedmaster' cannot be opened. It has been marked SUSPECT by recovery. See the SQL Server errorlog for more information.

    This error may be logged on SQL Managed Instance errorlog, for a short period of time, during the last stage of a reconfiguration, while the old primary is shutting down its log.
    Other, non-transient scenarios involving this error message are described in the [MSSQL Errors documentation](/sql/relational-databases/errors-events/mssqlserver-926-database-engine-error).| -| 4060 |16 |Cannot open database "%.*ls" requested by the login. The login failed. For more information, see [Errors 4000 to 4999](/sql/relational-databases/errors-events/database-engine-events-and-errors#errors-4000-to-4999)| -| 40197 |17 |The service has encountered an error processing your request. Please try again. Error code %d.

    You receive this error when the service is down due to software or hardware upgrades, hardware failures, or any other failover problems. The error code (%d) embedded within the message of error 40197 provides additional information about the kind of failure or failover that occurred. Some examples of the error codes are embedded within the message of error 40197 are 40020, 40143, 40166, and 40540.

    Reconnecting automatically connects you to a healthy copy of your database. Your application must catch error 40197, log the embedded error code (%d) within the message for troubleshooting, and try reconnecting to SQL Database until the resources are available, and your connection is established again. For more information, see [Transient errors](troubleshoot-common-connectivity-issues.md#transient-errors-transient-faults).| -| 40501 |20 |The service is currently busy. Retry the request after 10 seconds. Incident ID: %ls. Code: %d. For more information, see:
    •  [Logical SQL server resource limits](resource-limits-logical-server.md)
    •  [DTU-based limits for single databases](service-tiers-dtu.md)
    •  [DTU-based limits for elastic pools](resource-limits-dtu-elastic-pools.md)
    •  [vCore-based limits for single databases](resource-limits-vcore-single-databases.md)
    •  [vCore-based limits for elastic pools](resource-limits-vcore-elastic-pools.md)
    •  [Azure SQL Managed Instance resource limits](../managed-instance/resource-limits.md).| -| 40613 |17 |Database '%.*ls' on server '%.*ls' is not currently available. Please retry the connection later. If the problem persists, contact customer support, and provide them the session tracing ID of '%.*ls'.

    This error may occur if there is already an existing dedicated administrator connection (DAC) established to the database. For more information, see [Transient errors](troubleshoot-common-connectivity-issues.md#transient-errors-transient-faults).| -| 49918 |16 |Cannot process request. Not enough resources to process request.

    The service is currently busy. Please retry the request later. For more information, see:
    •  [Logical SQL server resource limits](resource-limits-logical-server.md)
    •  [DTU-based limits for single databases](service-tiers-dtu.md)
    •  [DTU-based limits for elastic pools](resource-limits-dtu-elastic-pools.md)
    •  [vCore-based limits for single databases](resource-limits-vcore-single-databases.md)
    •  [vCore-based limits for elastic pools](resource-limits-vcore-elastic-pools.md)
    •  [Azure SQL Managed Instance resource limits](../managed-instance/resource-limits.md). | -| 49919 |16 |Cannot process create or update request. Too many create or update operations in progress for subscription "%ld".

    The service is busy processing multiple create or update requests for your subscription or server. Requests are currently blocked for resource optimization. Query [sys.dm_operation_status](/sql/relational-databases/system-dynamic-management-views/sys-dm-operation-status-azure-sql-database) for pending operations. Wait until pending create or update requests are complete or delete one of your pending requests and retry your request later. For more information, see:
    •  [Logical SQL server resource limits](resource-limits-logical-server.md)
    •  [DTU-based limits for single databases](service-tiers-dtu.md)
    •  [DTU-based limits for elastic pools](resource-limits-dtu-elastic-pools.md)
    •  [vCore-based limits for single databases](resource-limits-vcore-single-databases.md)
    •  [vCore-based limits for elastic pools](resource-limits-vcore-elastic-pools.md)
    •  [Azure SQL Managed Instance resource limits](../managed-instance/resource-limits.md). | -| 49920 |16 |Cannot process request. Too many operations in progress for subscription "%ld".

    The service is busy processing multiple requests for this subscription. Requests are currently blocked for resource optimization. Query [sys.dm_operation_status](/sql/relational-databases/system-dynamic-management-views/sys-dm-operation-status-azure-sql-database) for operation status. Wait until pending requests are complete or delete one of your pending requests and retry your request later. For more information, see:
    •  [Logical SQL server resource limits](resource-limits-logical-server.md)
    •  [DTU-based limits for single databases](service-tiers-dtu.md)
    •  [DTU-based limits for elastic pools](resource-limits-dtu-elastic-pools.md)
    •  [vCore-based limits for single databases](resource-limits-vcore-single-databases.md)
    •  [vCore-based limits for elastic pools](resource-limits-vcore-elastic-pools.md)
    •  [Azure SQL Managed Instance resource limits](../managed-instance/resource-limits.md). | -| 4221 |16 |Login to read-secondary failed due to long wait on 'HADR_DATABASE_WAIT_FOR_TRANSITION_TO_VERSIONING'. The replica is not available for login because row versions are missing for transactions that were in-flight when the replica was recycled. The issue can be resolved by rolling back or committing the active transactions on the primary replica. Occurrences of this condition can be minimized by avoiding long write transactions on the primary. | -| 615 | 21 | Could not find database ID %d, name '%.*ls' . Error Code 615.
    This means the in-memory cache is not in sync with the SQL Server instance, and lookups are retrieving a stale database ID.

    SQL logins use an in-memory cache to get the database name to ID mapping. The cache should be in sync with the backend database and updated whenever a database is attached to or detached from the SQL Server instance.
    You receive this error when the detach workflow fails to clean up the in-memory cache on time, and subsequent lookups to the database point to a stale database ID.

    Try reconnecting to SQL Database until the resources are available, and the connection is established again. For more information, see [Transient errors](troubleshoot-common-connectivity-issues.md#transient-errors-transient-faults).| - -### Steps to resolve transient connectivity issues - -1. Check the [Microsoft Azure Service Dashboard](https://azure.microsoft.com/status) for any known outages that occurred during the time when the errors were reported by the application. -2. Applications that connect to a cloud service such as Azure SQL Database should expect periodic reconfiguration events and implement retry logic to handle these errors instead of surfacing application errors to users. -3. As a database approaches its resource limits, it can seem to be a transient connectivity issue. See [Resource limits](resource-limits-logical-server.md#what-happens-when-resource-limits-are-reached). -4. If connectivity problems continue, or if the duration for which your application encounters the error exceeds 60 seconds or if you see multiple occurrences of the error in a given day, file an Azure support request by selecting **Get Support** on the [Azure Support](https://azure.microsoft.com/support/options) site. - -#### Implementing Retry Logic - -It is strongly recommended that your client program have retry logic so that it can reestablish a connection after giving the transient fault time to correct itself. We recommend that you delay for 5 seconds before your first retry. Retrying after a delay shorter than 5 seconds risks overwhelming the cloud service. For each subsequent retry, the delay should grow exponentially, up to a maximum of 60 seconds.
- -For code examples of retry logic, see: - -- [Connect resiliently to SQL with ADO.NET](/sql/connect/ado-net/step-4-connect-resiliently-sql-ado-net) -- [Connect resiliently to SQL with PHP](/sql/connect/php/step-4-connect-resiliently-to-sql-with-php) - -For additional information on handling transient errors in your application review [Troubleshooting transient connection errors to SQL Database](troubleshoot-common-connectivity-issues.md) - -A discussion of the *blocking period* for clients that use ADO.NET is available in [Connection Pooling (ADO.NET)](/dotnet/framework/data/adonet/sql-server-connection-pooling). - -## A network-related or instance-specific error occurred while establishing a connection to your server - -The issue occurs if the application can't connect to the server. - -To resolve this issue, try the steps (in the order presented) in the [Steps to fix common connection issues](#steps-to-fix-common-connection-issues) section. - -## The server/instance was not found or was not accessible (errors 26, 40, 10053) - -### Error 26: Error Locating server specified - -`System.Data.SqlClient.SqlException: A network-related or instance-specific error occurred while establishing a connection to SQL Server. The server was not found or was not accessible. Verify that the instance name is correct and that SQL Server is configured to allow remote connections.(provider: SQL Network Interfaces, error: 26 – Error Locating Server/Instance Specified)` - -#### Error 40: Could not open a connection to the server - -`A network-related or instance-specific error occurred while establishing a connection to SQL Server. The server was not found or was not accessible. Verify that the instance name is correct and that SQL Server is configured to allow remote connections. 
(provider: Named Pipes Provider, error: 40 - Could not open a connection to SQL Server)` - -#### Error 10053: A transport-level error has occurred when receiving results from the server - -`10053: A transport-level error has occurred when receiving results from the server. (Provider: TCP Provider, error: 0 - An established connection was aborted by the software in your host machine)` - -These issues occur if the application can't connect to the server. - -To resolve these issues, try the steps (in the order presented) in the [Steps to fix common connection issues](#steps-to-fix-common-connection-issues) section. - -## Cannot connect to server due to firewall issues - -### Error 40615: Cannot connect to < servername > - -To resolve this issue, [configure firewall settings on SQL Database through the Azure portal](firewall-configure.md). - -### Error 5: Cannot connect to < servername > - -To resolve this issue, make sure that port 1433 is open for outbound connections on all firewalls between the client and the internet. - -## Unable to log in to the server (errors 18456, 40531) - -### Login failed for user '< User name >' - -`Login failed for user ''.This session has been assigned a tracing ID of ''. Provide this tracing ID to customer support when you need assistance. (Microsoft SQL Server, Error: 18456)` - -To resolve this issue, contact your service administrator to provide you with a valid user name and password. - -Typically, the service administrator can use the following steps to add the login credentials: - -1. Log in to the server by using SQL Server Management Studio (SSMS). -2. Run the following SQL query in the `master` database to check whether the login name is disabled: - - ```sql - SELECT name, is_disabled FROM sys.sql_logins; - ``` - -3. If the corresponding name is disabled, enable it by using the following statement: - - ```sql - ALTER LOGIN ENABLE; - ``` - -4. 
If the SQL login user name doesn't exist, edit and run the following SQL query to create a new SQL login: - - ```sql - CREATE LOGIN - WITH PASSWORD = ''; - GO - ``` - -5. In SSMS Object Explorer, expand **Databases**. -6. Select the database that you want to grant the user permission to. -7. Right-click **Security**, and then select **New**, **User**. -8. In the generated script with placeholders (sample shown below), replace template parameters by following the steps [here](/sql/ssms/template/replace-template-parameters) and execute it: - - ```sql - CREATE USER [] - FOR LOGIN [] - WITH DEFAULT_SCHEMA = []; - GO - - -- Add user to the database owner role - EXEC sp_addrolemember N'db_owner', N''; - GO - ``` - - You can also use `sp_addrolemember` to map specific users to specific database roles. - - > [!NOTE] - > In Azure SQL Database, consider the newer [ALTER ROLE](/sql/t-sql/statements/alter-role-transact-sql) syntax for managing database role membership. - -For more information, see [Managing databases and logins in Azure SQL Database](./logins-create-manage.md). - -## Connection timeout expired errors - -### System.Data.SqlClient.SqlException (0x80131904): Connection Timeout Expired - -`System.Data.SqlClient.SqlException (0x80131904): Connection Timeout Expired. The timeout period elapsed while attempting to consume the pre-login handshake acknowledgement. This could be because the pre-login handshake failed or the server was unable to respond back in time. The duration spent while attempting to connect to this server was - [Pre-Login] initialization=3; handshake=29995;` - -### System.Data.SqlClient.SqlException (0x80131904): Timeout expired - -`System.Data.SqlClient.SqlException (0x80131904): Timeout expired. 
The timeout period elapsed prior to completion of the operation or the server is not responding.` - -### System.Data.Entity.Core.EntityException: The underlying provider failed on Open - -`System.Data.Entity.Core.EntityException: The underlying provider failed on Open. -> System.Data.SqlClient.SqlException: Timeout expired. The timeout period elapsed prior to completion of the operation or the server is not responding. -> System.ComponentModel.Win32Exception: The wait operation timed out` - -### Cannot connect to < server name > - -`Cannot connect to .ADDITIONAL INFORMATION:Connection Timeout Expired. The timeout period elapsed during the post-login phase. The connection could have timed out while waiting for server to complete the login process and respond; Or it could have timed out while attempting to create multiple active connections. The duration spent while attempting to connect to this server was - [Pre-Login] initialization=231; handshake=983; [Login] initialization=0; authentication=0; [Post-Login] complete=13000; (Microsoft SQL Server, Error: -2) For help, click: http://go.microsoft.com/fwlink?ProdName=Microsoft%20SQL%20Server&EvtSrc=MSSQLServer&EvtID=-2&LinkId=20476 The wait operation timed out` - -These exceptions can occur either because of connection or query issues. To confirm that this error is caused by connectivity issues, see [Confirm whether an error is caused by a connectivity issue](#confirm-whether-an-error-is-caused-by-a-connectivity-issue). - -Connection timeouts occur because the application can't connect to the server. To resolve this issue, try the steps (in the order presented) in the [Steps to fix common connection issues](#steps-to-fix-common-connection-issues) section. - -## Resource governance errors - -Azure SQL Database uses a resource governance implementation based on [Resource Governor](/sql/relational-databases/resource-governor/resource-governor) to enforce resource limits. 
Learn more about [resource management in Azure SQL Database](resource-limits-logical-server.md). - -The most common resource governance errors are listed first with details, followed by a table of resource governance error messages. - -### Error 10928: Resource ID : 1. The request limit for the database is *%d* and has been reached. - -The detailed error message in this case reads: `Resource ID : 1. The request limit for the database is %d and has been reached. See 'http://go.microsoft.com/fwlink/?LinkId=267637' for assistance.` - -This error message indicates that the worker limit for Azure SQL Database has been reached. A value will be present instead of the placeholder *%d*. This value indicates the worker limit for your database at the time the limit was reached. - -> [!NOTE] -> The initial offering of Azure SQL Database supported only single threaded queries. At that time, the number of requests was always equivalent to the number of workers. Error message 10928 in Azure SQL Database contains the wording "The request limit for the database is *N* and has been reached" for backwards compatibility purposes. The limit reached is actually the number of workers. If your max degree of parallelism (MAXDOP) setting is equal to zero or is greater than one, the number of workers may be much higher than the number of requests, and the limit may be reached much sooner than when MAXDOP is equal to one. -> -> Learn more about [Sessions, workers, and requests](resource-limits-logical-server.md#sessions-workers-and-requests). - -#### Connect with the Dedicated Admin Connection (DAC) if needed - -If a live incident is ongoing where the worker limit has been approached or reached, you may receive Error 10928 when you connect using [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) (SSMS) or [Azure Data Studio](/sql/azure-data-studio/what-is). 
One session can connect using the [Diagnostic Connection for Database Administrators (DAC)](/sql/database-engine/configure-windows/diagnostic-connection-for-database-administrators#connecting-with-dac) even when the maximum worker threshold has been reached. - -To establish a connection with the DAC from SSMS: - -- From the menu, select **File > New > Database Engine Query** -- From the connection dialog box in the Server Name field, enter `admin:` (this will be something like `admin:servername.database.windows.net`). -- Select **Options >>** -- Select the **Connection Properties** tab -- In the **Connect to database:** box, type the name of your database -- Select **Connect**. - -If you receive Error 40613, `Database '%.*ls' on server '%.*ls' is not currently available. Please retry the connection later. If the problem persists, contact customer support, and provide them the session tracing ID of '%.*ls'`, this may indicate that another session is already connected to the DAC. Only one session may connect to the DAC for a single database or an elastic pool at a time. - -If you encounter the error 'Failed to connect to server' after selecting **Connect**, the DAC session may still have been established successfully if you are using a version of [SSMS prior to 18.9](/sql/ssms/release-notes-ssms#bug-fixes-in-189). Early versions of SSMS attempted to provide Intellisense for connections to the DAC. This failed, as the DAC supports only a single worker and Intellisense requires a separate worker. - -You cannot use a DAC connection with Object Explorer. - -#### Review your max_worker_percent usage - -To find resource consumption statistics for your database for 14 days, query the [sys.resource_stats](/sql/relational-databases/system-catalog-views/sys-resource-stats-azure-sql-database) system catalog view. The `max_worker_percent` column shows the percentage of workers used relative to the worker limit for your database. 
Connect to the master database on your [logical server](logical-servers.md) to query `sys.resource_stats`. - -```sql -SELECT start_time, end_time, database_name, sku, avg_cpu_percent, max_worker_percent, max_session_percent -FROM sys.resource_stats; -``` - -You can also query resource consumption statistics from the last hour from the -[sys.dm_db_resource_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-resource-stats-azure-sql-database) dynamic management view. Connect directly to your database to query `sys.dm_db_resource_stats`. - -```sql -SELECT end_time, avg_cpu_percent, max_worker_percent, max_session_percent -FROM sys.dm_db_resource_stats; -``` -#### Lower worker usage when possible - -Blocking chains can cause a sudden surge in the number of workers in a database. A large volume of concurrent parallel queries may cause a high number of workers. Increasing your [max degree of parallelism (MAXDOP](configure-max-degree-of-parallelism.md)) or setting MAXDOP to zero can increase the number of active workers. - -Triage an incident with insufficient workers by following these steps: - -1. Investigate if blocking is occurring or if you can identify a large volume of concurrent workers. Run the following query to examine current requests and check for blocking when your database is returning Error 10928. You may need to [connect with the Dedicated Admin Connection (DAC)](#connect-with-the-dedicated-admin-connection-dac-if-needed) to execute the query. 
- - ```sql - SELECT - r.session_id, r.request_id, r.blocking_session_id, r.start_time, - r.status, r.command, DB_NAME(r.database_id) AS database_name, - (SELECT COUNT(*) - FROM sys.dm_os_tasks AS t - WHERE t.session_id=r.session_id and t.request_id=r.request_id) AS worker_count, - i.parameters, i.event_info AS input_buffer, - r.last_wait_type, r.open_transaction_count, r.total_elapsed_time, r.cpu_time, - r.logical_reads, r.writes, s.login_time, s.login_name, s.program_name, s.host_name - FROM sys.dm_exec_requests as r - JOIN sys.dm_exec_sessions as s on r.session_id=s.session_id - OUTER APPLY sys.dm_exec_input_buffer (r.session_id,r.request_id) AS i - WHERE s.is_user_process=1; - GO - ``` - 1. Look for rows with a `blocking_session_id` to identify blocked sessions. Find each `blocking_session_id` in the list to determine if that session is also blocked. This will eventually lead you to the head blocker. Tune the head blocker query. - - > [!NOTE] - > For more thorough information on troubleshooting long running or blocking queries, see [Understand and resolve Azure SQL Database blocking problems](understand-resolve-blocking.md). - - 1. To identify a large volume of concurrent workers, review the number of requests overall and the `worker_count` column for each request. `Worker_count` is the number of workers at the time sampled and may change over time as the request is executed. Tune queries to reduce resource utilization if the cause of increased workers is concurrent queries that are running at their optimal degree of parallelism. For more information, see [Query Tuning/Hinting](performance-guidance.md#query-tuning-and-hinting). - -1. Evaluate the [maximum degree of parallelism (MAXDOP)](configure-max-degree-of-parallelism.md) setting for the database. 
- -#### Increase worker limits - -If the database consistently reaches its limit despite addressing blocking, optimizing queries, and validating your MAXDOP setting, consider adding more resources to the database to increase the worker limit. - -Find resource limits for Azure SQL Database by service tier and compute size: - -- [Resource limits for single databases using the vCore purchasing model](resource-limits-vcore-single-databases.md) -- [Resource limits for elastic pools using the vCore purchasing model](resource-limits-vcore-elastic-pools.md) -- [Resource limits for single databases using the DTU purchasing model](resource-limits-dtu-single-databases.md) -- [Resources limits for elastic pools using the DTU purchasing model](resource-limits-dtu-elastic-pools.md) - -Learn more about [Azure SQL Database resource governance of workers](./resource-limits-logical-server.md#sessions-workers-and-requests). - -### Error 10929: Resource ID: 1 - -`10929: Resource ID: 1. The %s minimum guarantee is %d, maximum limit is %d and the current usage for the database is %d. However, the server is currently too busy to support requests greater than %d for this database. See http://go.microsoft.com/fwlink/?LinkId=267637 for assistance. Otherwise, please try again later.` - -### Error 40501: The service is currently busy - -`40501: The service is currently busy. Retry the request after 10 seconds. Incident ID: %ls. Code: %d.` - -This is an engine throttling error, an indication that resource limits are being exceeded. - -For more information about resource limits, see [Logical SQL server resource limits](./resource-limits-logical-server.md). - -### Error 40544: The database has reached its size quota - -`40544: The database has reached its size quota. Partition or delete data, drop indexes, or consult the documentation for possible resolutions. Incident ID: . Code: .` - -This error occurs when the database has reached its size quota. 
- -The following steps can either help you work around the problem or provide you with more options: - -1. Check the current size of the database by using the dashboard in the Azure portal. - - > [!NOTE] - > To identify which tables are consuming the most space and are therefore potential candidates for cleanup, run the following SQL query: - - ```sql - SELECT o.name, - SUM(p.row_count) AS 'Row Count', - SUM(p.reserved_page_count) * 8.0 / 1024 AS 'Table Size (MB)' - FROM sys.objects o - JOIN sys.dm_db_partition_stats p on p.object_id = o.object_id - GROUP BY o.name - ORDER BY [Table Size (MB)] DESC; - GO - ``` - -2. If the current size does not exceed the maximum size supported for your edition, you can use ALTER DATABASE to increase the MAXSIZE setting. -3. If the database is already past the maximum supported size for your edition, try one or more of the following steps: - - - Perform normal database cleanup activities. For example, clean up the unwanted data by using truncate/delete, or move data out by using SQL Server Integration Services (SSIS) or the bulk copy program (bcp) utility. - - Partition or delete data, drop indexes, or consult the documentation for possible resolutions. - - For database scaling, see [Scale single database resources](./single-database-scale.md) and [Scale elastic pool resources](./elastic-pool-scale.md). - -### Error 40549: Session is terminated because you have a long-running transaction - -`40549: Session is terminated because you have a long-running transaction. Try shortening your transaction.` - -If you repeatedly encounter this error, try to resolve the issue by following these steps: - -1. 
Run the following query to see any open sessions that have a high value for the `duration_ms` column: - - ```sql - SELECT - r.start_time, DATEDIFF(ms,start_time, SYSDATETIME()) as duration_ms, - r.session_id, r.request_id, r.blocking_session_id, - r.status, r.command, DB_NAME(r.database_id) AS database_name, - i.parameters, i.event_info AS input_buffer, - r.last_wait_type, r.open_transaction_count, r.total_elapsed_time, r.cpu_time, - r.logical_reads, r.writes, s.login_time, s.login_name, s.program_name, s.host_name - FROM sys.dm_exec_requests as r - JOIN sys.dm_exec_sessions as s on r.session_id=s.session_id - OUTER APPLY sys.dm_exec_input_buffer (r.session_id,r.request_id) AS i - WHERE s.is_user_process=1 - ORDER BY start_time ASC; - GO - ``` - You may choose to ignore rows where the `input_buffer` column shows a query reading from `sys.fn_MSxe_read_event_stream`: these requests are related to Extended Event sessions. -1. Review the `blocking_session_id` column to see if blocking is contributing to long-running transactions. - - > [!NOTE] - > For more information on troubleshooting blocking in Azure SQL Database, see [Understand and resolve Azure SQL Database blocking problems](understand-resolve-blocking.md). - -1. Consider batching your queries. For information on batching, see [How to use batching to improve SQL Database application performance](../performance-improve-use-batching.md). - -### Error 40551: The session has been terminated because of excessive TEMPDB usage - -`40551: The session has been terminated because of excessive TEMPDB usage. Try modifying your query to reduce the temporary table space usage.` - -To work around this issue, follow these steps: - -1. Change the queries to reduce temporary table space usage. -2. Drop temporary objects after they're no longer needed. -3. Truncate tables or remove unused tables. 
- -### Error 40552: The session has been terminated because of excessive transaction log space usage - -`40552: The session has been terminated because of excessive transaction log space usage. Try modifying fewer rows in a single transaction.` - -To resolve this issue, try the following methods: - -- The issue can occur because of insert, update, or delete operations. -Try to reduce the number of rows that are operated on immediately by implementing batching or splitting into multiple smaller transactions. -- The issue can occur because of index rebuild operations. To work around this issue, make sure the number of rows that are affected in the table * (average size of field that's updated in bytes + 80) < 2 gigabytes (GB). - - > [!NOTE] - > For an index rebuild, the average size of the field that's updated should be substituted by the average index size. - - > [!NOTE] - > For more information on troubleshooting a full transaction log in Azure SQL Database and Azure SQL Managed Instance, see [Troubleshooting transaction log errors with Azure SQL Database and Azure SQL Managed Instance](troubleshoot-transaction-log-errors-issues.md). - - -### Error 40553: The session has been terminated because of excessive memory usage - -`40553: The session has been terminated because of excessive memory usage. Try modifying your query to process fewer rows.` - -To work around this issue, try to optimize the query. - -For an in-depth troubleshooting procedure, see [Is my query running fine in the cloud?](/archive/blogs/sqlblog/is-my-query-running-fine-in-the-cloud). - -For more information on other out of memory errors and sample queries, see [Troubleshoot out of memory errors with Azure SQL Database](troubleshoot-memory-errors-issues.md). - -### Table of resource governance error messages - -| Error code | Severity | Description | -| ---:| ---:|:--- | -| 10928 |20 |Resource ID: %d. The %s limit for the database is %d and has been reached. 
See 'http://go.microsoft.com/fwlink/?LinkId=267637' for assistance..

    The Resource ID indicates the resource that has reached the limit. When Resource ID = 1, this indicates a worker limit has been reached. Learn more in [Error 10928: Resource ID : 1. The request limit for the database is *%d* and has been reached.](#error-10928-resource-id--1-the-request-limit-for-the-database-is-d-and-has-been-reached) When Resource ID = 2, this indicates the session limit has been reached.

    Learn more about resource limits:
    •  [Logical SQL server resource limits](resource-limits-logical-server.md)
    •  [DTU-based limits for single databases](service-tiers-dtu.md)
    •  [DTU-based limits for elastic pools](resource-limits-dtu-elastic-pools.md)
    •  [vCore-based limits for single databases](resource-limits-vcore-single-databases.md)
    •  [vCore-based limits for elastic pools](resource-limits-vcore-elastic-pools.md)
    •  [Azure SQL Managed Instance resource limits](../managed-instance/resource-limits.md). | -| 10929 |20 |Resource ID: %d. The %s minimum guarantee is %d, maximum limit is %d, and the current usage for the database is %d. However, the server is currently too busy to support requests greater than %d for this database. The Resource ID indicates the resource that has reached the limit. For worker threads, the Resource ID = 1. For sessions, the Resource ID = 2. For more information, see:
    •  [Logical SQL server resource limits](resource-limits-logical-server.md)
    •  [DTU-based limits for single databases](service-tiers-dtu.md)
    •  [DTU-based limits for elastic pools](resource-limits-dtu-elastic-pools.md)
    •  [vCore-based limits for single databases](resource-limits-vcore-single-databases.md)
    •  [vCore-based limits for elastic pools](resource-limits-vcore-elastic-pools.md)
    •  [Azure SQL Managed Instance resource limits](../managed-instance/resource-limits.md).
    Otherwise, try again later. | -| 40544 |20 |The database has reached its size quota. Partition or delete data, drop indexes, or consult the documentation for possible resolutions. For database scaling, see [Scale single database resources](single-database-scale.md) and [Scale elastic pool resources](elastic-pool-scale.md).| -| 40549 |16 |Session is terminated because you have a long-running transaction. Try shortening your transaction. For information on batching, see [How to use batching to improve SQL Database application performance](../performance-improve-use-batching.md).| -| 40550 |16 |The session has been terminated because it has acquired too many locks. Try reading or modifying fewer rows in a single transaction. For information on batching, see [How to use batching to improve SQL Database application performance](../performance-improve-use-batching.md).| -| 40551 |16 |The session has been terminated because of excessive `TEMPDB` usage. Try modifying your query to reduce the temporary table space usage.

    If you are using temporary objects, conserve space in the `TEMPDB` database by dropping temporary objects after they are no longer needed by the session. For more information on tempdb limits in SQL Database, see [Tempdb database in SQL Database](resource-limits-logical-server.md#tempdb-sizes).| -| 40552 |16 |The session has been terminated because of excessive transaction log space usage. Try modifying fewer rows in a single transaction. For information on batching, see [How to use batching to improve SQL Database application performance](../performance-improve-use-batching.md).

    If you perform bulk inserts using the `bcp.exe` utility or the `System.Data.SqlClient.SqlBulkCopy` class, try using the `-b batchsize` or `BatchSize` options to limit the number of rows copied to the server in each transaction. If you are rebuilding an index with the `ALTER INDEX` statement, try using the `REBUILD WITH ONLINE = ON` option. For information on transaction log sizes for the vCore purchasing model, see:
    •  [vCore-based limits for single databases](resource-limits-vcore-single-databases.md)
    •  [vCore-based limits for elastic pools](resource-limits-vcore-elastic-pools.md)
    •  [Azure SQL Managed Instance resource limits](../managed-instance/resource-limits.md).| -| 40553 |16 |The session has been terminated because of excessive memory usage. Try modifying your query to process fewer rows.

    Reducing the number of `ORDER BY` and `GROUP BY` operations in your Transact-SQL code reduces the memory requirements of your query. For database scaling, see [Scale single database resources](single-database-scale.md) and [Scale elastic pool resources](elastic-pool-scale.md). For more information on out of memory errors and sample queries, see [Troubleshoot out of memory errors with Azure SQL Database](troubleshoot-memory-errors-issues.md).| - -## Elastic pool errors - -The following errors are related to creating and using elastic pools: - -| Error code | Severity | Description | Corrective action | -|:--- |:--- |:--- |:--- | -| 1132 | 17 |The elastic pool has reached its storage limit. The storage usage for the elastic pool cannot exceed (%d) MBs. Attempting to write data to a database when the storage limit of the elastic pool has been reached. For information on resource limits, see:
    •  [DTU-based limits for elastic pools](resource-limits-dtu-elastic-pools.md)
    •  [vCore-based limits for elastic pools](resource-limits-vcore-elastic-pools.md).
    |Consider increasing the DTUs of and/or adding storage to the elastic pool if possible in order to increase its storage limit, reduce the storage used by individual databases within the elastic pool, or remove databases from the elastic pool. For elastic pool scaling, see [Scale elastic pool resources](elastic-pool-scale.md). For more information on removing unused space from databases, see [Manage file space for databases in Azure SQL Database](file-space-manage.md).| -| 10929 | 16 |The %s minimum guarantee is %d, maximum limit is %d, and the current usage for the database is %d. However, the server is currently too busy to support requests greater than %d for this database. For information on resource limits, see:
    •  [DTU-based limits for elastic pools](resource-limits-dtu-elastic-pools.md)
    •  [vCore-based limits for elastic pools](resource-limits-vcore-elastic-pools.md).
    Otherwise, try again later. DTU / vCore min per database; DTU / vCore max per database. The total number of [concurrent workers](resource-limits-logical-server.md#sessions-workers-and-requests) across all databases in the elastic pool attempted to exceed the pool limit. |Consider increasing the DTUs or vCores of the elastic pool if possible in order to increase its worker limit, or remove databases from the elastic pool. | -| 40844 | 16 |Database '%ls' on Server '%ls' is a '%ls' edition database in an elastic pool and cannot have a continuous copy relationship. |N/A | -| 40857 | 16 |Elastic pool not found for server: '%ls', elastic pool name: '%ls'. Specified elastic pool does not exist in the specified server. | Provide a valid elastic pool name. | -| 40858 | 16 |Elastic pool '%ls' already exists in server: '%ls'. Specified elastic pool already exists in the specified server. | Provide new elastic pool name. | -| 40859 | 16 |Elastic pool does not support service tier '%ls'. Specified service tier is not supported for elastic pool provisioning. |Provide the correct edition or leave service tier blank to use the default service tier. | -| 40860 | 16 |Elastic pool '%ls' and service objective '%ls' combination is invalid. Elastic pool and service tier can be specified together only if resource type is specified as 'ElasticPool'. |Specify correct combination of elastic pool and service tier. | -| 40861 | 16 |The database edition '%.*ls' cannot be different than the elastic pool service tier which is '%.*ls'. The database edition is different than the elastic pool service tier. |Do not specify a database edition that is different than the elastic pool service tier. Note that the database edition does not need to be specified. | -| 40862 | 16 |Elastic pool name must be specified if the elastic pool service objective is specified. Elastic pool service objective does not uniquely identify an elastic pool. 
|Specify the elastic pool name if using the elastic pool service objective. | -| 40864 | 16 |The DTUs for the elastic pool must be at least (%d) DTUs for service tier '%.*ls'. Attempting to set the DTUs for the elastic pool below the minimum limit. |Retry setting the DTUs for the elastic pool to at least the minimum limit. | -| 40865 | 16 |The DTUs for the elastic pool cannot exceed (%d) DTUs for service tier '%.*ls'. Attempting to set the DTUs for the elastic pool above the maximum limit. |Retry setting the DTUs for the elastic pool to no greater than the maximum limit. | -| 40867 | 16 |The DTU max per database must be at least (%d) for service tier '%.*ls'. Attempting to set the DTU max per database below the supported limit. | Consider using the elastic pool service tier that supports the desired setting. | -| 40868 | 16 |The DTU max per database cannot exceed (%d) for service tier '%.*ls'. Attempting to set the DTU max per database beyond the supported limit. | Consider using the elastic pool service tier that supports the desired setting. | -| 40870 | 16 |The DTU min per database cannot exceed (%d) for service tier '%.*ls'. Attempting to set the DTU min per database beyond the supported limit. | Consider using the elastic pool service tier that supports the desired setting. | -| 40873 | 16 |The number of databases (%d) and DTU min per database (%d) cannot exceed the DTUs of the elastic pool (%d). Attempting to specify DTU min for databases in the elastic pool that exceeds the DTUs of the elastic pool. | Consider increasing the DTUs of the elastic pool, or decrease the DTU min per database, or decrease the number of databases in the elastic pool. | -| 40877 | 16 |An elastic pool cannot be deleted unless it does not contain any databases. The elastic pool contains one or more databases and therefore cannot be deleted. |Remove databases from the elastic pool in order to delete it. | -| 40881 | 16 |The elastic pool '%.*ls' has reached its database count limit. 
The database count limit for the elastic pool cannot exceed (%d) for an elastic pool with (%d) DTUs. Attempting to create or add database to elastic pool when the database count limit of the elastic pool has been reached. | Consider increasing the DTUs of the elastic pool if possible in order to increase its database limit, or remove databases from the elastic pool. | -| 40889 | 16 |The DTUs or storage limit for the elastic pool '%.*ls' cannot be decreased since that would not provide sufficient storage space for its databases. Attempting to decrease the storage limit of the elastic pool below its storage usage. | Consider reducing the storage usage of individual databases in the elastic pool or remove databases from the pool in order to reduce its DTUs or storage limit. | -| 40891 | 16 |The DTU min per database (%d) cannot exceed the DTU max per database (%d). Attempting to set the DTU min per database higher than the DTU max per database. |Ensure the DTU min per databases does not exceed the DTU max per database. | -| TBD | 16 |The storage size for an individual database in an elastic pool cannot exceed the max size allowed by '%.*ls' service tier elastic pool. The max size for the database exceeds the max size allowed by the elastic pool service tier. |Set the max size of the database within the limits of the max size allowed by the elastic pool service tier. | - -## Cannot open database "master" requested by the login. The login failed - -This issue occurs because the account doesn't have permission to access the `master` database. But by default, SQL Server Management Studio (SSMS) tries to connect to the `master` database. - -To resolve this issue, follow these steps: - -1. On the login screen of SSMS, select **Options**, and then select **Connection Properties**. -2. In the **Connect to database** field, enter the user's default database name as the default login database, and then select **Connect**. 
- - ![Connection properties](./media/troubleshoot-common-errors-issues/cannot-open-database-master.png) - -## Read-only errors - -If you attempt to write to a database that is read-only, you'll receive an error. In some scenarios, the cause of the database's read-only status may not be immediately clear. - -### Error 3906: Failed to update database "DatabaseName" because the database is read-only. - -When attempting to modify a read-only database, the following error will be raised. - -``` -Msg 3906, Level 16, State 2, Line 1 -Failed to update database "%d" because the database is read-only. -``` - -#### You may be connected to a read-only replica - -For both Azure SQL Database and Azure SQL Managed Instance, you may be connected to a database on a read-only replica. In this case, the following query using the [DATABASEPROPERTYEX() function](/sql/t-sql/functions/databasepropertyex-transact-sql) will return `READ_ONLY`: - -```sql -SELECT DATABASEPROPERTYEX(DB_NAME(), 'Updateability'); -GO -``` - -If you're connecting using SQL Server Management Studio, verify if you have specified `ApplicationIntent=ReadOnly` in the **Additional Connection Parameters** [tab on your connection options](/sql/database-engine/availability-groups/windows/listeners-client-connectivity-application-failover#ConnectToSecondary). - -If the connection is from an application or a client using a connection string, validate if the connection string has specified `ApplicationIntent=ReadOnly`. Learn more in [Connect to a read-only replica](read-scale-out.md#connect-to-a-read-only-replica). - -#### The database may be set to read-only - -If you're using Azure SQL Database, the database itself may have been set to read-only. 
You can verify the database's status with the following query: - -```sql -SELECT name, is_read_only -FROM sys.databases -WHERE database_id = DB_ID(); -``` - -You can modify the read-only status for a database in Azure SQL Database using [ALTER DATABASE Transact-SQL](/sql/t-sql/statements/alter-database-transact-sql?view=azuresqldb-current&preserve-view=true). You can’t currently set a database in a managed instance to read-only. - -## Confirm whether an error is caused by a connectivity issue - -To confirm whether an error is caused by a connectivity issue, review the stack trace for frames that show calls to open a connection like the following ones (note the reference to the **SqlConnection** class): - -``` -System.Data.SqlClient.SqlConnection.TryOpen(TaskCompletionSource`1 retry) - at System.Data.SqlClient.SqlConnection.Open() - at AzureConnectionTest.Program.Main(String[] args) -ClientConnectionId: -``` - -When the exception is triggered by query issues, you'll notice a call stack that's similar to the following (note the reference to the **SqlCommand** class). In this situation, [tune your queries](/archive/blogs/sqlblog/is-my-query-running-fine-in-the-cloud). - -``` - at System.Data.SqlClient.SqlCommand.ExecuteReader() - at AzureConnectionTest.Program.Main(String[] args) - ClientConnectionId: -``` - -For additional guidance on fine-tuning performance, see the following resources: - -- [How to maintain Azure SQL indexes and statistics](https://techcommunity.microsoft.com/t5/Azure-Database-Support-Blog/How-to-maintain-Azure-SQL-Indexes-and-Statistics/ba-p/368787) -- [Manual tune query performance in Azure SQL Database](./performance-guidance.md) -- [Monitoring performance Azure SQL Database by using dynamic management views](./monitoring-with-dmvs.md) -- [Operating the Query Store in Azure SQL Database](/sql/relational-databases/performance/best-practice-with-the-query-store#Insight) - -## Steps to fix common connection issues - -1. 
Make sure that TCP/IP is enabled as a client protocol on the application server. For more information, see [Configure client protocols](/sql/database-engine/configure-windows/configure-client-protocols). On application servers where you don't have SQL tools installed, verify that TCP/IP is enabled by running **cliconfg.exe** (SQL Server Client Network utility). -2. Check the application's connection string to make sure it's configured correctly. For example, make sure that the connection string specifies the correct port (1433) and fully qualified server name. -See [Get connection information](./connect-query-ssms.md#get-server-connection-information). -3. Try increasing the connection timeout value. We recommend using a connection timeout of at least 30 seconds. -4. Test the connectivity between the application server and the Azure SQL Database by using [SQL Server Management Studio (SSMS)](./connect-query-ssms.md), a UDL file, ping, or telnet. For more information, see [Troubleshooting connectivity issues](https://support.microsoft.com/help/4009936/solving-connectivity-errors-to-sql-server) and [Diagnostics for connectivity issues](./troubleshoot-common-connectivity-issues.md#diagnostics). - - > [!NOTE] - > As a troubleshooting step, you can also test connectivity on a different client computer. - -5. As a best practice, make sure that the retry logic is in place. For more information about retry logic, see [Troubleshoot transient faults and connection errors to SQL Database](./troubleshoot-common-connectivity-issues.md). - -If these steps don't resolve your problem, try to collect more data and then contact support. If your application is a cloud service, enable logging. This step returns a UTC time stamp of the failure. Additionally, SQL Database returns the tracing ID. [Microsoft Customer Support Services](https://azure.microsoft.com/support/options/) can use this information. 
- -For more information about how to enable logging, see [Enable diagnostics logging for apps in Azure App Service](../../app-service/troubleshoot-diagnostic-logs.md). - -## Next steps - -Learn more about related topics in the following articles: - -- [Azure SQL Database connectivity architecture](./connectivity-architecture.md) -- [Azure SQL Database and Azure Synapse Analytics network access controls](./network-access-controls-overview.md) -- [Troubleshooting transaction log errors with Azure SQL Database and Azure SQL Managed Instance](troubleshoot-transaction-log-errors-issues.md) -- [Troubleshoot transient connection errors in SQL Database and SQL Managed Instance](troubleshoot-common-connectivity-issues.md) -- [Analyze and prevent deadlocks in Azure SQL Database](analyze-prevent-deadlocks.md) \ No newline at end of file diff --git a/articles/azure-sql/database/troubleshoot-memory-errors-issues.md b/articles/azure-sql/database/troubleshoot-memory-errors-issues.md deleted file mode 100644 index b3a9be809c304..0000000000000 --- a/articles/azure-sql/database/troubleshoot-memory-errors-issues.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Troubleshoot memory issues -titleSuffix: Azure SQL Database -description: Provides steps to investigate and troubleshoot out of memory issues in Azure SQL Database -services: sql-database -ms.service: sql-database -ms.subservice: development -ms.topic: troubleshooting -ms.custom: -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, mathoma -ms.date: 01/14/2022 ---- - -# Troubleshoot out of memory errors with Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -You may see error messages when the SQL database engine has failed to allocate sufficient memory to run the query. This can be caused by various reasons including the limits of selected service objective, aggregate workload memory demands, and memory demands by the query. 
For more information on the memory resource limit for Azure SQL Databases, see [Resource management in Azure SQL Database](resource-limits-logical-server.md#memory). - -> [!NOTE] -> **This article is focused on Azure SQL Database.** For more on troubleshooting out of memory issues in SQL Server, see [MSSQLSERVER_701](/sql/relational-databases/errors-events/mssqlserver-701-database-engine-error). - -Try the following avenues of investigation in response to: - -- Error code 701 with error message "There is insufficient system memory in resource pool '%ls' to run this query." -- Error code 802 with error message "There is insufficient memory available in the buffer pool." - -## View out of memory events - -If you encounter out of memory errors, review [sys.dm_os_out_of_memory_events](/sql/relational-databases/system-dynamic-management-views/sys-dm-os-out-of-memory-events). Introduced in January 2022, this view includes predicted out of memory cause information that is determined by a heuristic algorithm and is provided with a finite degree of confidence. - -```sql -SELECT * FROM sys.dm_os_out_of_memory_events ORDER BY event_time DESC; -``` - - - -## Investigate memory allocation - -If out of memory errors persist in Azure SQL Database, consider at least temporarily increasing the service level objective of the database in the Azure portal. If out of memory errors persist, use the following queries to look for unusually high query memory grants that may contribute to an insufficient memory condition. Run the following example queries in the database that experienced the error (not in the `master` database of the Azure SQL logical server). - -### Use DMV to view out of memory events - -Beginning in April 2022, a new dynamic management view (DMV) has been added to allow visibility to the events and causes of out of memory (OOM) events in Azure SQL Database, `sys.dm_os_out_of_memory_events`. 
For more information, see [sys.dm_os_out_of_memory_events](/sql/relational-databases/system-dynamic-management-views/sys-dm-os-out-of-memory-events). - -### Use DMVs to view memory clerks - -Start with a broad investigation, if the out of memory error occurred recently, by viewing the allocation of memory to memory clerks. Memory clerks are internal to the database engine for this Azure SQL Database. The top memory clerks in terms of pages allocated might be informative to what type of query or feature of SQL Server is consuming the most memory. - - -```sql -SELECT [type], [name], pages_kb, virtual_memory_committed_kb -FROM sys.dm_os_memory_clerks -WHERE memory_node_id <> 64 -- ignore Dedicated Admin Connection (DAC) node -ORDER BY pages_kb DESC; -GO -SELECT [type], [name], pages_kb, virtual_memory_committed_kb -FROM sys.dm_os_memory_clerks -WHERE memory_node_id <> 64 -- ignore Dedicated Admin Connection (DAC) node -ORDER BY virtual_memory_committed_kb DESC; -``` - - - Some common memory clerks, such as MEMORYCLERK_SQLQERESERVATIONS, are best resolved by identifying queries with large memory grants and improving their performance with better indexing and index tuning. - - While OBJECTSTORE_LOCK_MANAGER is unrelated to memory grants, it is expected to be high when queries claim many locks, for example, because of disabled lock escalation or very large transactions. - - Some clerks are expected to be the highest utilization: MEMORYCLERK_SQLBUFFERPOOL is almost always the top clerk, while CACHESTORE_COLUMNSTOREOBJECTPOOL will be high when columnstore indexes are used. Highest utilization by these clerks is expected. - - For more information about memory clerk types, see [sys.dm_os_memory_clerks](/sql/relational-databases/system-dynamic-management-views/sys-dm-os-memory-clerks-transact-sql). - -### Use DMVs to investigate active queries - -In most cases, the query that failed is not the cause of this error. 
- -The following sample query for Azure SQL Database returns important information on transactions that are currently holding or waiting for memory grants. Target the top queries identified for examination and performance tuning, and evaluate whether or not they are executing as intended. Consider the timing of memory-intensive reporting queries or maintenance operations. - -```sql ---Active requests with memory grants -SELECT ---Session data - s.[session_id], s.open_transaction_count ---Memory usage -, r.granted_query_memory, mg.grant_time, mg.requested_memory_kb, mg.granted_memory_kb, mg.required_memory_kb, mg.used_memory_kb, mg.max_used_memory_kb ---Query -, query_text = t.text, input_buffer = ib.event_info, query_plan_xml = qp.query_plan, request_row_count = r.row_count, session_row_count = s.row_count ---Session history and status -, s.last_request_start_time, s.last_request_end_time, s.reads, s.writes, s.logical_reads, session_status = s.[status], request_status = r.status ---Session connection information -, s.host_name, s.program_name, s.login_name, s.client_interface_name, s.is_user_process -FROM sys.dm_exec_sessions s -LEFT OUTER JOIN sys.dm_exec_requests AS r - ON r.[session_id] = s.[session_id] -LEFT OUTER JOIN sys.dm_exec_query_memory_grants AS mg - ON mg.[session_id] = s.[session_id] -OUTER APPLY sys.dm_exec_sql_text (r.[sql_handle]) AS t -OUTER APPLY sys.dm_exec_input_buffer(s.[session_id], NULL) AS ib -OUTER APPLY sys.dm_exec_query_plan (r.[plan_handle]) AS qp -WHERE mg.granted_memory_kb > 0 -ORDER BY mg.granted_memory_kb desc, mg.requested_memory_kb desc; -``` - -You may decide to use the KILL statement to stop a currently executing query that is holding or waiting for a large memory grant. Use this statement carefully, especially when business critical processes are running. For more information, see [KILL (Transact-SQL)](/sql/t-sql/language-elements/kill-transact-sql). 
- - -### Use Query Store to investigate past query memory usage - -While the previous sample query reports only live query results, the following query uses the [Query Store](/sql/relational-databases/performance/monitoring-performance-by-using-the-query-store) to return information on past query execution. This can be helpful in investigating an out of memory error that occurred in the past. - -The following sample query for Azure SQL Database return important information on query executions recorded by the Query Store. Target the top queries identified for examination and performance tuning, and evaluate whether or not they are executing as intended. Note the time filter on `qsp.last_execution_time` to restrict results to recent history. You can adjust the TOP clause to produce more or fewer results depending on your environment. - -```sql -SELECT TOP 10 PERCENT --limit results - a.plan_id, query_id, plan_group_id, query_sql_text -, query_plan = TRY_CAST(query_plan as XML) -, avg_query_max_used_memory -, min_query_max_used_memory -, max_query_max_used_memory -, last_query_max_used_memory -, last_execution_time -, query_count_executions - FROM ( - SELECT - qsp.plan_id, qsp.query_id, qsp.plan_group_id, qsp.query_plan, qsqt.query_sql_text - , last_execution_time = MAX(qsp.last_execution_time) - , query_count_executions = SUM(qsrs.count_executions) - , avg_query_max_used_memory = AVG(qsrs.avg_query_max_used_memory) - , min_query_max_used_memory = MIN(qsrs.min_query_max_used_memory) - , max_query_max_used_memory = MAX(qsrs.max_query_max_used_memory) - , last_query_max_used_memory = MAX(qsrs_latest.last_query_max_used_memory) --only from latest result - FROM sys.query_store_plan AS qsp - INNER JOIN sys.query_store_query AS qsq - ON qsp.query_id = qsq.query_id - INNER JOIN sys.query_store_query_text AS qsqt - ON qsq.query_text_id = qsqt.query_text_id - INNER JOIN sys.query_store_runtime_stats AS qsrs - ON qsp.plan_id = qsrs.plan_id - INNER JOIN (SELECT plan_id - , 
last_query_max_used_memory - , rownum = ROW_NUMBER() OVER (PARTITION BY plan_id ORDER BY last_execution_time DESC) - FROM sys.query_store_runtime_stats qsrs) AS qsrs_latest - ON qsrs_latest.plan_id = qsp.plan_id - AND qsrs_latest.rownum = 1 --use latest last_query_max_used_memory per plan_id - WHERE DATEADD(hour, -24, sysdatetime()) < qsp.last_execution_time --past 24 hours only - AND qsrs_latest.last_query_max_used_memory > 0 - GROUP BY qsp.plan_id, qsp.query_id, qsp.plan_group_id, qsp.query_plan, qsqt.query_sql_text - ) AS a -ORDER BY max_query_max_used_memory DESC, avg_query_max_used_memory DESC; -``` - -### Extended events -In addition to the previous information, it may be helpful to capture a trace of the activities on the server to thoroughly investigate an out of memory issue in Azure SQL Database. - -There are two ways to capture traces in SQL Server; Extended Events (XEvents) and Profiler Traces. However, [SQL Server Profiler](/sql/tools/sql-server-profiler/sql-server-profiler) is deprecated trace technology not supported for Azure SQL Database. [Extended Events](/sql/relational-databases/extended-events/extended-events) is the newer tracing technology that allows more versatility and less impact to the observed system, and its interface is integrated into SQL Server Management Studio (SSMS). For more information on querying extended events in Azure SQL Database, see [Extended events in Azure SQL Database](./xevent-db-diff-from-svr.md). - -Refer to the document that explains how to use the [Extended Events New Session Wizard](/sql/relational-databases/extended-events/quick-start-extended-events-in-sql-server) in SSMS. For Azure SQL databases however, SSMS provides an Extended Events subfolder under each database in Object Explorer. 
Use an Extended Events session to capture these useful events, and identify the queries generating them: - -- Category Errors: - - error_reported - - exchange_spill - - hash_spill_details - -- Category Execution: - - excessive_non_grant_memory_used - -- Category Memory: - - query_memory_grant_blocking - - query_memory_grant_usage - -The capture of memory grant blocks, memory grant spills, or excessive memory grants could be potential clue to a query suddenly taking on more memory than it had in the past, and a potential explanation for an emergent out of memory error in an existing workload. - -### In-memory OLTP out of memory - -You may encounter `Error code 41805: There is insufficient memory in the resource pool '%ls' to run this operation` if using In-Memory OLTP. Reduce the amount of data in memory-optimized tables and memory-optimized table-valued parameters, or scale up the database to a higher service objective to have more memory. For more information on out of memory issues with SQL Server In-Memory OLTP, see [Resolve Out Of Memory issues](/sql/relational-databases/in-memory-oltp/resolve-out-of-memory-issues). - -### Get Azure SQL DB support - -If out of memory errors persist in Azure SQL Database, file an Azure support request by selecting **Get Support** on the [Azure Support](https://azure.microsoft.com/support/options) site. 
- -## Next steps - -- [Intelligent query processing in SQL databases](/sql/relational-databases/performance/intelligent-query-processing) -- [Query processing architecture guide](/sql/relational-databases/query-processing-architecture-guide) -- [Performance Center for SQL Server Database Engine and Azure SQL Database](/sql/relational-databases/performance/performance-center-for-sql-server-database-engine-and-azure-sql-database) -- [Troubleshooting connectivity issues and other errors with Azure SQL Database and Azure SQL Managed Instance](troubleshoot-common-errors-issues.md) -- [Troubleshoot transient connection errors in SQL Database and SQL Managed Instance](troubleshoot-common-connectivity-issues.md) -- [Demonstrating Intelligent Query Processing](https://github.com/Microsoft/sql-server-samples/tree/master/samples/features/intelligent-query-processing) -- [Resource management in Azure SQL Database](resource-limits-logical-server.md#memory). \ No newline at end of file diff --git a/articles/azure-sql/database/troubleshoot-transaction-log-errors-issues.md b/articles/azure-sql/database/troubleshoot-transaction-log-errors-issues.md deleted file mode 100644 index e0a0b2e2e6d32..0000000000000 --- a/articles/azure-sql/database/troubleshoot-transaction-log-errors-issues.md +++ /dev/null @@ -1,151 +0,0 @@ ---- -title: Troubleshoot transaction log issues -titleSuffix: Azure SQL Database and Azure SQL Managed Instance -description: Provides steps to troubleshoot Azure SQL Database transaction log issues in Azure SQL Database or Azure SQL Managed Instance -services: sql-database -ms.service: sql-db-mi -ms.subservice: development -ms.topic: troubleshooting -ms.custom: -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, mathoma -ms.date: 07/23/2021 ---- - -# Troubleshooting transaction log errors with Azure SQL Database and Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -You may see errors 9002 or 
40552 when the transaction log is full and cannot accept new transactions. These errors occur when the database transaction log, managed by Azure SQL Database or Azure SQL Managed Instance, exceeds thresholds for space and cannot continue to accept transactions. - -These errors are similar to issues with a full transaction log in SQL Server, but have different resolutions in Azure SQL Database or Azure SQL Managed Instance. - -> [!NOTE] -> **This article is focused on Azure SQL Database and Azure SQL Managed Instance.** Azure SQL Database and Azure SQL Managed Instance are based on the latest stable version of the Microsoft SQL Server database engine, so much of the content is similar though troubleshooting options and tools may differ. For more on troubleshooting a transaction log in SQL Server, see [Troubleshoot a Full Transaction Log (SQL Server Error 9002)](/sql/relational-databases/logs/troubleshoot-a-full-transaction-log-sql-server-error-9002). - -## Automated backups and the transaction log - -There are some key differences in Azure SQL Database and Azure SQL Managed Instance in regards to database file space management. - -- In Azure SQL Database or Azure SQL Managed Instance, transaction log backups are taken automatically. For frequency, retention, and more information, see [Automated backups - Azure SQL Database & SQL Managed Instance](automated-backups-overview.md). -- In Azure SQL Database, free disk space, database file growth, and file location are also managed, so the typical causes and resolutions of transaction log issues are different from SQL Server. -- In Azure SQL Managed Instance, the location and name of database files cannot be managed but administrators can manage database files and file autogrowth settings. The typical causes and resolutions of transaction log issues are similar to SQL Server. - -Similar to SQL Server, the transaction log for each database is truncated whenever a log backup is taken. 
Truncation leaves empty space in the log file, which can then accept new transactions. When the log file cannot be truncated by log backups, the log file grows to accommodate new transactions. If the log file grows to its maximum limits in Azure SQL Database or Azure SQL Managed Instance, new transactions cannot be accepted. This is a very unusual scenario. - -## Prevented transaction log truncation - -To discover what is preventing log truncation in a given case, refer to `log_reuse_wait_desc` in `sys.databases`. The log reuse wait informs you of what conditions or causes are preventing the transaction log from being truncated by a regular log backup. For more information, see [sys.databases (Transact-SQL)](/sql/relational-databases/system-catalog-views/sys-databases-transact-sql). - -```sql -SELECT [name], log_reuse_wait_desc FROM sys.databases; -``` - -The following values of `log_reuse_wait_desc` in `sys.databases` may indicate the reason why the database's transaction log truncation is being prevented: - -| log_reuse_wait_desc | Diagnosis | Response required | -|--|--|--| -| **NOTHING** | Typical state. There is nothing blocking the log from truncating. | No. | -| **CHECKPOINT** | A checkpoint is needed for log truncation. Rare. | No response required unless sustained. If sustained, file a support request with [Azure Support](https://portal.azure.com/#create/Microsoft.Support). | -| **LOG BACKUP** | A log backup is in progress. | No response required unless sustained. If sustained, file a support request with [Azure Support](https://portal.azure.com/#create/Microsoft.Support). | -| **ACTIVE BACKUP OR RESTORE** | A database backup is in progress. | No response required unless sustained. If sustained, file a support request with [Azure Support](https://portal.azure.com/#create/Microsoft.Support). | -| **ACTIVE TRANSACTION** | An ongoing transaction is preventing log truncation. | The log file cannot be truncated due to active and/or uncommitted transactions. 
See next section.| -| **REPLICATION** | In Azure SQL Database, likely due to [change data capture (CDC)](/sql/relational-databases/track-changes/about-change-data-capture-sql-server) feature.
    In Azure SQL Managed Instance, due to [replication](../managed-instance/replication-transactional-overview.md) or CDC. | In Azure SQL Database, query [sys.dm_cdc_errors](/sql/relational-databases/system-dynamic-management-views/change-data-capture-sys-dm-cdc-errors) and resolve errors. If unresolvable, file a support request with [Azure Support](https://portal.azure.com/#create/Microsoft.Support).
    In Azure SQL Managed Instance, if sustained, investigate agents involved with CDC or replication. For troubleshooting CDC, query jobs in [msdb.dbo.cdc_jobs](/sql/relational-databases/system-tables/dbo-cdc-jobs-transact-sql). If not present, add via [sys.sp_cdc_add_job](/sql/relational-databases/system-stored-procedures/sys-sp-cdc-add-job-transact-sql). For replication, consider [Troubleshooting transactional replication](/sql/relational-databases/replication/troubleshoot-tran-repl-errors). If unresolvable, file a support request with [Azure Support](https://portal.azure.com/#create/Microsoft.Support). | -| **AVAILABILITY_REPLICA** | Synchronization to the secondary replica is in progress. | No response required unless sustained. If sustained, file a support request with [Azure Support](https://portal.azure.com/#create/Microsoft.Support). | - -### Log truncation prevented by an active transaction - -The most common scenario for a transaction log that cannot accept new transactions is a long-running or blocked transaction. - -Run this sample query to find uncommitted or active transactions and their properties. - -- Returns information about transaction properties, from [sys.dm_tran_active_transactions](/sql/relational-databases/system-dynamic-management-views/sys-dm-tran-session-transactions-transact-sql). -- Returns session connection information, from [sys.dm_exec_sessions](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-sessions-transact-sql). -- Returns request information (for active requests), from [sys.dm_exec_requests](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-requests-transact-sql). This query can also be used to identify sessions being blocked, look for the `request_blocked_by`. For more information on blocking, see [Gather blocking information](understand-resolve-blocking.md#gather-blocking-information). 
-- Returns the current request's text or input buffer text, using the [sys.dm_exec_sql_text](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-sql-text-transact-sql) or [sys.dm_exec_input_buffer](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-input-buffer-transact-sql) DMVs. If the data returned by the `text` field of `sys.dm_exec_sql_text` is NULL, the request is not active but has an outstanding transaction. In that case, the `event_info` field of `sys.dm_exec_input_buffer` will contain the last command string passed to the database engine. - -```sql -SELECT [database_name] = db_name(s.database_id) -, tat.transaction_id, tat.transaction_begin_time, tst.session_id -, session_open_transaction_count = tst.open_transaction_count --uncommitted and unrolled back transactions open. -, transaction_duration_s = datediff(s, tat.transaction_begin_time, sysdatetime()) -, input_buffer = ib.event_info -, request_text = CASE WHEN r.statement_start_offset = 0 and r.statement_end_offset= 0 THEN left(est.text, 4000) - ELSE SUBSTRING ( est.[text], r.statement_start_offset/2 + 1, - CASE WHEN r.statement_end_offset = -1 THEN LEN (CONVERT(nvarchar(max), est.[text])) - ELSE r.statement_end_offset/2 - r.statement_start_offset/2 + 1 - END ) END -, request_status = r.status -, request_blocked_by = r.blocking_session_id -, transaction_state = CASE tat.transaction_state - WHEN 0 THEN 'The transaction has not been completely initialized yet.' - WHEN 1 THEN 'The transaction has been initialized but has not started.' - WHEN 2 THEN 'The transaction is active - has not been committed or rolled back.' - WHEN 3 THEN 'The transaction has ended. This is used for read-only transactions.' - WHEN 4 THEN 'The commit process has been initiated on the distributed transaction. This is for distributed transactions only. The distributed transaction is still active but further processing cannot take place.' 
- WHEN 5 THEN 'The transaction is in a prepared state and waiting resolution.' - WHEN 6 THEN 'The transaction has been committed.' - WHEN 7 THEN 'The transaction is being rolled back.' - WHEN 8 THEN 'The transaction has been rolled back.' END -, transaction_name = tat.name -, azure_dtc_state --Applies to: Azure SQL Database only - = CASE tat.dtc_state - WHEN 1 THEN 'ACTIVE' - WHEN 2 THEN 'PREPARED' - WHEN 3 THEN 'COMMITTED' - WHEN 4 THEN 'ABORTED' - WHEN 5 THEN 'RECOVERED' END -, transaction_type = CASE tat.transaction_type WHEN 1 THEN 'Read/write transaction' - WHEN 2 THEN 'Read-only transaction' - WHEN 3 THEN 'System transaction' - WHEN 4 THEN 'Distributed transaction' END -, tst.is_user_transaction -, local_or_distributed = CASE tst.is_local WHEN 1 THEN 'Local transaction, not distributed' WHEN 0 THEN 'Distributed transaction or an enlisted bound session transaction.' END -, transaction_uow --for distributed transactions. -, s.login_time, s.host_name, s.program_name, s.client_interface_name, s.login_name, s.is_user_process -, session_cpu_time = s.cpu_time, session_logical_reads = s.logical_reads, session_reads = s.reads, session_writes = s.writes -, observed = sysdatetimeoffset() -FROM sys.dm_tran_active_transactions AS tat -INNER JOIN sys.dm_tran_session_transactions AS tst on tat.transaction_id = tst.transaction_id -INNER JOIN Sys.dm_exec_sessions AS s on s.session_id = tst.session_id -LEFT OUTER JOIN sys.dm_exec_requests AS r on r.session_id = s.session_id -CROSS APPLY sys.dm_exec_input_buffer(s.session_id, null) AS ib -OUTER APPLY sys.dm_exec_sql_text (r.sql_handle) AS est; -``` - - -### File management to free more space - -If the transaction log is prevented from truncating, freeing more space in the allocation of database files may be part of the solution. However, resolving the root cause of the condition blocking transaction log file truncation is key. 
- -In some cases, temporarily creating more disk space will allow a long-running transaction to complete, removing the condition blocking the transaction log file from truncating with a normal transaction log backup. However, freeing up space in the allocation may provide only temporary relief until the transaction log grows again. - -For more information on managing the file space of databases and elastic pools, see [Manage file space for databases in Azure SQL Database](file-space-manage.md). - - -### Error 40552: The session has been terminated because of excessive transaction log space usage - -`40552: The session has been terminated because of excessive transaction log space usage. Try modifying fewer rows in a single transaction.` - -To resolve this issue, try the following methods: - -1. The issue can occur because of insert, update, or delete operations. Review the transaction to avoid unnecessary writes. Try to reduce the number of rows that are operated on immediately by implementing batching or splitting into multiple smaller transactions. For more information, see [How to use batching to improve SQL Database application performance](../performance-improve-use-batching.md). -2. The issue can occur because of index rebuild operations. To avoid this issue, ensure the following formula is true: (number of rows that are affected in the table) multiplied by (the average size of field that's updated in bytes + 80) < 2 gigabytes (GB). For large tables, consider creating partitions and performing index maintenance only on some partitions of the table. For more information, see [Create Partitioned Tables and Indexes](/sql/relational-databases/partitions/create-partitioned-tables-and-indexes?view=azuresqldb-current&preserve-view=true). -3. If you perform bulk inserts using the `bcp.exe` utility or the `System.Data.SqlClient.SqlBulkCopy` class, try using the `-b batchsize` or `BatchSize` options to limit the number of rows copied to the server in each transaction. 
For more information, see [bcp Utility](/sql/tools/bcp-utility). -4. If you are rebuilding an index with the `ALTER INDEX` statement, use the `SORT_IN_TEMPDB = ON` and `ONLINE = ON` options. For more information, see [ALTER INDEX (Transact-SQL)](/sql/t-sql/statements/alter-index-transact-sql). - -> [!NOTE] -> For more information on other resource governor errors, see [Resource governance errors](troubleshoot-common-errors-issues.md#resource-governance-errors). - -## Next steps - -- [Troubleshooting connectivity issues and other errors with Azure SQL Database and Azure SQL Managed Instance](troubleshoot-common-errors-issues.md) -- [Troubleshoot transient connection errors in SQL Database and SQL Managed Instance](troubleshoot-common-connectivity-issues.md) -- Video: [Data Loading Best Practices on Azure SQL Database](/shows/data-exposed/data-loading-best-practices-on-azure-sql-database?WT.mc_id=dataexposed-c9-niner) - -For information on transaction log sizes, see: -- For vCore resource limits for a single database, see [resource limits for single databases using the vCore purchasing model](resource-limits-vcore-single-databases.md) -- For vCore resource limits for elastic pools, see [resource limits for elastic pools using the vCore purchasing model](resource-limits-vcore-elastic-pools.md) -- For DTU resource limits for a single database, see [resource limits for single databases using the DTU purchasing model](resource-limits-dtu-single-databases.md) -- For DTU resource limits for elastic pools, see [resource limits for elastic pools using the DTU purchasing model](resource-limits-dtu-elastic-pools.md) -- For resource limits for SQL Managed Instance, see [resource limits for SQL Managed Instance](../managed-instance/resource-limits.md). 
diff --git a/articles/azure-sql/database/understand-resolve-blocking.md b/articles/azure-sql/database/understand-resolve-blocking.md deleted file mode 100644 index 6853fc2579f6d..0000000000000 --- a/articles/azure-sql/database/understand-resolve-blocking.md +++ /dev/null @@ -1,459 +0,0 @@ ---- -title: Understand and resolve Azure SQL blocking problems -titleSuffix: Azure SQL Database -description: "An overview of Azure SQL database-specific topics on blocking and troubleshooting." -services: sql-database -dev_langs: - - "TSQL" -ms.service: sql-database -ms.subservice: performance -ms.custom: -ms.devlang: -ms.topic: conceptual -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, mathoma -ms.date: 4/8/2022 ---- -# Understand and resolve Azure SQL Database blocking problems -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -## Objective - -The article describes blocking in Azure SQL databases and demonstrates how to troubleshoot and resolve blocking. - -In this article, the term connection refers to a single logged-on session of the database. Each connection appears as a session ID (SPID) or session_id in many DMVs. Each of these SPIDs is often referred to as a process, although it is not a separate process context in the usual sense. Rather, each SPID consists of the server resources and data structures necessary to service the requests of a single connection from a given client. A single client application may have one or more connections. From the perspective of Azure SQL Database, there is no difference between multiple connections from a single client application on a single client computer and multiple connections from multiple client applications or multiple client computers; they are atomic. One connection can block another connection, regardless of the source client. - -For information on troubleshooting deadlocks, see [Analyze and prevent deadlocks in Azure SQL Database](analyze-prevent-deadlocks.md). 
- -> [!NOTE] -> **This content is focused on Azure SQL Database.** Azure SQL Database is based on the latest stable version of the Microsoft SQL Server database engine, so much of the content is similar though troubleshooting options and tools may differ. For more on blocking in SQL Server, see [Understand and resolve SQL Server blocking problems](/troubleshoot/sql/performance/understand-resolve-blocking). - -## Understand blocking - -Blocking is an unavoidable and by-design characteristic of any relational database management system (RDBMS) with lock-based concurrency. Blocking in a database in Azure SQL Database occurs when one session holds a lock on a specific resource and a second SPID attempts to acquire a conflicting lock type on the same resource. Typically, the time frame for which the first SPID locks the resource is small. When the owning session releases the lock, the second connection is then free to acquire its own lock on the resource and continue processing. This is normal behavior and may happen many times throughout the course of a day with no noticeable effect on system performance. - -Each new database in Azure SQL Database has the [read committed snapshot](/sql/t-sql/statements/alter-database-transact-sql-set-options?view=azuresqldb-current&preserve-view=true#read_committed_snapshot--on--off--1) (RCSI) database setting enabled by default. Blocking between sessions reading data and sessions writing data is minimized under RCSI, which uses row versioning to increase concurrency. However, blocking and deadlocks may still occur in databases in Azure SQL Database because: - -- Queries that modify data may block one another. -- Queries may run under isolation levels that increase blocking. Isolation levels may be specified in application connection strings, [query hints](/sql/t-sql/queries/hints-transact-sql-query), or [SET statements](/sql/t-sql/statements/set-transaction-isolation-level-transact-sql) in Transact-SQL. 
-- [RCSI may be disabled](/sql/t-sql/statements/alter-database-transact-sql-set-options?view=azuresqldb-current&preserve-view=true#read_committed_snapshot--on--off--1), causing the database to use shared (S) locks to protect SELECT statements run under the read committed isolation level. This may increase blocking and deadlocks. - -[Snapshot isolation level](/sql/t-sql/statements/alter-database-transact-sql-set-options?view=azuresqldb-current&preserve-view=true#b-enable-snapshot-isolation-on-a-database) is also enabled by default for new databases in Azure SQL Database. Snapshot isolation is an additional row-based isolation level that provides transaction-level consistency for data and which uses row versions to select rows to update. To use snapshot isolation, queries or connections must explicitly set their transaction isolation level to `SNAPSHOT`. This may only be done when snapshot isolation is enabled for the database. - -You can identify if RCSI and/or snapshot isolation are enabled with Transact-SQL. Connect to your database in Azure SQL Database and run the following query: - -```sql -SELECT name, is_read_committed_snapshot_on, snapshot_isolation_state_desc -FROM sys.databases -WHERE name = DB_NAME(); -GO -``` - -If RCSI is enabled, the `is_read_committed_snapshot_on` column will return the value **1**. If snapshot isolation is enabled, the `snapshot_isolation_state_desc` column will return the value **ON**. - -The duration and transaction context of a query determine how long its locks are held and, thereby, their effect on other queries. SELECT statements run under RCSI [do not acquire shared (S) locks on the data being read](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#behavior-when-reading-data), and therefore do not block transactions that are modifying data. 
For INSERT, UPDATE, and DELETE statements, the locks are held during the query, both for data consistency and to allow the query to be rolled back if necessary. - -For queries executed within an [explicit transaction](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#starting-transactions), the type of locks and duration for which the locks are held are determined by the type of query, the transaction isolation level, and whether lock hints are used in the query. For a description of locking, lock hints, and transaction isolation levels, see the following articles: - -* [Locking in the Database Engine](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide) -* [Customizing Locking and Row Versioning](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#customizing-locking-and-row-versioning) -* [Lock Modes](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#lock_modes) -* [Lock Compatibility](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#lock_compatibility) -* [Transactions](/sql/t-sql/language-elements/transactions-transact-sql) - -When locking and blocking persists to the point where there is a detrimental effect on system performance, it is due to one of the following reasons: - -* A SPID holds locks on a set of resources for an extended period of time before releasing them. This type of blocking resolves itself over time but can cause performance degradation. - -* A SPID holds locks on a set of resources and never releases them. This type of blocking does not resolve itself and prevents access to the affected resources indefinitely. - -In the first scenario, the situation can be very fluid as different SPIDs cause blocking on different resources over time, creating a moving target. 
These situations are difficult to troubleshoot using [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) to narrow down the issue to individual queries. In contrast, the second situation results in a consistent state that can be easier to diagnose. - -## Applications and blocking - -There may be a tendency to focus on server-side tuning and platform issues when facing a blocking problem. However, attention paid only to the database may not lead to a resolution, and can absorb time and energy better directed at examining the client application and the queries it submits. No matter what level of visibility the application exposes regarding the database calls being made, a blocking problem nonetheless frequently requires both the inspection of the exact SQL statements submitted by the application and the application's exact behavior regarding query cancellation, connection management, fetching all result rows, and so on. If the development tool does not allow explicit control over connection management, query cancellation, query time-out, result fetching, and so on, blocking problems may not be resolvable. This potential should be closely examined before selecting an application development tool for Azure SQL Database, especially for performance-sensitive OLTP environments. - -Pay attention to database performance during the design and construction phase of the database and application. In particular, the resource consumption, isolation level, and transaction path length should be evaluated for each query. Each query and transaction should be as lightweight as possible. Good connection management discipline must be exercised; without it, the application may appear to have acceptable performance at low numbers of users, but the performance may degrade significantly as the number of users scales upward. 
- -With proper application and query design, Azure SQL Database is capable of supporting many thousands of simultaneous users on a single server, with little blocking. - -> [!Note] -> For more application development guidance, see [Troubleshooting connectivity issues and other errors with Azure SQL Database and Azure SQL Managed Instance](troubleshoot-common-errors-issues.md) and [Transient Fault Handling](/aspnet/aspnet/overview/developing-apps-with-windows-azure/building-real-world-cloud-apps-with-windows-azure/transient-fault-handling). - -## Troubleshoot blocking - -Regardless of which blocking situation we are in, the methodology for troubleshooting locking is the same. These logical separations are what will dictate the rest of the composition of this article. The concept is to find the head blocker and identify what that query is doing and why it is blocking. Once the problematic query is identified (that is, what is holding locks for the prolonged period), the next step is to analyze and determine why the blocking is happening. After we understand the why, we can then make changes by redesigning the query and the transaction. - -Steps in troubleshooting: - -1. Identify the main blocking session (head blocker) - -2. Find the query and transaction that is causing the blocking (what is holding locks for a prolonged period) - -3. Analyze/understand why the prolonged blocking occurs - -4. Resolve blocking issue by redesigning query and transaction - -Now let's dive in to discuss how to pinpoint the main blocking session with an appropriate data capture. - -## Gather blocking information - -To counteract the difficulty of troubleshooting blocking problems, a database administrator can use SQL scripts that constantly monitor the state of locking and blocking in the database in Azure SQL Database. To gather this data, there are essentially two methods. - -The first is to query dynamic management objects (DMOs) and store the results for comparison over time. 
Some objects referenced in this article are dynamic management views (DMVs) and some are dynamic management functions (DMFs). The second method is to use XEvents to capture what is executing. - -## Gather information from DMVs - -Referencing DMVs to troubleshoot blocking has the goal of identifying the SPID (session ID) at the head of the blocking chain and the SQL Statement. Look for victim SPIDs that are being blocked. If any SPID is being blocked by another SPID, then investigate the SPID owning the resource (the blocking SPID). Is that owner SPID being blocked as well? You can walk the chain to find the head blocker then investigate why it is maintaining its lock. - -Remember to run each of these scripts in the target database in Azure SQL Database. - -* The sp_who and sp_who2 commands are older commands to show all current sessions. The DMV `sys.dm_exec_sessions` returns more data in a result set that is easier to query and filter. You will find `sys.dm_exec_sessions` at the core of other queries. - -* If you already have a particular session identified, you can use `DBCC INPUTBUFFER()` to find the last statement that was submitted by a session. Similar results can be returned with the `sys.dm_exec_input_buffer` dynamic management function (DMF), in a result set that is easier to query and filter, providing the session_id and the request_id. For example, to return the most recent query submitted by session_id 66 and request_id 0: - -```sql -SELECT * FROM sys.dm_exec_input_buffer (66,0); -``` - -* Refer to the `blocking_session_id` column in `sys.dm_exec_requests`. When `blocking_session_id` = 0, a session is not being blocked. While `sys.dm_exec_requests` lists only requests currently executing, any connection (active or not) will be listed in `sys.dm_exec_sessions`. Build on this common join between `sys.dm_exec_requests` and `sys.dm_exec_sessions` in the next query. 
- -* Run this sample query to find the actively executing queries and their current SQL batch text or input buffer text, using the [sys.dm_exec_sql_text](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-sql-text-transact-sql) or [sys.dm_exec_input_buffer](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-input-buffer-transact-sql) DMVs. If the data returned by the `text` field of `sys.dm_exec_sql_text` is NULL, the query is not currently executing. In that case, the `event_info` field of `sys.dm_exec_input_buffer` will contain the last command string passed to the SQL engine. This query can also be used to identify sessions blocking other sessions, including a list of session_ids blocked per session_id. - -```sql -WITH cteBL (session_id, blocking_these) AS -(SELECT s.session_id, blocking_these = x.blocking_these FROM sys.dm_exec_sessions s -CROSS APPLY (SELECT isnull(convert(varchar(6), er.session_id),'') + ', ' - FROM sys.dm_exec_requests as er - WHERE er.blocking_session_id = isnull(s.session_id ,0) - AND er.blocking_session_id <> 0 - FOR XML PATH('') ) AS x (blocking_these) -) -SELECT s.session_id, blocked_by = r.blocking_session_id, bl.blocking_these -, batch_text = t.text, input_buffer = ib.event_info, * -FROM sys.dm_exec_sessions s -LEFT OUTER JOIN sys.dm_exec_requests r on r.session_id = s.session_id -INNER JOIN cteBL as bl on s.session_id = bl.session_id -OUTER APPLY sys.dm_exec_sql_text (r.sql_handle) t -OUTER APPLY sys.dm_exec_input_buffer(s.session_id, NULL) AS ib -WHERE blocking_these is not null or r.blocking_session_id > 0 -ORDER BY len(bl.blocking_these) desc, r.blocking_session_id desc, r.session_id; -``` - -* Run this more elaborate sample query, provided by Microsoft Support, to identify the head of a multiple session blocking chain, including the query text of the sessions involved in a blocking chain. 
- -```sql -WITH cteHead ( session_id,request_id,wait_type,wait_resource,last_wait_type,is_user_process,request_cpu_time -,request_logical_reads,request_reads,request_writes,wait_time,blocking_session_id,memory_usage -,session_cpu_time,session_reads,session_writes,session_logical_reads -,percent_complete,est_completion_time,request_start_time,request_status,command -,plan_handle,sql_handle,statement_start_offset,statement_end_offset,most_recent_sql_handle -,session_status,group_id,query_hash,query_plan_hash) -AS ( SELECT sess.session_id, req.request_id, LEFT (ISNULL (req.wait_type, ''), 50) AS 'wait_type' - , LEFT (ISNULL (req.wait_resource, ''), 40) AS 'wait_resource', LEFT (req.last_wait_type, 50) AS 'last_wait_type' - , sess.is_user_process, req.cpu_time AS 'request_cpu_time', req.logical_reads AS 'request_logical_reads' - , req.reads AS 'request_reads', req.writes AS 'request_writes', req.wait_time, req.blocking_session_id,sess.memory_usage - , sess.cpu_time AS 'session_cpu_time', sess.reads AS 'session_reads', sess.writes AS 'session_writes', sess.logical_reads AS 'session_logical_reads' - , CONVERT (decimal(5,2), req.percent_complete) AS 'percent_complete', req.estimated_completion_time AS 'est_completion_time' - , req.start_time AS 'request_start_time', LEFT (req.status, 15) AS 'request_status', req.command - , req.plan_handle, req.[sql_handle], req.statement_start_offset, req.statement_end_offset, conn.most_recent_sql_handle - , LEFT (sess.status, 15) AS 'session_status', sess.group_id, req.query_hash, req.query_plan_hash - FROM sys.dm_exec_sessions AS sess - LEFT OUTER JOIN sys.dm_exec_requests AS req ON sess.session_id = req.session_id - LEFT OUTER JOIN sys.dm_exec_connections AS conn on conn.session_id = sess.session_id - ) -, cteBlockingHierarchy (head_blocker_session_id, session_id, blocking_session_id, wait_type, wait_duration_ms, -wait_resource, statement_start_offset, statement_end_offset, plan_handle, sql_handle, most_recent_sql_handle, [Level]) -AS 
( SELECT head.session_id AS head_blocker_session_id, head.session_id AS session_id, head.blocking_session_id - , head.wait_type, head.wait_time, head.wait_resource, head.statement_start_offset, head.statement_end_offset - , head.plan_handle, head.sql_handle, head.most_recent_sql_handle, 0 AS [Level] - FROM cteHead AS head - WHERE (head.blocking_session_id IS NULL OR head.blocking_session_id = 0) - AND head.session_id IN (SELECT DISTINCT blocking_session_id FROM cteHead WHERE blocking_session_id != 0) - UNION ALL - SELECT h.head_blocker_session_id, blocked.session_id, blocked.blocking_session_id, blocked.wait_type, - blocked.wait_time, blocked.wait_resource, h.statement_start_offset, h.statement_end_offset, - h.plan_handle, h.sql_handle, h.most_recent_sql_handle, [Level] + 1 - FROM cteHead AS blocked - INNER JOIN cteBlockingHierarchy AS h ON h.session_id = blocked.blocking_session_id and h.session_id!=blocked.session_id --avoid infinite recursion for latch type of blocking - WHERE h.wait_type COLLATE Latin1_General_BIN NOT IN ('EXCHANGE', 'CXPACKET') or h.wait_type is null - ) -SELECT bh.*, txt.text AS blocker_query_or_most_recent_query -FROM cteBlockingHierarchy AS bh -OUTER APPLY sys.dm_exec_sql_text (ISNULL ([sql_handle], most_recent_sql_handle)) AS txt; -``` - -* To catch long-running or uncommitted transactions, use another set of DMVs for viewing current open transactions, including [sys.dm_tran_database_transactions](/sql/relational-databases/system-dynamic-management-views/sys-dm-tran-database-transactions-transact-sql), [sys.dm_tran_session_transactions](/sql/relational-databases/system-dynamic-management-views/sys-dm-tran-session-transactions-transact-sql), [sys.dm_exec_connections](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-connections-transact-sql), and sys.dm_exec_sql_text. 
There are several DMVs associated with tracking transactions, see more [DMVs on transactions](/sql/relational-databases/system-dynamic-management-views/transaction-related-dynamic-management-views-and-functions-transact-sql) here. - -```sql -SELECT [s_tst].[session_id], -[database_name] = DB_NAME (s_tdt.database_id), -[s_tdt].[database_transaction_begin_time], -[sql_text] = [s_est].[text] -FROM sys.dm_tran_database_transactions [s_tdt] -INNER JOIN sys.dm_tran_session_transactions [s_tst] ON [s_tst].[transaction_id] = [s_tdt].[transaction_id] -INNER JOIN sys.dm_exec_connections [s_ec] ON [s_ec].[session_id] = [s_tst].[session_id] -CROSS APPLY sys.dm_exec_sql_text ([s_ec].[most_recent_sql_handle]) AS [s_est]; -``` - -* Reference [sys.dm_os_waiting_tasks](/sql/relational-databases/system-dynamic-management-views/sys-dm-os-waiting-tasks-transact-sql) that is at the thread/task layer of SQL. This returns information about what SQL wait type the request is currently experiencing. Like `sys.dm_exec_requests`, only active requests are returned by `sys.dm_os_waiting_tasks`. - -> [!Note] -> For much more on wait types including aggregated wait stats over time, see the DMV [sys.dm_db_wait_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-wait-stats-azure-sql-database). This DMV returns aggregate wait stats for the current database only. - -* Use the [sys.dm_tran_locks](/sql/relational-databases/system-dynamic-management-views/sys-dm-tran-locks-transact-sql) DMV for more granular information on what locks have been placed by queries. This DMV can return large amounts of data on a production database, and is useful for diagnosing what locks are currently held. - -Due to the INNER JOIN on `sys.dm_os_waiting_tasks`, the following query restricts the output from `sys.dm_tran_locks` only to currently blocked requests, their wait status, and their locks: - -```sql -SELECT table_name = schema_name(o.schema_id) + '.' 
+ o.name -, wt.wait_duration_ms, wt.wait_type, wt.blocking_session_id, wt.resource_description -, tm.resource_type, tm.request_status, tm.request_mode, tm.request_session_id -FROM sys.dm_tran_locks AS tm -INNER JOIN sys.dm_os_waiting_tasks as wt ON tm.lock_owner_address = wt.resource_address -LEFT OUTER JOIN sys.partitions AS p on p.hobt_id = tm.resource_associated_entity_id -LEFT OUTER JOIN sys.objects o on o.object_id = p.object_id or tm.resource_associated_entity_id = o.object_id -WHERE resource_database_id = DB_ID() -AND object_name(p.object_id) = ''; -``` - -* With DMVs, storing the query results over time will provide data points that will allow you to review blocking over a specified time interval to identify persisted blocking or trends. - -## Gather information from Extended Events - -In addition to the previous information, it is often necessary to capture a trace of the activities on the server to thoroughly investigate a blocking problem on Azure SQL Database. For example, if a session executes multiple statements within a transaction, only the last statement that was submitted will be represented. However, one of the earlier statements may be the reason locks are still being held. A trace will enable you to see all the commands executed by a session within the current transaction. - -There are two ways to capture traces in SQL Server; Extended Events (XEvents) and Profiler Traces. However, [SQL Server -Profiler](/sql/tools/sql-server-profiler/sql-server-profiler) -is deprecated trace technology not supported for Azure SQL Database. [Extended Events](/sql/relational-databases/extended-events/extended-events) is the newer tracing technology that allows more versatility and less impact to the observed system, and its interface is integrated into SQL Server Management Studio (SSMS). 
- -Refer to the document that explains how to use the [Extended Events New Session Wizard](/sql/relational-databases/extended-events/quick-start-extended-events-in-sql-server) in SSMS. For Azure SQL databases however, SSMS provides an Extended Events subfolder under each database in Object Explorer. Use an Extended Events session wizard to capture these useful events: - -- Category Errors: - - Attention - - Error_reported - - Execution_warning - -- Category Warnings: - - Missing_join_predicate - -- Category Execution: - - Rpc_completed - - Rpc_starting - - Sql_batch_completed - - Sql_batch_starting - -- Category deadlock_monitor - - database_xml_deadlock_report - -- Category session - - Existing_connection - - Login - - Logout - -> [!NOTE] -> For detailed information on deadlocks, see [Analyze and prevent deadlocks in Azure SQL Database](analyze-prevent-deadlocks.md). - -## Identify and resolve common blocking scenarios - -By examining the previous information, you can determine the cause of most blocking problems. The rest of this article is a discussion of how to use this information to identify and resolve some common blocking scenarios. This discussion assumes you have used the blocking scripts (referenced earlier) to capture information on the blocking SPIDs and have captured application activity using an XEvent session. - -## Analyze blocking data - -* Examine the output of the DMVs `sys.dm_exec_requests` and `sys.dm_exec_sessions` to determine the heads of the blocking chains, using `blocking_these` and `session_id`. This will most clearly identify which requests are blocked and which are blocking. Look further into the sessions that are blocked and blocking. Is there a common or root to the blocking chain? They likely share a common table, and one or more of the sessions involved in a blocking chain is performing a write operation. 
- -* Examine the output of the DMVs `sys.dm_exec_requests` and `sys.dm_exec_sessions` for information on the SPIDs at the head of the blocking chain. Look for the following fields: - - - `sys.dm_exec_requests.status` - This column shows the status of a particular request. Typically, a sleeping status indicates that the SPID has completed execution and is waiting for the application to submit another query or batch. A runnable or running status indicates that the SPID is currently processing a query. The following table gives brief explanations of the various status values. - - | Status | Meaning | - |:-|:-| - | Background | The SPID is running a background task, such as deadlock detection, log writer, or checkpoint. | - | Sleeping | The SPID is not currently executing. This usually indicates that the SPID is awaiting a command from the application. | - | Running | The SPID is currently running on a scheduler. | - | Runnable | The SPID is in the runnable queue of a scheduler and waiting to get scheduler time. | - | Suspended | The SPID is waiting for a resource, such as a lock or a latch. | - - - `sys.dm_exec_sessions.open_transaction_count` - This field tells you the number of open transactions in this session. If this value is greater than 0, the SPID is within an open transaction and may be holding locks acquired by any statement within the transaction. - - - `sys.dm_exec_requests.open_transaction_count` - Similarly, this field tells you the number of open transactions in this request. If this value is greater than 0, the SPID is within an open transaction and may be holding locks acquired by any statement within the transaction. - - - `sys.dm_exec_requests.wait_type`, `wait_time`, and `last_wait_type` - If the `sys.dm_exec_requests.wait_type` is NULL, the request is not currently waiting for anything and the `last_wait_type` value indicates the last `wait_type` that the request encountered. 
For more information about `sys.dm_os_wait_stats` and a description of the most common wait types, see [sys.dm_os_wait_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-os-wait-stats-transact-sql). The `wait_time` value can be used to determine if the request is making progress. When a query against the `sys.dm_exec_requests` table returns a value in the `wait_time` column that is less than the `wait_time` value from a previous query of `sys.dm_exec_requests`, this indicates that the prior lock was acquired and released and is now waiting on a new lock (assuming non-zero `wait_time`). This can be verified by comparing the `wait_resource` between `sys.dm_exec_requests` output, which displays the resource for which the request is waiting. - - - `sys.dm_exec_requests.wait_resource` - This field indicates the resource that a blocked request is waiting on. The following table lists common `wait_resource` formats and their meaning: - - | Resource | Format | Example | Explanation | - |:-|:-|:-|:-| - | Table | DatabaseID:ObjectID:IndexID | TAB: 5:261575970:1 | In this case, database ID 5 is the pubs sample database and object ID 261575970 is the titles table and 1 is the clustered index. | - | Page | DatabaseID:FileID:PageID | PAGE: 5:1:104 | In this case, database ID 5 is pubs, file ID 1 is the primary data file, and page 104 is a page belonging to the titles table. To identify the object_id the page belongs to, use the dynamic management function [sys.dm_db_page_info](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-page-info-transact-sql), passing in the DatabaseID, FileId, PageId from the `wait_resource`. | - | Key | DatabaseID:Hobt_id (Hash value for index key) | KEY: 5:72057594044284928 (3300a4f361aa) | In this case, database ID 5 is Pubs, Hobt_ID 72057594044284928 corresponds to index_id 2 for object_id 261575970 (titles table). 
Use the `sys.partitions` catalog view to associate the hobt_id to a particular `index_id` and `object_id`. There is no way to unhash the index key hash to a specific key value. | - | Row | DatabaseID:FileID:PageID:Slot(row) | RID: 5:1:104:3 | In this case, database ID 5 is pubs, file ID 1 is the primary data file, page 104 is a page belonging to the titles table, and slot 3 indicates the row's position on the page. | - | Compile | DatabaseID:FileID:PageID:Slot(row) | RID: 5:1:104:3 | In this case, database ID 5 is pubs, file ID 1 is the primary data file, page 104 is a page belonging to the titles table, and slot 3 indicates the row's position on the page. | - - - `sys.dm_tran_active_transactions` - The [sys.dm_tran_active_transactions](/sql/relational-databases/system-dynamic-management-views/sys-dm-tran-active-transactions-transact-sql) DMV contains data about open transactions that can be joined to other DMVs for a complete picture of transactions awaiting commit or rollback. Use the following query to return information on open transactions, joined to other DMVs including [sys.dm_tran_session_transactions](/sql/relational-databases/system-dynamic-management-views/sys-dm-tran-session-transactions-transact-sql). Consider a transaction's current state, `transaction_begin_time`, and other situational data to evaluate whether it could be a source of blocking. - - ```sql - SELECT tst.session_id, [database_name] = db_name(s.database_id) - , tat.transaction_begin_time - , transaction_duration_s = datediff(s, tat.transaction_begin_time, sysdatetime()) - , transaction_type = CASE tat.transaction_type WHEN 1 THEN 'Read/write transaction' - WHEN 2 THEN 'Read-only transaction' - WHEN 3 THEN 'System transaction' - WHEN 4 THEN 'Distributed transaction' END - , input_buffer = ib.event_info, tat.transaction_uow - , transaction_state = CASE tat.transaction_state - WHEN 0 THEN 'The transaction has not been completely initialized yet.' 
- WHEN 1 THEN 'The transaction has been initialized but has not started.' - WHEN 2 THEN 'The transaction is active - has not been committed or rolled back.' - WHEN 3 THEN 'The transaction has ended. This is used for read-only transactions.' - WHEN 4 THEN 'The commit process has been initiated on the distributed transaction.' - WHEN 5 THEN 'The transaction is in a prepared state and waiting resolution.' - WHEN 6 THEN 'The transaction has been committed.' - WHEN 7 THEN 'The transaction is being rolled back.' - WHEN 8 THEN 'The transaction has been rolled back.' END - , transaction_name = tat.name, request_status = r.status - , azure_dtc_state = CASE tat.dtc_state - WHEN 1 THEN 'ACTIVE' - WHEN 2 THEN 'PREPARED' - WHEN 3 THEN 'COMMITTED' - WHEN 4 THEN 'ABORTED' - WHEN 5 THEN 'RECOVERED' END - , tst.is_user_transaction, tst.is_local - , session_open_transaction_count = tst.open_transaction_count - , s.host_name, s.program_name, s.client_interface_name, s.login_name, s.is_user_process - FROM sys.dm_tran_active_transactions tat - INNER JOIN sys.dm_tran_session_transactions tst on tat.transaction_id = tst.transaction_id - INNER JOIN sys.dm_exec_sessions s on s.session_id = tst.session_id - LEFT OUTER JOIN sys.dm_exec_requests r on r.session_id = s.session_id - CROSS APPLY sys.dm_exec_input_buffer(s.session_id, null) AS ib; - ``` - - - Other columns - - The remaining columns in [sys.dm_exec_sessions](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-sessions-transact-sql) and [sys.dm_exec_request](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-requests-transact-sql) can provide insight into the root of a problem as well. Their usefulness varies depending on the circumstances of the problem. 
For example, you can determine if the problem happens only from certain clients (hostname), on certain network libraries (net_library), when the last batch submitted by a SPID was `last_request_start_time` in `sys.dm_exec_sessions`, how long a request had been running using `start_time` in `sys.dm_exec_requests`, and so on. - - -## Common blocking scenarios - -The table below maps common symptoms to their probable causes. - -The Waittype, Open_Tran, and Status columns refer to information returned by [sys.dm_exec_requests](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-requests-transact-sql); other columns may be returned by [sys.dm_exec_sessions](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-sessions-transact-sql). The "Resolves?" column indicates whether or not the blocking will resolve on its own, or whether the session should be killed via the `KILL` command. For more information, see [KILL (Transact-SQL)](/sql/t-sql/language-elements/kill-transact-sql). - -| Scenario | Waittype | Open_Tran | Status | Resolves? | Other Symptoms | -|:-|:-|:-|:-|:-|:-| -| 1 | NOT NULL | >= 0 | runnable | Yes, when query finishes. | In `sys.dm_exec_sessions`, `reads`, `cpu_time`, and/or `memory_usage` columns will increase over time. Duration for the query will be high when completed. | -| 2 | NULL | \>0 | sleeping | No, but SPID can be killed. | An attention signal may be seen in the Extended Event session for this SPID, indicating a query time-out or cancel has occurred. | -| 3 | NULL | \>= 0 | runnable | No. Will not resolve until client fetches all rows or closes connection. SPID can be killed, but it may take up to 30 seconds. | If open_transaction_count = 0, and the SPID holds locks while the transaction isolation level is default (READ COMMITTED), this is a likely cause. | -| 4 | Varies | \>= 0 | runnable | No. Will not resolve until client cancels queries or closes connections. 
SPIDs can be killed, but may take up to 30 seconds. | The `hostname` column in `sys.dm_exec_sessions` for the SPID at the head of a blocking chain will be the same as one of the SPID it is blocking. | -| 5 | NULL | \>0 | rollback | Yes. | An attention signal may be seen in the Extended Events session for this SPID, indicating a query time-out or cancel has occurred, or simply a rollback statement has been issued. | -| 6 | NULL | \>0 | sleeping | Eventually. When Windows NT determines the session is no longer active, the Azure SQL Database connection will be broken. | The `last_request_start_time` value in `sys.dm_exec_sessions` is much earlier than the current time. | - -## Detailed blocking scenarios - -1. Blocking caused by a normally running query with a long execution time - - **Resolution**: The solution to this type of blocking problem is to look for ways to optimize the query. Actually, this class of blocking problem may just be a performance problem, and require you to pursue it as such. For information on troubleshooting a specific slow-running query, see [How to troubleshoot slow-running queries on SQL Server](/troubleshoot/sql/performance/troubleshoot-slow-running-queries). For more information, see [Monitor and Tune for Performance](/sql/relational-databases/performance/monitor-and-tune-for-performance). - - Reports from the [Query Store](/sql/relational-databases/performance/best-practice-with-the-query-store) in SSMS are also a highly recommended and valuable tool for identifying the most costly queries, suboptimal execution plans. Also review the [Intelligent Performance](intelligent-insights-overview.md) section of the Azure portal for the Azure SQL database, including [Query Performance Insight](query-performance-insight-use.md). 
- - If the query performs only SELECT operations, consider [running the statement under snapshot isolation](/sql/t-sql/statements/set-transaction-isolation-level-transact-sql) if it is enabled in your database, especially if RCSI has been disabled. As when RCSI is enabled, queries reading data do not require shared (S) locks under snapshot isolation level. Additionally, snapshot isolation provides transaction level consistency for all statements in an explicit multi-statement transaction. Snapshot isolation may [already be enabled in your database](#understand-blocking). Snapshot isolation may also be used with queries performing modifications, but you must handle [update conflicts](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide#behavior-in-summary). - - If you have a long-running query that is blocking other users and cannot be optimized, consider moving it from an OLTP environment to a dedicated reporting system, a [synchronous read-only replica of the database](read-scale-out.md). - -1. Blocking caused by a sleeping SPID that has an uncommitted transaction - - This type of blocking can often be identified by a SPID that is sleeping or awaiting a command, yet whose transaction nesting level (`@@TRANCOUNT`, `open_transaction_count` from `sys.dm_exec_requests`) is greater than zero. This can occur if the application experiences a query time-out, or issues a cancel without also issuing the required number of - ROLLBACK and/or COMMIT statements. When a SPID receives a query time-out or a cancel, it will terminate the current query and batch, but does not automatically roll back or commit the transaction. The application is responsible for this, as Azure SQL Database cannot assume that an entire transaction must be rolled back due to a single query being canceled. The query time-out or cancel will appear as an ATTENTION signal event for the SPID in the Extended Event session. 
- - To demonstrate an uncommitted explicit transaction, issue the following query: - - ```sql - CREATE TABLE #test (col1 INT); - INSERT INTO #test SELECT 1; - BEGIN TRAN - UPDATE #test SET col1 = 2 where col1 = 1; - ``` - - Then, execute this query in the same window: - ```sql - SELECT @@TRANCOUNT; - ROLLBACK TRAN - DROP TABLE #test; - ``` - - The output of the second query indicates that the transaction nesting level is one. All the locks acquired in the transaction are still be held until the transaction was committed or rolled back. If applications explicitly open and commit transactions, a communication or other error could leave the session and its transaction in an open state. - - Use the script earlier in this article based on `sys.dm_tran_active_transactions` to identify currently uncommitted transactions across the instance. - - **Resolutions**: - - - Additionally, this class of blocking problem may also be a performance problem, and require you to pursue it as such. If the query execution time can be diminished, the query time-out or cancel would not occur. It is important that the application is able to handle the time-out or cancel scenarios should they arise, but you may also benefit from examining the performance of the query. - - - Applications must properly manage transaction nesting levels, or they may cause a blocking problem following the cancellation of the query in this manner. Consider the following: - - * In the error handler of the client application, execute `IF @@TRANCOUNT > 0 ROLLBACK TRAN` following any error, even if the client application does not believe a transaction is open. Checking for open transactions is required, because a stored procedure called during the batch could have started a transaction without the client application's knowledge. 
Certain conditions, such as canceling the query, prevent the procedure from executing past the current statement, so even if the procedure has logic to check `IF @@ERROR <> 0` and abort the transaction, this rollback code will not be executed in such cases. - * If connection pooling is being used in an application that opens the connection and runs a small number of queries before releasing the connection back to the pool, such as a Web-based application, temporarily disabling connection pooling may help alleviate the problem until the client application is modified to handle the errors appropriately. By disabling connection pooling, releasing the connection will cause a physical disconnect of the Azure SQL Database connection, resulting in the server rolling back any open transactions. - * Use `SET XACT_ABORT ON` for the connection, or in any stored procedures that begin transactions and are not cleaning up following an error. In the event of a run-time error, this setting will abort any open transactions and return control to the client. For more information, review [SET XACT_ABORT (Transact-SQL)](/sql/t-sql/statements/set-xact-abort-transact-sql). - - > [!NOTE] - > The connection is not reset until it is reused from the connection pool, so it is possible that a user could open a transaction and then release the connection to the connection pool, but it might not be reused for several seconds, during which time the transaction would remain open. If the connection is not reused, the transaction will be aborted when the connection times out and is removed from the connection pool. Thus, it is optimal for the client application to abort transactions in their error handler or use `SET XACT_ABORT ON` to avoid this potential delay. - - > [!CAUTION] - > Following `SET XACT_ABORT ON`, T-SQL statements following a statement that causes an error will not be executed. This could affect the intended flow of existing code. - -1. 
Blocking caused by a SPID whose corresponding client application did not fetch all result rows to completion - - After sending a query to the server, all applications must immediately fetch all result rows to completion. If an application does not fetch all result rows, locks can be left on the tables, blocking other users. If you are using an application that transparently submits SQL statements to the server, the application must fetch all result rows. If it does not (and if it cannot be configured to do so), you may be unable to resolve the blocking problem. To avoid the problem, you can restrict poorly behaved applications to a reporting or a decision-support database, separate from the main OLTP database. - - The impact of this scenario is reduced when read committed snapshot is enabled on the database, which is the default configuration in Azure SQL Database. Learn more in the [Understand blocking](#understand-blocking) section of this article. - - > [!NOTE] - > See [guidance for retry logic](./troubleshoot-common-connectivity-issues.md#retry-logic-for-transient-errors) for applications connecting to Azure SQL Database. - - **Resolution**: The application must be rewritten to fetch all rows of the result to completion. This does not rule out the use of [OFFSET and FETCH in the ORDER BY clause](/sql/t-sql/queries/select-order-by-clause-transact-sql#using-offset-and-fetch-to-limit-the-rows-returned) of a query to perform server-side paging. - -1. Blocking caused by a session in a rollback state - - A data modification query that is KILLed, or canceled outside of a user-defined transaction, will be rolled back. This can also occur as a side effect of the client network session disconnecting, or when a request is selected as the deadlock victim. This can often be identified by observing the output of `sys.dm_exec_requests`, which may indicate the ROLLBACK command, and the `percent_complete` column may show progress. 
- - Thanks to the [Accelerated Database Recovery feature](../accelerated-database-recovery.md) introduced in 2019, lengthy rollbacks should be rare. - - **Resolution**: Wait for the SPID to finish rolling back the changes that were made. - - To avoid this situation, do not perform large batch write operations or index creation or maintenance operations during busy hours on OLTP systems. If possible, perform such operations during periods of low activity. - -1. Blocking caused by an orphaned connection - - If the client application traps errors or the client workstation is restarted, the network session to the server may not be immediately canceled under some conditions. From the Azure SQL Database perspective, the client still appears to be present, and any locks acquired may still be retained. For more information, see [How to troubleshoot orphaned connections in SQL Server](/sql/t-sql/language-elements/kill-transact-sql#remarks). - - **Resolution**: If the client application has disconnected without appropriately cleaning up its resources, you can terminate the SPID by using the `KILL` command. The `KILL` command takes the SPID value as input. 
For example, to kill SPID 99, issue the following command: - - ```sql - KILL 99 - ``` - -## See also - -* [Analyze and prevent deadlocks in Azure SQL Database](analyze-prevent-deadlocks.md) -* [Monitoring and performance tuning in Azure SQL Database and Azure SQL Managed Instance](./monitor-tune-overview.md) -* [Monitoring performance by using the Query Store](/sql/relational-databases/performance/monitoring-performance-by-using-the-query-store) -* [Transaction Locking and Row Versioning Guide](/sql/relational-databases/sql-server-transaction-locking-and-row-versioning-guide) -* [SET TRANSACTION ISOLATION LEVEL](/sql/t-sql/statements/set-transaction-isolation-level-transact-sql) -* [Quickstart: Extended events in SQL Server](/sql/relational-databases/extended-events/quick-start-extended-events-in-sql-server) -* [Intelligent Insights using AI to monitor and troubleshoot database performance](intelligent-insights-overview.md) - -## Next steps - -* [Azure SQL Database: Improving Performance Tuning with Automatic Tuning](/Shows/Data-Exposed/Azure-SQL-Database-Improving-Performance-Tuning-with-Automatic-Tuning) -* [Deliver consistent performance with Azure SQL](/learn/modules/azure-sql-performance/) -* [Troubleshooting connectivity issues and other errors with Azure SQL Database and Azure SQL Managed Instance](troubleshoot-common-errors-issues.md) -* [Transient Fault Handling](/aspnet/aspnet/overview/developing-apps-with-windows-azure/building-real-world-cloud-apps-with-windows-azure/transient-fault-handling) -* [Configure the max degree of parallelism (MAXDOP) in Azure SQL Database](configure-max-degree-of-parallelism.md) -* [Diagnose and troubleshoot high CPU on Azure SQL Database](high-cpu-diagnose-troubleshoot.md) diff --git a/articles/azure-sql/database/vnet-service-endpoint-rule-overview.md b/articles/azure-sql/database/vnet-service-endpoint-rule-overview.md deleted file mode 100644 index 7d4e4da2ab80a..0000000000000 --- 
a/articles/azure-sql/database/vnet-service-endpoint-rule-overview.md +++ /dev/null @@ -1,313 +0,0 @@ ---- -title: Virtual network endpoints and rules for databases in Azure SQL Database -description: "Mark a subnet as a virtual network service endpoint. Then add the endpoint as a virtual network rule to the ACL for your database. Your database then accepts communication from all virtual machines and other nodes on the subnet." -services: sql-database -ms.service: sql-database -ms.subservice: security -ms.custom: sqldbrb=1, subject-rbac-steps -ms.devlang: -ms.topic: how-to -author: rohitnayakmsft -ms.author: rohitna -ms.reviewer: kendralittle, vanto, genemi, mathoma -ms.date: 12/06/2021 ---- -# Use virtual network service endpoints and rules for servers in Azure SQL Database - -[!INCLUDE[appliesto-sqldb-asa](../includes/appliesto-sqldb-asa.md)] - -*Virtual network rules* are a firewall security feature that controls whether the server for your databases and elastic pools in [Azure SQL Database](sql-database-paas-overview.md) or for your dedicated SQL pool (formerly SQL DW) databases in [Azure Synapse Analytics](../../synapse-analytics/sql-data-warehouse/sql-data-warehouse-overview-what-is.md) accepts communications that are sent from particular subnets in virtual networks. This article explains why virtual network rules are sometimes your best option for securely allowing communication to your database in SQL Database and Azure Synapse Analytics. - -> [!NOTE] -> This article applies to both SQL Database and Azure Synapse Analytics. For simplicity, the term *database* refers to both databases in SQL Database and Azure Synapse Analytics. Likewise, any references to *server* refer to the [logical SQL server](logical-servers.md) that hosts SQL Database and Azure Synapse Analytics. - -To create a virtual network rule, there must first be a [virtual network service endpoint][vm-virtual-network-service-endpoints-overview-649d] for the rule to reference. 
- -## Create a virtual network rule - -If you want to only create a virtual network rule, you can skip ahead to the steps and explanation [later in this article](#anchor-how-to-by-using-firewall-portal-59j). - -## Details about virtual network rules - -This section describes several details about virtual network rules. - -### Only one geographic region - -Each virtual network service endpoint applies to only one Azure region. The endpoint doesn't enable other regions to accept communication from the subnet. - -Any virtual network rule is limited to the region that its underlying endpoint applies to. - -### Server level, not database level - -Each virtual network rule applies to your whole server, not just to one particular database on the server. In other words, virtual network rules apply at the server level, not at the database level. - -In contrast, IP rules can apply at either level. - -### Security administration roles - -There's a separation of security roles in the administration of virtual network service endpoints. Action is required from each of the following roles: - -- **Network Admin ([Network Contributor](../../role-based-access-control/built-in-roles.md#network-contributor) role):**  Turn on the endpoint. -- **Database Admin ([SQL Server Contributor](../../role-based-access-control/built-in-roles.md#sql-server-contributor) role):**  Update the access control list (ACL) to add the given subnet to the server. - -#### Azure RBAC alternative - -The roles of Network Admin and Database Admin have more capabilities than are needed to manage virtual network rules. Only a subset of their capabilities is needed. - -You have the option of using [role-based access control (RBAC)][rbac-what-is-813s] in Azure to create a single custom role that has only the necessary subset of capabilities. The custom role could be used instead of involving either the Network Admin or the Database Admin. 
The surface area of your security exposure is lower if you add a user to a custom role versus adding the user to the other two major administrator roles. - -> [!NOTE] -> In some cases, the database in SQL Database and the virtual network subnet are in different subscriptions. In these cases, you must ensure the following configurations: -> -> - Both subscriptions must be in the same Azure Active Directory (Azure AD) tenant. -> - The user has the required permissions to initiate operations, such as enabling service endpoints and adding a virtual network subnet to the given server. -> - Both subscriptions must have the Microsoft.Sql provider registered. - -## Limitations - -For SQL Database, the virtual network rules feature has the following limitations: - -- In the firewall for your database in SQL Database, each virtual network rule references a subnet. All these referenced subnets must be hosted in the same geographic region that hosts the database. -- Each server can have up to 128 ACL entries for any virtual network. -- Virtual network rules apply only to Azure Resource Manager virtual networks and not to [classic deployment model][arm-deployment-model-568f] networks. -- Turning on virtual network service endpoints to SQL Database also enables the endpoints for Azure Database for MySQL and Azure Database for PostgreSQL. With endpoints set to **ON**, attempts to connect from the endpoints to your Azure Database for MySQL or Azure Database for PostgreSQL instances might fail. - - The underlying reason is that Azure Database for MySQL and Azure Database for PostgreSQL likely don't have a virtual network rule configured. You must configure a virtual network rule for Azure Database for MySQL and Azure Database for PostgreSQL. - - To define virtual network firewall rules on a SQL logical server that's already configured with private endpoints, set **Deny public network access** to **No**. 
-- On the firewall, IP address ranges do apply to the following networking items, but virtual network rules don't: - - [Site-to-site (S2S) virtual private network (VPN)][vpn-gateway-indexmd-608y] - - On-premises via [Azure ExpressRoute](../../expressroute/index.yml) - -### Considerations when you use service endpoints - -When you use service endpoints for SQL Database, review the following considerations: - -- **Outbound to Azure SQL Database public IPs is required.** Network security groups (NSGs) must be opened to SQL Database IPs to allow connectivity. You can do this by using NSG [service tags](../../virtual-network/network-security-groups-overview.md#service-tags) for SQL Database. - -### ExpressRoute - -If you use [ExpressRoute](../../expressroute/expressroute-introduction.md?toc=%2fazure%2fvirtual-network%2ftoc.json) from your premises, for public peering or Microsoft peering, you'll need to identify the NAT IP addresses that are used. For public peering, each ExpressRoute circuit by default uses two NAT IP addresses applied to Azure service traffic when the traffic enters the Microsoft Azure network backbone. For Microsoft peering, the NAT IP addresses that are used are provided by either the customer or the service provider. To allow access to your service resources, you must allow these public IP addresses in the resource IP firewall setting. To find your public peering ExpressRoute circuit IP addresses, [open a support ticket with ExpressRoute](https://portal.azure.com/#blade/Microsoft_Azure_Support/HelpAndSupportBlade/overview) via the Azure portal. To learn more about NAT for ExpressRoute public and Microsoft peering, see [NAT requirements for Azure public peering](../../expressroute/expressroute-nat.md?toc=%2fazure%2fvirtual-network%2ftoc.json#nat-requirements-for-azure-public-peering). - -To allow communication from your circuit to SQL Database, you must create IP network rules for the public IP addresses of your NAT. 
- - - -## Impact of using virtual network service endpoints with Azure Storage - -Azure Storage has implemented the same feature that allows you to limit connectivity to your Azure Storage account. If you choose to use this feature with an Azure Storage account that SQL Database is using, you can run into issues. Next is a list and discussion of SQL Database and Azure Synapse Analytics features that are affected by this. - -### Azure Synapse Analytics PolyBase and COPY statement - -PolyBase and the COPY statement are commonly used to load data into Azure Synapse Analytics from Azure Storage accounts for high throughput data ingestion. If the Azure Storage account that you're loading data from limits accesses only to a set of virtual network subnets, connectivity when you use PolyBase and the COPY statement to the storage account will break. For enabling import and export scenarios by using COPY and PolyBase with Azure Synapse Analytics connecting to Azure Storage that's secured to a virtual network, follow the steps in this section. - -#### Prerequisites - -- Install Azure PowerShell by using [this guide](/powershell/azure/install-az-ps). -- If you have a general-purpose v1 or Azure Blob Storage account, you must first upgrade to general-purpose v2 by following the steps in [Upgrade to a general-purpose v2 storage account](../../storage/common/storage-account-upgrade.md). -- You must have **Allow trusted Microsoft services to access this storage account** turned on under the Azure Storage account **Firewalls and Virtual networks** settings menu. Enabling this configuration will allow PolyBase and the COPY statement to connect to the storage account by using strong authentication where network traffic remains on the Azure backbone. For more information, see [this guide](../../storage/common/storage-network-security.md#exceptions). 
- -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported by SQL Database, but all future development is for the Az.Sql module. The AzureRM module will continue to receive bug fixes until at least December 2020. The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. For more about their compatibility, see [Introducing the new Azure PowerShell Az module](/powershell/azure/new-azureps-module-az). - -#### Steps - -1. If you have a standalone dedicated SQL pool, register your SQL server with Azure AD by using PowerShell: - - ```powershell - Connect-AzAccount - Select-AzSubscription -SubscriptionId - Set-AzSqlServer -ResourceGroupName your-database-server-resourceGroup -ServerName your-SQL-servername -AssignIdentity - ``` - - This step isn't required for the dedicated SQL pools within an Azure Synapse Analytics workspace. The system assigned managed identity (SA-MI) of the workspace is a member of the Synapse Administrator role and thus has elevated privileges on the dedicated SQL pools of the workspace. - -1. Create a **general-purpose v2 Storage Account** by following the steps in [Create a storage account](../../storage/common/storage-account-create.md). - - - If you have a general-purpose v1 or Blob Storage account, you must *first upgrade to v2* by following the steps in [Upgrade to a general-purpose v2 storage account](../../storage/common/storage-account-upgrade.md). - - For known issues with Azure Data Lake Storage Gen2, see [Known issues with Azure Data Lake Storage Gen2](../../storage/blobs/data-lake-storage-known-issues.md). - -1. On your storage account page, select **Access control (IAM)**. - -1. Select **Add** > **Add role assignment** to open the **Add role assignment** page. - -1. Assign the following role. For detailed steps, see [Assign Azure roles using the Azure portal](../../role-based-access-control/role-assignments-portal.md). 
- - | Setting | Value | - | --- | --- | - | Role | Storage Blob Data Contributor | - | Assign access to | User, group, or service principal | - | Members | Server or workspace hosting your dedicated SQL pool that you've registered with Azure AD | - - ![Screenshot that shows Add role assignment page in Azure portal.](../../../includes/role-based-access-control/media/add-role-assignment-page.png) - - > [!NOTE] - > Only members with Owner privilege on the storage account can perform this step. For various Azure built-in roles, see [Azure built-in roles](../../role-based-access-control/built-in-roles.md). - -1. To enable PolyBase connectivity to the Azure Storage account: - - 1. Create a database [master key](/sql/t-sql/statements/create-master-key-transact-sql) if you haven't created one earlier. - - ```sql - CREATE MASTER KEY [ENCRYPTION BY PASSWORD = 'somepassword']; - ``` - - 1. Create a database-scoped credential with **IDENTITY = 'Managed Service Identity'**. - - ```sql - CREATE DATABASE SCOPED CREDENTIAL msi_cred WITH IDENTITY = 'Managed Service Identity'; - ``` - - - There's no need to specify SECRET with an Azure Storage access key because this mechanism uses [Managed Identity](../../active-directory/managed-identities-azure-resources/overview.md) under the covers. This step isn't required for the dedicated SQL pools within an Azure Synapse Analytics workspace. The system assigned managed identity (SA-MI) of the workspace is a member of the Synapse Administrator role and thus has elevated privileges on the dedicated SQL pools of the workspace. - - - The IDENTITY name must be **'Managed Service Identity'** for PolyBase connectivity to work with an Azure Storage account secured to a virtual network. - - 1. Create an external data source with the `abfss://` scheme for connecting to your general-purpose v2 storage account using PolyBase. 
- - ```SQL - CREATE EXTERNAL DATA SOURCE ext_datasource_with_abfss WITH (TYPE = hadoop, LOCATION = 'abfss://myfile@mystorageaccount.dfs.core.windows.net', CREDENTIAL = msi_cred); - ``` - - - If you already have external tables associated with a general-purpose v1 or Blob Storage account, you should first drop those external tables. Then drop the corresponding external data source. Next, create an external data source with the `abfss://` scheme that connects to a general-purpose v2 storage account, as previously shown. Then re-create all the external tables by using this new external data source. You could use the [Generate and Publish Scripts Wizard](/sql/ssms/scripting/generate-and-publish-scripts-wizard) to generate create-scripts for all the external tables for ease. - - For more information on the `abfss://` scheme, see [Use the Azure Data Lake Storage Gen2 URI](../../storage/blobs/data-lake-storage-introduction-abfs-uri.md). - - For more information on the T-SQL commands, see [CREATE EXTERNAL DATA SOURCE](/sql/t-sql/statements/create-external-data-source-transact-sql). - - 1. Query as normal by using [external tables](/sql/t-sql/statements/create-external-table-transact-sql). - -### SQL Database blob auditing - -Azure SQL auditing can write SQL audit logs to your own storage account. If this storage account uses the virtual network service endpoints feature, see how to [write audit to a storage account behind VNet and firewall](./audit-write-storage-account-behind-vnet-firewall.md). - -## Add a virtual network firewall rule to your Azure SQL server - -Long ago, before this feature was enhanced, you were required to turn on virtual network service endpoints before you could implement a live virtual network rule in the firewall. The endpoints related a given virtual network subnet to a database in SQL Database. As of January 2018, you can circumvent this requirement by setting the **IgnoreMissingVNetServiceEndpoint** flag. 
Now, you can add a virtual network firewall rule to your server without turning on virtual network service endpoints. - -Merely setting a firewall rule doesn't help secure the server. You must also turn on virtual network service endpoints for the security to take effect. When you turn on service endpoints, your virtual network subnet experiences downtime until it completes the transition from turned off to on. This period of downtime is especially true in the context of large virtual networks. You can use the **IgnoreMissingVNetServiceEndpoint** flag to reduce or eliminate the downtime during transition. - -You can set the **IgnoreMissingVNetServiceEndpoint** flag by using PowerShell. For more information, see [PowerShell to create a virtual network service endpoint and rule for SQL Database][sql-db-vnet-service-endpoint-rule-powershell-md-52d]. - - - -> [!NOTE] -> For similar instructions in Azure Synapse Analytics, see [Azure Synapse Analytics IP firewall rules](../../synapse-analytics/security/synapse-workspace-ip-firewall.md) - -## Use Azure portal to create a virtual network rule - -This section illustrates how you can use the [Azure portal][http-azure-portal-link-ref-477t] to create a *virtual network rule* in your database in SQL Database. The rule tells your database to accept communication from a particular subnet that's been tagged as being a *virtual network service endpoint*. - -> [!NOTE] -> If you intend to add a service endpoint to the virtual network firewall rules of your server, first ensure that service endpoints are turned on for the subnet. -> -> If service endpoints aren't turned on for the subnet, the portal asks you to enable them. Select the **Enable** button on the same pane on which you add the rule. - -### Prerequisites - -You must already have a subnet that's tagged with the particular virtual network service endpoint *type name* relevant to SQL Database. - -- The relevant endpoint type name is **Microsoft.Sql**. 
-- If your subnet might not be tagged with the type name, see [Verify your subnet is an endpoint][sql-db-vnet-service-endpoint-rule-powershell-md-a-verify-subnet-is-endpoint-ps-100]. - - - -### Azure portal steps - -1. Sign in to the [Azure portal][http-azure-portal-link-ref-477t]. - -1. Search for and select **SQL servers**, and then select your server. Under **Security**, select **Firewalls and virtual networks**. - - :::image type="content" source="../../sql-database/media/sql-database-vnet-service-endpoint-rule-overview/portal-firewall-vnet-firewalls-and-virtual-networks.png" alt-text="Azure SQL logical server properties, Firewalls and Virtual Networks highlighted" lightbox="../../sql-database/media/sql-database-vnet-service-endpoint-rule-overview/portal-firewall-vnet-firewalls-and-virtual-networks.png"::: - -1. Set **Allow Azure services and resources to access this server** to **No**. - - > [!IMPORTANT] - > If you leave the control set to **ON**, your server accepts communication from any subnet inside the Azure boundary. That is communication that originates from one of the IP addresses that's recognized as those within ranges defined for Azure datacenters. Leaving the control set to **ON** might be excessive access from a security point of view. The Microsoft Azure Virtual Network service endpoint feature in coordination with the virtual network rules feature of SQL Database together can reduce your security surface area. - -1. Select **+ Add existing virtual network** in the **Virtual networks** section. - - :::image type="content" source="../../sql-database/media/sql-database-vnet-service-endpoint-rule-overview/portal-firewall-vnet-add-existing-10.png" alt-text="Screenshot that shows selecting + Add existing (subnet endpoint, as a SQL rule)." lightbox="../../sql-database/media/sql-database-vnet-service-endpoint-rule-overview/portal-firewall-vnet-add-existing-10.png"::: - -1. 
In the new **Create/Update** pane, fill in the boxes with the names of your Azure resources. - - > [!TIP] - > You must include the correct address prefix for your subnet. You can find the **Address prefix** value in the portal. Go to **All resources** > **All types** > **Virtual networks**. The filter displays your virtual networks. Select your virtual network, and then select **Subnets**. The **ADDRESS RANGE** column has the address prefix you need. - - :::image type="content" source="../../sql-database/media/sql-database-vnet-service-endpoint-rule-overview/portal-firewall-create-update-vnet-rule-20.png" alt-text="Screenshot that shows filling in boxes for the new rule." lightbox="../../sql-database/media/sql-database-vnet-service-endpoint-rule-overview/portal-firewall-create-update-vnet-rule-20.png"::: - -1. Select the **OK** button near the bottom of the pane. - -1. See the resulting virtual network rule on the **Firewall** pane. - - :::image type="content" source="../../sql-database/media/sql-database-vnet-service-endpoint-rule-overview/portal-firewall-vnet-result-rule-30.png" alt-text="Screenshot that shows the new rule on the Firewall pane." lightbox="../../sql-database/media/sql-database-vnet-service-endpoint-rule-overview/portal-firewall-vnet-result-rule-30.png"::: - -> [!NOTE] -> The following statuses or states apply to the rules: -> -> - **Ready**: Indicates that the operation you initiated has succeeded. -> - **Failed**: Indicates that the operation you initiated has failed. -> - **Deleted**: Only applies to the Delete operation and indicates that the rule has been deleted and no longer applies. -> - **InProgress**: Indicates that the operation is in progress. The old rule applies while the operation is in this state. 
- - -## Use PowerShell to create a virtual network rule - -A script can also create virtual network rules by using the PowerShell cmdlet `New-AzSqlServerVirtualNetworkRule` or [az network vnet create](/cli/azure/network/vnet#az-network-vnet-create). For more information, see [PowerShell to create a virtual network service endpoint and rule for SQL Database][sql-db-vnet-service-endpoint-rule-powershell-md-52d]. - -## Use REST API to create a virtual network rule - -Internally, the PowerShell cmdlets for SQL virtual network actions call REST APIs. You can call the REST APIs directly. For more information, see [Virtual network rules: Operations][rest-api-virtual-network-rules-operations-862r]. - -## Troubleshoot errors 40914 and 40615 - -Connection error 40914 relates to *virtual network rules*, as specified on the **Firewall** pane in the Azure portal. -Error 40615 is similar, except it relates to *IP address rules* on the firewall. - -### Error 40914 - -**Message text:** "Cannot open server '*[server-name]*' requested by the login. Client is not allowed to access the server." - -**Error description:** The client is in a subnet that has virtual network server endpoints. But the server has no virtual network rule that grants to the subnet the right to communicate with the database. - -**Error resolution:** On the **Firewall** pane of the Azure portal, use the virtual network rules control to [add a virtual network rule](#anchor-how-to-by-using-firewall-portal-59j) for the subnet. - -### Error 40615 - -**Message text:** "Cannot open server '{0}' requested by the login. Client with IP address '{1}' is not allowed to access the server." - -**Error description:** The client is trying to connect from an IP address that isn't authorized to connect to the server. The server firewall has no IP address rule that allows a client to communicate from the given IP address to the database. - -**Error resolution:** Enter the client's IP address as an IP rule. 
Use the **Firewall** pane in the Azure portal to do this step. - - - -## Related articles - -- [Azure virtual network service endpoints][vm-virtual-network-service-endpoints-overview-649d] -- [Server-level and database-level firewall rules][sql-db-firewall-rules-config-715d] - -## Next steps - -- [Use PowerShell to create a virtual network service endpoint and then a virtual network rule for SQL Database][sql-db-vnet-service-endpoint-rule-powershell-md-52d] -- [Virtual network rules: Operations][rest-api-virtual-network-rules-operations-862r] with REST APIs - - -[arm-deployment-model-568f]: ../../azure-resource-manager/management/deployment-models.md -[expressroute-indexmd-744v]:../index.yml -[rbac-what-is-813s]:../../role-based-access-control/overview.md -[sql-db-firewall-rules-config-715d]:firewall-configure.md -[sql-db-vnet-service-endpoint-rule-powershell-md-52d]:scripts/vnet-service-endpoint-rule-powershell-create.md -[sql-db-vnet-service-endpoint-rule-powershell-md-a-verify-subnet-is-endpoint-ps-100]:scripts/vnet-service-endpoint-rule-powershell-create.md#a-verify-subnet-is-endpoint-ps-100 -[vm-configure-private-ip-addresses-for-a-virtual-machine-using-the-azure-portal-321w]: ../virtual-network/virtual-networks-static-private-ip-arm-pportal.md -[vm-virtual-network-service-endpoints-overview-649d]: ../../virtual-network/virtual-network-service-endpoints-overview.md -[vpn-gateway-indexmd-608y]: ../../vpn-gateway/index.yml - - -[http-azure-portal-link-ref-477t]: https://portal.azure.com/ -[rest-api-virtual-network-rules-operations-862r]: /rest/api/sql/virtualnetworkrules - - diff --git a/articles/azure-sql/database/xevent-code-event-file.md b/articles/azure-sql/database/xevent-code-event-file.md deleted file mode 100644 index f3335230a4fd7..0000000000000 --- a/articles/azure-sql/database/xevent-code-event-file.md +++ /dev/null @@ -1,515 +0,0 @@ ---- -title: XEvent Event File code -description: Provides PowerShell and Transact-SQL for a two-phase code sample that 
demonstrates the Event File target in an extended event on Azure SQL Database and SQL Managed Instance. Azure Storage is a required part of this scenario. -services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: PowerShell -ms.topic: sample -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, mathoma -ms.date: 03/25/2022 ---- -# Event File target code for extended events in Azure SQL Database and SQL Managed Instance -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb-sqlmi.md)] - -[!INCLUDE [sql-database-xevents-selectors-1-include](../../../includes/sql-database-xevents-selectors-1-include.md)] - -You want a complete code sample for a robust way to capture and report information for an extended event. - -In Microsoft SQL Server, the [Event File target](/sql/relational-databases/extended-events/targets-for-extended-events-in-sql-server) is used to store event outputs into a local hard drive file. But local storage is not available to Azure SQL Database or SQL Managed Instance. Instead, use Azure Blob Storage to support the Event File target. - -This article presents a two-phase code sample: - -- PowerShell, to create an Azure Storage container in the cloud. -- Transact-SQL: - - To assign the Azure Storage container to an Event File target. - - To create and start the event session, and so on. - -## Prerequisites - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] - -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported by Azure SQL Database, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. - -- An Azure account and subscription. You can sign up for a [free trial](https://azure.microsoft.com/pricing/free-trial/). 
-- Any database you can create a table in. - - - Optionally you can [create an **AdventureWorksLT** demonstration database](single-database-create-quickstart.md) in minutes. - -- SQL Server Management Studio (ssms.exe), ideally its latest monthly update version: [Download SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) - -- You must have the [Azure PowerShell modules](https://go.microsoft.com/?linkid=9811175) installed. - - - The modules provide commands, such as - `New-AzStorageAccount`. - -## Phase 1: PowerShell code for Azure Storage container - -This PowerShell is phase 1 of the two-phase code sample. - -The script starts with commands to clean up after a possible previous run, and is rerunnable. - -1. Paste the PowerShell script into a simple text editor such as Notepad.exe, and save the script as a file with the extension **.ps1**. -2. Start PowerShell ISE as an Administrator. -3. At the prompt, type
    `Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Scope CurrentUser`
    and then press Enter. -4. In PowerShell ISE, open your **.ps1** file. Run the script. -5. The script first starts a new window in which you sign in to Azure. - - - If you rerun the script without disrupting your session, you have the convenient option of commenting out the **Add-AzureAccount** command. - -![PowerShell ISE, with Azure module installed, ready to run script.](./media/xevent-code-event-file/event-file-powershell-ise-b30.png) - -### PowerShell code - -This PowerShell script assumes you've already installed the `Az` module. For information, see [Install the Azure PowerShell module](/powershell/azure/install-Az-ps). - -```powershell -## TODO: Before running, find all 'TODO' and make each edit!! - -cls; - -#--------------- 1 ----------------------- - -'Script assumes you have already logged your PowerShell session into Azure. -But if not, run Connect-AzAccount (or Connect-AzAccount), just one time.'; -#Connect-AzAccount; # Same as Connect-AzAccount. - -#-------------- 2 ------------------------ - -' -TODO: Edit the values assigned to these variables, especially the first few! -'; - -# Ensure the current date is between -# the Expiry and Start time values that you edit here. - -$subscriptionName = 'YOUR_SUBSCRIPTION_NAME'; -$resourceGroupName = 'YOUR_RESOURCE-GROUP-NAME'; - -$policySasExpiryTime = '2018-08-28T23:44:56Z'; -$policySasStartTime = '2017-10-01'; - -$storageAccountLocation = 'YOUR_STORAGE_ACCOUNT_LOCATION'; -$storageAccountName = 'YOUR_STORAGE_ACCOUNT_NAME'; -$containerName = 'YOUR_CONTAINER_NAME'; -$policySasToken = ' ? '; - -$policySasPermission = 'rwl'; # Leave this value alone, as 'rwl'. - -#--------------- 3 ----------------------- - -# The ending display lists your Azure subscriptions. -# One should match the $subscriptionName value you assigned -# earlier in this PowerShell script. 
- -'Choose an existing subscription for the current PowerShell environment.'; - -Select-AzSubscription -Subscription $subscriptionName; - -#-------------- 4 ------------------------ - -' -Clean up the old Azure Storage Account after any previous run, -before continuing this new run.'; - -if ($storageAccountName) { - Remove-AzStorageAccount ` - -Name $storageAccountName ` - -ResourceGroupName $resourceGroupName; -} - -#--------------- 5 ----------------------- - -[System.DateTime]::Now.ToString(); - -' -Create a storage account. -This might take several minutes, will beep when ready. - ...PLEASE WAIT...'; - -New-AzStorageAccount ` - -Name $storageAccountName ` - -Location $storageAccountLocation ` - -ResourceGroupName $resourceGroupName ` - -SkuName 'Standard_LRS'; - -[System.DateTime]::Now.ToString(); -[System.Media.SystemSounds]::Beep.Play(); - -' -Get the access key for your storage account. -'; - -$accessKey_ForStorageAccount = ` - (Get-AzStorageAccountKey ` - -Name $storageAccountName ` - -ResourceGroupName $resourceGroupName - ).Value[0]; - -"`$accessKey_ForStorageAccount = $accessKey_ForStorageAccount"; - -'Azure Storage Account cmdlet completed. -Remainder of PowerShell .ps1 script continues. -'; - -#--------------- 6 ----------------------- - -# The context will be needed to create a container within the storage account. - -'Create a context object from the storage account and its primary access key. -'; - -$context = New-AzStorageContext ` - -StorageAccountName $storageAccountName ` - -StorageAccountKey $accessKey_ForStorageAccount; - -'Create a container within the storage account. -'; - -$containerObjectInStorageAccount = New-AzStorageContainer ` - -Name $containerName ` - -Context $context; - -'Create a security policy to be applied to the SAS token. 
-'; - -New-AzStorageContainerStoredAccessPolicy ` - -Container $containerName ` - -Context $context ` - -Policy $policySasToken ` - -Permission $policySasPermission ` - -ExpiryTime $policySasExpiryTime ` - -StartTime $policySasStartTime; - -' -Generate a SAS token for the container. -'; -try { - $sasTokenWithPolicy = New-AzStorageContainerSASToken ` - -Name $containerName ` - -Context $context ` - -Policy $policySasToken; -} -catch { - $Error[0].Exception.ToString(); -} - -#-------------- 7 ------------------------ - -'Display the values that YOU must edit into the Transact-SQL script next!: -'; - -"storageAccountName: $storageAccountName"; -"containerName: $containerName"; -"sasTokenWithPolicy: $sasTokenWithPolicy"; - -' -REMINDER: sasTokenWithPolicy here might start with "?" character, which you must exclude from Transact-SQL. -'; - -' -(Later, return here to delete your Azure Storage account. See the preceding Remove-AzStorageAccount -Name $storageAccountName)'; - -' -Now shift to the Transact-SQL portion of the two-part code sample!'; - -# EOFile -``` - -Take note of the few named values that the PowerShell script prints when it ends. You must edit those values into the Transact-SQL script that follows as phase 2. - - - -> [!NOTE] -> In the preceding PowerShell code example, SQL extended events are not compatible with the ADLS Gen2 storage accounts. - -## Phase 2: Transact-SQL code that uses Azure Storage container - -- In phase 1 of this code sample, you ran a PowerShell script to create an Azure Storage container. -- Next in phase 2, the following Transact-SQL script must use the container. - -The script starts with commands to clean up after a possible previous run, and is rerunnable. - -The PowerShell script printed a few named values when it ended. You must edit the Transact-SQL script to use those values. Find **TODO** in the Transact-SQL script to locate the edit points. - -1. Open SQL Server Management Studio (ssms.exe). -2. 
Connect to your database in Azure SQL Database or SQL Managed Instance. -3. Select to open a new query pane. -4. Paste the following Transact-SQL script into the query pane. -5. Find every **TODO** in the script and make the appropriate edits. -6. Save, and then run the script. - -> [!WARNING] -> The SAS key value generated by the preceding PowerShell script might begin with a '?' (question mark). When you use the SAS key in the following T-SQL script, you must *remove the leading '?'*. Otherwise your efforts might be blocked by security. - -### Transact-SQL code - -```sql ----- TODO: First, run the earlier PowerShell portion of this two-part code sample. ----- TODO: Second, find every 'TODO' in this Transact-SQL file, and edit each. - ----- Transact-SQL code for Event File target on Azure SQL Database or SQL Managed Instance. - -SET NOCOUNT ON; -GO - ----- Step 1. Establish one little table, and --------- ----- insert one row of data. - -IF EXISTS - (SELECT * FROM sys.objects - WHERE type = 'U' and name = 'gmTabEmployee') -BEGIN - DROP TABLE gmTabEmployee; -END -GO - -CREATE TABLE gmTabEmployee -( - EmployeeGuid uniqueIdentifier not null default newid() primary key, - EmployeeId int not null identity(1,1), - EmployeeKudosCount int not null default 0, - EmployeeDescr nvarchar(256) null -); -GO - -INSERT INTO gmTabEmployee ( EmployeeDescr ) - VALUES ( 'Jane Doe' ); -GO - ------- Step 2. Create key, and ------------ ------- Create credential (your Azure Storage container must already exist). - -IF NOT EXISTS - (SELECT * FROM sys.symmetric_keys - WHERE symmetric_key_id = 101) -BEGIN - CREATE MASTER KEY ENCRYPTION BY PASSWORD = '0C34C960-6621-4682-A123-C7EA08E3FC46' -- Or any newid(). -END -GO - -IF EXISTS - (SELECT * FROM sys.database_scoped_credentials - -- TODO: Assign AzureStorageAccount name, and the associated Container name. 
- WHERE name = 'https://gmstorageaccountxevent.blob.core.windows.net/gmcontainerxevent') -BEGIN - DROP DATABASE SCOPED CREDENTIAL - -- TODO: Assign AzureStorageAccount name, and the associated Container name. - [https://gmstorageaccountxevent.blob.core.windows.net/gmcontainerxevent] ; -END -GO - -CREATE - DATABASE SCOPED - CREDENTIAL - -- use '.blob.', and not '.queue.' or '.table.' etc. - -- TODO: Assign AzureStorageAccount name, and the associated Container name. - [https://gmstorageaccountxevent.blob.core.windows.net/gmcontainerxevent] - WITH - IDENTITY = 'SHARED ACCESS SIGNATURE', -- "SAS" token. - -- TODO: Paste in the long SasToken string here for Secret, but exclude any leading '?'. - SECRET = 'sv=2014-02-14&sr=c&si=gmpolicysastoken&sig=EjAqjo6Nu5xMLEZEkMkLbeF7TD9v1J8DNB2t8gOKTts%3D' - ; -GO - ------- Step 3. Create (define) an event session. -------- ------- The event session has an event with an action, ------- and a has a target. - -IF EXISTS - (SELECT * from sys.database_event_sessions - WHERE name = 'gmeventsessionname240b') -BEGIN - DROP - EVENT SESSION - gmeventsessionname240b - ON DATABASE; -END -GO - -CREATE - EVENT SESSION - gmeventsessionname240b - ON DATABASE - - ADD EVENT - sqlserver.sql_statement_starting - ( - ACTION (sqlserver.sql_text) - WHERE statement LIKE 'UPDATE gmTabEmployee%' - ) - ADD TARGET - package0.event_file - ( - -- TODO: Assign AzureStorageAccount name, and the associated Container name. - -- Also, tweak the .xel file name at end, if you like. - SET filename = - 'https://gmstorageaccountxevent.blob.core.windows.net/gmcontainerxevent/anyfilenamexel242b.xel' - ) - WITH - (MAX_MEMORY = 10 MB, - MAX_DISPATCH_LATENCY = 3 SECONDS) - ; -GO - ------- Step 4. Start the event session. ---------------- ------- Issue the SQL Update statements that will be traced. ------- Then stop the session. - ------- Note: If the target fails to attach, ------- the session must be stopped and restarted. 
- -ALTER - EVENT SESSION - gmeventsessionname240b - ON DATABASE - STATE = START; -GO - -SELECT 'BEFORE_Updates', EmployeeKudosCount, * FROM gmTabEmployee; - -UPDATE gmTabEmployee - SET EmployeeKudosCount = EmployeeKudosCount + 2 - WHERE EmployeeDescr = 'Jane Doe'; - -UPDATE gmTabEmployee - SET EmployeeKudosCount = EmployeeKudosCount + 13 - WHERE EmployeeDescr = 'Jane Doe'; - -SELECT 'AFTER__Updates', EmployeeKudosCount, * FROM gmTabEmployee; -GO - -ALTER - EVENT SESSION - gmeventsessionname240b - ON DATABASE - STATE = STOP; -GO - --------------- Step 5. Select the results. ---------- - -SELECT - *, 'CLICK_NEXT_CELL_TO_BROWSE_ITS_RESULTS!' as [CLICK_NEXT_CELL_TO_BROWSE_ITS_RESULTS], - CAST(event_data AS XML) AS [event_data_XML] -- TODO: In ssms.exe results grid, double-click this cell! - FROM - sys.fn_xe_file_target_read_file - ( - -- TODO: Fill in Storage Account name, and the associated Container name. - -- TODO: The name of the .xel file needs to be an exact match to the files in the storage account Container (You can use Storage Account explorer from the portal to find out the exact file names or you can retrieve the name using the following DMV-query: select target_data from sys.dm_xe_database_session_targets. The 3rd xml-node, "File name", contains the name of the file currently written to.) - 'https://gmstorageaccountxevent.blob.core.windows.net/gmcontainerxevent/anyfilenamexel242b', - null, null, null - ); -GO - --------------- Step 6. Clean up. ---------- - -DROP - EVENT SESSION - gmeventsessionname240b - ON DATABASE; -GO - -DROP DATABASE SCOPED CREDENTIAL - -- TODO: Assign AzureStorageAccount name, and the associated Container name. 
- [https://gmstorageaccountxevent.blob.core.windows.net/gmcontainerxevent] - ; -GO - -DROP TABLE gmTabEmployee; -GO - -PRINT 'Use PowerShell Remove-AzStorageAccount to delete your Azure Storage account!'; -GO -``` - -If the target fails to attach when you run, you must stop and restart the event session: - -```sql -ALTER EVENT SESSION gmeventsessionname240b - ON DATABASE STATE = STOP; -GO -ALTER EVENT SESSION gmeventsessionname240b - ON DATABASE STATE = START; -GO -``` - -## Output - -When the Transact-SQL script completes, select a cell under the **event_data_XML** column header. One **\** element is displayed which shows one UPDATE statement. - -Here is one **\** element that was generated during testing: - -```xml - - - 0 - Normal - - - 5 - - - 148 - - - 368 - - - UPDATE gmTabEmployee - SET EmployeeKudosCount = EmployeeKudosCount + 2 - WHERE EmployeeDescr = 'Jane Doe' - - - - -SELECT 'BEFORE_Updates', EmployeeKudosCount, * FROM gmTabEmployee; - -UPDATE gmTabEmployee - SET EmployeeKudosCount = EmployeeKudosCount + 2 - WHERE EmployeeDescr = 'Jane Doe'; - -UPDATE gmTabEmployee - SET EmployeeKudosCount = EmployeeKudosCount + 13 - WHERE EmployeeDescr = 'Jane Doe'; - -SELECT 'AFTER__Updates', EmployeeKudosCount, * FROM gmTabEmployee; - - - -``` - -The preceding Transact-SQL script used the following system function to read the event_file: - -- [sys.fn_xe_file_target_read_file (Transact-SQL)](/sql/relational-databases/system-functions/sys-fn-xe-file-target-read-file-transact-sql) - -An explanation of advanced options for the viewing of data from extended events is available at: - -- [Advanced Viewing of Target Data from Extended Events](/sql/relational-databases/extended-events/advanced-viewing-of-target-data-from-extended-events-in-sql-server) - -## Converting the code sample to run on SQL Server - -Suppose you wanted to run the preceding Transact-SQL sample on Microsoft SQL Server. 
- -- For simplicity, you would want to completely replace use of the Azure Storage container with a simple file such as `C:\myeventdata.xel`. The file would be written to the local hard drive of the computer that hosts SQL Server. -- You would not need any kind of Transact-SQL statements for **CREATE MASTER KEY** and **CREATE CREDENTIAL**. -- In the **CREATE EVENT SESSION** statement, in its **ADD TARGET** clause, you would replace the Http value assigned made to **filename=** with a full path string like `C:\myfile.xel`. - - - An Azure Storage account is not needed. - -## Next steps - -For more info about accounts and containers in the Azure Storage service, see: - -- [How to use Blob storage from .NET](../../storage/blobs/storage-quickstart-blobs-dotnet.md) -- [Naming and Referencing Containers, Blobs, and Metadata](/rest/api/storageservices/Naming-and-Referencing-Containers--Blobs--and-Metadata) -- [Working with the Root Container](/rest/api/storageservices/Working-with-the-Root-Container) -- [Lesson 1: Create a stored access policy and a shared access signature on an Azure container](/sql/relational-databases/tutorial-use-azure-blob-storage-service-with-sql-server-2016#1---create-stored-access-policy-and-shared-access-storage) - - [Lesson 2: Create a SQL Server credential using a shared access signature](/sql/relational-databases/tutorial-use-azure-blob-storage-service-with-sql-server-2016#2---create-a-sql-server-credential-using-a-shared-access-signature) -- [Extended Events for Microsoft SQL Server](/sql/relational-databases/extended-events/extended-events) diff --git a/articles/azure-sql/database/xevent-code-ring-buffer.md b/articles/azure-sql/database/xevent-code-ring-buffer.md deleted file mode 100644 index 4d87da9fdbaa1..0000000000000 --- a/articles/azure-sql/database/xevent-code-ring-buffer.md +++ /dev/null @@ -1,348 +0,0 @@ ---- -title: XEvent Ring Buffer code -description: Provides a Transact-SQL code sample that is made easy and quick by use of the 
Ring Buffer target, in Azure SQL Database. -services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.custom: sqldbrb=1 -ms.devlang: PowerShell -ms.topic: sample -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, mathoma -ms.date: 12/19/2018 ---- -# Ring Buffer target code for extended events in Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -[!INCLUDE [sql-database-xevents-selectors-1-include](../../../includes/sql-database-xevents-selectors-1-include.md)] - -You want a complete code sample for the easiest quick way to capture and report information for an extended event during a test. The easiest target for extended event data is the [Ring Buffer target](/previous-versions/sql/sql-server-2016/bb630339(v=sql.130)). - -This topic presents a Transact-SQL code sample that: - -1. Creates a table with data to demonstrate with. -2. Creates a session for an existing extended event, namely **sqlserver.sql_statement_starting**. - - * The event is limited to SQL statements that contain a particular Update string: **statement LIKE '%UPDATE tabEmployee%'**. - * Chooses to send the output of the event to a target of type Ring Buffer, namely **package0.ring_buffer**. -3. Starts the event session. -4. Issues a couple of simple SQL UPDATE statements. -5. Issues a SQL SELECT statement to retrieve event output from the Ring Buffer. - - * **sys.dm_xe_database_session_targets** and other dynamic management views (DMVs) are joined. -6. Stops the event session. -7. Drops the Ring Buffer target, to release its resources. -8. Drops the event session and the demo table. - -## Prerequisites - -* An Azure account and subscription. You can sign up for a [free trial](https://azure.microsoft.com/pricing/free-trial/). -* Any database you can create a table in. - - * Optionally you can [create an **AdventureWorksLT** demonstration database](single-database-create-quickstart.md) in minutes. 
-* SQL Server Management Studio (ssms.exe), ideally its latest monthly update version. - You can download the latest ssms.exe from: - - * Topic titled [Download SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms). - * [A direct link to the download.](https://go.microsoft.com/fwlink/?linkid=616025) - -## Code sample - -With very minor modification, the following Ring Buffer code sample can be run on either Azure SQL Database or Microsoft SQL Server. The difference is the presence of the node '_database' in the name of some dynamic management views (DMVs), used in the FROM clause in Step 5. For example: - -* sys.dm_xe_database_session_targets -* sys.dm_xe_session_targets - -  - -```sql -GO ----- Transact-SQL. ----- Step set 1. - -SET NOCOUNT ON; -GO - - -IF EXISTS - (SELECT * FROM sys.objects - WHERE type = 'U' and name = 'tabEmployee') -BEGIN - DROP TABLE tabEmployee; -END -GO - - -CREATE TABLE tabEmployee -( - EmployeeGuid uniqueIdentifier not null default newid() primary key, - EmployeeId int not null identity(1,1), - EmployeeKudosCount int not null default 0, - EmployeeDescr nvarchar(256) null -); -GO - - -INSERT INTO tabEmployee ( EmployeeDescr ) - VALUES ( 'Jane Doe' ); -GO - ----- Step set 2. - - -IF EXISTS - (SELECT * from sys.database_event_sessions - WHERE name = 'eventsession_gm_azuresqldb51') -BEGIN - DROP EVENT SESSION eventsession_gm_azuresqldb51 - ON DATABASE; -END -GO - - -CREATE - EVENT SESSION eventsession_gm_azuresqldb51 - ON DATABASE - ADD EVENT - sqlserver.sql_statement_starting - ( - ACTION (sqlserver.sql_text) - WHERE statement LIKE '%UPDATE tabEmployee%' - ) - ADD TARGET - package0.ring_buffer - (SET - max_memory = 500 -- Units of KB. - ); -GO - ----- Step set 3. - - -ALTER EVENT SESSION eventsession_gm_azuresqldb51 - ON DATABASE - STATE = START; -GO - ----- Step set 4. 
- - -SELECT 'BEFORE_Updates', EmployeeKudosCount, * FROM tabEmployee; - -UPDATE tabEmployee - SET EmployeeKudosCount = EmployeeKudosCount + 102; - -UPDATE tabEmployee - SET EmployeeKudosCount = EmployeeKudosCount + 1015; - -SELECT 'AFTER__Updates', EmployeeKudosCount, * FROM tabEmployee; -GO - ----- Step set 5. - - -SELECT - se.name AS [session-name], - ev.event_name, - ac.action_name, - st.target_name, - se.session_source, - st.target_data, - CAST(st.target_data AS XML) AS [target_data_XML] -FROM - sys.dm_xe_database_session_event_actions AS ac - - INNER JOIN sys.dm_xe_database_session_events AS ev ON ev.event_name = ac.event_name - AND CAST(ev.event_session_address AS BINARY(8)) = CAST(ac.event_session_address AS BINARY(8)) - - INNER JOIN sys.dm_xe_database_session_object_columns AS oc - ON CAST(oc.event_session_address AS BINARY(8)) = CAST(ac.event_session_address AS BINARY(8)) - - INNER JOIN sys.dm_xe_database_session_targets AS st - ON CAST(st.event_session_address AS BINARY(8)) = CAST(ac.event_session_address AS BINARY(8)) - - INNER JOIN sys.dm_xe_database_sessions AS se - ON CAST(ac.event_session_address AS BINARY(8)) = CAST(se.address AS BINARY(8)) -WHERE - oc.column_name = 'occurrence_number' - AND - se.name = 'eventsession_gm_azuresqldb51' - AND - ac.action_name = 'sql_text' -ORDER BY - se.name, - ev.event_name, - ac.action_name, - st.target_name, - se.session_source -; -GO - ----- Step set 6. - - -ALTER EVENT SESSION eventsession_gm_azuresqldb51 - ON DATABASE - STATE = STOP; -GO - ----- Step set 7. - - -ALTER EVENT SESSION eventsession_gm_azuresqldb51 - ON DATABASE - DROP TARGET package0.ring_buffer; -GO - ----- Step set 8. - - -DROP EVENT SESSION eventsession_gm_azuresqldb51 - ON DATABASE; -GO - -DROP TABLE tabEmployee; -GO -``` - -  - -## Ring Buffer contents - -We used `ssms.exe` to run the code sample. - -To view the results, we clicked the cell under the column header **target_data_XML**. 
- -Then in the results pane we clicked the cell under the column header **target_data_XML**. This click created another file tab in ssms.exe in which the content of the result cell was displayed, as XML. - -The output is shown in the following block. It looks long, but it is just two **\** elements. - -  - -```xml - - - - - 0 - Normal - - - - 7 - - - - 184 - - - - 328 - - - - UPDATE tabEmployee - SET EmployeeKudosCount = EmployeeKudosCount + 102 - - - - ----- Step set 4. - - -SELECT 'BEFORE_Updates', EmployeeKudosCount, * FROM tabEmployee; - -UPDATE tabEmployee - SET EmployeeKudosCount = EmployeeKudosCount + 102; - -UPDATE tabEmployee - SET EmployeeKudosCount = EmployeeKudosCount + 1015; - -SELECT 'AFTER__Updates', EmployeeKudosCount, * FROM tabEmployee; - - - - - - - 0 - Normal - - - - 10 - - - - 340 - - - - 486 - - - - UPDATE tabEmployee - SET EmployeeKudosCount = EmployeeKudosCount + 1015 - - - - ----- Step set 4. - - -SELECT 'BEFORE_Updates', EmployeeKudosCount, * FROM tabEmployee; - -UPDATE tabEmployee - SET EmployeeKudosCount = EmployeeKudosCount + 102; - -UPDATE tabEmployee - SET EmployeeKudosCount = EmployeeKudosCount + 1015; - -SELECT 'AFTER__Updates', EmployeeKudosCount, * FROM tabEmployee; - - - - -``` - -### Release resources held by your Ring Buffer - -When you are done with your Ring Buffer, you can remove it and release its resources issuing an **ALTER** like the following: - -```sql -ALTER EVENT SESSION eventsession_gm_azuresqldb51 - ON DATABASE - DROP TARGET package0.ring_buffer; -GO -``` - -The definition of your event session is updated, but not dropped. Later you can add another instance of the Ring Buffer to your event session: - -```sql -ALTER EVENT SESSION eventsession_gm_azuresqldb51 - ON DATABASE - ADD TARGET - package0.ring_buffer - (SET - max_memory = 500 -- Units of KB. 
- ); -``` - -## More information - -The primary topic for extended events on Azure SQL Database is: - -* [Extended event considerations in Azure SQL Database](xevent-db-diff-from-svr.md), which contrasts some aspects of extended events that differ between Azure SQL Database versus Microsoft SQL Server. - -Other code sample topics for extended events are available at the following links. However, you must routinely check any sample to see whether the sample targets Microsoft SQL Server versus Azure SQL Database. Then you can decide whether minor changes are needed to run the sample. - -* Code sample for Azure SQL Database: [Event File target code for extended events in Azure SQL Database](xevent-code-event-file.md) - - \ No newline at end of file diff --git a/articles/azure-sql/database/xevent-db-diff-from-svr.md b/articles/azure-sql/database/xevent-db-diff-from-svr.md deleted file mode 100644 index 36069d1d9229c..0000000000000 --- a/articles/azure-sql/database/xevent-db-diff-from-svr.md +++ /dev/null @@ -1,197 +0,0 @@ ---- -title: Extended events -description: Describes extended events (XEvents) in Azure SQL Database, and how event sessions differ slightly from event sessions in Microsoft SQL Server. -services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: reference -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: kendralittle, mathoma -ms.date: 07/23/2021 ---- -# Extended events in Azure SQL Database -[!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)] - -[!INCLUDE [sql-database-xevents-selectors-1-include](../../../includes/sql-database-xevents-selectors-1-include.md)] - -The feature set of extended events in Azure SQL Database is a robust subset of the features on SQL Server and Azure SQL Managed Instance. - -*XEvents* is an informal nickname that is sometimes used for 'extended events' in blogs and other informal locations. 
- -Additional information about extended events is available at: - -- [Quick Start: Extended events in SQL Server](/sql/relational-databases/extended-events/quick-start-extended-events-in-sql-server) -- [Extended Events](/sql/relational-databases/extended-events/extended-events) - -## Prerequisites - -This article assumes you already have some knowledge of: - -- [Azure SQL Database](https://azure.microsoft.com/services/sql-database/) -- [Extended events](/sql/relational-databases/extended-events/extended-events) - -- The bulk of our documentation about extended events applies to SQL Server, Azure SQL Database, and Azure SQL Managed Instance. - -Prior exposure to the following items is helpful when choosing the Event File as the [target](#AzureXEventsTargets): - -- [Azure Storage service](https://azure.microsoft.com/services/storage/) - -- [Azure PowerShell with Azure Storage](/powershell/module/az.storage/) - -## Code samples - -Related articles provide two code samples: - -- [Ring Buffer target code for extended events in Azure SQL Database](xevent-code-ring-buffer.md) - - - Short simple Transact-SQL script. - - We emphasize in the code sample article that, when you are done with a Ring Buffer target, you should release its resources by executing an alter-drop `ALTER EVENT SESSION ... ON DATABASE DROP TARGET ...;` statement. Later you can add another instance of Ring Buffer by `ALTER EVENT SESSION ... ON DATABASE ADD TARGET ...`. - -- [Event File target code for extended events in Azure SQL Database](xevent-code-event-file.md) - - - Phase 1 is PowerShell to create an Azure Storage container. - - Phase 2 is Transact-SQL that uses the Azure Storage container. - -## Transact-SQL differences - -- When you execute the [CREATE EVENT SESSION](/sql/t-sql/statements/create-event-session-transact-sql) command on SQL Server, you use the **ON SERVER** clause. But on Azure SQL Database you use the **ON DATABASE** clause instead. 
-- The **ON DATABASE** clause also applies to the [ALTER EVENT SESSION](/sql/t-sql/statements/alter-event-session-transact-sql) and [DROP EVENT SESSION](/sql/t-sql/statements/drop-event-session-transact-sql) Transact-SQL commands. - -- A best practice is to include the event session option of **STARTUP_STATE = ON** in your **CREATE EVENT SESSION** or **ALTER EVENT SESSION** statements. - - The **= ON** value supports an automatic restart after a reconfiguration of the logical database due to a failover. - -## New catalog views - -The extended events feature is supported by several [catalog views](/sql/relational-databases/system-catalog-views/catalog-views-transact-sql). Catalog views tell you about *metadata or definitions* of user-created event sessions in the current database. The views do not return information about instances of active event sessions. - -| Name of
    catalog view | Description | -|:--- |:--- | -| `sys.database_event_session_actions` |Returns a row for each action on each event of an event session. | -| `sys.database_event_session_events` |Returns a row for each event in an event session. | -| `sys.database_event_session_fields` |Returns a row for each customize-able column that was explicitly set on events and targets. | -| `sys.database_event_session_targets` |Returns a row for each event target for an event session. | -| `sys.database_event_sessions` |Returns a row for each event session in the database. | - -In Microsoft SQL Server, similar catalog views have names that include *.server\_* instead of *.database\_*. The name pattern is like `sys.server_event_%`. - -## New dynamic management views [(DMVs)](/sql/relational-databases/system-dynamic-management-views/system-dynamic-management-views) - -Azure SQL Database has [dynamic management views (DMVs)](/sql/relational-databases/system-dynamic-management-views/extended-events-dynamic-management-views) that support extended events. DMVs tell you about *active* event sessions. - -| Name of DMV | Description | -|:--- |:--- | -| `sys.dm_xe_database_session_event_actions` |Returns information about event session actions. | -| `sys.dm_xe_database_session_events` |Returns information about session events. | -| `sys.dm_xe_database_session_object_columns` |Shows the configuration values for objects that are bound to a session. | -| `sys.dm_xe_database_session_targets` |Returns information about session targets. | -| `sys.dm_xe_database_sessions` |Returns a row for each event session that is scoped to the current database. | - -In Microsoft SQL Server, similar catalog views are named without the *\_database* portion of the name, such as: - -- `sys.dm_xe_sessions` instead of `sys.dm_xe_database_sessions`. 
- -### DMVs common to both - -For extended events there are additional DMVs that are common to Azure SQL Database, Azure SQL Managed Instance, and Microsoft SQL Server: - -- `sys.dm_xe_map_values` -- `sys.dm_xe_object_columns` -- `sys.dm_xe_objects` -- `sys.dm_xe_packages` - - - -## Find the available extended events, actions, and targets - -To obtain a list of the available events, actions, and target, use the sample query: - -```sql -SELECT - o.object_type, - p.name AS [package_name], - o.name AS [db_object_name], - o.description AS [db_obj_description] - FROM - sys.dm_xe_objects AS o - INNER JOIN sys.dm_xe_packages AS p ON p.guid = o.package_guid - WHERE - o.object_type in - ( - 'action', 'event', 'target' - ) - ORDER BY - o.object_type, - p.name, - o.name; -``` - -   - -## Targets for your Azure SQL Database event sessions - -Here are targets that can capture results from your event sessions on Azure SQL Database: - -- [Ring Buffer target](/sql/relational-databases/extended-events/targets-for-extended-events-in-sql-server#ring_buffer-target) - Briefly holds event data in memory. -- [Event Counter target](/sql/relational-databases/extended-events/targets-for-extended-events-in-sql-server#event_counter-target) - Counts all events that occur during an extended events session. -- [Event File target](/sql/relational-databases/extended-events/targets-for-extended-events-in-sql-server#event_file-target) - Writes complete buffers to an Azure Storage container. - -The [Event Tracing for Windows (ETW)](/dotnet/framework/wcf/samples/etw-tracing) API is not available for extended events on Azure SQL Database. - -## Restrictions - -There are a couple of security-related differences befitting the cloud environment of Azure SQL Database: - -- Extended events are founded on the single-tenant isolation model. An event session in one database cannot access data or events from another database. 
-- You cannot issue a `CREATE EVENT SESSION` statement in the context of the `master` database. - -## Permission model - -You must have **Control** permission on the database to issue a `CREATE EVENT SESSION` statement. The database owner (dbo) has **Control** permission. - -### Storage container authorizations - -The SAS token you generate for your Azure Storage container must specify **rwl** for the permissions. The **rwl** value provides the following permissions: - -- Read -- Write -- List - -## Performance considerations - -There are scenarios where intensive use of extended events can accumulate more active memory than is healthy for the overall system. Therefore Azure SQL Database dynamically sets and adjusts limits on the amount of active memory that can be accumulated by an event session. Many factors go into the dynamic calculation. - -There is a cap on memory available to XEvent sessions in Azure SQL Database: - - In single Azure SQL Database in the DTU purchasing model, each database can use up to 128 MB. This is raised to 256 MB only in the Premium tier. - - In single Azure SQL Database in the vCore purchasing model, each database can use up to 128 MB. - - In an elastic pool, individual databases are limited by the single database limits, and in total they cannot exceed 512 MB. - -If you receive an error message that says a memory maximum was enforced, some corrective actions you can take are: - -- Run fewer concurrent event sessions. -- Through your **CREATE** and **ALTER** statements for event sessions, reduce the amount of memory you specify on the **MAX\_MEMORY** clause. - -### Network latency - -The **Event File** target might experience network latency or failures while persisting data to Azure Storage blobs. Other events in Azure SQL Database might be delayed while they wait for the network communication to complete. This delay can slow your workload. 
- -- To mitigate this performance risk, avoid setting the **EVENT_RETENTION_MODE** option to **NO_EVENT_LOSS** in your event session definitions. - -## Related links - -- [Azure Storage Cmdlets](/powershell/module/Azure.Storage) -- [Using Azure PowerShell with Azure Storage](/powershell/module/az.storage/) -- [How to use Blob storage from .NET](../../storage/blobs/storage-quickstart-blobs-dotnet.md) -- [CREATE CREDENTIAL (Transact-SQL)](/sql/t-sql/statements/create-credential-transact-sql) -- [CREATE EVENT SESSION (Transact-SQL)](/sql/t-sql/statements/create-event-session-transact-sql) -- The Azure *Service Updates* webpage, narrowed by parameter to Azure SQL Database: - - [https://azure.microsoft.com/updates/?service=sql-database](https://azure.microsoft.com/updates/?service=sql-database) - - \ No newline at end of file diff --git a/articles/azure-sql/glossary-terms.md b/articles/azure-sql/glossary-terms.md deleted file mode 100644 index 06ab2faa6c1c3..0000000000000 --- a/articles/azure-sql/glossary-terms.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Glossary of terms -titleSuffix: Azure SQL Database & SQL Managed Instance -description: A glossary of terms for working with Azure SQL Database, Azure SQL Managed Instance, and SQL on Azure VM. 
-services: sql-database -ms.service: sql-db-mi -ms.subservice: service-overview -ms.custom: sqldbrb=4 -ms.devlang: -ms.topic: reference -author: MashaMSFT -ms.author: mathoma -ms.reviewer: kendralittle -ms.date: 04/06/2022 ---- -# Azure SQL glossary of terms -[!INCLUDE[appliesto-asf](./includes/appliesto-asf.md)] - -## Azure SQL Database - -|Context|Term|Definition| -|:---|:---|:---| -|Azure service|Azure SQL Database |[Azure SQL Database](database/sql-database-paas-overview.md) is a fully managed platform as a service (PaaS) database that handles most database management functions such as upgrading, patching, backups, and monitoring without user involvement.| -|Database engine | |The database engine used in Azure SQL Database is the most recent stable version of the same database engine shipped as the Microsoft SQL Server product. Some database engine features are exclusive to Azure SQL Database or are available before they are shipped with SQL Server. The database engine is configured and optimized for use in the cloud. In addition to core database functionality, Azure SQL Database provides cloud-native capabilities such as hyperscale and serverless compute.| -|Server entity| Logical server | A [logical server](database/logical-servers.md) is a construct that acts as a central administrative point for a collection of databases in Azure SQL Database and Azure Synapse Analytics. All databases managed by a server are created in the same region as the server. A server is a purely logical concept: a logical server is *not* a machine running an instance of the database engine. There is no instance-level access or instance features for a server. | -|Deployment option ||Databases may be deployed individually or as part of an elastic pool. You may move existing databases in and out of elastic pools. 
| -||Elastic pool|[Elastic pools](database/elastic-pool-overview.md) are a simple, cost-effective solution for managing and scaling multiple databases that have varying and unpredictable usage demands. The databases in an elastic pool are on a single logical server. The databases share a set allocation of resources at a set price.| -||Single database|If you deploy [single databases](database/single-database-overview.md), each database is isolated, using a dedicated database engine. Each has its own service tier within your selected purchasing model and a compute size defining the resources allocated to the database engine.| -|Purchasing model|| Azure SQL Database has two purchasing models. The purchasing model defines how you scale your database and how you are billed for compute, storage, etc. | -||DTU-based purchasing model|The [Database Transaction Unit (DTU)-based purchasing model](database/service-tiers-dtu.md) is based on a bundled measure of compute, storage, and I/O resources. Compute sizes are expressed in DTUs for single databases and in elastic database transaction units (eDTUs) for elastic pools. | -||vCore-based purchasing model (recommended)| A virtual core (vCore) represents a logical CPU. The [vCore-based purchasing model](database/service-tiers-vcore.md) offers greater control over the hardware configuration to better match compute and memory requirements of the workload, pricing discounts for [Azure Hybrid Benefit (AHB)](azure-hybrid-benefit.md) and [Reserved Instance (RI)](database/reserved-capacity-overview.md), more granular scaling, and greater transparency in hardware details. Newer capabilities (for example, hyperscale, serverless) are only available in the vCore model. | -|Service tier|| The service tier defines the storage architecture, storage and I/O limits, and business continuity options. Options for service tiers vary by purchasing model. 
| -||DTU-based service tiers | [Basic, standard, and premium service tiers](database/service-tiers-dtu.md#compare-service-tiers) are available in the DTU-based purchasing model.| -||vCore-based service tiers (recommended) |[General purpose, business critical, and hyperscale service tiers](database/service-tiers-sql-database-vcore.md#service-tiers) are available in the vCore-based purchasing model (recommended).| -|Compute tier|| The compute tier determines whether resources are continuously available (provisioned) or autoscaled (serverless). Compute tier availability varies by purchasing model and service tier. Only the vCore purchasing model's general purpose service tier makes serverless compute available.| -||Provisioned compute|The [provisioned compute tier](database/service-tiers-sql-database-vcore.md#compute-tiers) provides a specific amount of compute resources that are continuously provisioned independent of workload activity. Under the provisioned compute tier, you are billed at a fixed price per hour. -||Serverless compute| The [serverless compute tier](database/serverless-tier-overview.md) autoscales compute resources based on workload activity and bills for the amount of compute used per second. Azure SQL Database serverless is currently available in the vCore purchasing model's general purpose service tier with Gen5 hardware or newer.| -|Hardware configuration| Available hardware configurations | The vCore-based purchasing model allows you to select the appropriate hardware configuration for your workload. [Hardware configuration options](database/service-tiers-sql-database-vcore.md#hardware-configuration) include Gen5, M-series, Fsv2-series, and DC-series.| -|Compute size (service objective) ||Compute size (service objective) is the amount of CPU, memory, and storage resources available for a single database or elastic pool. Compute size also defines resource consumption limits, such as maximum IOPS, maximum log rate, etc. 
-||vCore-based sizing options| Configure the compute size for your database or elastic pool by selecting the appropriate service tier, compute tier, and hardware for your workload. When using an elastic pool, configure the reserved vCores for the pool, and optionally configure per-database settings. For sizing options and resource limits in the vCore-based purchasing model, see [vCore single databases](database/resource-limits-vcore-single-databases.md), and [vCore elastic pools](database/resource-limits-vcore-elastic-pools.md).| -||DTU-based sizing options| Configure the compute size for your database or elastic pool by selecting the appropriate service tier and selecting the maximum data size and number of DTUs. When using an elastic pool, configure the reserved eDTUs for the pool, and optionally configure per-database settings. For sizing options and resource limits in the DTU-based purchasing model, see [DTU single databases](database/resource-limits-dtu-single-databases.md) and [DTU elastic pools](database/resource-limits-dtu-elastic-pools.md). - - -## Azure SQL Managed Instance - -|Context|Term|More information| -|:---|:---|:---| -|Azure service|Azure SQL Managed Instance | [Azure SQL Managed Instance](managed-instance/sql-managed-instance-paas-overview.md) is a fully managed platform as a service (PaaS) deployment option of Azure SQL. It gives you an instance of SQL Server, including the SQL Server Agent, but removes much of the overhead of managing a virtual machine. Most of the features available in SQL Server are available in SQL Managed Instance. [Compare the features in Azure SQL Database and Azure SQL Managed Instance](database/features-comparison.md). | -|Database engine | |The database engine used in Azure SQL Managed Instance has near 100% compatibility with the latest SQL Server (Enterprise Edition) database engine. 
Some database engine features are exclusive to managed instances or are available in managed instances before they are shipped with SQL Server. Managed instances provide cloud-native capabilities and integrations such as a native [virtual network (VNet)](../virtual-network/virtual-networks-overview.md) implementation, automatic patching and version updates, [automated backups](database/automated-backups-overview.md), and [high availability](database/high-availability-sla.md). | -|Server entity|Managed instance | Each managed instance is itself an instance of SQL Server. Databases created on a managed instance are colocated with one another, and you may run cross-database queries. You can connect to the managed instance and use instance-level features such as linked servers and the SQL Server Agent. | -|Deployment option ||Managed instances may be deployed individually or as part of an instance pools (preview). Managed instances cannot currently be moved into, between, or out of instance pools.| -||Single instance| A single [managed instance](managed-instance/sql-managed-instance-paas-overview.md) is deployed to a dedicated set of isolated virtual machines that run inside the customer's virtual network subnet. These machines form a [virtual cluster](managed-instance/connectivity-architecture-overview.md#high-level-connectivity-architecture). Multiple managed instances can be deployed into a single virtual cluster if desired. | -||Instance pool (preview)|[Instance pools](managed-instance/instance-pools-overview.md) enable you to deploy multiple managed instances to the same virtual machine. Instance pools enable you to migrate smaller and less compute-intensive workloads to the cloud without consolidating them in a single larger managed instance. | -|Purchasing model|vCore-based purchasing model| SQL Managed Instance is available under the [vCore-based purchasing model](managed-instance/service-tiers-managed-instance-vcore.md). 
[Azure Hybrid Benefit](azure-hybrid-benefit.md) is available for managed instances. | -|Service tier| vCore-based service tiers| SQL Managed Instance offers two service tiers. Both service tiers guarantee 99.99% availability and enable you to independently select storage size and compute capacity. Select either the [general purpose or business critical service tier](managed-instance/sql-managed-instance-paas-overview.md#service-tiers) for a managed instance based upon your performance and latency requirements.| -|Compute|Provisioned compute| SQL Managed Instance provides a specific amount of [compute resources](managed-instance/service-tiers-managed-instance-vcore.md#compute) that are continuously provisioned independent of workload activity, and bills for the amount of compute provisioned at a fixed price per hour. | -|Hardware configuration|Available hardware configurations| SQL Managed Instance [hardware configurations](managed-instance/service-tiers-managed-instance-vcore.md#hardware-configurations) include standard-series (Gen5), premium-series, and memory optimized premium-series hardware. | -|Compute size | vCore-based sizing options | Compute size (service objective) is the maximum amount of CPU, memory, and storage resources available for a single managed instance or instance pool. Configure the compute size for your managed instance by selecting the appropriate service tier and hardware for your workload. Learn about [resource limits for managed instances](managed-instance/resource-limits.md). | - - -## SQL Server on Azure VMs -|Context|Term|More information| -|:---|:---|:---| -|Azure service|SQL Server on Azure Virtual Machines (VMs) | [SQL Server on Azure VMs](virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md) enables you to use full versions of SQL Server in the cloud without having to manage any on-premises hardware. SQL Server VMs simplify licensing costs when you pay as you go. 
You have both SQL Server and OS access with some automated manageability features for SQL Server VMs, such as the [ SQL IaaS Agent extension](virtual-machines/windows/sql-server-iaas-agent-extension-automate-management.md).| -| Server entity | Virtual machine or VM | Azure VMs run in many geographic regions around the world. They also offer various machine sizes. The virtual machine image gallery allows you to create a SQL Server VM with the right version, edition, and operating system. | -| Image | Windows VMs or Linux VMs | You can choose to deploy SQL Server VMs with [Windows-based images](virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md) or [Linux-based images](virtual-machines/linux/sql-server-on-linux-vm-what-is-iaas-overview.md). Image selection specifies both the OS version and SQL Server edition for your SQL Server VM. | -| Pricing | | Pricing for SQL Server on Azure VMs is based on SQL Server licensing, operating system (OS), and virtual machine cost. You can [reduce costs](virtual-machines/windows/pricing-guidance.md#reduce-costs) by optimizing your VM size and shutting down your VM when possible. | -| | SQL Server licensing cost | Choose the appropriate [free](virtual-machines/windows/pricing-guidance.md#free-licensed-sql-server-editions) or [paid](virtual-machines/windows/pricing-guidance.md#paid-sql-server-editions) SQL Server edition for your usage and requirements. For paid editions, you may [pay per usage](virtual-machines/windows/pricing-guidance.md#pay-per-usage) (also known as pay as you go) or use [Azure Hybrid Benefit](virtual-machines/windows/licensing-model-azure-hybrid-benefit-ahb-change.md). | -| | OS and virtual machine cost | OS and virtual machine cost is based upon factors including your choice of image, VM size, and storage configuration. | -| VM configuration | | You need to configure settings including security, storage, and high availability/disaster recovery for your SQL Server VM. 
The easiest way to configure a SQL Server VM is to use one of our Marketplace images, but you can also use this [quick checklist](virtual-machines/windows/performance-guidelines-best-practices-checklist.md) for a series of best practices and guidelines to navigate these choices. | -| | VM size | [VM size](virtual-machines/windows/performance-guidelines-best-practices-vm-size.md) determines processing power, memory, and storage capacity. You can [collect a performance baseline](virtual-machines/windows/performance-guidelines-best-practices-collect-baseline.md) and/or use the [SKU recommendation](/sql/dma/dma-sku-recommend-sql-db) tool to help select the best VM size for your workload. | -| | Storage configuration | Your storage configuration options are determined by your selection of VM size and selection of storage settings including disk type, caching settings, and disk striping. Learn how to choose a VM size with [enough storage scalability](virtual-machines/windows/performance-guidelines-best-practices-storage.md) for your workload and a mixture of disks (usually in a storage pool) that meet the capacity and performance requirements of your business. | -| | Security considerations | You can enable Microsoft Defender for SQL, integrate Azure Key Vault, control access, and secure connections to your SQL Server VM. Learn [security guidelines](virtual-machines/windows/security-considerations-best-practices.md) to establish secure access to SQL Server VMs. | -| SQL IaaS Agent extension | | The [SQL IaaS Agent extension](virtual-machines/windows/sql-server-iaas-agent-extension-automate-management.md) (SqlIaasExtension) runs on SQL Server VMs to automate management and administration tasks. There's no extra cost associated with the extension. 
| -| | Automated patching | [Automated Patching](virtual-machines/windows/automated-patching.md) establishes a maintenance window for a SQL Server VM when security updates will be automatically applied by the SQL IaaS Agent extension. Note that there may be other mechanisms for applying Automatic Updates. If you configure automated patching using the SQL IaaS Agent extension you should ensure that there are no other conflicting update schedules. | -| | Automated backup | [Automated Backup v2](virtual-machines/windows/automated-backup.md) automatically configures Managed Backup to Microsoft Azure for all existing and new databases on a SQL Server VM running SQL Server 2016 or later Standard, Enterprise, or Developer editions. | diff --git a/articles/azure-sql/identify-query-performance-issues.md b/articles/azure-sql/identify-query-performance-issues.md deleted file mode 100644 index 8022888e16391..0000000000000 --- a/articles/azure-sql/identify-query-performance-issues.md +++ /dev/null @@ -1,228 +0,0 @@ ---- -title: Types of query performance issues -titleSuffix: Azure SQL Database & Azure SQL Managed Instance -description: Learn about types of query performance issues in Azure SQL Database and Azure SQL Managed Instance, and how to identify and resolve queries with these issues. -services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.custom: -ms.devlang: -ms.topic: troubleshooting -author: NikaKinska -ms.author: nnikolic -ms.reviewer: mathoma, wiassaf, kendralittle -ms.date: 03/18/2022 ---- - -# Detectable types of query performance bottlenecks in Azure SQL Database and Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](includes/appliesto-sqldb-sqlmi.md)] - -When trying to resolve a performance bottleneck, start by determining whether the bottleneck is occurring while the query is in a running state or a waiting state. Different resolutions apply depending upon this determination. 
Use the following diagram to help understand the factors that can cause either a running-related problem or a waiting-related problem. Problems and resolutions relating to each type of problem are discussed in this article. - -You can use [Intelligent Insights](database/intelligent-insights-troubleshoot-performance.md#detectable-database-performance-patterns) or SQL Server [DMVs](database/monitoring-with-dmvs.md) to detect these types of performance bottlenecks. - -![Workload states](./media/identify-query-performance-issues/workload-states.png) - -**Running-related problems**: Running-related problems are generally related to compilation problems resulting in a suboptimal query plan or execution problems related to insufficient or overused resources. -**Waiting-related problems**: Waiting-related problems are generally related to: - -- Locks (blocking) -- I/O -- Contention related to tempdb usage -- Memory grant waits - -## Compilation problems resulting in a suboptimal query plan - -A suboptimal plan generated by the SQL Query Optimizer may be the cause of slow query performance. The SQL Query Optimizer might produce a suboptimal plan because of a missing index, stale statistics, an incorrect estimate of the number of rows to be processed, or an inaccurate estimate of the required memory. If you know the query was executed faster in the past or on another instance, compare the actual execution plans to see if they're different. - -- Identify any missing indexes using one of these methods: - - - Use [Intelligent Insights](database/intelligent-insights-troubleshoot-performance.md#missing-index). - - Review recommendations in the [Database Advisor](database/database-advisor-implement-performance-recommendations.md) for single and pooled databases in Azure SQL Database. You may also choose to enable [automatic tuning options for tuning indexes](database/automatic-tuning-overview.md#automatic-tuning-options) for Azure SQL Database. 
- - Missing indexes in DMVs and query execution plans. This article shows you how to [detect and tune nonclustered indexes using missing index requests](/sql/relational-databases/indexes/tune-nonclustered-missing-index-suggestions). -- Try to [update statistics](/sql/t-sql/statements/update-statistics-transact-sql) or [rebuild indexes](/sql/relational-databases/indexes/reorganize-and-rebuild-indexes) to get the better plan. Enable [automatic plan correction](/azure/azure-sql/database/automatic-tuning-overview) in Azure SQL Database or Azure SQL Managed Instance to automatically mitigate these problems. -- As an advanced troubleshooting step, use [Query Store hints](/sql/relational-databases/performance/query-store-hints) to apply [query hints](/sql/t-sql/queries/hints-transact-sql-query) using the Query Store, without making code changes. - - This [example](database/performance-guidance.md#query-tuning-and-hinting) shows the impact of a suboptimal query plan due to a parameterized query, how to detect this condition, and how to use a query hint to resolve. - -- Try changing the database compatibility level and implementing intelligent query processing. The SQL Query Optimizer may generate a different query plan depending upon the compatibility level for your database. Higher compatibility levels provide more [intelligent query processing capabilities](/sql/relational-databases/performance/intelligent-query-processing). - - - For more information on query processing, see [Query Processing Architecture Guide](/sql/relational-databases/query-processing-architecture-guide). - - To change database compatibility levels and read more about the differences between compatibility levels, see [ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql-compatibility-level). 
- - To read more about cardinality estimation, see [Cardinality Estimation](/sql/relational-databases/performance/cardinality-estimation-sql-server) - -## Resolving queries with suboptimal query execution plans - -The following sections discuss how to resolve queries with suboptimal query execution plan. - -### Queries that have parameter sensitive plan (PSP) problems - -A parameter sensitive plan (PSP) problem happens when the query optimizer generates a query execution plan that's optimal only for a specific parameter value (or set of values) and the cached plan is then not optimal for parameter values that are used in consecutive executions. Plans that aren't optimal can then cause query performance problems and degrade overall workload throughput. - -For more information on parameter sniffing and query processing, see the [Query-processing architecture guide](/sql/relational-databases/query-processing-architecture-guide#ParamSniffing). - -Several workarounds can mitigate PSP problems. Each workaround has associated tradeoffs and drawbacks: - -- Use the [RECOMPILE](/sql/t-sql/queries/hints-transact-sql-query) query hint at each query execution. This workaround trades compilation time and increased CPU for better plan quality. The `RECOMPILE` option is often not possible for workloads that require a high throughput. -- Use the [OPTION (OPTIMIZE FOR…)](/sql/t-sql/queries/hints-transact-sql-query) query hint to override the actual parameter value with a typical parameter value that produces a plan that's good enough for most parameter value possibilities. This option requires a good understanding of optimal parameter values and associated plan characteristics. -- Use the [OPTION (OPTIMIZE FOR UNKNOWN)](/sql/t-sql/queries/hints-transact-sql-query) query hint to override the actual parameter value and instead use the density vector average. 
You can also do this by capturing the incoming parameter values in local variables and then using the local variables within the predicates instead of using the parameters themselves. For this fix, the average density must be *good enough*. -- Disable parameter sniffing entirely by using the [DISABLE_PARAMETER_SNIFFING](/sql/t-sql/queries/hints-transact-sql-query) query hint. -- Use the [KEEPFIXEDPLAN](/sql/t-sql/queries/hints-transact-sql-query) query hint to prevent recompilations in cache. This workaround assumes that the good-enough common plan is the one in cache already. You can also disable automatic statistics updates to reduce the chances that the good plan will be evicted and a new bad plan will be compiled. -- Force the plan by explicitly using the [USE PLAN](/sql/t-sql/queries/hints-transact-sql-query) query hint by rewriting the query and adding the hint in the query text. Or set a specific plan by using Query Store or by enabling [automatic tuning](/azure/azure-sql/database/automatic-tuning-overview). -- Replace the single procedure with a nested set of procedures that can each be used based on conditional logic and the associated parameter values. -- Create dynamic string execution alternatives to a static procedure definition. - -For more information about resolving PSP problems, see these blog posts: - -- [I smell a parameter](/archive/blogs/queryoptteam/i-smell-a-parameter) -- [Conor vs. dynamic SQL vs. procedures vs. plan quality for parameterized queries](/archive/blogs/conor_cunningham_msft/conor-vs-dynamic-sql-vs-procedures-vs-plan-quality-for-parameterized-queries) - -### Compile activity caused by improper parameterization - -When a query has literals, either the database engine automatically parameterizes the statement or a user explicitly parameterizes the statement to reduce the number of compilations. A high number of compilations for a query using the same pattern but different literal values can result in high CPU usage. 
Similarly, if you only partially parameterize a query that continues to have literals, the database engine doesn't parameterize the query further. - -Here's an example of a partially parameterized query: - -```sql -SELECT * -FROM t1 JOIN t2 ON t1.c1 = t2.c1 -WHERE t1.c1 = @p1 AND t2.c2 = '961C3970-0E54-4E8E-82B6-5545BE897F8F'; -``` - -In this example, `t1.c1` takes `@p1`, but `t2.c2` continues to take GUID as literal. In this case, if you change the value for `c2`, the query is treated as a different query, and a new compilation will happen. To reduce compilations in this example, you would also parameterize the GUID. - -The following query shows the count of queries by query hash to determine whether a query is properly parameterized: - -```sql -SELECT TOP 10 - q.query_hash - , count (distinct p.query_id ) AS number_of_distinct_query_ids - , min(qt.query_sql_text) AS sampled_query_text -FROM sys.query_store_query_text AS qt - JOIN sys.query_store_query AS q - ON qt.query_text_id = q.query_text_id - JOIN sys.query_store_plan AS p - ON q.query_id = p.query_id - JOIN sys.query_store_runtime_stats AS rs - ON rs.plan_id = p.plan_id - JOIN sys.query_store_runtime_stats_interval AS rsi - ON rsi.runtime_stats_interval_id = rs.runtime_stats_interval_id -WHERE - rsi.start_time >= DATEADD(hour, -2, GETUTCDATE()) - AND query_parameterization_type_desc IN ('User', 'None') -GROUP BY q.query_hash -ORDER BY count (distinct p.query_id) DESC; -``` - -### Factors that affect query plan changes - -A query execution plan recompilation might result in a generated query plan that differs from the original cached plan. 
An existing original plan might be automatically recompiled for various reasons: - -- Changes in the schema referenced by the query -- Data changes to the tables referenced by the query -- Query context options were changed - -A compiled plan might be ejected from the cache for various reasons, such as: - -- Instance restarts -- Database-scoped configuration changes -- Memory pressure -- Explicit requests to clear the cache - -If you use a RECOMPILE hint, a plan won't be cached. - -A recompilation (or fresh compilation after cache eviction) can still result in the generation of a query execution plan that's identical to the original. When the plan changes from the prior or original plan, these explanations are likely: - -- **Changed physical design**: For example, newly created indexes more effectively cover the requirements of a query. The new indexes might be used on a new compilation if the query optimizer decides that using that new index is more optimal than using the data structure that was originally selected for the first version of the query execution. Any physical changes to the referenced objects might result in a new plan choice at compile time. - -- **Server resource differences**: When a plan in one system differs from the plan in another system, resource availability, such as the number of available processors, can influence which plan gets generated. For example, if one system has more processors, a parallel plan might be chosen. For more information on parallelism in Azure SQL Database, see [Configure the max degree of parallelism (MAXDOP) in Azure SQL Database](database/configure-max-degree-of-parallelism.md). - -- **Different statistics**: The statistics associated with the referenced objects might have changed or might be materially different from the original system's statistics. If the statistics change and a recompilation happens, the query optimizer uses the statistics starting from when they changed.
The revised statistics' data distributions and frequencies might differ from those of the original compilation. These changes are used to create cardinality estimates. (*Cardinality estimates* are the number of rows that are expected to flow through the logical query tree.) Changes to cardinality estimates might lead you to choose different physical operators and associated orders of operations. Even minor changes to statistics can result in a changed query execution plan. - -- **Changed database compatibility level or cardinality estimator version**: Changes to the database compatibility level can enable new strategies and features that might result in a different query execution plan. Beyond the database compatibility level, a disabled or enabled trace flag 4199 or a changed state of the database-scoped configuration QUERY_OPTIMIZER_HOTFIXES can also influence query execution plan choices at compile time. Trace flags 9481 (force legacy CE) and 2312 (force default CE) also affect the plan. - -## Resource limits issues - -Slow query performance not related to suboptimal query plans and missing indexes is generally related to insufficient or overused resources. If the query plan is optimal, the query (and the database) might be hitting the resource limits for the database, elastic pool, or managed instance. An example might be excess log write throughput for the service level. - -- Detecting resource issues using the Azure portal: To see if resource limits are the problem, see [SQL Database resource monitoring](database/monitor-tune-overview.md#azure-sql-database-and-azure-sql-managed-instance-resource-monitoring). For single databases and elastic pools, see [Database Advisor performance recommendations](database/database-advisor-implement-performance-recommendations.md) and [Query Performance Insights](database/query-performance-insight-use.md).
-- Detecting resource limits using [Intelligent Insights](database/intelligent-insights-troubleshoot-performance.md#reaching-resource-limits) -- Detecting resource issues using [DMVs](database/monitoring-with-dmvs.md): - - - The [sys.dm_db_resource_stats](database/monitoring-with-dmvs.md#monitor-resource-use) DMV returns CPU, I/O, and memory consumption for the database. One row exists for every 15-second interval, even if there's no activity in the database. Historical data is maintained for one hour. - - The [sys.resource_stats](database/monitoring-with-dmvs.md#monitor-resource-use) DMV returns CPU usage and storage data for Azure SQL Database. The data is collected and aggregated in five-minute intervals. - - [Many individual queries that cumulatively consume high CPU](database/monitoring-with-dmvs.md#many-individual-queries-that-cumulatively-consume-high-cpu) - -If you identify the problem as insufficient resource, you can upgrade resources to increase the capacity of your database to absorb the CPU requirements. For more information, see [Scale single database resources in Azure SQL Database](database/single-database-scale.md) and [Scale elastic pool resources in Azure SQL Database](database/elastic-pool-scale.md). For information about scaling a managed instance, see [Service-tier resource limits](managed-instance/resource-limits.md#service-tier-characteristics) - -## Performance problems caused by increased workload volume - -An increase in application traffic and workload volume can cause increased CPU usage. But you must be careful to properly diagnose this problem. When you see a high-CPU problem, answer these questions to determine whether the increase is caused by changes to the workload volume: - -- Are the queries from the application the cause of the high-CPU problem? 
-- For the [top CPU-consuming queries that you can identify](database/monitoring-with-dmvs.md#the-cpu-issue-occurred-in-the-past): - - - Were multiple execution plans associated with the same query? If so, why? - - For queries with the same execution plan, were the execution times consistent? Did the execution count increase? If so, the workload increase is likely causing performance problems. - -In summary, if the query execution plan didn't execute differently but CPU usage increased along with execution count, the performance problem is likely related to a workload increase. - -It's not always easy to identify a workload volume change that's driving a CPU problem. Consider these factors: - -- **Changed resource usage**: For example, consider a scenario where CPU usage increased to 80 percent for an extended period of time. CPU usage alone doesn't mean the workload volume changed. Regressions in the query execution plan and changes in data distribution can also contribute to more resource usage even though the application executes the same workload. - -- **The appearance of a new query**: An application might drive a new set of queries at different times. - -- **An increase or decrease in the number of requests**: This scenario is the most obvious measure of a workload. The number of queries doesn't always correspond to more resource utilization. However, this metric is still a significant signal, assuming other factors are unchanged. - -Use Intelligent Insights to detect [workload increases](database/intelligent-insights-troubleshoot-performance.md#workload-increase) and [plan regressions](database/intelligent-insights-troubleshoot-performance.md#plan-regression). - -- **Parallelism**: Excessive parallelism can worsen other concurrent workload performance by starving other queries of CPU and worker thread resources.
For more information on parallelism in Azure SQL Database, see [Configure the max degree of parallelism (MAXDOP) in Azure SQL Database](database/configure-max-degree-of-parallelism.md). - -## Waiting-related problems - -Once you have eliminated a suboptimal plan and *Waiting-related* problems that are related to execution problems, the performance problem is generally that the queries are waiting for some resource. Waiting-related problems might be caused by: - -- **Blocking**: - - One query might hold the lock on objects in the database while others try to access the same objects. You can identify blocking queries by using [DMVs](database/monitoring-with-dmvs.md#monitoring-blocked-queries) or [Intelligent Insights](database/intelligent-insights-troubleshoot-performance.md#locking). For more information, see [Understand and resolve Azure SQL blocking problems](database/understand-resolve-blocking.md). -- **IO problems** - - Queries might be waiting for the pages to be written to the data or log files. In this case, check the `INSTANCE_LOG_RATE_GOVERNOR`, `WRITE_LOG`, or `PAGEIOLATCH_*` wait statistics in the DMV. See using DMVs to [identify IO performance issues](database/monitoring-with-dmvs.md#identify-io-performance-issues). -- **Tempdb problems** - - If the workload uses temporary tables or there are `tempdb` spills in the plans, the queries might have a problem with `tempdb` throughput. To investigate further, review [identify tempdb issues](database/monitoring-with-dmvs.md#identify-tempdb-performance-issues). -- **Memory-related problems** - - If the workload doesn't have enough memory, the page life expectancy might drop, or the queries might get less memory than they need. In some cases, built-in intelligence in Query Optimizer will fix memory-related problems. See using DMVs to [identify memory grant issues](database/monitoring-with-dmvs.md#identify-memory-grant-wait-performance-issues).
For more information and sample queries, see [Troubleshoot out of memory errors with Azure SQL Database](database/troubleshoot-memory-errors-issues.md). If you encounter out of memory errors, review [sys.dm_os_out_of_memory_events](/sql/relational-databases/system-dynamic-management-views/sys-dm-os-out-of-memory-events). - -### Methods to show top wait categories - -These methods are commonly used to show the top categories of wait types: - -- Use Intelligent Insights to identify queries with performance degradation due to [increased waits](database/intelligent-insights-troubleshoot-performance.md#increased-wait-statistic) -- Use [Query Store](/sql/relational-databases/performance/monitoring-performance-by-using-the-query-store) to find wait statistics for each query over time. In Query Store, wait types are combined into wait categories. You can find the mapping of wait categories to wait types in [sys.query_store_wait_stats](/sql/relational-databases/system-catalog-views/sys-query-store-wait-stats-transact-sql#wait-categories-mapping-table). -- Use [sys.dm_db_wait_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-db-wait-stats-azure-sql-database) to return information about all the waits encountered by threads that executed during a query operation. You can use this aggregated view to diagnose performance problems with Azure SQL Database and also with specific queries and batches. Queries can be waiting on resources, queue waits, or external waits. -- Use [sys.dm_os_waiting_tasks](/sql/relational-databases/system-dynamic-management-views/sys-dm-os-waiting-tasks-transact-sql) to return information about the queue of tasks that are waiting on some resource. - -In high-CPU scenarios, Query Store and wait statistics might not reflect CPU usage if: - -- High-CPU-consuming queries are still executing. -- The high-CPU-consuming queries were running when a failover happened. 
- -DMVs that track Query Store and wait statistics show results for only successfully completed and timed-out queries. They don't show data for currently executing statements until the statements finish. Use the dynamic management view [sys.dm_exec_requests](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-requests-transact-sql) to track currently executing queries and the associated worker time. - -> [!TIP] -> Additional tools: -> -> - [TigerToolbox waits and latches](https://github.com/Microsoft/tigertoolbox/tree/master/Waits-and-Latches) -> - [TigerToolbox usp_whatsup](https://github.com/Microsoft/tigertoolbox/tree/master/usp_WhatsUp) - -## Next steps - -- [Configure the max degree of parallelism (MAXDOP) in Azure SQL Database](database/configure-max-degree-of-parallelism.md) -- [Understand and resolve Azure SQL Database blocking problems in Azure SQL Database](database/understand-resolve-blocking.md) -- [Diagnose and troubleshoot high CPU on Azure SQL Database](database/high-cpu-diagnose-troubleshoot.md) -- [SQL Database monitoring and tuning overview](database/monitor-tune-overview.md) -- [Monitoring Microsoft Azure SQL Database and Azure SQL Managed Instance performance using dynamic management views](database/monitoring-with-dmvs.md) -- [Tune nonclustered indexes with missing index suggestions](/sql/relational-databases/indexes/tune-nonclustered-missing-index-suggestions) \ No newline at end of file diff --git a/articles/azure-sql/in-memory-oltp-configure.md b/articles/azure-sql/in-memory-oltp-configure.md deleted file mode 100644 index e6f8917682b1b..0000000000000 --- a/articles/azure-sql/in-memory-oltp-configure.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -title: In-Memory OLTP improves SQL txn perf -description: Adopt In-Memory OLTP to improve transactional performance in an existing database in Azure SQL Database and Azure SQL Managed Instance. 
-services: sql-database -ms.service: sql-database -ms.custom: sqldbrb=2 -ms.subservice: performance -ms.topic: how-to -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: mathoma, kendralittle -ms.date: 11/07/2018 ---- -# Use In-Memory OLTP to improve your application performance in Azure SQL Database and Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](includes/appliesto-sqldb-sqlmi.md)] - -[In-Memory OLTP](in-memory-oltp-overview.md) can be used to improve the performance of transaction processing, data ingestion, and transient data scenarios, in [Premium and Business Critical tier](database/service-tiers-vcore.md) databases without increasing the pricing tier. - -> [!NOTE] -> Learn how [Quorum doubles key database's workload while lowering DTU by 70% with Azure SQL Database](https://customers.microsoft.com/story/quorum-doubles-key-databases-workload-while-lowering-dtu-with-sql-database) - -Follow these steps to adopt In-Memory OLTP in your existing database. - -## Step 1: Ensure you are using a Premium and Business Critical tier database - -In-Memory OLTP is supported only in Premium and Business Critical tier databases. In-Memory is supported if the returned result is 1 (not 0): - -```sql -SELECT DatabasePropertyEx(Db_Name(), 'IsXTPSupported'); -``` - -*XTP* stands for *Extreme Transaction Processing* - -## Step 2: Identify objects to migrate to In-Memory OLTP - -SSMS includes a **Transaction Performance Analysis Overview** report that you can run against a database with an active workload. The report identifies tables and stored procedures that are candidates for migration to In-Memory OLTP. - -In SSMS, to generate the report: - -* In the **Object Explorer**, right-click your database node. -* Click **Reports** > **Standard Reports** > **Transaction Performance Analysis Overview**. 
- -For more information, see [Determining if a Table or Stored Procedure Should Be Ported to In-Memory OLTP](/sql/relational-databases/in-memory-oltp/determining-if-a-table-or-stored-procedure-should-be-ported-to-in-memory-oltp). - -## Step 3: Create a comparable test database - -Suppose the report indicates your database has a table that would benefit from being converted to a memory-optimized table. We recommend that you first confirm the indication by testing. - -You need a test copy of your production database. The test database should be at the same service tier level as your production database. - -To ease testing, tweak your test database as follows: - -1. Connect to the test database by using SSMS. -2. To avoid needing the WITH (SNAPSHOT) option in queries, set the database option as shown in the following T-SQL statement: - - ```sql - ALTER DATABASE CURRENT - SET - MEMORY_OPTIMIZED_ELEVATE_TO_SNAPSHOT = ON; - ``` - -## Step 4: Migrate tables - -You must create and populate a memory-optimized copy of the table you want to test. You can create it by using either: - -* The handy Memory Optimization Wizard in SSMS. -* Manual T-SQL. - -### Memory Optimization Wizard in SSMS - -To use this migration option: - -1. Connect to the test database with SSMS. -2. In the **Object Explorer**, right-click on the table, and then click **Memory Optimization Advisor**. - - The **Table Memory Optimizer Advisor** wizard is displayed. -3. In the wizard, click **Migration validation** (or the **Next** button) to see if the table has any features that are unsupported in memory-optimized tables. For more information, see: - - * The *memory optimization checklist* in [Memory Optimization Advisor](/sql/relational-databases/in-memory-oltp/memory-optimization-advisor). - * [Transact-SQL Constructs Not Supported by In-Memory OLTP](/sql/relational-databases/in-memory-oltp/transact-sql-constructs-not-supported-by-in-memory-oltp).
- * [Migrating to In-Memory OLTP](/sql/relational-databases/in-memory-oltp/plan-your-adoption-of-in-memory-oltp-features-in-sql-server). -4. If the table has no unsupported features, the advisor can perform the actual schema and data migration for you. - -### Manual T-SQL - -To use this migration option: - -1. Connect to your test database by using SSMS (or a similar utility). -2. Obtain the complete T-SQL script for your table and its indexes. - - * In SSMS, right-click your table node. - * Click **Script Table As** > **CREATE To** > **New Query Window**. -3. In the script window, add WITH (MEMORY_OPTIMIZED = ON) to the CREATE TABLE statement. -4. If there is a CLUSTERED index, change it to NONCLUSTERED. -5. Rename the existing table by using SP_RENAME. -6. Create the new memory-optimized copy of the table by running your edited CREATE TABLE script. -7. Copy the data to your memory-optimized table by using INSERT...SELECT * INTO: - -```sql -INSERT INTO [] - SELECT * FROM []; -``` - -## Step 5 (optional): Migrate stored procedures - -The In-Memory feature can also modify a stored procedure for improved performance. - -### Considerations with natively compiled stored procedures - -A natively compiled stored procedure must have the following options on its T-SQL WITH clause: - -* NATIVE_COMPILATION -* SCHEMABINDING: meaning tables that the stored procedure references cannot have their column definitions changed in any way that would affect the stored procedure, unless you drop the stored procedure. - -A native module must use one big [ATOMIC block](/sql/relational-databases/in-memory-oltp/atomic-blocks-in-native-procedures) for transaction management. There is no role for an explicit BEGIN TRANSACTION, or for ROLLBACK TRANSACTION. If your code detects a violation of a business rule, it can terminate the atomic block with a [THROW](/sql/t-sql/language-elements/throw-transact-sql) statement.
- -### Typical CREATE PROCEDURE for natively compiled - -Typically the T-SQL to create a natively compiled stored procedure is similar to the following template: - -```sql -CREATE PROCEDURE schemaname.procedurename - @param1 type1, … - WITH NATIVE_COMPILATION, SCHEMABINDING - AS - BEGIN ATOMIC WITH - (TRANSACTION ISOLATION LEVEL = SNAPSHOT, - LANGUAGE = N'your_language__see_sys.languages' - ) - … - END; -``` - -* For the TRANSACTION_ISOLATION_LEVEL, SNAPSHOT is the most common value for the natively compiled stored procedure. However, a subset of the other values is also supported: - - * REPEATABLE READ - * SERIALIZABLE -* The LANGUAGE value must be present in the sys.languages view. - -### How to migrate a stored procedure - -The migration steps are: - -1. Obtain the CREATE PROCEDURE script to the regular interpreted stored procedure. -2. Rewrite its header to match the previous template. -3. Ascertain whether the stored procedure T-SQL code uses any features that are not supported for natively compiled stored procedures. Implement workarounds if necessary. - - For details see [Migration Issues for Natively Compiled Stored Procedures](/sql/relational-databases/in-memory-oltp/a-guide-to-query-processing-for-memory-optimized-tables). -4. Rename the old stored procedure by using SP_RENAME. Or simply DROP it. -5. Run your edited CREATE PROCEDURE T-SQL script. - -## Step 6: Run your workload in test - -Run a workload in your test database that is similar to the workload that runs in your production database. This should reveal the performance gain achieved by your use of the In-Memory feature for tables and stored procedures. - -Major attributes of the workload are: - -* Number of concurrent connections. -* Read/write ratio. - -To tailor and run the test workload, consider using the handy `ostress.exe` tool, which is illustrated in this [in-memory](in-memory-oltp-overview.md) article.
- -To minimize network latency, run your test in the same Azure geographic region where the database exists. - -## Step 7: Post-implementation monitoring - -Consider monitoring the performance effects of your In-Memory implementations in production: - -* [Monitor In-Memory storage](in-memory-oltp-monitor-space.md). -* [Monitoring using dynamic management views](database/monitoring-with-dmvs.md) - -## Related links - -* [In-Memory OLTP (In-Memory Optimization)](/sql/relational-databases/in-memory-oltp/in-memory-oltp-in-memory-optimization) -* [Introduction to Natively Compiled Stored Procedures](/sql/relational-databases/in-memory-oltp/a-guide-to-query-processing-for-memory-optimized-tables) -* [Memory Optimization Advisor](/sql/relational-databases/in-memory-oltp/memory-optimization-advisor) diff --git a/articles/azure-sql/in-memory-oltp-monitor-space.md b/articles/azure-sql/in-memory-oltp-monitor-space.md deleted file mode 100644 index 6b323b220e144..0000000000000 --- a/articles/azure-sql/in-memory-oltp-monitor-space.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: Monitor XTP In-memory storage -description: Estimate and monitor XTP In-memory storage use, capacity; resolve capacity error 41823 -services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: how-to -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: mathoma, kendralittle -ms.date: 01/25/2019 ---- -# Monitor In-Memory OLTP storage in Azure SQL Database and Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](includes/appliesto-sqldb-sqlmi.md)] - -When using [In-Memory OLTP](in-memory-oltp-overview.md), data in memory-optimized tables and table variables resides in In-Memory OLTP storage. - -## Determine whether data fits within the In-Memory OLTP storage cap - -Determine the storage caps of the different service tiers. Each Premium and Business Critical service tier has a maximum In-Memory OLTP storage size. 
- -- [DTU-based resource limits - single database](database/resource-limits-dtu-single-databases.md) -- [DTU-based resource limits - elastic pools](database/resource-limits-dtu-elastic-pools.md) -- [vCore-based resource limits - single databases](database/resource-limits-vcore-single-databases.md) -- [vCore-based resource limits - elastic pools](database/resource-limits-vcore-elastic-pools.md) -- [vCore-based resource limits - managed instance](managed-instance/resource-limits.md) - -Estimating memory requirements for a memory-optimized table works the same way for SQL Server as it does in Azure SQL Database and Azure SQL Managed Instance. Take a few minutes to review [Estimate memory requirements](/sql/relational-databases/in-memory-oltp/estimate-memory-requirements-for-memory-optimized-tables). - -Table and table variable rows, as well as indexes, count toward the max user data size. In addition, ALTER TABLE needs enough room to create a new version of the entire table and its indexes. - -Once this limit is exceeded, insert and update operations may start failing with error 41823 for single databases in Azure SQL Database and databases in Azure SQL Managed Instance, and error 41840 for elastic pools in Azure SQL Database. At that point you need to either delete data to reclaim memory, or upgrade the service tier or compute size of your database. - -## Monitoring and alerting - -You can monitor In-memory storage use as a percentage of the storage cap for your compute size in the [Azure portal](https://portal.azure.com/): - -1. On the Database blade, locate the Resource utilization box and click on Edit. -2. Select the metric `In-Memory OLTP Storage percentage`. -3. To add an alert, click on the Resource Utilization box to open the Metric blade, then click on Add alert. 
- -Or use the following query to show the In-memory storage utilization: - -```sql - SELECT xtp_storage_percent FROM sys.dm_db_resource_stats -``` - -## Correct out-of-In-Memory OLTP storage situations - Errors 41823 and 41840 - -Hitting the In-Memory OLTP storage cap in your database results in INSERT, UPDATE, ALTER and CREATE operations failing with error message 41823 (for single databases) or error 41840 (for elastic pools). Both errors cause the active transaction to abort. - -Error messages 41823 and 41840 indicate that the memory-optimized tables and table variables in the database or pool have reached the maximum In-Memory OLTP storage size. - -To resolve this error, either: - -- Delete data from the memory-optimized tables, potentially offloading the data to traditional, disk-based tables; or, -- Upgrade the service tier to one with enough in-memory storage for the data you need to keep in memory-optimized tables. - -> [!NOTE] -> In rare cases, errors 41823 and 41840 can be transient, meaning there is enough available In-Memory OLTP storage, and retrying the operation succeeds. We therefore recommend to both monitor the overall available In-Memory OLTP storage and to retry when first encountering error 41823 or 41840. For more information about retry logic, see [Conflict Detection and Retry Logic with In-Memory OLTP](/sql/relational-databases/In-memory-oltp/transactions-with-memory-optimized-tables#conflict-detection-and-retry-logic). - -## Next steps - -For monitoring guidance, see [Monitoring using dynamic management views](database/monitoring-with-dmvs.md). 
\ No newline at end of file diff --git a/articles/azure-sql/in-memory-oltp-overview.md b/articles/azure-sql/in-memory-oltp-overview.md deleted file mode 100644 index 7306ead6c6b4b..0000000000000 --- a/articles/azure-sql/in-memory-oltp-overview.md +++ /dev/null @@ -1,198 +0,0 @@ ---- -title: In-memory technologies -description: In-memory technologies greatly improve the performance of transactional and analytics workloads in Azure SQL Database and Azure SQL Managed Instance. -services: sql-database -ms.service: sql-db-mi -ms.subservice: performance -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: conceptual -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: mathoma, kendralittle -ms.date: 10/18/2021 ---- -# Optimize performance by using in-memory technologies in Azure SQL Database and Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](includes/appliesto-sqldb-sqlmi.md)] - -In-memory technologies enable you to improve performance of your application, and potentially reduce cost of your database. - -## When to use in-memory technologies - -By using in-memory technologies, you can achieve performance improvements with various workloads: - -- **Transactional** (online transactional processing (OLTP)) where most of the requests read or update smaller set of data (for example, CRUD operations). -- **Analytic** (online analytical processing (OLAP)) where most of the queries have complex calculations for the reporting purposes, with a certain number of queries that load and append data to the existing tables (so called bulk-load), or delete the data from the tables. -- **Mixed** (hybrid transaction/analytical processing (HTAP)) where both OLTP and OLAP queries are executed on the same set of data. 
- -In-memory technologies can improve performance of these workloads by keeping the data that should be processed into the memory, using native compilation of the queries, or advanced processing such as batch processing and SIMD instructions that are available on the underlying hardware. - -## Overview - -Azure SQL Database and Azure SQL Managed Instance have the following in-memory technologies: - -- *[In-Memory OLTP](/sql/relational-databases/in-memory-oltp/in-memory-oltp-in-memory-optimization)* increases number of transactions per second and reduces latency for transaction processing. Scenarios that benefit from In-Memory OLTP are: high-throughput transaction processing such as trading and gaming, data ingestion from events or IoT devices, caching, data load, and temporary table and table variable scenarios. -- *Clustered columnstore indexes* reduce your storage footprint (up to 10 times) and improve performance for reporting and analytics queries. You can use it with fact tables in your data marts to fit more data in your database and improve performance. Also, you can use it with historical data in your operational database to archive and be able to query up to 10 times more data. -- *Nonclustered columnstore indexes* for HTAP help you to gain real-time insights into your business through querying the operational database directly, without the need to run an expensive extract, transform, and load (ETL) process and wait for the data warehouse to be populated. Nonclustered columnstore indexes allow fast execution of analytics queries on the OLTP database, while reducing the impact on the operational workload. -- *Memory-optimized clustered columnstore indexes* for HTAP enables you to perform fast transaction processing, and to *concurrently* run analytics queries very quickly on the same data. - -Both columnstore indexes and In-Memory OLTP have been part of the SQL Server product since 2012 and 2014, respectively. 
Azure SQL Database, Azure SQL Managed Instance, and SQL Server share the same implementation of in-memory technologies. - -## Benefits of in-memory technology - -Because of the more efficient query and transaction processing, in-memory technologies also help you to reduce cost. You typically don't need to upgrade the pricing tier of the database to achieve performance gains. In some cases, you might even be able to reduce the pricing tier, while still seeing performance improvements with in-memory technologies. - -By using In-Memory OLTP, [Quorum Business Solutions was able to double their workload while lowering DTU by 70%](https://resources.quorumsoftware.com/case-studies/quorum-doubles-key-database-s-workload-while-lowering-dtu). For more information, see the blog post: [In-Memory OLTP](https://azure.microsoft.com/blog/in-memory-oltp-in-azure-sql-database/). - -> [!NOTE] -> In-memory technologies are available in the Premium and Business Critical tiers. - -This article describes aspects of In-Memory OLTP and columnstore indexes that are specific to Azure SQL Database and Azure SQL Managed Instance, and also includes samples: - -- You'll see the impact of these technologies on storage and data size limits. -- You'll see how to manage the movement of databases that use these technologies between the different pricing tiers. -- You'll see two samples that illustrate the use of In-Memory OLTP, as well as columnstore indexes.
- -For more information about in-memory in SQL Server, see: - -- [In-Memory OLTP Overview and Usage Scenarios](/sql/relational-databases/in-memory-oltp/overview-and-usage-scenarios) (includes references to customer case studies and information to get started) -- [Documentation for In-Memory OLTP](/sql/relational-databases/in-memory-oltp/in-memory-oltp-in-memory-optimization) -- [Columnstore Indexes Guide](/sql/relational-databases/indexes/columnstore-indexes-overview) -- Hybrid transactional/analytical processing (HTAP), also known as [real-time operational analytics](/sql/relational-databases/indexes/get-started-with-columnstore-for-real-time-operational-analytics) - -## In-Memory OLTP - -In-Memory OLTP technology provides extremely fast data access operations by keeping all data in memory. It also uses specialized indexes, native compilation of queries, and latch-free data-access to improve performance of the OLTP workload. There are two ways to organize your In-Memory OLTP data: - -- **Memory-optimized rowstore** format where every row is a separate memory object. This is a classic In-Memory OLTP format optimized for high-performance OLTP workloads. There are two types of memory-optimized tables that can be used in the memory-optimized rowstore format: - - - *Durable tables* (SCHEMA_AND_DATA) where the rows placed in memory are preserved after server restart. This type of tables behaves like a traditional rowstore table with the additional benefits of in-memory optimizations. - - *Non-durable tables* (SCHEMA_ONLY) where the rows are not-preserved after restart. This type of table is designed for temporary data (for example, replacement of temp tables), or tables where you need to quickly load data before you move it to some persisted table (so called staging tables). - -- **Memory-optimized columnstore** format where data is organized in a columnar format. 
This structure is designed for HTAP scenarios where you need to run analytic queries on the same data structure where your OLTP workload is running. - -> [!Note] -> In-Memory OLTP technology is designed for the data structures that can fully reside in memory. Since the In-memory data cannot be offloaded to disk, make sure that you are using database that has enough memory. See [Data size and storage cap for In-Memory OLTP](#data-size-and-storage-cap-for-in-memory-oltp) for more details. - -- A quick primer on In-Memory OLTP: [Quickstart 1: In-Memory OLTP Technologies for Faster T-SQL Performance](/sql/relational-databases/in-memory-oltp/survey-of-initial-areas-in-in-memory-oltp). - -There is a programmatic way to understand whether a given database supports In-Memory OLTP. You can execute the following Transact-SQL query: - -```sql -SELECT DatabasePropertyEx(DB_NAME(), 'IsXTPSupported'); -``` - -If the query returns **1**, In-Memory OLTP is supported in this database. The following queries identify all objects that need to be removed before a database can be downgraded to General Purpose, Standard, or Basic: - -```sql -SELECT * FROM sys.tables WHERE is_memory_optimized=1 -SELECT * FROM sys.table_types WHERE is_memory_optimized=1 -SELECT * FROM sys.sql_modules WHERE uses_native_compilation=1 -``` - -### Data size and storage cap for In-Memory OLTP - -In-Memory OLTP includes memory-optimized tables, which are used for storing user data. These tables are required to fit in memory. Because you manage memory directly in SQL Database, we have the concept of a quota for user data. This idea is referred to as *In-Memory OLTP storage*. - -Each supported single database pricing tier and each elastic pool pricing tier includes a certain amount of In-Memory OLTP storage. 
- -- [DTU-based resource limits - single database](database/resource-limits-dtu-single-databases.md) -- [DTU-based resource limits - elastic pools](database/resource-limits-dtu-elastic-pools.md) -- [vCore-based resource limits - single databases](database/resource-limits-vcore-single-databases.md) -- [vCore-based resource limits - elastic pools](database/resource-limits-vcore-elastic-pools.md) -- [vCore-based resource limits - managed instance](managed-instance/resource-limits.md) - -The following items count toward your In-Memory OLTP storage cap: - -- Active user data rows in memory-optimized tables and table variables. Note that old row versions don't count toward the cap. -- Indexes on memory-optimized tables. -- Operational overhead of ALTER TABLE operations. - -If you hit the cap, you receive an out-of-quota error, and you are no longer able to insert or update data. To mitigate this error, delete data or increase the pricing tier of the database or pool. - -For details about monitoring In-Memory OLTP storage utilization and configuring alerts when you almost hit the cap, see [Monitor in-memory storage](in-memory-oltp-monitor-space.md). - -#### About elastic pools - -With elastic pools, the In-Memory OLTP storage is shared across all databases in the pool. Therefore, the usage in one database can potentially affect other databases. Two mitigations for this are: - -- Configure a `Max-eDTU` or `MaxvCore` for databases that is lower than the eDTU or vCore count for the pool as a whole. This maximum caps the In-Memory OLTP storage utilization, in any database in the pool, to the size that corresponds to the eDTU count. -- Configure a `Min-eDTU` or `MinvCore` that is greater than 0. This minimum guarantees that each database in the pool has the amount of available In-Memory OLTP storage that corresponds to the configured `Min-eDTU` or `vCore`. 
- -### Changing service tiers of databases that use In-Memory OLTP technologies - -You can always upgrade your database or instance to a higher tier, such as from General Purpose to Business Critical (or Standard to Premium). The available functionality and resources only increase. - -But downgrading the tier can negatively impact your database. The impact is especially apparent when you downgrade from Business Critical to General Purpose (or Premium to Standard or Basic) when your database contains In-Memory OLTP objects. Memory-optimized tables are unavailable after the downgrade (even if they remain visible). The same considerations apply when you're lowering the pricing tier of an elastic pool, or moving a database with in-memory technologies, into a General Purpose, Standard, or Basic elastic pool. - -> [!Important] -> In-Memory OLTP isn't supported in the General Purpose, Standard or Basic tier. Therefore, it isn't possible to move a database that has any In-Memory OLTP objects to one of these tiers. - -Before you downgrade the database to General Purpose, Standard, or Basic, remove all memory-optimized tables and table types, as well as all natively compiled T-SQL modules. - -*Scaling-down resources in Business Critical tier*: Data in memory-optimized tables must fit within the In-Memory OLTP storage that is associated with the tier of the database or the managed instance, or it is available in the elastic pool. If you try to scale-down the tier or move the database into a pool that doesn't have enough available In-Memory OLTP storage, the operation fails. - -## In-memory columnstore - -In-memory columnstore technology is enabling you to store and query a large amount of data in the tables. Columnstore technology uses column-based data storage format and batch query processing to achieve gain up to 10 times the query performance in OLAP workloads over traditional row-oriented storage. 
You can also achieve gains up to 10 times the data compression over the uncompressed data size. -There are two types of columnstore models that you can use to organize your data: - -- **Clustered columnstore** where all data in the table is organized in the columnar format. In this model, all rows in the table are placed in columnar format that highly compresses the data and enables you to execute fast analytical queries and reports on the table. Depending on the nature of your data, the size of your data might be decreased 10x-100x. Clustered columnstore model also enables fast ingestion of large amount of data (bulk-load) since large batches of data greater than 100K rows are compressed before they are stored on disk. This model is a good choice for the classic data warehouse scenarios. -- **Non-clustered columnstore** where the data is stored in traditional rowstore table and there is an index in the columnstore format that is used for the analytical queries. This model enables Hybrid Transactional-Analytic Processing (HTAP): the ability to run performant real-time analytics on a transactional workload. OLTP queries are executed on rowstore table that is optimized for accessing a small set of rows, while OLAP queries are executed on columnstore index that is better choice for scans and analytics. The query optimizer dynamically chooses rowstore or columnstore format based on the query. Non-clustered columnstore indexes don't decrease the size of the data since original data-set is kept in the original rowstore table without any change. However, the size of additional columnstore index should be in order of magnitude smaller than the equivalent B-tree index. - -> [!Note] -> In-memory columnstore technology keeps only the data that is needed for processing in the memory, while the data that cannot fit into the memory is stored on-disk. Therefore, the amount of data in in-memory columnstore structures can exceed the amount of available memory. 
- -In-depth video about the technology: - -- [Columnstore Index: In-memory Analytics Videos from Ignite 2016](/archive/blogs/sqlserverstorageengine/columnstore-index-in-memory-analytics-i-e-columnstore-index-videos-from-ignite-2016) - -### Data size and storage for columnstore indexes - -Columnstore indexes aren't required to fit in memory. Therefore, the only cap on the size of the indexes is the maximum overall database size, which is documented in the [DTU-based purchasing model](database/service-tiers-dtu.md) and [vCore-based purchasing model](database/service-tiers-vcore.md) articles. - -When you use clustered columnstore indexes, columnar compression is used for the base table storage. This compression can significantly reduce the storage footprint of your user data, which means that you can fit more data in the database. And the compression can be further increased with [columnar archival compression](/sql/relational-databases/data-compression/data-compression#using-columnstore-and-columnstore-archive-compression). The amount of compression that you can achieve depends on the nature of the data, but 10 times the compression is not uncommon. - -For example, if you have a database with a maximum size of 1 terabyte (TB) and you achieve 10 times the compression by using columnstore indexes, you can fit a total of 10 TB of user data in the database. - -When you use nonclustered columnstore indexes, the base table is still stored in the traditional rowstore format. Therefore, the storage savings aren't as significant as with clustered columnstore indexes. However, if you're replacing a number of traditional nonclustered indexes with a single columnstore index, you can still see an overall savings in the storage footprint for the table. - -### Changing service tiers of databases containing Columnstore indexes - -*Downgrading single database to Basic or Standard* might not be possible if your target tier is below S3. 
Columnstore indexes are supported only on the Business Critical/Premium pricing tier and on the Standard tier, S3 and above, and not on the Basic tier. When you downgrade your database to an unsupported tier or level, your columnstore index becomes unavailable. The system maintains your columnstore index, but it never leverages the index. If you later upgrade back to a supported tier or level, your columnstore index is immediately ready to be leveraged again. - -If you have a **clustered** columnstore index, the whole table becomes unavailable after the downgrade. Therefore we recommend that you drop all *clustered* columnstore indexes before you downgrade your database to an unsupported tier or level. - -> [!Note] -> SQL Managed Instance supports Columnstore indexes in all tiers. - - - -## Next steps - -- [Quickstart 1: In-Memory OLTP Technologies for faster T-SQL Performance](/sql/relational-databases/in-memory-oltp/survey-of-initial-areas-in-in-memory-oltp) -- [Use In-Memory OLTP in an existing Azure SQL application](in-memory-oltp-configure.md) -- [Monitor In-Memory OLTP storage](in-memory-oltp-monitor-space.md) for In-Memory OLTP -- [Try in-memory features](in-memory-sample.md) - -## Additional resources - -### Deeper information - -- [Learn how Quorum doubles key database's workload while lowering DTU by 70% with In-Memory OLTP in SQL Database](https://customers.microsoft.com/story/quorum-doubles-key-databases-workload-while-lowering-dtu-with-sql-database) -- [In-Memory OLTP Blog Post](https://azure.microsoft.com/blog/in-memory-oltp-in-azure-sql-database/) -- [Learn about In-Memory OLTP](/sql/relational-databases/in-memory-oltp/in-memory-oltp-in-memory-optimization) -- [Learn about columnstore indexes](/sql/relational-databases/indexes/columnstore-indexes-overview) -- [Learn about real-time operational analytics](/sql/relational-databases/indexes/get-started-with-columnstore-for-real-time-operational-analytics) -- See [Common Workload Patterns and Migration 
Considerations](/previous-versions/dn673538(v=msdn.10)) (which describes workload patterns where In-Memory OLTP commonly provides significant performance gains) - -### Application design - -- [In-Memory OLTP (in-memory optimization)](/sql/relational-databases/in-memory-oltp/in-memory-oltp-in-memory-optimization) -- [Use In-Memory OLTP in an existing Azure SQL application](in-memory-oltp-configure.md) - -### Tools - -- [Azure portal](https://portal.azure.com/) -- [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms) -- [SQL Server Data Tools (SSDT)](/sql/ssdt/download-sql-server-data-tools-ssdt) \ No newline at end of file diff --git a/articles/azure-sql/in-memory-sample.md b/articles/azure-sql/in-memory-sample.md deleted file mode 100644 index 9b4a1fe0cb383..0000000000000 --- a/articles/azure-sql/in-memory-sample.md +++ /dev/null @@ -1,366 +0,0 @@ ---- -title: In-Memory sample -description: Try Azure SQL Database In-Memory technologies with OLTP and columnstore sample. -services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: sample -author: srinia -ms.author: srinia -ms.reviewer: mathoma, kendralittle -ms.date: 12/18/2018 ---- -# In-Memory sample -[!INCLUDE[appliesto-sqldb-sqlmi](includes/appliesto-sqldb-sqlmi.md)] - -In-Memory technologies in Azure SQL Database enable you to improve performance of your application, and potentially reduce cost of your database. By using In-Memory technologies in Azure SQL Database, you can achieve performance improvements with various workloads. - -In this article you'll see two samples that illustrate the use of In-Memory OLTP, as well as columnstore indexes in Azure SQL Database. 
- -For more information, see: - -- [In-Memory OLTP Overview and Usage Scenarios](/sql/relational-databases/in-memory-oltp/overview-and-usage-scenarios) (includes references to customer case studies and information to get started) -- [Documentation for In-Memory OLTP](/sql/relational-databases/in-memory-oltp/in-memory-oltp-in-memory-optimization) -- [Columnstore Indexes Guide](/sql/relational-databases/indexes/columnstore-indexes-overview) -- Hybrid transactional/analytical processing (HTAP), also known as [real-time operational analytics](/sql/relational-databases/indexes/get-started-with-columnstore-for-real-time-operational-analytics) - - - -  - -## 1. Install the In-Memory OLTP sample - -You can create the AdventureWorksLT sample database with a few clicks in the [Azure portal](https://portal.azure.com/). Then, the steps in this section explain how you can enrich your AdventureWorksLT database with In-Memory OLTP objects and demonstrate performance benefits. - -For a more simplistic, but more visually appealing performance demo for In-Memory OLTP, see: - -- Release: [in-memory-oltp-demo-v1.0](https://github.com/Microsoft/sql-server-samples/releases/tag/in-memory-oltp-demo-v1.0) -- Source code: [in-memory-oltp-demo-source-code](https://github.com/microsoft/sql-server-samples/tree/master/samples/features/in-memory-database) - -### Installation steps - -1. In the [Azure portal](https://portal.azure.com/), create a Premium or Business Critical database on a server. Set the **Source** to the AdventureWorksLT sample database. For detailed instructions, see [Create your first database in Azure SQL Database](database/single-database-create-quickstart.md). - -2. Connect to the database with SQL Server Management Studio [(SSMS.exe)](/sql/ssms/download-sql-server-management-studio-ssms). - -3. 
Copy the [In-Memory OLTP Transact-SQL script](https://raw.githubusercontent.com/microsoft/sql-server-samples/master/samples/features/in-memory-database/in-memory-oltp/t-sql-scripts/sql_in-memory_oltp_sample.sql) to your clipboard. The T-SQL script creates the necessary In-Memory objects in the AdventureWorksLT sample database that you created in step 1. - -4. Paste the T-SQL script into SSMS, and then execute the script. The `MEMORY_OPTIMIZED = ON` clause CREATE TABLE statements are crucial. For example: - -```sql -CREATE TABLE [SalesLT].[SalesOrderHeader_inmem]( - [SalesOrderID] int IDENTITY NOT NULL PRIMARY KEY NONCLUSTERED ..., - ... -) WITH (MEMORY_OPTIMIZED = ON); -``` - -### Error 40536 - -If you get error 40536 when you run the T-SQL script, run the following T-SQL script to verify whether the database supports In-Memory: - -```sql -SELECT DatabasePropertyEx(DB_Name(), 'IsXTPSupported'); -``` - -A result of **0** means that In-Memory isn't supported, and **1** means that it is supported. To diagnose the problem, ensure that the database is at the Premium service tier. - -### About the created memory-optimized items - -**Tables**: The sample contains the following memory-optimized tables: - -- SalesLT.Product_inmem -- SalesLT.SalesOrderHeader_inmem -- SalesLT.SalesOrderDetail_inmem -- Demo.DemoSalesOrderHeaderSeed -- Demo.DemoSalesOrderDetailSeed - -You can inspect memory-optimized tables through the **Object Explorer** in SSMS. Right-click **Tables** > **Filter** > **Filter Settings** > **Is Memory Optimized**. The value equals 1. 
- -Or you can query the catalog views, such as: - -```sql -SELECT is_memory_optimized, name, type_desc, durability_desc - FROM sys.tables - WHERE is_memory_optimized = 1; -``` - -**Natively compiled stored procedure**: You can inspect SalesLT.usp_InsertSalesOrder_inmem through a catalog view query: - -```sql -SELECT uses_native_compilation, OBJECT_NAME(object_id), definition - FROM sys.sql_modules - WHERE uses_native_compilation = 1; -``` - -  - -### Run the sample OLTP workload - -The only difference between the following two *stored procedures* is that the first procedure uses memory-optimized versions of the tables, while the second procedure uses the regular on-disk tables: - -- SalesLT**.**usp_InsertSalesOrder**_inmem** -- SalesLT**.**usp_InsertSalesOrder**_ondisk** - -In this section, you see how to use the handy **ostress.exe** utility to execute the two stored procedures at stressful levels. You can compare how long it takes for the two stress runs to finish. - -When you run ostress.exe, we recommend that you pass parameter values designed for both of the following: - -- Run a large number of concurrent connections, by using -n100. -- Have each connection loop hundreds of times, by using -r500. - -However, you might want to start with much smaller values like -n10 and -r50 to ensure that everything is working. - -### Script for ostress.exe - -This section displays the T-SQL script that is embedded in our ostress.exe command line. The script uses items that were created by the T-SQL script that you installed earlier. 
- -The following script inserts a sample sales order with five line items into the following memory-optimized *tables*: - -- SalesLT.SalesOrderHeader_inmem -- SalesLT.SalesOrderDetail_inmem - -```sql -DECLARE - @i int = 0, - @od SalesLT.SalesOrderDetailType_inmem, - @SalesOrderID int, - @DueDate datetime2 = sysdatetime(), - @CustomerID int = rand() * 8000, - @BillToAddressID int = rand() * 10000, - @ShipToAddressID int = rand() * 10000; - -INSERT INTO @od - SELECT OrderQty, ProductID - FROM Demo.DemoSalesOrderDetailSeed - WHERE OrderID= cast((rand()*60) as int); - -WHILE (@i < 20) -begin; - EXECUTE SalesLT.usp_InsertSalesOrder_inmem @SalesOrderID OUTPUT, - @DueDate, @CustomerID, @BillToAddressID, @ShipToAddressID, @od; - SET @i = @i + 1; -end -``` - -To make the *_ondisk* version of the preceding T-SQL script for ostress.exe, you would replace both occurrences of the *_inmem* substring with *_ondisk*. These replacements affect the names of tables and stored procedures. - -#### Install RML utilities and `ostress` - -Ideally, you would plan to run ostress.exe on an Azure virtual machine (VM). You would create an [Azure VM](https://azure.microsoft.com/documentation/services/virtual-machines/) in the same Azure geographic region where your AdventureWorksLT database resides. But you can run ostress.exe on your laptop instead. - -On the VM, or on whatever host you choose, install the Replay Markup Language (RML) utilities. The utilities include ostress.exe. - -For more information, see: - -- The ostress.exe discussion in [Sample Database for In-Memory OLTP](/sql/relational-databases/in-memory-oltp/sample-database-for-in-memory-oltp). -- [Sample Database for In-Memory OLTP](/sql/relational-databases/in-memory-oltp/sample-database-for-in-memory-oltp). -- The [blog for installing ostress.exe](https://techcommunity.microsoft.com/t5/sql-server-support/cumulative-update-2-to-the-rml-utilities-for-microsoft-sql/ba-p/317910). 
- - - -#### Run the *_inmem* stress workload first - -You can use an *RML Cmd Prompt* window to run our ostress.exe command line. The command-line parameters direct `ostress` to: - -- Run 100 connections concurrently (-n100). -- Have each connection run the T-SQL script 50 times (-r50). - -``` -ostress.exe -n100 -r50 -S.database.windows.net -U -P -d -q -Q"DECLARE @i int = 0, @od SalesLT.SalesOrderDetailType_inmem, @SalesOrderID int, @DueDate datetime2 = sysdatetime(), @CustomerID int = rand() * 8000, @BillToAddressID int = rand() * 10000, @ShipToAddressID int = rand()* 10000; INSERT INTO @od SELECT OrderQty, ProductID FROM Demo.DemoSalesOrderDetailSeed WHERE OrderID= cast((rand()*60) as int); WHILE (@i < 20) begin; EXECUTE SalesLT.usp_InsertSalesOrder_inmem @SalesOrderID OUTPUT, @DueDate, @CustomerID, @BillToAddressID, @ShipToAddressID, @od; set @i += 1; end" -``` - -To run the preceding ostress.exe command line: - -1. Reset the database data content by running the following command in SSMS, to delete all the data that was inserted by any previous runs: - - ``` tsql - EXECUTE Demo.usp_DemoReset; - ``` - -2. Copy the text of the preceding ostress.exe command line to your clipboard. - -3. Replace the `` for the parameters -S -U -P -d with the correct real values. - -4. Run your edited command line in an RML Cmd window. - -#### Result is a duration - -When `ostress.exe` finishes, it writes the run duration as its final line of output in the RML Cmd window. For example, a shorter test run lasted about 1.5 minutes: - -`11/12/15 00:35:00.873 [0x000030A8] OSTRESS exiting normally, elapsed time: 00:01:31.867` - -#### Reset, edit for *_ondisk*, then rerun - -After you have the result from the *_inmem* run, perform the following steps for the *_ondisk* run: - -1. Reset the database by running the following command in SSMS to delete all the data that was inserted by the previous run: - - ```sql - EXECUTE Demo.usp_DemoReset; - ``` - -2. 
Edit the ostress.exe command line to replace all *_inmem* with *_ondisk*. - -3. Rerun ostress.exe for the second time, and capture the duration result. - -4. Again, reset the database (for responsibly deleting what can be a large amount of test data). - -#### Expected comparison results - -Our In-Memory tests have shown that performance improved by **nine times** for this simplistic workload, with `ostress` running on an Azure VM in the same Azure region as the database. - - - -  - -## 2. Install the In-Memory Analytics sample - -In this section, you compare the IO and statistics results when you're using a columnstore index versus a traditional b-tree index. - -For real-time analytics on an OLTP workload, it's often best to use a nonclustered columnstore index. For details, see [Columnstore Indexes Described](/sql/relational-databases/indexes/columnstore-indexes-overview). - -### Prepare the columnstore analytics test - -1. Use the Azure portal to create a fresh AdventureWorksLT database from the sample. - - Use that exact name. - - Choose any Premium service tier. - -2. Copy the [sql_in-memory_analytics_sample](https://raw.githubusercontent.com/microsoft/sql-server-samples/master/samples/features/in-memory-database/in-memory-oltp/t-sql-scripts/sql_in-memory_analytics_sample.sql) to your clipboard. - - The T-SQL script creates the necessary In-Memory objects in the AdventureWorksLT sample database that you created in step 1. - - The script creates the Dimension table and two fact tables. The fact tables are populated with 3.5 million rows each. - - The script might take 15 minutes to complete. - -3. Paste the T-SQL script into SSMS, and then execute the script. The **COLUMNSTORE** keyword in the **CREATE INDEX** statement is crucial, as in:
    `CREATE NONCLUSTERED COLUMNSTORE INDEX ...;` - -4. Set AdventureWorksLT to compatibility level 130:
    `ALTER DATABASE AdventureworksLT SET compatibility_level = 130;` - - Level 130 is not directly related to In-Memory features. But level 130 generally provides faster query performance than 120. - -#### Key tables and columnstore indexes - -- dbo.FactResellerSalesXL_CCI is a table that has a clustered columnstore index, which has advanced compression at the *data* level. - -- dbo.FactResellerSalesXL_PageCompressed is a table that has an equivalent regular clustered index, which is compressed only at the *page* level. - -#### Key queries to compare the columnstore index - -There are [several T-SQL query types that you can run](https://raw.githubusercontent.com/microsoft/sql-server-samples/master/samples/features/in-memory-database/in-memory-oltp/t-sql-scripts/clustered_columnstore_sample_queries.sql) to see performance improvements. In step 2 in the T-SQL script, pay attention to this pair of queries. They differ only on one line: - -- `FROM FactResellerSalesXL_PageCompressed a` -- `FROM FactResellerSalesXL_CCI a` - -A clustered columnstore index is in the FactResellerSalesXL\_CCI table. - -The following T-SQL script excerpt prints statistics for IO and TIME for the query of each table. 
- -```sql -/********************************************************************* -Step 2 -- Overview --- Page Compressed BTree table v/s Columnstore table performance differences --- Enable actual Query Plan in order to see Plan differences when Executing -*/ --- Ensure Database is in 130 compatibility mode -ALTER DATABASE AdventureworksLT SET compatibility_level = 130 -GO - --- Execute a typical query that joins the Fact Table with dimension tables --- Note this query will run on the Page Compressed table, Note down the time -SET STATISTICS IO ON -SET STATISTICS TIME ON -GO - -SELECT c.Year - ,e.ProductCategoryKey - ,FirstName + ' ' + LastName AS FullName - ,count(SalesOrderNumber) AS NumSales - ,sum(SalesAmount) AS TotalSalesAmt - ,Avg(SalesAmount) AS AvgSalesAmt - ,count(DISTINCT SalesOrderNumber) AS NumOrders - ,count(DISTINCT a.CustomerKey) AS CountCustomers -FROM FactResellerSalesXL_PageCompressed a -INNER JOIN DimProduct b ON b.ProductKey = a.ProductKey -INNER JOIN DimCustomer d ON d.CustomerKey = a.CustomerKey -Inner JOIN DimProductSubCategory e on e.ProductSubcategoryKey = b.ProductSubcategoryKey -INNER JOIN DimDate c ON c.DateKey = a.OrderDateKey -GROUP BY e.ProductCategoryKey,c.Year,d.CustomerKey,d.FirstName,d.LastName -GO -SET STATISTICS IO OFF -SET STATISTICS TIME OFF -GO - - --- This is the same Prior query on a table with a clustered columnstore index CCI --- The comparison numbers are even more dramatic the larger the table is (this is an 11 million row table only) -SET STATISTICS IO ON -SET STATISTICS TIME ON -GO -SELECT c.Year - ,e.ProductCategoryKey - ,FirstName + ' ' + LastName AS FullName - ,count(SalesOrderNumber) AS NumSales - ,sum(SalesAmount) AS TotalSalesAmt - ,Avg(SalesAmount) AS AvgSalesAmt - ,count(DISTINCT SalesOrderNumber) AS NumOrders - ,count(DISTINCT a.CustomerKey) AS CountCustomers -FROM FactResellerSalesXL_CCI a -INNER JOIN DimProduct b ON b.ProductKey = a.ProductKey -INNER JOIN DimCustomer d ON d.CustomerKey = a.CustomerKey 
-Inner JOIN DimProductSubCategory e on e.ProductSubcategoryKey = b.ProductSubcategoryKey -INNER JOIN DimDate c ON c.DateKey = a.OrderDateKey -GROUP BY e.ProductCategoryKey,c.Year,d.CustomerKey,d.FirstName,d.LastName -GO - -SET STATISTICS IO OFF -SET STATISTICS TIME OFF -GO -``` - -In a database with the P2 pricing tier, you can expect about nine times the performance gain for this query by using the clustered columnstore index compared with the traditional index. With P15, you can expect about 57 times the performance gain by using the columnstore index. - -## Next steps - -- [Quickstart 1: In-Memory OLTP Technologies for faster T-SQL Performance](/sql/relational-databases/in-memory-oltp/survey-of-initial-areas-in-in-memory-oltp) - -- [Use In-Memory OLTP in an existing Azure SQL application](in-memory-oltp-configure.md) - -- [Monitor In-Memory OLTP storage](in-memory-oltp-monitor-space.md) for In-Memory OLTP - -## Additional resources - -### Deeper information - -- [Learn how Quorum doubles key database's workload while lowering DTU by 70% with In-Memory OLTP in Azure SQL Database](https://customers.microsoft.com/story/quorum-doubles-key-databases-workload-while-lowering-dtu-with-sql-database) - -- [In-Memory OLTP in Azure SQL Database Blog Post](https://azure.microsoft.com/blog/in-memory-oltp-in-azure-sql-database/) - -- [Learn about In-Memory OLTP](/sql/relational-databases/in-memory-oltp/in-memory-oltp-in-memory-optimization) - -- [Learn about columnstore indexes](/sql/relational-databases/indexes/columnstore-indexes-overview) - -- [Learn about real-time operational analytics](/sql/relational-databases/indexes/get-started-with-columnstore-for-real-time-operational-analytics) - -- See [Common Workload Patterns and Migration Considerations](/previous-versions/dn673538(v=msdn.10)) (which describes workload patterns where In-Memory OLTP commonly provides significant performance gains) - -#### Application design - -- [In-Memory OLTP (In-Memory 
Optimization)](/sql/relational-databases/in-memory-oltp/in-memory-oltp-in-memory-optimization) - -- [Use In-Memory OLTP in an existing Azure SQL application](in-memory-oltp-configure.md) - -#### Tools - -- [Azure portal](https://portal.azure.com/) - -- [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms) - -- [SQL Server Data Tools (SSDT)](/sql/ssdt/download-sql-server-data-tools-ssdt) \ No newline at end of file diff --git a/articles/azure-sql/includes/appliesto-asf.md b/articles/azure-sql/includes/appliesto-asf.md deleted file mode 100644 index 46c220ba7621a..0000000000000 --- a/articles/azure-sql/includes/appliesto-asf.md +++ /dev/null @@ -1 +0,0 @@ -**APPLIES TO:** :::image type="icon" source="../media/applies-to/yes.png" border="false":::Azure SQL Database :::image type="icon" source="../media/applies-to/yes.png" border="false":::Azure SQL Managed Instance :::image type="icon" source="../media/applies-to/yes.png" border="false":::SQL Server on Azure VM diff --git a/articles/azure-sql/includes/appliesto-sqldb-asa-formerly-sqldw.md b/articles/azure-sql/includes/appliesto-sqldb-asa-formerly-sqldw.md deleted file mode 100644 index 101a0cff9eee0..0000000000000 --- a/articles/azure-sql/includes/appliesto-sqldb-asa-formerly-sqldw.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -ms.service: sql-database -ms.topic: include -ms.date: 09/21/2021 -author: WilliamDAssafMSFT -ms.author: wiassaf ---- - -**APPLIES TO:** :::image type="icon" source="../media/applies-to/yes.png" border="false":::Azure SQL Database :::image type="icon" source="../media/applies-to/yes.png" border="false":::Azure Synapse Analytics (dedicated SQL pool (formerly SQL DW) only) - diff --git a/articles/azure-sql/includes/appliesto-sqldb-asa.md b/articles/azure-sql/includes/appliesto-sqldb-asa.md deleted file mode 100644 index 64dc45e52841d..0000000000000 --- a/articles/azure-sql/includes/appliesto-sqldb-asa.md +++ /dev/null @@ -1,2 +0,0 @@ -**APPLIES TO:** :::image 
type="icon" source="../media/applies-to/yes.png" border="false":::Azure SQL Database :::image type="icon" source="../media/applies-to/yes.png" border="false":::Azure Synapse Analytics - diff --git a/articles/azure-sql/includes/appliesto-sqldb-sqlmi-asa-dedicated-only.md b/articles/azure-sql/includes/appliesto-sqldb-sqlmi-asa-dedicated-only.md deleted file mode 100644 index 27f90b5280ed2..0000000000000 --- a/articles/azure-sql/includes/appliesto-sqldb-sqlmi-asa-dedicated-only.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -ms.service: sql-database -ms.topic: include -ms.date: 01/26/2022 -author: WilliamDAssafMSFT -ms.author: wiassaf ---- - -**APPLIES TO:** :::image type="icon" source="../media/applies-to/yes.png" border="false":::Azure SQL Database :::image type="icon" source="../media/applies-to/yes.png" border="false":::Azure SQL Managed Instance :::image type="icon" source="../media/applies-to/yes.png" border="false":::Azure Synapse Analytics (dedicated SQL pools only) - diff --git a/articles/azure-sql/includes/appliesto-sqldb-sqlmi-asa.md b/articles/azure-sql/includes/appliesto-sqldb-sqlmi-asa.md deleted file mode 100644 index 198f8b4bb7bb9..0000000000000 --- a/articles/azure-sql/includes/appliesto-sqldb-sqlmi-asa.md +++ /dev/null @@ -1,2 +0,0 @@ -**APPLIES TO:** :::image type="icon" source="../media/applies-to/yes.png" border="false":::Azure SQL Database :::image type="icon" source="../media/applies-to/yes.png" border="false":::Azure SQL Managed Instance :::image type="icon" source="../media/applies-to/yes.png" border="false"::: Azure Synapse Analytics - diff --git a/articles/azure-sql/includes/appliesto-sqldb-sqlmi.md b/articles/azure-sql/includes/appliesto-sqldb-sqlmi.md deleted file mode 100644 index b4b7f3475c1fa..0000000000000 --- a/articles/azure-sql/includes/appliesto-sqldb-sqlmi.md +++ /dev/null @@ -1,2 +0,0 @@ -**APPLIES TO:** :::image type="icon" source="../media/applies-to/yes.png" border="false":::Azure SQL Database :::image type="icon" 
source="../media/applies-to/yes.png" border="false":::Azure SQL Managed Instance - diff --git a/articles/azure-sql/includes/appliesto-sqldb.md b/articles/azure-sql/includes/appliesto-sqldb.md deleted file mode 100644 index 4850b771bf929..0000000000000 --- a/articles/azure-sql/includes/appliesto-sqldb.md +++ /dev/null @@ -1 +0,0 @@ -**APPLIES TO:** :::image type="icon" source="../media/applies-to/yes.png" border="false":::Azure SQL Database diff --git a/articles/azure-sql/includes/appliesto-sqlmi.md b/articles/azure-sql/includes/appliesto-sqlmi.md deleted file mode 100644 index d5fa66b8cff56..0000000000000 --- a/articles/azure-sql/includes/appliesto-sqlmi.md +++ /dev/null @@ -1,2 +0,0 @@ -**APPLIES TO:** :::image type="icon" source="../media/applies-to/yes.png" border="false":::Azure SQL Managed Instance - diff --git a/articles/azure-sql/includes/appliesto-sqlvm.md b/articles/azure-sql/includes/appliesto-sqlvm.md deleted file mode 100644 index d7701cba5801e..0000000000000 --- a/articles/azure-sql/includes/appliesto-sqlvm.md +++ /dev/null @@ -1,2 +0,0 @@ -**APPLIES TO:** :::image type="icon" source="../media/applies-to/yes.png" border="false":::SQL Server on Azure VM - diff --git a/articles/azure-sql/includes/auto-failover-group-overview.md b/articles/azure-sql/includes/auto-failover-group-overview.md deleted file mode 100644 index 6e766ca4d3c40..0000000000000 --- a/articles/azure-sql/includes/auto-failover-group-overview.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: Auto-failover group overview -description: De-duplicating content between SQL Database and SQL Managed Instance, in this case using an include for an overview of the auto-failover group feature. 
-ms.topic: include -author: MashaMSFT -ms.author: mathoma -ms.reviewer: kendralittle, emlisa, mlandzic -ms.date: 03/01/2022 ---- - -The auto-failover groups feature allows you to manage the replication and failover of a group of databases on a server or all user databases in a managed instance to another Azure region. It is a declarative abstraction on top of the [active geo-replication](../database/active-geo-replication-overview.md) feature, designed to simplify deployment and management of geo-replicated databases at scale. - -**Automatic failover** - -You can initiate a geo-failover manually or you can delegate it to the Azure service based on a user-defined policy. The latter option allows you to automatically recover multiple related databases in a secondary region after a catastrophic failure or other unplanned event that results in full or partial loss of the SQL Database or SQL Managed Instance availability in the primary region. Typically, these are outages that cannot be automatically mitigated by the built-in high availability infrastructure. Examples of geo-failover triggers include natural disasters, or incidents caused by a tenant or control ring being down due to an OS kernel memory leak on compute nodes. For more information, see [Azure SQL high availability](../database/high-availability-sla.md). - -**Offload read-only workloads** - -To reduce traffic to your primary databases, you can also use the secondary databases in a failover group to offload read-only workloads. Use the read-only listener to direct read-only traffic to a readable secondary database. - -**Endpoint redirection** - -Auto-failover groups provide read-write and read-only listener end-points that remain unchanged during geo-failovers. This means you do not have to change the connection string for your application after a geo-failover, because connections are automatically routed to the current primary. 
Whether you use manual or automatic failover activation, a geo-failover switches all secondary databases in the group to the primary role. After the geo-failover is completed, the DNS record is automatically updated to redirect the endpoints to the new region. For geo-failover RPO and RTO, see [Overview of Business Continuity](../database/business-continuity-high-availability-disaster-recover-hadr-overview.md). - -**Recovering an application** - -To achieve full business continuity, adding regional database redundancy is only part of the solution. Recovering an application (service) end-to-end after a catastrophic failure requires recovery of all components that constitute the service and any dependent services. Examples of these components include the client software (for example, a browser with a custom JavaScript), web front ends, storage, and DNS. It is critical that all components are resilient to the same failures and become available within the recovery time objective (RTO) of your application. Therefore, you need to identify all dependent services and understand the guarantees and capabilities they provide. Then, you must take adequate steps to ensure that your service functions during the failover of the services on which it depends. \ No newline at end of file diff --git a/articles/azure-sql/includes/auto-failover-group-terminology.md b/articles/azure-sql/includes/auto-failover-group-terminology.md deleted file mode 100644 index 345c5c868fe32..0000000000000 --- a/articles/azure-sql/includes/auto-failover-group-terminology.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Auto-failover group overview -description: De-duplicating content between SQL Database and SQL Managed Instance, in this case using an include for the terminology for auto-failover groups that overlap between both products. 
-ms.topic: include -author: MashaMSFT -ms.author: mathoma -ms.reviewer: kendralittle, emlisa, mlandzic -ms.date: 03/01/2022 ---- - -- **Automatic failover policy** - - By default, a failover group is configured with an automatic failover policy. The system triggers a geo-failover after the failure is detected and the grace period has expired. The system must verify that the outage cannot be mitigated by the built-in [high availability infrastructure](../database/high-availability-sla.md), for example due to the scale of the impact. If you want to control the geo-failover workflow from the application or manually, you can turn off automatic failover policy. - - > [!NOTE] - > Because verification of the scale of the outage and how quickly it can be mitigated involves human actions, the grace period cannot be set below one hour. This limitation applies to all databases in the failover group regardless of their data synchronization state. - -- **Read-only failover policy** - - By default, the failover of the read-only listener is disabled. It ensures that the performance of the primary is not impacted when the secondary is offline. However, it also means the read-only sessions will not be able to connect until the secondary is recovered. If you cannot tolerate downtime for the read-only sessions and can use the primary for both read-only and read-write traffic at the expense of the potential performance degradation of the primary, you can enable failover for the read-only listener by configuring the `AllowReadOnlyFailoverToPrimary` property. In that case, the read-only traffic will be automatically redirected to the primary if the secondary is not available. - - > [!NOTE] - > The `AllowReadOnlyFailoverToPrimary` property only has effect if automatic failover policy is enabled and an automatic geo-failover has been triggered. In that case, if the property is set to True, the new primary will serve both read-write and read-only sessions. 
- -- **Planned failover** - - Planned failover performs full data synchronization between primary and secondary databases before the secondary switches to the primary role. This guarantees no data loss. Planned failover is used in the following scenarios: - - - Perform disaster recovery (DR) drills in production when data loss is not acceptable - - Relocate the databases to a different region - - Return the databases to the primary region after the outage has been mitigated (failback) - -- **Unplanned failover** - - Unplanned or forced failover immediately switches the secondary to the primary role without waiting for recent changes to propagate from the primary. This operation may result in data loss. Unplanned failover is used as a recovery method during outages when the primary is not accessible. When the outage is mitigated, the old primary will automatically reconnect and become a new secondary. A planned failover may be executed to fail back, returning the replicas to their original primary and secondary roles. - -- **Manual failover** - - You can initiate a geo-failover manually at any time regardless of the automatic failover configuration. During an outage that impacts the primary, if automatic failover policy is not configured, a manual failover is required to promote the secondary to the primary role. You can initiate a forced (unplanned) or friendly (planned) failover. A friendly failover is only possible when the old primary is accessible, and can be used to relocate the primary to the secondary region without data loss. When a failover is completed, the DNS records are automatically updated to ensure connectivity to the new primary. - -- **Grace period with data loss** - - Because the data is replicated to the secondary database using asynchronous replication, an automatic geo-failover may result in data loss. You can customize the automatic failover policy to reflect your application’s tolerance to data loss. 
By configuring `GracePeriodWithDataLossHours`, you can control how long the system waits before initiating a forced failover, which may result in data loss. \ No newline at end of file diff --git a/articles/azure-sql/includes/create-configure-database.md b/articles/azure-sql/includes/create-configure-database.md deleted file mode 100644 index 84af5cbf14679..0000000000000 --- a/articles/azure-sql/includes/create-configure-database.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -ms.service: sql-database -ms.subservice: deployment-configuration -ms.topic: include -ms.date: 12/17/2020 -author: MashaMSFT -ms.author: mathoma ---- - - You can use one of these quickstarts to create and then configure a database: - - | Action | SQL Database | SQL Managed Instance | SQL Server on Azure VM | Azure Synapse Analytics | - |:--- |:--- |:---|:---|:---| - | Create| [Portal](../database/single-database-create-quickstart.md) | [Portal](../managed-instance/instance-create-quickstart.md) | [Portal](../virtual-machines/windows/sql-vm-create-portal-quickstart.md) | [Portal](../../synapse-analytics/quickstart-create-workspace.md) | - || [CLI](../database/scripts/create-and-configure-database-cli.md) | | | [CLI](../../synapse-analytics/quickstart-create-workspace-cli.md) | - || [PowerShell](../database/scripts/create-and-configure-database-powershell.md) | [PowerShell](../managed-instance/scripts/create-configure-managed-instance-powershell.md) | [PowerShell](../virtual-machines/windows/sql-vm-create-powershell-quickstart.md) | [PowerShell](../../synapse-analytics/quickstart-create-workspace-powershell.md) | - || | | [Deployment template](../virtual-machines/windows/create-sql-vm-resource-manager-template.md) | [Deployment template](../../synapse-analytics/quickstart-deployment-template-workspaces.md) | - | Configure | [Server-level IP firewall rule](../database/firewall-create-server-level-portal-quickstart.md)| [Connectivity from a VM](../managed-instance/connect-vm-instance-configure.md)| | - 
|||[Connectivity from on-premises](../managed-instance/point-to-site-p2s-configure.md) | [Connect to a SQL Server instance](../virtual-machines/windows/sql-vm-create-portal-quickstart.md) | - | Get connection information | [Azure SQL](../database/connect-query-content-reference-guide.md#get-server-connection-information)|[Azure SQL](../database/connect-query-content-reference-guide.md#get-server-connection-information)| [SQL VM](../virtual-machines/windows/sql-vm-create-portal-quickstart.md?#connect-to-sql-server)| [Synapse SQL](../../synapse-analytics/sql/connect-overview.md#find-your-server-name)| - diff --git a/articles/azure-sql/includes/media/sql-database-create-manage-portal/add-azure-sql-resources.png b/articles/azure-sql/includes/media/sql-database-create-manage-portal/add-azure-sql-resources.png deleted file mode 100644 index d7bae5da88b30..0000000000000 Binary files a/articles/azure-sql/includes/media/sql-database-create-manage-portal/add-azure-sql-resources.png and /dev/null differ diff --git a/articles/azure-sql/includes/media/sql-database-create-manage-portal/single-sql-database-deployment-options.png b/articles/azure-sql/includes/media/sql-database-create-manage-portal/single-sql-database-deployment-options.png deleted file mode 100644 index 32d865068aca6..0000000000000 Binary files a/articles/azure-sql/includes/media/sql-database-create-manage-portal/single-sql-database-deployment-options.png and /dev/null differ diff --git a/articles/azure-sql/includes/media/sql-database-create-single-database/additional-settings.png b/articles/azure-sql/includes/media/sql-database-create-single-database/additional-settings.png deleted file mode 100644 index 59d8f01d3557b..0000000000000 Binary files a/articles/azure-sql/includes/media/sql-database-create-single-database/additional-settings.png and /dev/null differ diff --git a/articles/azure-sql/includes/media/sql-database-create-single-database/cloudshell.png 
b/articles/azure-sql/includes/media/sql-database-create-single-database/cloudshell.png deleted file mode 100644 index 3a3f079690715..0000000000000 Binary files a/articles/azure-sql/includes/media/sql-database-create-single-database/cloudshell.png and /dev/null differ diff --git a/articles/azure-sql/includes/media/sql-database-create-single-database/create-single-database.png b/articles/azure-sql/includes/media/sql-database-create-single-database/create-single-database.png deleted file mode 100644 index af692dec7f49f..0000000000000 Binary files a/articles/azure-sql/includes/media/sql-database-create-single-database/create-single-database.png and /dev/null differ diff --git a/articles/azure-sql/includes/media/sql-database-create-single-database/networking.png b/articles/azure-sql/includes/media/sql-database-create-single-database/networking.png deleted file mode 100644 index 726d481edae53..0000000000000 Binary files a/articles/azure-sql/includes/media/sql-database-create-single-database/networking.png and /dev/null differ diff --git a/articles/azure-sql/includes/media/sql-database-create-single-database/new-server.png b/articles/azure-sql/includes/media/sql-database-create-single-database/new-server.png deleted file mode 100644 index 41bfa67d610b8..0000000000000 Binary files a/articles/azure-sql/includes/media/sql-database-create-single-database/new-server.png and /dev/null differ diff --git a/articles/azure-sql/includes/media/sql-database-create-single-database/new-sql-database-basics.png b/articles/azure-sql/includes/media/sql-database-create-single-database/new-sql-database-basics.png deleted file mode 100644 index 967c7b593662d..0000000000000 Binary files a/articles/azure-sql/includes/media/sql-database-create-single-database/new-sql-database-basics.png and /dev/null differ diff --git a/articles/azure-sql/includes/media/sql-database-create-single-database/sqldbportal.png b/articles/azure-sql/includes/media/sql-database-create-single-database/sqldbportal.png 
deleted file mode 100644 index fb8eb0081189e..0000000000000 Binary files a/articles/azure-sql/includes/media/sql-database-create-single-database/sqldbportal.png and /dev/null differ diff --git a/articles/azure-sql/includes/sql-ag-use-dnn-listener.md b/articles/azure-sql/includes/sql-ag-use-dnn-listener.md deleted file mode 100644 index 86941209c1ed9..0000000000000 --- a/articles/azure-sql/includes/sql-ag-use-dnn-listener.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Use a distributed network name (DNN) listener instead of a VNN listener for availability groups on SQL Server VMs. -description: Message to redirect customers to using the DNN listener instead of the VNN listener. -ms.topic: include -author: MashaMSFT -ms.author: mathoma ---- - -> [!NOTE] -> Customers on SQL Server 2019 CU8 and later on Windows 2016 and later can replace the traditional VNN listener and Azure Load Balancer with a [distributed network name (DNN) listener](../virtual-machines/windows/availability-group-distributed-network-name-dnn-listener-configure.md) instead. Skip the rest of the steps in this article that create the listener and load balancer. diff --git a/articles/azure-sql/includes/sql-database-akv-permission-delay.md b/articles/azure-sql/includes/sql-database-akv-permission-delay.md deleted file mode 100644 index d037cb8df9926..0000000000000 --- a/articles/azure-sql/includes/sql-database-akv-permission-delay.md +++ /dev/null @@ -1,2 +0,0 @@ -> [!NOTE] -> It may take around 10 minutes for any permission changes to take effect for the key vault. This includes revoking access permissions to the TDE protector in AKV, and users within this time frame may still have access permissions. 
\ No newline at end of file diff --git a/articles/azure-sql/includes/sql-database-create-manage-portal.md b/articles/azure-sql/includes/sql-database-create-manage-portal.md deleted file mode 100644 index 8d7e2fc730984..0000000000000 --- a/articles/azure-sql/includes/sql-database-create-manage-portal.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -author: MashaMSFT -ms.service: sql-database -ms.subservice: service-overview -ms.topic: include -ms.date: 08/23/2019 -ms.author: mathoma -ms.reviewer: kendralittle ---- - -## Create and manage Azure SQL resources with the Azure portal - -The Azure portal provides a single page where you can manage [all of your Azure SQL resources](https://go.microsoft.com/fwlink/?linkid=2100641) including your SQL Server on Azure virtual machines (VMs). - -To access the **Azure SQL** page, from the Azure portal menu, select **Azure SQL** or search for and select **Azure SQL** in any page. - -> [!NOTE] -> **Azure SQL** provides a quick and easy way to access all of your SQL resources in the Azure portal, including single and pooled databases in Azure SQL Database as well as the logical server hosting them, SQL Managed Instances, and SQL Server on Azure VMs. [Azure SQL](../azure-sql-iaas-vs-paas-what-is-overview.md) is not a service or resource, but rather a family of SQL-related services. - -To manage existing resources, select the desired item in the list. To create new Azure SQL resources, select **+ Create**. - -:::image type="content" source="media/sql-database-create-manage-portal/add-azure-sql-resources.png" alt-text="Screenshot of the Azure SQL portal page." lightbox="media/sql-database-create-manage-portal/add-azure-sql-resources.png"::: - -After selecting **+ Create**, view additional information about the different options by selecting **Show details** on any tile. 
- -:::image type="content" source="media/sql-database-create-manage-portal/single-sql-database-deployment-options.png" alt-text="Screenshot of database tile details in the Azure portal." lightbox="media/sql-database-create-manage-portal/single-sql-database-deployment-options.png"::: - -For details, see: - -- [Create a single database](../database/single-database-create-quickstart.md) -- [Create an elastic pool](../database/elastic-pool-overview.md#create-a-new-sql-database-elastic-pool-by-using-the-azure-portal) -- [Create a managed instance](../managed-instance/instance-create-quickstart.md) -- [Create a SQL virtual machine](../virtual-machines/windows/sql-vm-create-portal-quickstart.md) \ No newline at end of file diff --git a/articles/azure-sql/includes/sql-database-create-single-database.md b/articles/azure-sql/includes/sql-database-create-single-database.md deleted file mode 100644 index 66f48808fc757..0000000000000 --- a/articles/azure-sql/includes/sql-database-create-single-database.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -author: MashaMSFT -ms.service: sql-database -ms.subservice: service-overview -ms.topic: include -ms.date: 04/06/2022 -ms.author: mathoma -ms.reviewer: vanto ---- - -In this step, you create a [logical SQL server](../database/logical-servers.md) and a [single database](../database/single-database-overview.md) that uses AdventureWorksLT sample data. You can create the database by using Azure portal menus and screens, or by using an Azure CLI or PowerShell script in the Azure Cloud Shell. - -All the methods include setting up a server-level firewall rule to allow the public IP address of the computer you're using to access the server. For more information about creating server-level firewall rules, see [Create a server-level firewall](../database/firewall-create-server-level-portal-quickstart.md). You can also set database-level firewall rules. 
See [Create a database-level firewall rule](/sql/relational-databases/system-stored-procedures/sp-set-database-firewall-rule-azure-sql-database). - -# [Portal](#tab/azure-portal) - -To create a resource group, server, and single database in the Azure portal: - -1. Sign in to the [portal](https://portal.azure.com). -1. From the Search bar, search for and select **Azure SQL**. -1. On the **Azure SQL** page, select **Add**. - - :::image type="content" source="./media/sql-database-create-single-database/sqldbportal.png" alt-text="Add to Azure SQL"::: - -1. On the **Select SQL deployment option** page, select the **SQL databases** tile, with **Single database** under **Resource type**. You can view more information about the different databases by selecting **Show details**. -1. Select **Create**. - - :::image type="content" source="./media/sql-database-create-single-database/create-single-database.png" alt-text="Create single database"::: - -1. On the **Basics** tab of the **Create SQL database** form, under **Project details**, select the correct Azure **Subscription** if it isn't already selected. -1. Under **Resource group**, select **Create new**, enter *myResourceGroup*, and select **OK**. -1. Under **Database details**, for **Database name** enter *mySampleDatabase*. -1. For **Server**, select **Create new**, and fill out the **New server** form as follows: - - **Server name**: Enter *mysqlserver*, and some characters for uniqueness. - - **Server admin login**: Enter *AzureAdmin*. - - **Password**: Enter a password that meets requirements, and enter it again in the **Confirm password** field. - - **Location**: Drop down and choose a location, such as **(US) West US**. - - Select **OK**. - - :::image type="content" source="./media/sql-database-create-single-database/new-server.png" alt-text="New server"::: - - Record the server admin login and password so you can log in to the server and its databases. 
If you forget your login or password, you can get the login name or reset the password on the **SQL server** page after database creation. To open the **SQL server** page, select the server name on the database **Overview** page. - -1. Under **Compute + storage**, if you want to reconfigure the defaults, select **Configure database**. - - On the **Configure** page, you can optionally: - - Change the **Compute tier** from **Provisioned** to **Serverless**. - - Review and change the settings for **vCores** and **Data max size**. - - Select **Change configuration** to change hardware configuration. - - After making any changes, select **Apply**. - -1. Select **Next: Networking** at the bottom of the page. - - :::image type="content" source="./media/sql-database-create-single-database/new-sql-database-basics.png" alt-text="New SQL database - Basic tab"::: - -1. On the **Networking** tab, under **Connectivity method**, select **Public endpoint**. -1. Under **Firewall rules**, set **Add current client IP address** to **Yes**. -1. Select **Next: Additional settings** at the bottom of the page. - - :::image type="content" source="./media/sql-database-create-single-database/networking.png" alt-text="Networking tab"::: - - For more information about firewall settings, see [Allow Azure services and resources to access this server](../database/network-access-controls-overview.md) and [Add a private endpoint](../database/private-endpoint-overview.md). - -1. On the **Additional settings** tab, in the **Data source** section, for **Use existing data**, select **Sample**. -1. Optionally, enable [Microsoft Defender for SQL](../database/azure-defender-for-sql.md). -1. Optionally, set the [maintenance window](../database/maintenance-window.md) so planned maintenance is performed at the best time for your database. -1. Select **Review + create** at the bottom of the page. 
- - :::image type="content" source="./media/sql-database-create-single-database/additional-settings.png" alt-text="Additional settings tab"::: - -1. After reviewing settings, select **Create**. - -# [PowerShell](#tab/azure-powershell) - -You can create a resource group, server, and single database using Windows PowerShell. If you don't want to use the Azure Cloud Shell, [install the Azure PowerShell module](/powershell/azure/install-az-ps). - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] - -To run the following code sample in the Azure Cloud Shell, select **Try it** in the code title bar. When the Cloud Shell opens, select **Copy** in the code sample title bar, and paste the code sample into the Cloud Shell window. In the code, replace `` with your Azure Subscription ID, and for `$startIp` and `$endIp`, replace `0.0.0.0` with the public IP address of the computer you're using. - -Follow the onscreen prompts to sign in to Azure and run the code. - -You can also use Azure Cloud Shell from the Azure portal, by selecting the Cloud Shell icon on the top bar. - - :::image type="content" source="./media/sql-database-create-single-database/cloudshell.png" alt-text="Azure Cloud Shell"::: - -The first time you use Cloud Shell from the portal, select **PowerShell** on the **Welcome** dialog. Subsequent sessions will use PowerShell, or you can select it from the Cloud Shell control bar. - -The following PowerShell code creates an Azure resource group, server, single database, and firewall rule for access to the server. Make sure to record the generated resource group and server names, so you can manage these resources later. 
- - ```powershell-interactive - # Set variables for your server and database - $subscriptionId = '' - $resourceGroupName = "myResourceGroup-$(Get-Random)" - $location = "West US" - $adminLogin = "azureuser" - $password = "Azure1234567" - $serverName = "mysqlserver-$(Get-Random)" - $databaseName = "mySampleDatabase" - - # The ip address range that you want to allow to access your server - $startIp = "0.0.0.0" - $endIp = "0.0.0.0" - - # Show randomized variables - Write-host "Resource group name is" $resourceGroupName - Write-host "Server name is" $serverName - - # Connect to Azure - Connect-AzAccount - - # Set subscription ID - Set-AzContext -SubscriptionId $subscriptionId - - # Create a resource group - Write-host "Creating resource group..." - $resourceGroup = New-AzResourceGroup -Name $resourceGroupName -Location $location -Tag @{Owner="SQLDB-Samples"} - $resourceGroup - - # Create a server with a system wide unique server name - Write-host "Creating primary server..." - $server = New-AzSqlServer -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -Location $location ` - -SqlAdministratorCredentials $(New-Object -TypeName System.Management.Automation.PSCredential ` - -ArgumentList $adminLogin, $(ConvertTo-SecureString -String $password -AsPlainText -Force)) - $server - - # Create a server firewall rule that allows access from the specified IP range - Write-host "Configuring firewall for primary server..." - $serverFirewallRule = New-AzSqlServerFirewallRule -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -FirewallRuleName "AllowedIPs" -StartIpAddress $startIp -EndIpAddress $endIp - $serverFirewallRule - - # Create General Purpose Gen4 database with 1 vCore - Write-host "Creating a gen5 2 vCore database..." 
- $database = New-AzSqlDatabase -ResourceGroupName $resourceGroupName ` - -ServerName $serverName ` - -DatabaseName $databaseName ` - -Edition GeneralPurpose ` - -VCore 2 ` - -ComputeGeneration Gen5 ` - -MinimumCapacity 2 ` - -SampleName "AdventureWorksLT" - $database - ``` - -The preceding code uses these PowerShell cmdlets: - -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates a resource group in which all resources are stored. | -| [New-AzSqlServer](/powershell/module/az.sql/new-azsqlserver) | Creates a server that hosts databases and elastic pools. | -| [New-AzSqlServerFirewallRule](/powershell/module/az.sql/new-azsqlserverfirewallrule) | Creates a server-level firewall rule for a server. | -| [New-AzSqlDatabase](/powershell/module/az.sql/new-azsqldatabase) | Creates a database. | - -For more Azure SQL Database PowerShell samples, see [Azure PowerShell samples](../database/powershell-script-content-guide.md). - -# [Azure CLI](#tab/azure-cli) - -The following Azure CLI code blocks create a resource group, server, single database, and server-level IP firewall rule for access to the server. Make sure to record the generated resource group and server names, so you can manage these resources later. - -[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment-h3.md](../../../includes/azure-cli-prepare-your-environment-h3.md)] - -[!INCLUDE [cli-launch-cloud-shell-sign-in.md](../../../includes/cli-launch-cloud-shell-sign-in.md)] - -### Set parameter values to create resources - -Set the parameter values for use in creating the database and required resources. Server names need to be globally unique across all of Azure so the $RANDOM function is used to create the server name. - -Change the location as appropriate for your environment. Replace `0.0.0.0` with the IP address range to match your specific environment. 
Use the public IP address of the computer you're using to restrict access to the server to only your IP address. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/create-and-configure-database/create-and-configure-database.sh" id="SetParameterValues"::: - -### Create a resource group - -Use this script to create a resource group with the [az group create](/cli/azure/group) command. An Azure resource group is a logical container into which Azure resources are deployed and managed. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/create-and-configure-database/create-and-configure-database.sh" id="CreateResourceGroup"::: - -### Create a server - -Use this script to create a server with the [az sql server create](/cli/azure/sql/server) command. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/create-and-configure-database/create-and-configure-database.sh" id="CreateServer"::: - -### Configure a server-based firewall rule - -Use this script to create a firewall rule with the [az sql server firewall-rule create](/cli/azure/sql/server/firewall-rule) command. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/create-and-configure-database/create-and-configure-database.sh" id="CreateFirewallRule"::: - -### Create a single database using Azure CLI - -Use this script to create a database with the [az sql db create](/cli/azure/sql/db) command. - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/create-and-configure-database/create-and-configure-database.sh" id="CreateDatabase"::: - -### Azure CLI command reference - -The preceding code uses these Azure CLI commands: - -| Command | Description | -|---|---| -| [az account set](/cli/azure/account#az-account-set) | Sets a subscription to be the current active subscription. | -| [az group create](/cli/azure/group#az-group-create) | Creates a resource group in which all resources are stored. 
| -| [az sql server create](/cli/azure/sql/server#az-sql-server-create) | Creates a server that hosts databases and elastic pools. | -| [az sql server firewall-rule create](/cli/azure/sql/server/firewall-rule#az-sql-server-firewall-rule-create) | Creates a server-level firewall rule. | -| [az sql db create](/cli/azure/sql/db#az-sql-db-create) | Creates a database. | - -For additional Azure SQL Database Azure CLI scripts, see [Azure CLI samples](../database/az-cli-script-samples-content-guide.md). - ---- diff --git a/articles/azure-sql/includes/sql-vm-feature-benefits.md b/articles/azure-sql/includes/sql-vm-feature-benefits.md deleted file mode 100644 index 9b4aa55320c55..0000000000000 --- a/articles/azure-sql/includes/sql-vm-feature-benefits.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Feature benefits for the SQL Server on Azure VM -description: Feature benefits unlocked by registering your SQL Server on Azure VM with the SQL IaaS agent extension -ms.topic: include -author: MashaMSFT -ms.author: mathoma -ms.reviewer: ---- - -| Feature | Description | -| --- | --- | -| **Portal management** | Unlocks [management in the portal](../virtual-machines/windows/manage-sql-vm-portal.md), so that you can view all of your SQL Server VMs in one place, and enable or disable SQL specific features directly from the portal.
    Management mode: Lightweight & full| -| **Automated backup** |Automates the scheduling of backups for all databases for either the default instance or a [properly installed named instance](../virtual-machines/windows/frequently-asked-questions-faq.yml#can-i-use-a-named-instance-of-sql-server-with-the-iaas-extension-) of SQL Server on the VM. For more information, see [Automated backup for SQL Server in Azure virtual machines (Resource Manager)](../virtual-machines/windows/automated-backup-sql-2014.md).
    Management mode: Full| -| **Automated patching** |Configures a maintenance window during which important Windows and SQL Server security updates to your VM can take place, so you can avoid updates during peak times for your workload. For more information, see [Automated patching for SQL Server in Azure virtual machines (Resource Manager)](../virtual-machines/windows/automated-patching.md).
    Management mode: Full| -| **Azure Key Vault integration** |Enables you to automatically install and configure Azure Key Vault on your SQL Server VM. For more information, see [Configure Azure Key Vault integration for SQL Server on Azure Virtual Machines (Resource Manager)](../virtual-machines/windows/azure-key-vault-integration-configure.md).
    Management mode: Full| -| **Flexible licensing** | Save on cost by [seamlessly transitioning](../virtual-machines/windows/licensing-model-azure-hybrid-benefit-ahb-change.md) from the bring-your-own-license (also known as the Azure Hybrid Benefit) to the pay-as-you-go licensing model and back again.
    Management mode: Lightweight & full| -| **Flexible version / edition** | If you decide to change the [version](../virtual-machines/windows/change-sql-server-version.md) or [edition](../virtual-machines/windows/change-sql-server-edition.md) of SQL Server, you can update the metadata within the Azure portal without having to redeploy the entire SQL Server VM.
    Management mode: Lightweight & full| -| **Defender for Cloud portal integration** | If you've enabled [Microsoft Defender for SQL](../../defender-for-cloud/defender-for-sql-usage.md), then you can view Defender for Cloud recommendations directly in the [SQL virtual machines](../virtual-machines/windows/manage-sql-vm-portal.md) resource of the Azure portal. See [Security best practices](../virtual-machines/windows/security-considerations-best-practices.md) to learn more.
    Management mode: Lightweight & full| -| **SQL best practices assessment** | Enables you to assess the health of your SQL Server VMs using configuration best practices. For more information, see [SQL best practices assessment](../virtual-machines/windows/sql-assessment-for-sql-vm.md).
    Management mode: Full| -| **View disk utilization in portal** | Allows you to view a graphical representation of the disk utilization of your SQL data files in the Azure portal.
    Management mode: Full | - - diff --git a/articles/azure-sql/includes/ssms-connect-azure-ad.md b/articles/azure-sql/includes/ssms-connect-azure-ad.md deleted file mode 100644 index e5b27b6f750d4..0000000000000 --- a/articles/azure-sql/includes/ssms-connect-azure-ad.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Connect to SQL using Azure Active Directory (Azure AD) with SSMS 18.6 and later -description: Message to explain starting December 2021, you can only use Azure Active Directory (Azure AD) with SSMS 18.6 and later -ms.topic: include -author: markingmyname -ms.author: maghan -ms.reviewer: drskwier ---- - -> [!NOTE] -> In December 2021, releases of SSMS prior to 18.6 will no longer authenticate through Azure Active Directory with MFA. -> -> To continue utilizing Azure Active Directory authentication with MFA, you need [SSMS 18.6 or later](/sql/ssms/download-sql-server-management-studio-ssms). \ No newline at end of file diff --git a/articles/azure-sql/includes/vcore-overview.md b/articles/azure-sql/includes/vcore-overview.md deleted file mode 100644 index 67b0fafaec8c8..0000000000000 --- a/articles/azure-sql/includes/vcore-overview.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: vCore purchasing model overview -description: De-duplicating content between SQL Database and SQL Managed Instance, in this case using an include for an overview of the vCore purchasing model. -ms.topic: include -author: MashaMSFT -ms.author: mathoma -ms.reviewer: kendralittle, dfurman -ms.date: 04/06/2022 ---- - -A virtual core (vCore) represents a logical CPU and offers you the option to choose the physical characteristics of the hardware (for example, the number of cores, the memory, and the storage size). The vCore-based purchasing model gives you flexibility, control, transparency of individual resource consumption, and a straightforward way to translate on-premises workload requirements to the cloud. 
This model optimizes price, and allows you to choose compute, memory, and storage resources based on your workload needs. - -In the vCore-based purchasing model, your costs depend on the choice and usage of: - -- Service tier -- Hardware configuration -- Compute resources (the number of vCores and the amount of memory) -- Reserved database storage -- Actual backup storage diff --git a/articles/azure-sql/index.yml b/articles/azure-sql/index.yml deleted file mode 100644 index a8ab0236c2bd3..0000000000000 --- a/articles/azure-sql/index.yml +++ /dev/null @@ -1,239 +0,0 @@ -### YamlMime:Landing - -title: Azure SQL documentation - Azure Staged -summary: "Find documentation about the Azure SQL family of SQL Server database engine products in the cloud: Azure SQL Database, Azure SQL Managed Instance, and SQL Server on Azure VM." - -metadata: - title: Azure SQL documentation - Azure Staged - description: "Azure SQL is a family of SQL Server database engine products in the cloud, from a fully managed database in Azure SQL Database, a fully managed instance in Azure SQL Managed Instance, or SQL Server installed to virtual machine in Azure." - services: sql-database - ms.service: sql-db-mi - ms.subservice: service-overview - ms.tgt_pltfrm: na - ms.devlang: - ms.topic: landing-page - author: MashaMSFT - ms.author: mathoma - ms.reviewer: kendralittle - ms.date: 10/30/2019 - -# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new - -landingContent: -# Cards and links should be based on top customer tasks or top subjects -# Start card title with a verb - - # Card - - title: Azure SQL Database - linkLists: - - linkListType: whats-new - links: - - text: What's new? - url: database/doc-changes-updates-release-notes-whats-new.md - - linkListType: overview - links: - - text: What is SQL Database? - url: database/sql-database-paas-overview.md - - text: What are elastic pools? 
- url: database/elastic-pool-overview.md - - linkListType: quickstart - links: - - text: Create SQL Database - url: database/single-database-create-quickstart.md - - text: Configure firewall - url: database/firewall-create-server-level-portal-quickstart.md - - linkListType: video - links: - - text: Azure SQL Database overview - url: /shows/Azure-SQL-for-Beginners/Azure-SQL-Database-Overview-7-of-61 - - linkListType: concept - links: - - text: Migrate from SQL Server - url: database/migrate-to-database-from-sql-server.md - - text: Advanced security - url: database/security-overview.md - - text: Business continuity - url: database/business-continuity-high-availability-disaster-recover-hadr-overview.md - - text: Monitoring and tuning - url: database/monitor-tune-overview.md - - text: T-SQL differences with SQL Server - url: database/transact-sql-tsql-differences-sql-server.md - - - # Card - - title: Azure SQL Managed Instance - linkLists: - - linkListType: whats-new - links: - - text: What's new? - url: managed-instance/doc-changes-updates-release-notes-whats-new.md - - linkListType: overview - links: - - text: What is SQL Managed Instance? - url: managed-instance/sql-managed-instance-paas-overview.md - - text: What are instance pools? 
- url: managed-instance/instance-pools-overview.md - - linkListType: quickstart - links: - - text: Create SQL Managed Instance - url: managed-instance/instance-create-quickstart.md - - text: Configure VM to connect - url: managed-instance/connect-vm-instance-configure.md - - text: Restore sample database - url: managed-instance/restore-sample-database-quickstart.md - - linkListType: video - links: - - text: Azure SQL Managed Instance overview - url: /shows/Azure-SQL-for-Beginners/Azure-SQL-Managed-Instance-Overview-6-of-61 - - linkListType: concept - links: - - text: Migrate from SQL Server - url: migration-guides/managed-instance/sql-server-to-managed-instance-guide.md - - text: Advanced security - url: database/security-overview.md - - text: Business continuity - url: database/business-continuity-high-availability-disaster-recover-hadr-overview.md - - text: Monitoring and tuning - url: database/monitor-tune-overview.md - - text: T-SQL differences with SQL Server - url: managed-instance/transact-sql-tsql-differences-sql-server.md - - - # Card - - title: SQL Server on Azure VM - linkLists: - - linkListType: overview - links: - - text: What's new? - url: virtual-machines/windows/doc-changes-updates-release-notes-whats-new.md - - text: What is SQL Server on Windows VM? - url: virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md - - text: What is SQL Server on Linux VM? 
- url: virtual-machines/linux/sql-server-on-linux-vm-what-is-iaas-overview.md - - linkListType: quickstart - links: - - text: Create SQL on Azure VM (Windows) - url: virtual-machines/windows/sql-vm-create-portal-quickstart.md - - text: Create SQL on Azure VM (Linux) - url: virtual-machines/linux/sql-vm-create-portal-quickstart.md - - linkListType: video - links: - - text: SQL Server on Azure VM overview - url: /shows/Azure-SQL-for-Beginners/SQL-Server-on-Azure-VM-Overview-4-of-61 - - linkListType: concept - links: - - text: Migrate from SQL Server - url: virtual-machines/windows/migrate-to-vm-from-sql-server.md - - text: Security considerations - url: virtual-machines/windows/security-considerations-best-practices.md - - text: High availability & disaster recovery - url: virtual-machines/windows/business-continuity-high-availability-disaster-recovery-hadr-overview.md - - text: Performance guidelines - url: ./virtual-machines/windows/performance-guidelines-best-practices-checklist.md - - # Card - - title: Migrate from SQL Server - linkLists: - - linkListType: deploy - links: - - text: Azure SQL Database - url: migration-guides/database/sql-server-to-sql-database-overview.md - - text: Azure SQL Managed Instance - url: migration-guides/managed-instance/sql-server-to-managed-instance-overview.md - - text: SQL Server on Azure VMs - url: migration-guides/virtual-machines/sql-server-to-sql-on-azure-vm-migration-overview.md - - # Card - - title: Connect and query - linkLists: - - linkListType: quickstart - links: - - text: Overview - url: database/connect-query-content-reference-guide.md - - text: SQL Server Management Studio (SSMS) - url: database/connect-query-ssms.md - - text: Azure Data Studio - url: /sql/azure-data-studio/quickstart-sql-database?toc=%2Fazure%2Fazure-sql%2Ftoc.json - - text: Azure portal - url: database/connect-query-portal.md - - text: Visual Studio (.NET) - url: database/connect-query-dotnet-visual-studio.md - - text: Visual Studio Code - url: 
database/connect-query-vscode.md - - text: .NET Core - url: database/connect-query-dotnet-core.md - - text: Python - url: database/connect-query-python.md - - # Card - - title: Reference - linkLists: - - linkListType: deploy - links: - - text: Azure CLI samples - url: database/az-cli-script-samples-content-guide.md - - text: PowerShell samples - url: database/powershell-script-content-guide.md - - text: ARM template samples - url: database/arm-templates-content-guide.md - - linkListType: download - links: - - text: SQL Server Management Studio (SSMS) - url: /sql/ssms/download-sql-server-management-studio-ssms - - text: Azure Data Studio - url: /sql/azure-data-studio/download-azure-data-studio - - text: SQL Server Data Tools - url: /sql/ssdt/download-sql-server-data-tools-ssdt - - text: Visual Studio 2019 - url: https://visualstudio.microsoft.com/downloads/ - - linkListType: reference - links: - - text: Migration guide - url: https://datamigration.microsoft.com/ - - text: Transact-SQL (T-SQL) - url: /sql/t-sql/language-reference - - text: Azure CLI - url: /cli/azure/azure-cli-reference-for-sql - - text: PowerShell - url: /powershell/module/az.sql - - text: REST API - url: /rest/api/sql/ - - - # Card - - title: Learn Azure SQL - linkLists: - - linkListType: learn - links: - - text: Azure SQL for beginners - url: https://aka.ms/azuresql4beginners - - text: Azure SQL fundamentals - url: /learn/paths/azure-sql-fundamentals/ - - text: Azure SQL hands-on labs - url: https://aka.ms/asqlworkshop - - text: Azure SQL bootcamp - url: https://aka.ms/azuresqlbootcamp - - text: Educational SQL resources - url: /sql/sql-server/educational-sql-resources - - # Card - - title: Development - linkLists: - - linkListType: overview - links: - - text: Application development - url: database/develop-overview.md - - text: Disaster recovery app design - url: database/designing-cloud-solutions-for-disaster-recovery.md - - text: Managing rolling upgrades (SQL DB) - url: 
database/manage-application-rolling-upgrade.md - - text: Development strategies (SQL VM) - url: virtual-machines/windows/application-patterns-development-strategies.md - - text: SaaS database tenancy patterns - url: database/saas-tenancy-app-design-patterns.md - - linkListType: how-to-guide - links: - - text: Design first database (SSMS) - url: database/design-first-database-tutorial.md - - text: Design first database (C#) - url: database/design-first-database-csharp-tutorial.md diff --git a/articles/azure-sql/load-from-csv-with-bcp.md b/articles/azure-sql/load-from-csv-with-bcp.md deleted file mode 100644 index cde406212f3be..0000000000000 --- a/articles/azure-sql/load-from-csv-with-bcp.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: Load data from CSV file into a database (bcp) -description: For a small data size, uses bcp to import data into Azure SQL Database. -services: sql-database -ms.service: sql-database -ms.subservice: data-movement -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: dzsquared -ms.author: drskwier -ms.reviewer: mathoma, kendralittle -ms.date: 01/25/2019 ---- -# Load data from CSV into Azure SQL Database or SQL Managed Instance (flat files) -[!INCLUDE[appliesto-sqldb-sqlmi](includes/appliesto-sqldb-sqlmi.md)] - -You can use the bcp command-line utility to import data from a CSV file into Azure SQL Database or Azure SQL Managed Instance. - -## Before you begin - -### Prerequisites - -To complete the steps in this article, you need: - -* A database in Azure SQL Database -* The bcp command-line utility installed -* The sqlcmd command-line utility installed - -You can download the bcp and sqlcmd utilities from the [Microsoft sqlcmd Documentation](/sql/tools/sqlcmd-utility?view=sql-server-ver15&preserve-view=true). - -### Data in ASCII or UTF-16 format - -If you are trying this tutorial with your own data, your data needs to use the ASCII or UTF-16 encoding since bcp does not support UTF-8. - -## 1. 
Create a destination table - -Define a table in SQL Database as the destination table. The columns in the table must correspond to the data in each row of your data file. - -To create a table, open a command prompt and use sqlcmd.exe to run the following command: - -```cmd -sqlcmd.exe -S <server name> -d <database name> -U <username> -P <password> -I -Q " - CREATE TABLE DimDate2 - ( - DateId INT NOT NULL, - CalendarQuarter TINYINT NOT NULL, - FiscalQuarter TINYINT NOT NULL - ) - ; -" -``` - -## 2. Create a source data file - -Open Notepad and copy the following lines of data into a new text file and then save this file to your local temp directory, C:\Temp\DimDate2.txt. This data is in ASCII format. - -```txt -20150301,1,3 -20150501,2,4 -20151001,4,2 -20150201,1,3 -20151201,4,2 -20150801,3,1 -20150601,2,4 -20151101,4,2 -20150401,2,4 -20150701,3,1 -20150901,3,1 -20150101,1,3 -``` - -(Optional) To export your own data from a SQL Server database, open a command prompt and run the following command. Replace TableName, ServerName, DatabaseName, Username, and Password with your own information. - -```cmd -bcp <TableName> out C:\Temp\DimDate2_export.txt -S <ServerName> -d <DatabaseName> -U <Username> -P <Password> -q -c -t , -``` - -## 3. Load the data - -To load the data, open a command prompt and run the following command, replacing the values for Server Name, Database name, Username, and Password with your own information. 
- -```cmd -bcp DimDate2 in C:\Temp\DimDate2.txt -S <ServerName> -d <DatabaseName> -U <Username> -P <Password> -q -c -t , -``` - -Use this command to verify the data was loaded properly. - -```cmd -sqlcmd.exe -S <server name> -d <database name> -U <username> -P <password> -I -Q "SELECT * FROM DimDate2 ORDER BY 1;" -``` - -The results should look like this: - -| DateId | CalendarQuarter | FiscalQuarter | -| --- | --- | --- | -| 20150101 |1 |3 | -| 20150201 |1 |3 | -| 20150301 |1 |3 | -| 20150401 |2 |4 | -| 20150501 |2 |4 | -| 20150601 |2 |4 | -| 20150701 |3 |1 | -| 20150801 |3 |1 | -| 20150901 |3 |1 | -| 20151001 |4 |2 | -| 20151101 |4 |2 | -| 20151201 |4 |2 | - -## Next steps - -To migrate a SQL Server database, see [SQL Server database migration](database/migrate-to-database-from-sql-server.md). - - -[bcp]: /sql/tools/bcp-utility -[CREATE TABLE syntax]: /sql/t-sql/statements/create-table-azure-sql-data-warehouse - - -[Microsoft Download Center]: https://www.microsoft.com/download/details.aspx?id=36433 diff --git a/articles/azure-sql/managed-instance/aad-security-configure-tutorial.md b/articles/azure-sql/managed-instance/aad-security-configure-tutorial.md deleted file mode 100644 index 8738c91db2fe2..0000000000000 --- a/articles/azure-sql/managed-instance/aad-security-configure-tutorial.md +++ /dev/null @@ -1,450 +0,0 @@ ---- -title: SQL Managed Instance security with Azure AD server principals (logins) -description: Learn about techniques and features to secure Azure SQL Managed Instance, and use Azure AD server principals (logins) -services: sql-database -ms.service: sql-managed-instance -ms.subservice: security -ms.custom: sqldbrb=1 -ms.topic: tutorial -author: GitHubMirek -ms.author: mireks -ms.reviewer: vanto -ms.date: 11/06/2019 ---- -# Tutorial: Security in Azure SQL Managed Instance using Azure AD server principals (logins) -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Azure SQL Managed Instance provides nearly all security features that the latest SQL Server (Enterprise Edition) database engine has: - -- Limit access in an isolated 
environment -- Use authentication mechanisms that require identity: Azure Active Directory (Azure AD) and SQL Authentication -- Use authorization with role-based memberships and permissions -- Enable security features - -In this tutorial, you learn how to: - -> [!div class="checklist"] -> -> - Create an Azure AD server principal (login) for a managed instance -> - Grant permissions to Azure AD server principals (logins) in a managed instance -> - Create Azure AD users from Azure AD server principals (logins) -> - Assign permissions to Azure AD users and manage database security -> - Use impersonation with Azure AD users -> - Use cross-database queries with Azure AD users -> - Learn about security features, such as threat protection, auditing, data masking, and encryption - -To learn more, see the [Azure SQL Managed Instance overview](sql-managed-instance-paas-overview.md). - -## Prerequisites - -To complete the tutorial, make sure you have the following prerequisites: - -- [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) (SSMS) -- A managed instance - - Follow this article: [Quickstart: Create a managed instance](instance-create-quickstart.md) -- Able to access your managed instance and [provisioned an Azure AD administrator for the managed instance](../database/authentication-aad-configure.md#provision-azure-ad-admin-sql-managed-instance). To learn more, see: - - [Connect your application to a managed instance](connect-application-instance.md) - - [SQL Managed Instance connectivity architecture](connectivity-architecture-overview.md) - - [Configure and manage Azure Active Directory authentication with SQL](../database/authentication-aad-configure.md) - -## Limit access - -Managed instances can be accessed through a private IP address. Much like an isolated SQL Server environment, applications or users need access to the SQL Managed Instance network (VNet) before a connection can be established. 
For more information, see [Connect your application to SQL Managed Instance](connect-application-instance.md). - -It is also possible to configure a service endpoint on a managed instance, which allows for public connections in the same fashion as for Azure SQL Database. -For more information, see [Configure public endpoint in Azure SQL Managed Instance](public-endpoint-configure.md). - -> [!NOTE] -> Even with service endpoints enabled, [Azure SQL Database firewall rules](../database/firewall-configure.md) do not apply. Azure SQL Managed Instance has its own [built-in firewall](management-endpoint-verify-built-in-firewall.md) to manage connectivity. - -## Create an Azure AD server principal (login) using SSMS - -The first Azure AD server principal (login) can be created by the standard SQL admin account (non-Azure AD) that is a `sysadmin`, or the Azure AD admin for the managed instance created during the provisioning process. For more information, see [Provision an Azure Active Directory administrator for SQL Managed Instance](../database/authentication-aad-configure.md#provision-azure-ad-admin-sql-managed-instance). - -See the following articles for examples of connecting to SQL Managed Instance: - -- [Quickstart: Configure Azure VM to connect to SQL Managed Instance](connect-vm-instance-configure.md) -- [Quickstart: Configure a point-to-site connection to SQL Managed Instance from on-premises](point-to-site-p2s-configure.md) - -1. Log into your managed instance using a standard SQL login account (non-Azure AD) that is a `sysadmin` or an Azure AD admin for SQL Managed Instance, using [SQL Server Management Studio](point-to-site-p2s-configure.md#connect-with-ssms). - -2. In **Object Explorer**, right-click the server and choose **New Query**. - -3. 
In the query window, use the following syntax to create a login for a local Azure AD account: - - ```sql - USE master - GO - CREATE LOGIN login_name FROM EXTERNAL PROVIDER - GO - ``` - - This example creates a login for the account nativeuser@aadsqlmi.onmicrosoft.com. - - ```sql - USE master - GO - CREATE LOGIN [nativeuser@aadsqlmi.onmicrosoft.com] FROM EXTERNAL PROVIDER - GO - ``` - -4. On the toolbar, select **Execute** to create the login. - -5. Check the newly added login, by executing the following T-SQL command: - - ```sql - SELECT * - FROM sys.server_principals; - GO - ``` - - ![Screenshot of the Results tab in the S S M S Object Explorer showing the name, principal_id, sid, type, and type_desc of the newly added login.](./media/aad-security-configure-tutorial/native-login.png) - -For more information, see [CREATE LOGIN](/sql/t-sql/statements/create-login-transact-sql?view=azuresqldb-mi-current&preserve-view=true). - -## Grant permissions to create logins - -To create other Azure AD server principals (logins), SQL Server roles or permissions must be granted to the principal (SQL or Azure AD). - -### SQL authentication - -- If the login is a SQL principal, only logins that are part of the `sysadmin` role can use the create command to create logins for an Azure AD account. - -### Azure AD authentication - -- To allow the newly created Azure AD server principal (login) the ability to create other logins for other Azure AD users, groups, or applications, grant the login `sysadmin` or `securityadmin` server role. -- At a minimum, **ALTER ANY LOGIN** permission must be granted to the Azure AD server principal (login) to create other Azure AD server principals (logins). -- By default, the standard permission granted to newly created Azure AD server principals (logins) in master is: **CONNECT SQL** and **VIEW ANY DATABASE**. -- The `sysadmin` server role can be granted to many Azure AD server principals (logins) within a managed instance. 
- -To add the login to the `sysadmin` server role: - -1. Log into the managed instance again, or use the existing connection with the Azure AD admin or SQL principal that is a `sysadmin`. - -1. In **Object Explorer**, right-click the server and choose **New Query**. - -1. Grant the Azure AD server principal (login) the `sysadmin` server role by using the following T-SQL syntax: - - ```sql - ALTER SERVER ROLE sysadmin ADD MEMBER login_name - GO - ``` - - The following example grants the `sysadmin` server role to the login nativeuser@aadsqlmi.onmicrosoft.com - - ```sql - ALTER SERVER ROLE sysadmin ADD MEMBER [nativeuser@aadsqlmi.onmicrosoft.com] - GO - ``` - -## Create additional Azure AD server principals (logins) using SSMS - -Once the Azure AD server principal (login) has been created, and provided with `sysadmin` privileges, that login can create additional logins using the **FROM EXTERNAL PROVIDER** clause with **CREATE LOGIN**. - -1. Connect to the managed instance with the Azure AD server principal (login), using SQL Server Management Studio. Enter your SQL Managed Instance host name. For Authentication in SSMS, there are three options to choose from when logging in with an Azure AD account: - - - Active Directory - Universal with MFA support - - Active Directory - Password - - Active Directory - Integrated
    - - ![Screenshot of the Connect to Server dialog in S S M S with Active Directory - Universal with MFA support selected in the Authentication dropdown.](./media/aad-security-configure-tutorial/ssms-login-prompt.png) - - For more information, see [Universal Authentication (SSMS support for Multi-Factor Authentication)](../database/authentication-mfa-ssms-overview.md). - -1. Select **Active Directory - Universal with MFA support**. This brings up a Multi-Factor Authentication login window. Sign in with your Azure AD password. - - ![Screenshot of the Multi-Factor Authentication login window with the cursor in the Enter password field.](./media/aad-security-configure-tutorial/mfa-login-prompt.png) - -1. In SSMS **Object Explorer**, right-click the server and choose **New Query**. -1. In the query window, use the following syntax to create a login for another Azure AD account: - - ```sql - USE master - GO - CREATE LOGIN login_name FROM EXTERNAL PROVIDER - GO - ``` - - This example creates a login for the Azure AD user bob@aadsqlmi.net, whose domain aadsqlmi.net is federated with the Azure AD aadsqlmi.onmicrosoft.com domain. - - Execute the following T-SQL command. Federated Azure AD accounts are the SQL Managed Instance replacements for on-premises Windows logins and users. - - ```sql - USE master - GO - CREATE LOGIN [bob@aadsqlmi.net] FROM EXTERNAL PROVIDER - GO - ``` - -1. Create a database in the managed instance using the [CREATE DATABASE](/sql/t-sql/statements/create-database-transact-sql?view=azuresqldb-mi-current&preserve-view=true) syntax. This database will be used to test user logins in the next section. - 1. In **Object Explorer**, right-click the server and choose **New Query**. - 1. In the query window, use the following syntax to create a database named **MyMITestDB**. - - ```sql - CREATE DATABASE MyMITestDB; - GO - ``` - -1. Create a SQL Managed Instance login for a group in Azure AD. 
The group will need to exist in Azure AD before you can add the login to SQL Managed Instance. See [Create a basic group and add members using Azure Active Directory](../../active-directory/fundamentals/active-directory-groups-create-azure-portal.md). Create a group _mygroup_ and add members to this group. - -1. Open a new query window in SQL Server Management Studio. - - This example assumes there exists a group called _mygroup_ in Azure AD. Execute the following command: - - ```sql - USE master - GO - CREATE LOGIN [mygroup] FROM EXTERNAL PROVIDER - GO - ``` - -1. As a test, log into the managed instance with the newly created login or group. Open a new connection to the managed instance, and use the new login when authenticating. -1. In **Object Explorer**, right-click the server and choose **New Query** for the new connection. -1. Check server permissions for the newly created Azure AD server principal (login) by executing the following command: - - ```sql - SELECT * FROM sys.fn_my_permissions (NULL, 'DATABASE') - GO - ``` - -Guest users are supported as individual users (without being part of an AAD group (although they can be)) and the logins can be created in master directly (for example, joe@contoso.con) using the current login syntax. - -## Create an Azure AD user from the Azure AD server principal (login) - -Authorization to individual databases works much in the same way in SQL Managed Instance as it does with databases in SQL Server. A user can be created from an existing login in a database, and be provided with permissions on that database, or added to a database role. - -Now that we've created a database called **MyMITestDB**, and a login that only has default permissions, the next step is to create a user from that login. At the moment, the login can connect to the managed instance, and see all the databases, but can't interact with the databases. 
If you sign in with the Azure AD account that has the default permissions, and try to expand the newly created database, you'll see the following error: - -![Screenshot of an error message from the the S S M S Object Explorer that reads "The database MyMITestDB is not accessible. (ObjectExplorer)".](./media/aad-security-configure-tutorial/ssms-db-not-accessible.png) - -For more information on granting database permissions, see [Getting Started with Database Engine Permissions](/sql/relational-databases/security/authentication-access/getting-started-with-database-engine-permissions). - -### Create an Azure AD user and create a sample table - -1. Log into your managed instance using a `sysadmin` account using SQL Server Management Studio. -1. In **Object Explorer**, right-click the server and choose **New Query**. -1. In the query window, use the following syntax to create an Azure AD user from an Azure AD server principal (login): - - ```sql - USE -- provide your database name - GO - CREATE USER user_name FROM LOGIN login_name - GO - ``` - - The following example creates a user bob@aadsqlmi.net from the login bob@aadsqlmi.net: - - ```sql - USE MyMITestDB - GO - CREATE USER [bob@aadsqlmi.net] FROM LOGIN [bob@aadsqlmi.net] - GO - ``` - -1. It's also supported to create an Azure AD user from an Azure AD server principal (login) that is a group. - - The following example creates a login for the Azure AD group _mygroup_ that exists in your Azure AD instance. - - ```sql - USE MyMITestDB - GO - CREATE USER [mygroup] FROM LOGIN [mygroup] - GO - ``` - - All users that belong to *mygroup* can access the **MyMITestDB** database. - - > [!IMPORTANT] - > When creating a **USER** from an Azure AD server principal (login), specify the user_name as the same login_name from **LOGIN**. - - For more information, see [CREATE USER](/sql/t-sql/statements/create-user-transact-sql?view=azuresqldb-mi-current&preserve-view=true). - -1. 
In a new query window, create a test table using the following T-SQL command: - - ```sql - USE MyMITestDB - GO - CREATE TABLE TestTable - ( - AccountNum varchar(10), - City varchar(255), - Name varchar(255), - State varchar(2) - ); - ``` - -1. Create a connection in SSMS with the user that was created. You'll notice that you cannot see the table **TestTable** that was created by the `sysadmin` earlier. We need to provide the user with permissions to read data from the database. - -1. You can check the current permission the user has by executing the following command: - - ```sql - SELECT * FROM sys.fn_my_permissions('MyMITestDB','DATABASE') - GO - ``` - -### Add users to database-level roles - -For the user to see data in the database, we can provide [database-level roles](/sql/relational-databases/security/authentication-access/database-level-roles) to the user. - -1. Log into your managed instance using a `sysadmin` account using SQL Server Management Studio. - -1. In **Object Explorer**, right-click the server and choose **New Query**. - -1. Grant the Azure AD user the `db_datareader` database role by using the following T-SQL syntax: - - ```sql - Use -- provide your database name - ALTER ROLE db_datareader ADD MEMBER user_name - GO - ``` - - The following example provides the user bob@aadsqlmi.net and the group _mygroup_ with `db_datareader` permissions on the **MyMITestDB** database: - - ```sql - USE MyMITestDB - GO - ALTER ROLE db_datareader ADD MEMBER [bob@aadsqlmi.net] - GO - ALTER ROLE db_datareader ADD MEMBER [mygroup] - GO - ``` - -1. Check the Azure AD user that was created in the database exists by executing the following command: - - ```sql - SELECT * FROM sys.database_principals - GO - ``` - -1. Create a new connection to the managed instance with the user that has been added to the `db_datareader` role. -1. Expand the database in **Object Explorer** to see the table. 
- - ![Screenshot from Object Explorer in S S M S showing the folder structure for Tables in MyMITestDB. The dbo.TestTable folder is highlighted.](./media/aad-security-configure-tutorial/ssms-test-table.png) - -1. Open a new query window and execute the following SELECT statement: - - ```sql - SELECT * - FROM TestTable - ``` - - Are you able to see data from the table? You should see the columns being returned. - - ![Screenshot of the Results tab in the S S M S Object Explorer showing the table column headers AccountNum, City, Name, and State.](./media/aad-security-configure-tutorial/ssms-test-table-query.png) - -## Impersonate Azure AD server-level principals (logins) - -SQL Managed Instance supports the impersonation of Azure AD server-level principals (logins). - -### Test impersonation - -1. Log into your managed instance using a `sysadmin` account using SQL Server Management Studio. - -1. In **Object Explorer**, right-click the server and choose **New Query**. - -1. In the query window, use the following command to create a new stored procedure: - - ```sql - USE MyMITestDB - GO - CREATE PROCEDURE dbo.usp_Demo - WITH EXECUTE AS 'bob@aadsqlmi.net' - AS - SELECT user_name(); - GO - ``` - -1. Use the following command to see that the user you're impersonating when executing the stored procedure is **bob\@aadsqlmi.net**. - - ```sql - Exec dbo.usp_Demo - ``` - -1. Test impersonation by using the EXECUTE AS LOGIN statement: - - ```sql - EXECUTE AS LOGIN = 'bob@aadsqlmi.net' - GO - SELECT SUSER_SNAME() - REVERT - GO - ``` - -> [!NOTE] -> Only the SQL server-level principals (logins) that are part of the `sysadmin` role can execute the following operations targeting Azure AD principals: -> -> - EXECUTE AS USER -> - EXECUTE AS LOGIN - -## Use cross-database queries - -Cross-database queries are supported for Azure AD accounts with Azure AD server principals (logins). To test a cross-database query with an Azure AD group, we need to create another database and table. 
You can skip creating another database and table if one already exists. - -1. Log into your managed instance using a `sysadmin` account using SQL Server Management Studio. -1. In **Object Explorer**, right-click the server and choose **New Query**. -1. In the query window, use the following command to create a database named **MyMITestDB2** and table named **TestTable2**: - - ```sql - CREATE DATABASE MyMITestDB2; - GO - USE MyMITestDB2 - GO - CREATE TABLE TestTable2 - ( - EmpId varchar(10), - FirstName varchar(255), - LastName varchar(255), - Status varchar(10) - ); - ``` - -1. In a new query window, execute the following command to create the user _mygroup_ in the new database **MyMITestDB2**, and grant SELECT permissions on that database to _mygroup_: - - ```sql - USE MyMITestDB2 - GO - CREATE USER [mygroup] FROM LOGIN [mygroup] - GO - GRANT SELECT TO [mygroup] - GO - ``` - -1. Sign into the managed instance using SQL Server Management Studio as a member of the Azure AD group _mygroup_. Open a new query window and execute the cross-database SELECT statement: - - ```sql - USE MyMITestDB - SELECT * FROM MyMITestDB2..TestTable2 - GO - ``` - - You should see the table results from **TestTable2**. - -## Additional supported scenarios - -- SQL Agent management and job executions are supported for Azure AD server principals (logins). -- Database backup and restore operations can be executed by Azure AD server principals (logins). -- [Auditing](auditing-configure.md) of all statements related to Azure AD server principals (logins) and authentication events. -- Dedicated administrator connection for Azure AD server principals (logins) that are members of the `sysadmin` server-role. -- Azure AD server principals (logins) are supported with using the [sqlcmd utility](/sql/tools/sqlcmd-utility) and [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) tool. 
-- Logon triggers are supported for logon events coming from Azure AD server principals (logins). -- Service Broker and DB mail can be setup using Azure AD server principals (logins). - -## Next steps - -### Enable security features - -See the [SQL Managed Instance security features](sql-managed-instance-paas-overview.md#security-features) article for a comprehensive list of ways to secure your database. The following security features are discussed: - -- [SQL Managed Instance auditing](auditing-configure.md) -- [Always Encrypted](/sql/relational-databases/security/encryption/always-encrypted-database-engine) -- [Threat detection](threat-detection-configure.md) -- [Dynamic data masking](/sql/relational-databases/security/dynamic-data-masking) -- [Row-level security](/sql/relational-databases/security/row-level-security) -- [Transparent data encryption (TDE)](/sql/relational-databases/security/encryption/transparent-data-encryption-azure-sql) - -### SQL Managed Instance capabilities - -For a complete overview of SQL Managed Instance capabilities, see: - -> [!div class="nextstepaction"] -> [SQL Managed Instance capabilities](sql-managed-instance-paas-overview.md) \ No newline at end of file diff --git a/articles/azure-sql/managed-instance/alerts-create.md b/articles/azure-sql/managed-instance/alerts-create.md deleted file mode 100644 index ed6f63047d194..0000000000000 --- a/articles/azure-sql/managed-instance/alerts-create.md +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: Setup alerts and notifications for Managed Instance (Azure portal) -description: Use the Azure portal to create SQL Managed Instance alerts, which can trigger notifications or automation when the conditions you specify are met. 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: performance -ms.custom: -ms.devlang: -ms.topic: how-to -author: urosmil -ms.author: urmilano -ms.reviewer: mathoma, wiassaf -ms.date: 05/04/2020 ---- -# Create alerts for Azure SQL Managed Instance using the Azure portal -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article shows you how to set up alerts for databases in Azure SQL Managed Instance Database using the Azure portal. Alerts can send you an email, call a web hook, execute Azure Function, runbook, call an external ITSM compatible ticketing system, call you on the phone or send a text message when some metric, such is for example instance storage size, or CPU usage, reaches a predefined threshold. This article also provides best practices for setting alert periods. - - -## Overview - -You can receive an alert based on monitoring metrics for, or events on, your Azure services. - -* **Metric values** - The alert triggers when the value of a specified metric crosses a threshold you assign in either direction. That is, it triggers both when the condition is first met and then afterwards when that condition is no longer being met. - -You can configure an alert to do the following when it triggers: - -* Send email notifications to the service administrator and coadministrators -* Send email to additional emails that you specify. -* Call a phone number with voice prompt -* Send text message to a phone number -* Call a webhook -* Call Azure Function -* Call Azure runbook -* Call an external ticketing ITSM compatible system - -You can configure and get information about alert rules using [the Azure portal, PowerShell or the Azure CLI](../../azure-monitor/alerts/alerts-classic-portal.md) or [Azure Monitor REST API](/rest/api/monitor/alertrules). - -## Alerting metrics available for managed instance - -> [!IMPORTANT] -> Alerting metrics are available for managed instance only. 
Alerting metrics for individual databases in managed instance are not available. -> Database diagnostics telemetry is on the other hand available in the form of [diagnostics logs](../database/metrics-diagnostic-telemetry-logging-streaming-export-configure.md#diagnostic-telemetry-for-export). Alerts on diagnostics logs can be setup from within [SQL Analytics](../../azure-monitor/insights/azure-sql.md) product using [log alert scripts](../../azure-monitor/insights/azure-sql.md#create-alerts-for-sql-managed-instance) for managed instance. - -The following managed instance metrics are available for alerting configuration: - -| Metric | Description | Unit of measure \ possible values | -| :--------- | --------------------- | ----------- | -| Average CPU percentage | Average percentage of CPU utilization in selected time period. | 0-100 (percent) | -| IO bytes read | IO bytes read in the selected time period. | Bytes | -| IO bytes written | IO bytes written in the selected time period. | Bytes | -| IO requests count | Count of IO requests in the selected time period. | Numerical | -| Storage space reserved | Current max. storage space reserved for the managed instance. Changes with resource scaling operation. | MB (Megabytes) | -| Storage space used | Storage space used in the selected period. Changes with storage consumption by databases and the instance. | MB (Megabytes) | -| Virtual core count | vCores provisioned for the managed instance. Changes with resource scaling operation. | 4-80 (vCores) | - -## Create an alert rule on a metric with the Azure portal - -1. In Azure [portal](https://portal.azure.com/), locate the managed instance you are interested in monitoring, and select it. - -2. Select **Metrics** menu item in the Monitoring section. - - ![Monitoring](./media/alerts-create/mi-alerting-menu-annotated.png) - -3. On the drop-down menu, select one of the metrics you wish to set up your alert on (Storage space used is shown in the example). - -4. 
Select aggregation period - average, minimum, or maximum reached in the given time period (Avg, Min, or Max). - -5. Select **New alert rule** - -6. In the Create alert rule pane click on **Condition name** (Storage space used is shown in the example) - - ![Define condition](./media/alerts-create/mi-create-metrics-alert-smaller-annotated.png) - -7. On the Configure signal logic pane, define Operator, Aggregation type, and Threshold value - - * Operator type options are greater than, equal and less than (the threshold value) - * Aggregation type options are min, max or average (in the aggregation granularity period) - * Threshold value is the alert value which will be evaluated based on the operator and aggregation criteria - - ![Configure_signal_logic](./media/alerts-create/mi-configure-signal-logic-annotated.png) - - In the example shown in the screenshot, value of 1840876 MB is used representing a threshold value of 1.8 TB. As the operator in the example is set to greater than, the alert will be created if the storage space consumption on the managed instance goes over 1.8 TB. Note that the threshold value for storage space metrics must be expressed in MB. - -8. Set the evaluation period - aggregation granularity in minutes and frequency of evaluation. The frequency of evaluation denotes how often the alerting system will periodically check if the threshold condition has been met. - -9. Select action group. Action group pane will show up through which you will be able to select an existing, or create a new action. This action defines what will happen upon triggering an alert (for example, sending email, calling you on the phone, executing a webhook, Azure function, or a runbook). 
- - ![Select_action_group](./media/alerts-create/mi-select-action-group-smaller-annotated.png) - - * To create new action group, select **+Create action group** - - ![Create_action_group_alerts](./media/alerts-create/mi-create-alert-action-group-smaller-annotated.png) - - * Define how you want to be alerted: Enter action group name, short name, action name and select Action Type. The Action Type defines if you will be notified via email, text message, voice call, or if perhaps webhook, Azure function, runbook will be executed, or ITSM ticket will be created in your compatible system. - - ![Define_how_to_be_alerted](./media/alerts-create/mi-add-alerts-action-group-annotated.png) - -10. Fill in the alert rule details for your records, select the severity type. - - ![Rule_description](./media/alerts-create/mi-rule-details-complete-smaller-annotated.png) - - * Complete creating the alert rule by clicking on **Create alert rule** button. - -New alert rule will become active within a few minutes and will be triggered based on your settings. - -## Verifying alerts - -> [!NOTE] -> To suppress noisy alerts, see [Suppression of alerts using action rules](../../azure-monitor/alerts/alerts-action-rules.md#suppression-of-alerts). - -Upon setting up an alerting rule, verify that you are satisfied with the alerting trigger and its frequency. For the example shown on this page for setting up an alert on storage space used, if your alerting option was email, you might receive email such as the one shown below. - - ![alert_example](./media/alerts-create/mi-email-alert-example-smaller-annotated.png) - -The email shows the alert name, details of the threshold and why the alert was triggered helping you to verify and troubleshoot your alert. You can use **See in Azure portal** button to view alert received via email in Azure portal. 
- -## View, suspend, activate, modify and delete existing alert rules - -> [!NOTE] -> Existing alerts need to be managed from Alerts menu from Azure portal dashboard. Existing alerts cannot be modified from Managed Instance resource blade. - -To view, suspend, activate, modify and delete existing alerts: - -1. Search for Alerts using Azure portal search. Click on Alerts. - - ![find_alerts](./media/alerts-create/mi-manage-alerts-browse-smaller-annotated.png) - - Alternatively, you could also click on Alerts on the Azure navigation bar, if you have it configured. - -2. On the Alerts pane, select Manage alert rules. - - ![modify_alerts](./media/alerts-create/mi-manage-alert-rules-smaller-annotated.png) - - List of existing alerts will show up. Select an individual existing alert rule to manage it. Existing active rules can be modified and tuned to your preference. Active rules can also be suspended without being deleted. - -## Next steps - -* Learn about Azure Monitor alerting system, see [Overview of alerts in Microsoft Azure](../../azure-monitor/alerts/alerts-overview.md) -* Learn more about metric alerts, see [Understand how metric alerts work in Azure Monitor](../../azure-monitor/alerts/alerts-metric-overview.md) -* Learn about configuring a webhook in alerts, see [Call a webhook with a classic metric alert](../../azure-monitor/alerts/alerts-webhooks.md) -* Learn about configuring and managing alerts using PowerShell, see [Action rules](/powershell/module/az.monitor/add-azmetricalertrulev2) -* Learn about configuring and managing alerts using API, see [Azure Monitor REST API reference](/rest/api/monitor/) \ No newline at end of file diff --git a/articles/azure-sql/managed-instance/api-references-create-manage-instance.md b/articles/azure-sql/managed-instance/api-references-create-manage-instance.md deleted file mode 100644 index 4ea49727b56f4..0000000000000 --- a/articles/azure-sql/managed-instance/api-references-create-manage-instance.md +++ /dev/null @@ -1,104 
+0,0 @@ ---- -title: Management API reference for Azure SQL Managed Instance -description: Learn about creating and configuring managed instances of Azure SQL Managed Instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: development -ms.topic: reference -author: urosmil -ms.author: urmilano -ms.reviewer: mathoma -ms.date: 03/12/2019 -ms.custom: devx-track-azurecli ---- -# Management API reference for Azure SQL Managed Instance - -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -You can create and configure managed instances of Azure SQL Managed Instance using the Azure portal, PowerShell, Azure CLI, REST API, and Transact-SQL. In this article, you can find an overview of the functions and the API that you can use to create and configure managed instances. - -## Azure portal: Create a managed instance - -For a quickstart showing you how to create a managed instance, see [Quickstart: Create a managed instance](instance-create-quickstart.md). - -## PowerShell: Create and configure managed instances - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported by Azure SQL Database, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRM modules are substantially identical. - -To create and manage managed instances with Azure PowerShell, use the following PowerShell cmdlets. If you need to install or upgrade PowerShell, see [Install the Azure PowerShell module](/powershell/azure/install-az-ps). - -> [!TIP] -> For PowerShell example scripts, see [Quickstart script: Create a managed instance using a PowerShell library](/archive/blogs/sqlserverstorageengine/quick-start-script-create-azure-sql-managed-instance-using-powershell). 
- -| Cmdlet | Description | -| --- | --- | -|[New-AzSqlInstance](/powershell/module/az.sql/new-azsqlinstance)|Creates a managed instance. | -|[Get-AzSqlInstance](/powershell/module/az.sql/get-azsqlinstance)|Returns information about a managed instance.| -|[Set-AzSqlInstance](/powershell/module/az.sql/set-azsqlinstance)|Sets properties for a managed instance.| -|[Remove-AzSqlInstance](/powershell/module/az.sql/remove-azsqlinstance)|Removes a managed instance.| -|[Get-AzSqlInstanceOperation](/powershell/module/az.sql/get-azsqlinstanceoperation)|Gets a list of management operations performed on the managed instance or specific operation.| -|[Stop-AzSqlInstanceOperation](/powershell/module/az.sql/stop-azsqlinstanceoperation)|Cancels the specific management operation performed on the managed instance.| -|[New-AzSqlInstanceDatabase](/powershell/module/az.sql/new-azsqlinstancedatabase)|Creates a SQL Managed Instance database.| -|[Get-AzSqlInstanceDatabase](/powershell/module/az.sql/get-azsqlinstancedatabase)|Returns information about a SQL Managed Instance database.| -|[Remove-AzSqlInstanceDatabase](/powershell/module/az.sql/remove-azsqlinstancedatabase)|Removes a SQL Managed Instance database.| -|[Restore-AzSqlInstanceDatabase](/powershell/module/az.sql/restore-azsqlinstancedatabase)|Restores a SQL Managed Instance database.| - -## Azure CLI: Create and configure managed instances - -To create and configure managed instances with [Azure CLI](/cli/azure), use the following [Azure CLI commands for SQL Managed Instance](/cli/azure/sql/mi). Use [Azure Cloud Shell](../../cloud-shell/overview.md) to run Azure CLI in your browser, or [install](/cli/azure/install-azure-cli) it on macOS, Linux, or Windows. - -> [!TIP] -> For an Azure CLI quickstart, see [Working with SQL Managed Instance using Azure CLI](https://medium.com/azure-sqldb-managed-instance/working-with-sql-managed-instance-using-azure-cli-611795fe0b44). 
- -| Cmdlet | Description | -| --- | --- | -|[az sql mi create](/cli/azure/sql/mi#az-sql-mi-create) |Creates a managed instance.| -|[az sql mi list](/cli/azure/sql/mi#az-sql-mi-list)|Lists available managed instances.| -|[az sql mi show](/cli/azure/sql/mi#az-sql-mi-show)|Gets the details for a managed instance.| -|[az sql mi update](/cli/azure/sql/mi#az-sql-mi-update)|Updates a managed instance.| -|[az sql mi delete](/cli/azure/sql/mi#az-sql-mi-delete)|Removes a managed instance.| -|[az sql mi op list](/cli/azure/sql/mi/op#az-sql-mi-op-list)|Gets a list of management operations performed on the managed instance.| -|[az sql mi op show](/cli/azure/sql/mi/op#az-sql-mi-op-show)|Gets the specific management operation performed on the managed instance.| -|[az sql mi op cancel](/cli/azure/sql/mi/op#az-sql-mi-op-cancel)|Cancels the specific management operation performed on the managed instance.| -|[az sql midb create](/cli/azure/sql/midb#az-sql-midb-create) |Creates a managed database.| -|[az sql midb list](/cli/azure/sql/midb#az-sql-midb-list)|Lists available managed databases.| -|[az sql midb restore](/cli/azure/sql/midb#az-sql-midb-restore)|Restores a managed database.| -|[az sql midb delete](/cli/azure/sql/midb#az-sql-midb-delete)|Removes a managed database.| - -## Transact-SQL: Create and configure instance databases - -To create and configure instance databases after the managed instance is created, use the following T-SQL commands. You can issue these commands using the Azure portal, [SQL Server Management Studio](/sql/ssms/use-sql-server-management-studio), [Azure Data Studio](/sql/azure-data-studio/what-is), [Visual Studio Code](https://code.visualstudio.com/docs), or any other program that can connect to a server and pass Transact-SQL commands. 
- -> [!TIP] -> For quickstarts showing you how to configure and connect to a managed instance using SQL Server Management Studio on Microsoft Windows, see [Quickstart: Configure Azure VM to connect to Azure SQL Managed Instance](connect-vm-instance-configure.md) and [Quickstart: Configure a point-to-site connection to Azure SQL Managed Instance from on-premises](point-to-site-p2s-configure.md). - -> [!IMPORTANT] -> You cannot create or delete a managed instance using Transact-SQL. - -| Command | Description | -| --- | --- | -|[CREATE DATABASE](/sql/t-sql/statements/create-database-transact-sql?preserve-view=true&view=azuresqldb-mi-current)|Creates a new instance database in SQL Managed Instance. You must be connected to the master database to create a new database.| -| [ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql?preserve-view=true&view=azuresqldb-mi-current) |Modifies an instance database in SQL Managed Instance.| - -## REST API: Create and configure managed instances - -To create and configure managed instances, use these REST API requests. 
- -| Command | Description | -| --- | --- | -|[Managed Instances - Create Or Update](/rest/api/sql/managedinstances/createorupdate)|Creates or updates a managed instance.| -|[Managed Instances - Delete](/rest/api/sql/managedinstances/delete)|Deletes a managed instance.| -|[Managed Instances - Get](/rest/api/sql/managedinstances/get)|Gets a managed instance.| -|[Managed Instances - List](/rest/api/sql/managedinstances/list)|Returns a list of managed instances in a subscription.| -|[Managed Instances - List By Resource Group](/rest/api/sql/managedinstances/listbyresourcegroup)|Returns a list of managed instances in a resource group.| -|[Managed Instances - Update](/rest/api/sql/managedinstances/update)|Updates a managed instance.| -|[Managed Instance Operations - List By Managed Instance](/rest/api/sql/managedinstanceoperations/listbymanagedinstance)|Gets a list of management operations performed on the managed instance.| -|[Managed Instance Operations - Get](/rest/api/sql/managedinstanceoperations/get)|Gets the specific management operation performed on the managed instance.| -|[Managed Instance Operations - Cancel](/rest/api/sql/managedinstanceoperations/cancel)|Cancels the specific management operation performed on the managed instance.| - -## Next steps - -- To learn about migrating a SQL Server database to Azure, see [Migrate to Azure SQL Database](../database/migrate-to-database-from-sql-server.md). -- For information about supported features, see [Features](../database/features-comparison.md). 
diff --git a/articles/azure-sql/managed-instance/auditing-configure.md b/articles/azure-sql/managed-instance/auditing-configure.md deleted file mode 100644 index f956492c6ffe5..0000000000000 --- a/articles/azure-sql/managed-instance/auditing-configure.md +++ /dev/null @@ -1,263 +0,0 @@ ---- -title: SQL Managed Instance auditing -description: Learn how to get started with Azure SQL Managed Instance auditing using T-SQL -services: sql-database -ms.service: sql-managed-instance -ms.subservice: security -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -f1_keywords: - - "mi.azure.sqlaudit.general.f1" -author: sravanisaluru -ms.author: srsaluru -ms.date: "03/25/2022" -ms.reviewer: vanto ---- -# Get started with Azure SQL Managed Instance auditing -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -[Azure SQL Managed Instance](sql-managed-instance-paas-overview.md) auditing tracks database events and writes them to an audit log in your Azure storage account. Auditing also: - -- Helps you maintain regulatory compliance, understand database activity, and gain insight into discrepancies and anomalies that could indicate business concerns or suspected security violations. -- Enables and facilitates adherence to compliance standards, although it doesn't guarantee compliance. For more information about Azure programs that support standards compliance, see the [Azure Trust Center](https://gallery.technet.microsoft.com/Overview-of-Azure-c1be3942), where you can find the most current list of compliance certifications. - -> [!IMPORTANT] -> Auditing for Azure SQL Database, Azure Synapse and Azure SQL Managed Instance is optimized for availability and performance. During very high activity, or high network load, Azure SQL Database, Azure Synapse and Azure SQL Managed Instance allow operations to proceed and may not record some audited events. 
- -## Set up auditing for your server to Azure Storage - -The following section describes the configuration of auditing on your managed instance. - -1. Go to the [Azure portal](https://portal.azure.com). -2. Create an Azure Storage **container** where audit logs are stored. - - 1. Navigate to the Azure storage account where you would like to store your audit logs. - - > [!IMPORTANT] - > - Use a storage account in the same region as the managed instance to avoid cross-region reads/writes. - > - If your storage account is behind a Virtual Network or a Firewall, please see [Grant access from a virtual network](../../storage/common/storage-network-security.md#grant-access-from-a-virtual-network). - > - If you change retention period from 0 (unlimited retention) to any other value, please note that retention will only apply to logs written after retention value was changed (logs written during the period when retention was set to unlimited are preserved, even after retention is enabled). - - 1. In the storage account, go to **Overview** and click **Blobs**. - - ![Azure Blobs widget](./media/auditing-configure/1_blobs_widget.png) - - 1. In the top menu, click **+ Container** to create a new container. - - ![Create blob container icon](./media/auditing-configure/2_create_container_button.png) - - 1. Provide a container **Name**, set **Public access level** to **Private**, and then click **OK**. - - ![Create blob container configuration](./media/auditing-configure/3_create_container_config.png) - - > [!IMPORTANT] - > Customers wishing to configure an immutable log store for their server- or database-level audit events should follow the [instructions provided by Azure Storage](../../storage/blobs/immutable-time-based-retention-policy-overview.md#allow-protected-append-blobs-writes). (Please ensure you have selected **Allow additional appends** when you configure the immutable blob storage.) - -3. 
After you create the container for the audit logs, there are two ways to configure it as the target for the audit logs: [using T-SQL](#blobtsql) or [using the SQL Server Management Studio (SSMS) UI](#blobssms): - - - **Configure blob storage for audit logs using T-SQL:** - - 1. In the containers list, click the newly created container and then click **Container properties**. - - ![Blob container properties button](./media/auditing-configure/4_container_properties_button.png) - - 1. Copy the container URL by clicking the copy icon and save the URL (for example, in Notepad) for future use. The container URL format should be `https://.blob.core.windows.net/` - - ![Blob container copy URL](./media/auditing-configure/5_container_copy_name.png) - - 1. Generate an Azure Storage **SAS token** to grant managed instance auditing access rights to the storage account: - - - Navigate to the Azure storage account where you created the container in the previous step. - - - Click on **Shared access signature** in the **Storage Settings** menu. - - ![Shared access signature icon in storage settings menu](./media/auditing-configure/6_storage_settings_menu.png) - - - Configure the SAS as follows: - - - **Allowed services**: Blob - - - **Start date**: to avoid time zone-related issues, use yesterday’s date - - - **End date**: choose the date on which this SAS token expires - - > [!NOTE] - > Renew the token upon expiry to avoid audit failures. - - - Click **Generate SAS**. - - ![SAS configuration](./media/auditing-configure/7_sas_configure.png) - - - The SAS token appears at the bottom. Copy the token by clicking on the copy icon, and save it (for example, in Notepad) for future use. - - ![Copy SAS token](./media/auditing-configure/8_sas_copy.png) - - > [!IMPORTANT] - > Remove the question mark (“?”) character from the beginning of the token. - - 1. Connect to your managed instance via SQL Server Management Studio or any other supported tool. - - 1. 
Execute the following T-SQL statement to **create a new credential** using the container URL and SAS token that you created in the previous steps: - - ```SQL - CREATE CREDENTIAL [] - WITH IDENTITY='SHARED ACCESS SIGNATURE', - SECRET = '' - GO - ``` - - 1. Execute the following T-SQL statement to create a new server audit (choose your own audit name, and use the container URL that you created in the previous steps). If not specified, the `RETENTION_DAYS` default is 0 (unlimited retention): - - ```SQL - CREATE SERVER AUDIT [] - TO URL ( PATH ='' , RETENTION_DAYS = integer ) - GO - ``` - - 1. Continue by [creating a server audit specification or database audit specification](#createspec). - - - **Configure blob storage for audit logs using SQL Server Management Studio 18:** - - 1. Connect to the managed instance using the SQL Server Management Studio UI. - - 1. Expand the root node of Object Explorer. - - 1. Expand the **Security** node, right-click on the **Audits** node, and click on **New Audit**: - - ![Expand security and audit node](./media/auditing-configure/10_mi_SSMS_new_audit.png) - - 1. Make sure **URL** is selected in **Audit destination** and click on **Browse**: - - ![Browse Azure Storage](./media/auditing-configure/11_mi_SSMS_audit_browse.png) - - 1. (Optional) Sign in to your Azure account: - - ![Sign in to Azure](./media/auditing-configure/12_mi_SSMS_sign_in_to_azure.png) - - 1. Select a subscription, storage account, and blob container from the dropdowns, or create your own container by clicking on **Create**. Once you have finished, click **OK**: - - ![Select Azure subscription, storage account, and blob container](./media/auditing-configure/13_mi_SSMS_select_subscription_account_container.png) - - 1. Click **OK** in the **Create Audit** dialog. - - > [!NOTE] - > When using SQL Server Management Studio UI to create audit, a credential to the container with SAS key will be automatically created. - - 1. 
After you configure the blob container as target for the audit logs, create and enable a server audit specification or database audit specification as you would for SQL Server: - - - [Create server audit specification T-SQL guide](/sql/t-sql/statements/create-server-audit-specification-transact-sql) - - [Create database audit specification T-SQL guide](/sql/t-sql/statements/create-database-audit-specification-transact-sql) - -5. Enable the server audit that you created in step 3: - - ```SQL - ALTER SERVER AUDIT [] - WITH (STATE=ON); - GO - ``` - -For additional information: - -- [Auditing differences between Azure SQL Managed Instance and a database in SQL Server](#auditing-differences-between-databases-in-azure-sql-managed-instance-and-databases-in-sql-server) -- [CREATE SERVER AUDIT](/sql/t-sql/statements/create-server-audit-transact-sql) -- [ALTER SERVER AUDIT](/sql/t-sql/statements/alter-server-audit-transact-sql) - - - -## Auditing of Microsoft Support operations - -Auditing of Microsoft Support operations for SQL Managed Instance allows you to audit Microsoft support engineers' operations when they need to access your server during a support request. The use of this capability, along with your auditing, enables more transparency into your workforce and allows for anomaly detection, trend visualization, and data loss prevention. - -To enable auditing of Microsoft Support operations, navigate to **Create Audit** under **Security** > **Audit** in your SQL Manage Instance, and select **Microsoft support operations**. - -![create audit icon](./media/auditing-configure/support-operations.png) - - -## Set up auditing for your server to Event Hubs or Azure Monitor logs - -Audit logs from a managed instance can be sent to Azure Event Hubs or Azure Monitor logs. This section describes how to configure this: - -1. Navigate in the [Azure portal](https://portal.azure.com/) to the managed instance. - -2. Click on **Diagnostic settings**. - -3. 
Click on **Turn on diagnostics**. If diagnostics is already enabled, **+Add diagnostic setting** will show instead. - -4. Select **SQLSecurityAuditEvents** in the list of logs. - -5. Select a destination for the audit events: Event Hubs, Azure Monitor logs, or both. Configure for each target the required parameters (e.g. Log Analytics workspace). - -6. Click **Save**. - - ![Configure diagnostic settings](./media/auditing-configure/9_mi_configure_diagnostics.png) - -7. Connect to the managed instance using **SQL Server Management Studio (SSMS)** or any other supported client. - -8. Execute the following T-SQL statement to create a server audit: - - ```SQL - CREATE SERVER AUDIT [] TO EXTERNAL_MONITOR; - GO - ``` - -9. Create and enable a server audit specification or database audit specification as you would for SQL Server: - - - [Create Server audit specification T-SQL guide](/sql/t-sql/statements/create-server-audit-specification-transact-sql) - - [Create Database audit specification T-SQL guide](/sql/t-sql/statements/create-database-audit-specification-transact-sql) - -10. Enable the server audit created in step 8: - - ```SQL - ALTER SERVER AUDIT [] - WITH (STATE=ON); - GO - ``` - -## Consume audit logs - -### Consume logs stored in Azure Storage - -There are several methods you can use to view blob auditing logs. - -- Use the system function `sys.fn_get_audit_file` (T-SQL) to return the audit log data in tabular format. For more information on using this function, see the [sys.fn_get_audit_file documentation](/sql/relational-databases/system-functions/sys-fn-get-audit-file-transact-sql). - -- You can explore audit logs by using a tool such as [Azure Storage Explorer](https://azure.microsoft.com/features/storage-explorer/). In Azure Storage, auditing logs are saved as a collection of blob files within a container that was defined to store the audit logs. 
For further details about the hierarchy of the storage folder, naming conventions, and log format, see the [Blob Audit Log Format Reference](../database/audit-log-format.md). - -- For a full list of audit log consumption methods, refer to [Get started with Azure SQL Database auditing](/azure/azure-sql/database/auditing-overview). - -### Consume logs stored in Event Hubs - -To consume audit logs data from Event Hubs, you will need to set up a stream to consume events and write them to a target. For more information, see the Azure Event Hubs documentation. - -### Consume and analyze logs stored in Azure Monitor logs - -If audit logs are written to Azure Monitor logs, they are available in the Log Analytics workspace, where you can run advanced searches on the audit data. As a starting point, navigate to the Log Analytics workspace. Under the **General** section, click **Logs** and enter a simple query, such as: `search "SQLSecurityAuditEvents"` to view the audit logs. - -Azure Monitor logs gives you real-time operational insights using integrated search and custom dashboards to readily analyze millions of records across all your workloads and servers. For additional useful information about Azure Monitor logs search language and commands, see [Azure Monitor logs search reference](../../azure-monitor/logs/log-query-overview.md). - -[!INCLUDE [azure-monitor-log-analytics-rebrand](../../../includes/azure-monitor-log-analytics-rebrand.md)] - -## Auditing differences between databases in Azure SQL Managed Instance and databases in SQL Server - -The key differences between auditing in databases in Azure SQL Managed Instance and databases in SQL Server are: - -- With Azure SQL Managed Instance, auditing works at the server level and stores `.xel` log files in Azure Blob storage. -- In SQL Server, audit works at the server level, but stores events in the file system and Windows event logs. - -XEvent auditing in managed instances supports Azure Blob storage targets. 
File and Windows logs are **not supported**. - -The key differences in the `CREATE AUDIT` syntax for auditing to Azure Blob storage are: - -- A new syntax `TO URL` is provided and enables you to specify the URL of the Azure Blob storage container where the `.xel` files are placed. -- A new syntax `TO EXTERNAL MONITOR` is provided to enable Event Hubs and Azure Monitor log targets. -- The syntax `TO FILE` is **not supported** because Azure SQL Managed Instance cannot access Windows file shares. -- Shutdown option is **not supported**. -- `queue_delay` of 0 is **not supported**. - -## Next steps - -- For a full list of audit log consumption methods, refer to [Get started with Azure SQL Database auditing](/azure/azure-sql/database/auditing-overview). -- For more information about Azure programs that support standards compliance, see the [Azure Trust Center](https://gallery.technet.microsoft.com/Overview-of-Azure-c1be3942), where you can find the most current list of compliance certifications. 
- - diff --git a/articles/azure-sql/managed-instance/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance.md b/articles/azure-sql/managed-instance/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance.md deleted file mode 100644 index 7fc59874cb5c6..0000000000000 --- a/articles/azure-sql/managed-instance/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance.md +++ /dev/null @@ -1,503 +0,0 @@ ---- -title: Create an Azure SQL Managed Instance using a user-assigned managed identity -description: This article guides you through creating an Azure SQL Managed Instance using a user-assigned managed identity -titleSuffix: Azure SQL Managed Instance -ms.service: sql-managed-instance -ms.subservice: security -ms.topic: conceptual -author: GithubMirek -ms.author: mireks -ms.reviewer: vanto -ms.date: 12/15/2021 ---- - -# Create an Azure SQL Managed Instance with a user-assigned managed identity - -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -> [!NOTE] -> User-assigned managed identity for Azure SQL is in **public preview**. If you are looking for a guide on Azure SQL Database, see [Create an Azure SQL logical server using a user-assigned managed identity](../database/authentication-azure-ad-user-assigned-managed-identity-create-server.md) - -This how-to guide outlines the steps to create an [Azure SQL Managed Instance](sql-managed-instance-paas-overview.md) with a [user-assigned managed identity](../../active-directory/managed-identities-azure-resources/overview.md#managed-identity-types). For more information on the benefits of using a user-assigned managed identity for the server identity in Azure SQL Database, see [User-assigned managed identity in Azure AD for Azure SQL](../database/authentication-azure-ad-user-assigned-managed-identity.md). 
- -## Prerequisites - -- To provision a Managed Instance with a user-assigned managed identity, the [SQL Managed Instance Contributor](../../role-based-access-control/built-in-roles.md#sql-managed-instance-contributor) role (or a role with greater permissions), along with an Azure RBAC role containing the following action is required: - - Microsoft.ManagedIdentity/userAssignedIdentities/*/assign/action - For example, the [Managed Identity Operator](../../role-based-access-control/built-in-roles.md#managed-identity-operator) has this action. -- Create a user-assigned managed identity and assign it the necessary permission to be a server or managed instance identity. For more information, see [Manage user-assigned managed identities](../../active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities.md) and [user-assigned managed identity permissions for Azure SQL](../database/authentication-azure-ad-user-assigned-managed-identity.md#permissions). -- [Az.Sql module 3.4](https://www.powershellgallery.com/packages/Az.Sql/3.4.0) or higher is required when using PowerShell for user-assigned managed identities. -- [The Azure CLI 2.26.0](/cli/azure/install-azure-cli) or higher is required to use the Azure CLI with user-assigned managed identities. -- For a list of limitations and known issues with using user-assigned managed identity, see [User-assigned managed identity in Azure AD for Azure SQL](../database/authentication-azure-ad-user-assigned-managed-identity.md#limitations-and-known-issues) - -# [Portal](#tab/azure-portal) - -1. Browse to the [Select SQL deployment](https://portal.azure.com/#create/Microsoft.AzureSQL) option page in the Azure portal. - -1. If you aren't already signed in to Azure portal, sign in when prompted. - -1. Under **SQL managed instances**, leave **Resource type** set to **Single instance**, and select **Create**. - -1. 
Fill out the mandatory information required on the **Basics** tab for **Project details** and **Managed Instance details**. This is a minimum set of information required to provision a SQL Managed Instance. - - :::image type="content" source="media/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance/managed-instance-create-basic.png" alt-text="Azure portal screenshot of the create Managed Instance basic tab"::: - - For more information on the configuration options, see [Quickstart: Create an Azure SQL Managed Instance](../managed-instance/instance-create-quickstart.md). - -1. Under **Authentication**, select a preferred authentication model. If you're looking to only configure [Azure AD-only authentication](../database/authentication-azure-ad-only-authentication.md), see our guide [here](../database/authentication-azure-ad-only-authentication-create-server.md?tabs=azure-portal). - -1. Next, go through the **Networking** tab configuration, or leave the default settings. - -1. On the Security tab, under **Identity (preview)**, select **Configure Identities**. - - :::image type="content" source="media/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance/create-instance-configure-identities.png" alt-text="Screenshot of Azure portal security settings of the create managed instance process"::: - -1. On the **Identity (preview)** blade, under **User assigned managed identity**, select **Add**. Select the desired **Subscription** and then under **User assigned managed identities** select the desired user assigned managed identity from the selected subscription. Then select the **Select** button. 
- - :::image type="content" source="media/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance/managed-instance-user-assigned-managed-identity-configuration.png" alt-text="Azure portal screenshot of adding user assigned managed identity when configuring managed instance identity"::: - - :::image type="content" source="../database/media/authentication-azure-ad-user-assigned-managed-identity/select-a-user-assigned-managed-identity.png" alt-text="Azure portal screenshot of user assigned managed identity when configuring managed instance identity"::: - -1. Under **Primary identity**, select the same user-assigned managed identity selected in the previous step. - - :::image type="content" source="../database/media/authentication-azure-ad-user-assigned-managed-identity/select-a-primary-identity.png" alt-text="Azure portal screenshot of selecting primary identity for the managed instance"::: - - > [!NOTE] - > If the system-assigned managed identity is the primary identity, the **Primary identity** field must be empty. - -1. Select **Apply** - -1. You can leave the rest of the settings default. For more information on other tabs and settings, follow the guide in the article [Quickstart: Create an Azure SQL Managed Instance](../managed-instance/instance-create-quickstart.md). - -1. Once you are done with configuring your settings, select **Review + create** to proceed. Select **Create** to start provisioning the managed instance. - -# [The Azure CLI](#tab/azure-cli) - -The Azure CLI command `az sql mi create` is used to provision a new Azure SQL Managed Instance. The below command will provision a managed instance with a user-assigned managed identity, and also enable [Azure AD-only authentication](../database/authentication-azure-ad-only-authentication.md). - -> [!NOTE] -> The script requires a virtual network and subnet be created as a prerequisite. 
- -The managed instance SQL Administrator login will be automatically created and the password will be set to a random password. Since SQL Authentication connectivity is disabled with this provision, the SQL Administrator login won't be used. - -The Azure AD admin will be the account you set for ``, and can be used to manage the instance when the provisioning is complete. - -Replace the following values in the example: - -- ``: Your subscription ID can be found in the Azure portal -- ``: Name of the resource group for your managed instance. The resource group should also include the virtual network and subnet created -- ``: The user-assigned managed identity. Can also be used as the primary identity. -- ``: The primary identity you want to use as the instance identity -- ``: Can be an Azure AD user or group. For example, `DummyLogin` -- ``: The Azure AD Object ID for the user -- ``: Name the managed instance you want to create -- The `subnet` parameter needs to be updated with the ``, ``, ``, and ``. - -```azurecli -az sql mi create --assign-identity --identity-type UserAssigned --user-assigned-identity-id /subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/ --primary-user-assigned-identity-id /subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/ --enable-ad-only-auth --external-admin-principal-type User --external-admin-name --external-admin-sid -g -n --subnet /subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/ -``` - -For more information, see [az sql mi create](/cli/azure/sql/mi#az-sql-mi-create). - -> [!NOTE] -> The above example provisions a managed instance with only a user-assigned managed identity. You could set the `--identity-type` to be `UserAssigned,SystemAssigned` if you wanted both types of managed identities to be created with the instance. 
- -# [PowerShell](#tab/azure-powershell) - -The PowerShell command `New-AzSqlInstance` is used to provision a new Azure SQL Managed Instance. The below command will provision a managed instance with a user-assigned managed identity, and also enable [Azure AD-only authentication](../database/authentication-azure-ad-only-authentication.md). - -> [!NOTE] -> The script requires a virtual network and subnet be created as a prerequisite. - -The managed instance SQL Administrator login will be automatically created and the password will be set to a random password. Since SQL Authentication connectivity is disabled with this provision, the SQL Administrator login won't be used. - -The Azure AD admin will be the account you set for ``, and can be used to manage the instance when the provisioning is complete. - -Replace the following values in the example: - -- ``: Name the managed instance you want to create -- ``: Name of the resource group for your managed instance. The resource group should also include the virtual network and subnet created -- ``: Your subscription ID can be found in the Azure portal -- ``: The user-assigned managed identity. Can also be used as the primary identity. -- ``: The primary identity you want to use as the instance identity -- ``: Location of the managed instance, such as `West US`, or `Central US` -- ``: Can be an Azure AD user or group. For example, `DummyLogin` -- The `SubnetId` parameter needs to be updated with the ``, ``, ``, and ``. 
- - -```powershell -New-AzSqlInstance -Name "" -ResourceGroupName "" -AssignIdentity -IdentityType "UserAssigned" -UserAssignedIdentityId "/subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/" -PrimaryUserAssignedIdentityId "/subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/" -ExternalAdminName "" -EnableActiveDirectoryOnlyAuthentication -Location "" -SubnetId "/subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/" -LicenseType LicenseIncluded -StorageSizeInGB 1024 -VCore 16 -Edition "GeneralPurpose" -ComputeGeneration Gen5 -``` - -For more information, see [New-AzSqlInstance](/powershell/module/az.sql/new-azsqlinstance). - -> [!NOTE] -> The above example provisions a managed instance with only a user-assigned managed identity. You could set the You could set the `-IdentityType` to be `"UserAssigned,SystemAssigned"` if you wanted both types of managed identities to be created with the instance. - -# [REST API](#tab/rest-api) - -The [Managed Instances - Create Or Update](/rest/api/sql/2020-11-01-preview/managed-instances/create-or-update) REST API can be used to create a managed instance with a user-assigned managed identity. - -> [!NOTE] -> The script requires a virtual network and subnet be created as a prerequisite. - -The script below will provision a managed instance with a user-assigned managed identity, set the Azure AD admin as ``, and enable [Azure AD-only authentication](../database/authentication-azure-ad-only-authentication.md). The instance SQL Administrator login will also be created automatically and the password will be set to a random password. Since SQL Authentication connectivity is disabled with this provisioning, the SQL Administrator login won't be used. - -The Azure AD admin, `` can be used to manage the instance when the provisioning is complete. 
- -Replace the following values in the example: - -- ``: Can be found by going to the [Azure portal](https://portal.azure.com), and going to your **Azure Active Directory** resource. In the **Overview** pane, you should see your **Tenant ID** -- ``: Your subscription ID can be found in the Azure portal -- ``: Use a unique managed instance name -- ``: Name of the resource group for your logical server -- ``: Can be an Azure AD user or group. For example, `DummyLogin` -- ``: Location of the server, such as `westus2`, or `centralus` -- ``: Can be found by going to the [Azure portal](https://portal.azure.com), and going to your **Azure Active Directory** resource. In the **User** pane, search for the Azure AD user and find their **Object ID** -- The `subnetId` parameter needs to be updated with the ``, the `Subscription ID`, ``, and `` - - -```rest -Import-Module Azure -Import-Module MSAL.PS - -$tenantId = '' -$clientId = '1950a258-227b-4e31-a9cf-717495945fc2' # Static Microsoft client ID used for getting a token -$subscriptionId = '' -$uri = "urn:ietf:wg:oauth:2.0:oob" -$instanceName = "" -$resourceGroupName = "" -$scopes ="https://management.core.windows.net/.default" - -Login-AzAccount -tenantId $tenantId - -# Login as an Azure AD user with permission to provision a managed instance - -$result = Get-MsalToken -RedirectUri $uri -ClientId $clientId -TenantId $tenantId -Scopes $scopes - -$authHeader = @{ -'Content-Type'='application\json; ' -'Authorization'=$result.CreateAuthorizationHeader() -} - -$body = '{ -"name": "", "type": "Microsoft.Sql/managedInstances", "identity": {"type" : "UserAssigned", "UserAssignedIdentities" : {"/subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/" : {}}},"location": "", "sku": {"name": "GP_Gen5", "tier": "GeneralPurpose", "family":"Gen5","capacity": 8}, -"properties": { 
"PrimaryUserAssignedIdentityId":"/subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/","administrators":{ "login":"", "sid":"", "tenantId":"", "principalType":"User", "azureADOnlyAuthentication":true }, -"subnetId": "/subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", -"licenseType": "LicenseIncluded", "vCores": 8, "storageSizeInGB": 2048, "collation": "SQL_Latin1_General_CP1_CI_AS", "proxyOverride": "Proxy", "timezoneId": "UTC", "privateEndpointConnections": [], "storageAccountType": "GRS", "zoneRedundant": false - } -}' - -# To provision the instance, execute the `PUT` command - -Invoke-RestMethod -Uri https://management.azure.com/subscriptions/$subscriptionId/resourceGroups/$resourceGroupName/providers/Microsoft.Sql/managedInstances/$instanceName/?api-version=2020-11-01-preview -Method PUT -Headers $authHeader -Body $body -ContentType "application/json" - -``` - -To check the results, execute the `GET` command: - -```rest -Invoke-RestMethod -Uri https://management.azure.com/subscriptions/$subscriptionId/resourceGroups/$resourceGroupName/providers/Microsoft.Sql/managedInstances/$instanceName/?api-version=2020-11-01-preview -Method GET -Headers $authHeader | Format-List -``` - -# [ARM Template](#tab/arm-template) - -To provision a new managed instance with a user-assigned managed identity, virtual network and subnet, with an Azure AD admin set for the instance and Azure AD-only authentication enabled, use the following template. - -Use a [Custom deployment in the Azure portal](https://portal.azure.com/#create/Microsoft.Template), and **Build your own template in the editor**. Next, **Save** the configuration once you pasted in the example. - -To get your user-assigned managed identity **Resource ID**, search for **Managed Identities** in the [Azure portal](https://portal.azure.com). Find your managed identity, and go to **Properties**. 
An example of your UMI **Resource ID** will look like `/subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/`. - -```json -{ - "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#", - "contentVersion": "1.0.0.1", - "parameters": { - "managedInstanceName": { - "type": "String", - "metadata": { - "description": "Enter managed instance name." - } - }, - "aad_admin_name": { - "type": "String", - "metadata": { - "description": "The name of the Azure AD admin for the SQL managed instance." - } - }, - "aad_admin_objectid": { - "type": "String", - "metadata": { - "description": "The Object ID of the Azure AD admin." - } - }, - "aad_admin_tenantid": { - "type": "String", - "defaultValue": "[subscription().tenantId]", - "metadata": { - "description": "The Tenant ID of the Azure Active Directory" - } - }, - "aad_admin_type": { - "defaultValue": "User", - "allowedValues": [ - "User", - "Group", - "Application" - ], - "type": "String" - }, - "aad_only_auth": { - "defaultValue": true, - "type": "Bool" - }, - "user_identity_resource_id": { - "defaultValue": "", - "type": "String", - "metadata": { - "description": "The Resource ID of the user-assigned managed identity, in the form of /subscriptions//resourceGroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/." - } - }, - "location": { - "defaultValue": "[resourceGroup().location]", - "type": "String", - "metadata": { - "description": "Enter location. If you leave this field blank resource group location would be used." - } - }, - "virtualNetworkName": { - "type": "String", - "defaultValue": "SQLMI-VNET", - "metadata": { - "description": "Enter virtual network name. If you leave this field blank name will be created by the template." - } - }, - "addressPrefix": { - "defaultValue": "10.0.0.0/16", - "type": "String", - "metadata": { - "description": "Enter virtual network address prefix." 
- } - }, - "subnetName": { - "type": "String", - "defaultValue": "ManagedInstances", - "metadata": { - "description": "Enter subnet name. If you leave this field blank name will be created by the template." - } - }, - "subnetPrefix": { - "defaultValue": "10.0.0.0/24", - "type": "String", - "metadata": { - "description": "Enter subnet address prefix." - } - }, - "skuName": { - "defaultValue": "GP_Gen5", - "allowedValues": [ - "GP_Gen5", - "BC_Gen5" - ], - "type": "String", - "metadata": { - "description": "Enter sku name." - } - }, - "vCores": { - "defaultValue": 16, - "allowedValues": [ - 8, - 16, - 24, - 32, - 40, - 64, - 80 - ], - "type": "Int", - "metadata": { - "description": "Enter number of vCores." - } - }, - "storageSizeInGB": { - "defaultValue": 256, - "minValue": 32, - "maxValue": 8192, - "type": "Int", - "metadata": { - "description": "Enter storage size." - } - }, - "licenseType": { - "defaultValue": "LicenseIncluded", - "allowedValues": [ - "BasePrice", - "LicenseIncluded" - ], - "type": "String", - "metadata": { - "description": "Enter license type." 
- } - } - }, - "variables": { - "networkSecurityGroupName": "[concat('SQLMI-', parameters('managedInstanceName'), '-NSG')]", - "routeTableName": "[concat('SQLMI-', parameters('managedInstanceName'), '-Route-Table')]" - }, - "resources": [ - { - "type": "Microsoft.Network/networkSecurityGroups", - "apiVersion": "2020-06-01", - "name": "[variables('networkSecurityGroupName')]", - "location": "[parameters('location')]", - "properties": { - "securityRules": [ - { - "name": "allow_tds_inbound", - "properties": { - "description": "Allow access to data", - "protocol": "Tcp", - "sourcePortRange": "*", - "destinationPortRange": "1433", - "sourceAddressPrefix": "VirtualNetwork", - "destinationAddressPrefix": "*", - "access": "Allow", - "priority": 1000, - "direction": "Inbound" - } - }, - { - "name": "allow_redirect_inbound", - "properties": { - "description": "Allow inbound redirect traffic to Managed Instance inside the virtual network", - "protocol": "Tcp", - "sourcePortRange": "*", - "destinationPortRange": "11000-11999", - "sourceAddressPrefix": "VirtualNetwork", - "destinationAddressPrefix": "*", - "access": "Allow", - "priority": 1100, - "direction": "Inbound" - } - }, - { - "name": "deny_all_inbound", - "properties": { - "description": "Deny all other inbound traffic", - "protocol": "*", - "sourcePortRange": "*", - "destinationPortRange": "*", - "sourceAddressPrefix": "*", - "destinationAddressPrefix": "*", - "access": "Deny", - "priority": 4096, - "direction": "Inbound" - } - }, - { - "name": "deny_all_outbound", - "properties": { - "description": "Deny all other outbound traffic", - "protocol": "*", - "sourcePortRange": "*", - "destinationPortRange": "*", - "sourceAddressPrefix": "*", - "destinationAddressPrefix": "*", - "access": "Deny", - "priority": 4096, - "direction": "Outbound" - } - } - ] - } - }, - { - "type": "Microsoft.Network/routeTables", - "apiVersion": "2020-06-01", - "name": "[variables('routeTableName')]", - "location": "[parameters('location')]", - 
"properties": { - "disableBgpRoutePropagation": false - } - }, - { - "type": "Microsoft.Network/virtualNetworks", - "apiVersion": "2020-06-01", - "name": "[parameters('virtualNetworkName')]", - "location": "[parameters('location')]", - "dependsOn": [ - "[variables('routeTableName')]", - "[variables('networkSecurityGroupName')]" - ], - "properties": { - "addressSpace": { - "addressPrefixes": [ - "[parameters('addressPrefix')]" - ] - }, - "subnets": [ - { - "name": "[parameters('subnetName')]", - "properties": { - "addressPrefix": "[parameters('subnetPrefix')]", - "routeTable": { - "id": "[resourceId('Microsoft.Network/routeTables', variables('routeTableName'))]" - }, - "networkSecurityGroup": { - "id": "[resourceId('Microsoft.Network/networkSecurityGroups', variables('networkSecurityGroupName'))]" - }, - "delegations": [ - { - "name": "miDelegation", - "properties": { - "serviceName": "Microsoft.Sql/managedInstances" - } - } - ] - } - } - ] - } - }, - { - "type": "Microsoft.Sql/managedInstances", - "apiVersion": "2020-11-01-preview", - "name": "[parameters('managedInstanceName')]", - "location": "[parameters('location')]", - "dependsOn": [ - "[parameters('virtualNetworkName')]" - ], - "sku": { - "name": "[parameters('skuName')]" - }, - "identity": { - "type": "UserAssigned", - "UserAssignedIdentities": { - "[parameters('user_identity_resource_id')]": {} - } - }, - "properties": { - "subnetId": "[resourceId('Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworkName'), parameters('subnetName'))]", - "storageSizeInGB": "[parameters('storageSizeInGB')]", - "vCores": "[parameters('vCores')]", - "licenseType": "[parameters('licenseType')]", - "PrimaryUserAssignedIdentityId": "[parameters('user_identity_resource_id')]", - "administrators": { - "login": "[parameters('aad_admin_name')]", - "sid": "[parameters('aad_admin_objectid')]", - "tenantId": "[parameters('aad_admin_tenantid')]", - "principalType": "[parameters('aad_admin_type')]", - 
"azureADOnlyAuthentication": "[parameters('aad_only_auth')]" - } - } - } - ] -} -``` - ---- - -## See also - -- [User-assigned managed identity in Azure AD for Azure SQL](../database/authentication-azure-ad-user-assigned-managed-identity.md) -- [Create an Azure SQL logical server using a user-assigned managed identity](../database/authentication-azure-ad-user-assigned-managed-identity-create-server.md) \ No newline at end of file diff --git a/articles/azure-sql/managed-instance/auto-failover-group-configure-sql-mi.md b/articles/azure-sql/managed-instance/auto-failover-group-configure-sql-mi.md deleted file mode 100644 index f325cb6d47576..0000000000000 --- a/articles/azure-sql/managed-instance/auto-failover-group-configure-sql-mi.md +++ /dev/null @@ -1,441 +0,0 @@ ---- -title: Configure an auto-failover group -description: Learn how to configure an auto-failover group for Azure SQL Managed Instance by using the Azure portal, and Azure PowerShell. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: high-availability -ms.custom: devx-track-azurecli, sql-db-mi-split -ms.topic: how-to -ms.devlang: -author: MladjoA -ms.author: mlandzic -ms.reviewer: kendralittle, mathoma -ms.date: 03/01/2022 ---- -# Configure an auto-failover group for Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)] - -> [!div class="op_single_selector"] -> * [Azure SQL Database](../database/auto-failover-group-configure-sql-db.md) -> * [Azure SQL Managed Instance](auto-failover-group-configure-sql-mi.md) - -This topic teaches you how to configure an [auto-failover group](auto-failover-group-sql-mi.md) for Azure SQL Managed Instance using the Azure portal and Azure PowerShell. For an end-to-end experience, review the [Auto-failover group tutorial](failover-group-add-instance-tutorial.md). - -> [!NOTE] -> This article covers auto-failover groups for Azure SQL Managed Instance. 
For Azure SQL Database, see [Configure auto-failover groups in SQL Database](../database/auto-failover-group-configure-sql-db.md). - - -## Prerequisites - -Consider the following prerequisites: - -- The secondary managed instance must be empty. -- The subnet range for the secondary virtual network must not overlap the subnet range of the primary virtual network. -- The collation and time zone of the secondary managed instance must match that of the primary managed instance. -- When connecting the two gateways, the **Shared Key** should be the same for both connections. -- You'll need to either configure [ExpressRoute](../../expressroute/expressroute-howto-circuit-portal-resource-manager.md) or create a gateway for the virtual network of each SQL Managed Instance, connect the two gateways, and then create the failover group. -- Deploy both managed instances to [paired regions](../../availability-zones/cross-region-replication-azure.md) for performance reasons. Managed instances residing in geo-paired regions have much better performance compared to unpaired regions. - -## Create primary virtual network gateway - -If you haven't configured [ExpressRoute](../../expressroute/expressroute-howto-circuit-portal-resource-manager.md), you can create the primary virtual network gateway with the Azure portal, or PowerShell. - -> [!NOTE] -> The SKU of the gateway affects throughput performance. This article deploys a gateway with the most basic SKU (`HwGw1`). Deploy a higher SKU (example: `VpnGw3`) to achieve higher throughput. For all available options, see [Gateway SKUs](../../vpn-gateway/vpn-gateway-about-vpngateways.md#benchmark) - -# [Portal](#tab/azure-portal) - -Create the primary virtual network gateway using the Azure portal. - -1. In the [Azure portal](https://portal.azure.com), go to your resource group and select the **Virtual network** resource for your primary managed instance. -1. 
Select **Subnets** under **Settings** and then select to add a new **Gateway subnet**. Leave the default values. - - ![Add gateway for primary managed instance](./media/auto-failover-group-configure-sql-mi/add-subnet-gateway-primary-vnet.png) - -1. Once the subnet gateway is created, select **Create a resource** from the left navigation pane and then type `Virtual network gateway` in the search box. Select the **Virtual network gateway** resource published by **Microsoft**. - - ![Create a new virtual network gateway](./media/auto-failover-group-configure-sql-mi/create-virtual-network-gateway.png) - -1. Fill out the required fields to configure the gateway your primary managed instance. - - The following table shows the values necessary for the gateway for the primary managed instance: - - | **Field** | Value | - | --- | --- | - | **Subscription** | The subscription where your primary managed instance is. | - | **Name** | The name for your virtual network gateway. | - | **Region** | The region where your primary managed instance is. | - | **Gateway type** | Select **VPN**. | - | **VPN Type** | Select **Route-based** | - | **SKU**| Leave default of `VpnGw1`. | - | **Location**| The location where your secondary managed instance and secondary virtual network is. | - | **Virtual network**| Select the virtual network for your secondary managed instance. | - | **Public IP address**| Select **Create new**. | - | **Public IP address name**| Enter a name for your IP address. | - - -1. Leave the other values as default, and then select **Review + create** to review the settings for your virtual network gateway. - - ![Primary gateway settings](./media/auto-failover-group-configure-sql-mi/settings-for-primary-gateway.png) - -1. Select **Create** to create your new virtual network gateway. - -# [PowerShell](#tab/azure-powershell) - -Create the primary virtual network gateway using PowerShell. 
- - ```powershell-interactive - $primaryResourceGroupName = "" - $primaryVnetName = "" - $primaryGWName = "" - $primaryGWPublicIPAddress = $primaryGWName + "-ip" - $primaryGWIPConfig = $primaryGWName + "-ipc" - $primaryGWAsn = 61000 - - # Get the primary virtual network - $vnet1 = Get-AzVirtualNetwork -Name $primaryVnetName -ResourceGroupName $primaryResourceGroupName - $primaryLocation = $vnet1.Location - - # Create primary gateway - Write-host "Creating primary gateway..." - $subnet1 = Get-AzVirtualNetworkSubnetConfig -Name GatewaySubnet -VirtualNetwork $vnet1 - $gwpip1= New-AzPublicIpAddress -Name $primaryGWPublicIPAddress -ResourceGroupName $primaryResourceGroupName ` - -Location $primaryLocation -AllocationMethod Dynamic - $gwipconfig1 = New-AzVirtualNetworkGatewayIpConfig -Name $primaryGWIPConfig ` - -SubnetId $subnet1.Id -PublicIpAddressId $gwpip1.Id - - $gw1 = New-AzVirtualNetworkGateway -Name $primaryGWName -ResourceGroupName $primaryResourceGroupName ` - -Location $primaryLocation -IpConfigurations $gwipconfig1 -GatewayType Vpn ` - -VpnType RouteBased -GatewaySku VpnGw1 -EnableBgp $true -Asn $primaryGWAsn - $gw1 - ``` - ---- - -## Create secondary virtual network gateway - -Create the secondary virtual network gateway using the Azure portal or PowerShell. - -# [Portal](#tab/azure-portal) - -Repeat the steps in the previous section to create the virtual network subnet and gateway for the secondary managed instance. Fill out the required fields to configure the gateway for your secondary managed instance. - -The following table shows the values necessary for the gateway for the secondary managed instance: - - | **Field** | Value | - | --- | --- | - | **Subscription** | The subscription where your secondary managed instance is. | - | **Name** | The name for your virtual network gateway, such as `secondary-mi-gateway`. | - | **Region** | The region where your secondary managed instance is. | - | **Gateway type** | Select **VPN**. 
| - | **VPN Type** | Select **Route-based** | - | **SKU**| Leave default of `VpnGw1`. | - | **Location**| The location where your secondary managed instance and secondary virtual network is. | - | **Virtual network**| Select the virtual network that was created in section 2, such as `vnet-sql-mi-secondary`. | - | **Public IP address**| Select **Create new**. | - | **Public IP address name**| Enter a name for your IP address, such as `secondary-gateway-IP`. | - - - ![Secondary gateway settings](./media/auto-failover-group-configure-sql-mi/settings-for-secondary-gateway.png) - -# [PowerShell](#tab/azure-powershell) - -Create the secondary virtual network gateway using PowerShell. - - ```powershell-interactive - $secondaryResourceGroupName = "" - $secondaryVnetName = "" - $secondaryGWName = "" - $secondaryGWPublicIPAddress = $secondaryGWName + "-IP" - $secondaryGWIPConfig = $secondaryGWName + "-ipc" - $secondaryGWAsn = 62000 - - # Get the secondary virtual network - $vnet2 = Get-AzVirtualNetwork -Name $secondaryVnetName -ResourceGroupName $secondaryResourceGroupName - $secondaryLocation = $vnet2.Location - - # Create the secondary gateway - Write-host "Creating secondary gateway..." 
- $subnet2 = Get-AzVirtualNetworkSubnetConfig -Name GatewaySubnet -VirtualNetwork $vnet2 - $gwpip2= New-AzPublicIpAddress -Name $secondaryGWPublicIPAddress -ResourceGroupName $secondaryResourceGroupName ` - -Location $secondaryLocation -AllocationMethod Dynamic - $gwipconfig2 = New-AzVirtualNetworkGatewayIpConfig -Name $secondaryGWIPConfig ` - -SubnetId $subnet2.Id -PublicIpAddressId $gwpip2.Id - - $gw2 = New-AzVirtualNetworkGateway -Name $secondaryGWName -ResourceGroupName $secondaryResourceGroupName ` - -Location $secondaryLocation -IpConfigurations $gwipconfig2 -GatewayType Vpn ` - -VpnType RouteBased -GatewaySku VpnGw1 -EnableBgp $true -Asn $secondaryGWAsn - - $gw2 - ``` - ---- - -## Connect the gateways - -Create connections between the two gateways using the Azure portal or PowerShell. - -Two connections need to be created - the connection from the primary gateway to the secondary gateway, and then the connection from the secondary gateway to the primary gateway. - -The shared key used for both connections should be the same for each connection. - -# [Portal](#tab/azure-portal) - -Create connections between the two gateways using the Azure portal. - -1. Select **Create a resource** from the [Azure portal](https://portal.azure.com). -1. Type `connection` in the search box and then press enter to search, which takes you to the **Connection** resource, published by Microsoft. -1. Select **Create** to create your connection. -1. On the **Basics** tab, select the following values and then select **OK**. - 1. Select `VNet-to-VNet` for the **Connection type**. - 1. Select your subscription from the drop-down. - 1. Select the resource group for your managed instance in the drop-down. - 1. Select the location of your primary managed instance from the drop-down. -1. On the **Settings** tab, select or enter the following values and then select **OK**: - 1. Choose the primary network gateway for the **First virtual network gateway**, such as `Primary-Gateway`. - 1. 
Choose the secondary network gateway for the **Second virtual network gateway**, such as `Secondary-Gateway`. - 1. Select the checkbox next to **Establish bidirectional connectivity**. - 1. Either leave the default primary connection name, or rename it to a value of your choice. - 1. Provide a **Shared key (PSK)** for the connection, such as `mi1m2psk`. - - ![Create gateway connection](./media/auto-failover-group-configure-sql-mi/create-gateway-connection.png) - -1. On the **Summary** tab, review the settings for your bidirectional connection and then select **OK** to create your connection. - -# [PowerShell](#tab/azure-powershell) - -Create connections between the two gateways using PowerShell. - - ```powershell-interactive - $vpnSharedKey = "mi1mi2psk" - $primaryResourceGroupName = "" - $primaryGWConnection = "" - $primaryLocation = "" - $secondaryResourceGroupName = "" - $secondaryGWConnection = "" - $secondaryLocation = "" - - # Connect the primary to secondary gateway - Write-host "Connecting the primary gateway" - New-AzVirtualNetworkGatewayConnection -Name $primaryGWConnection -ResourceGroupName $primaryResourceGroupName ` - -VirtualNetworkGateway1 $gw1 -VirtualNetworkGateway2 $gw2 -Location $primaryLocation ` - -ConnectionType Vnet2Vnet -SharedKey $vpnSharedKey -EnableBgp $true - $primaryGWConnection - - # Connect the secondary to primary gateway - Write-host "Connecting the secondary gateway" - - New-AzVirtualNetworkGatewayConnection -Name $secondaryGWConnection -ResourceGroupName $secondaryResourceGroupName ` - -VirtualNetworkGateway1 $gw2 -VirtualNetworkGateway2 $gw1 -Location $secondaryLocation ` - -ConnectionType Vnet2Vnet -SharedKey $vpnSharedKey -EnableBgp $true - $secondaryGWConnection - ``` - ---- - -## Create the failover group - -Create the failover group for your managed instances by using the Azure portal or PowerShell. - -# [Portal](#tab/azure-portal) - -Create the failover group for your SQL Managed Instances by using the Azure portal. - -1. 
Select **Azure SQL** in the left-hand menu of the [Azure portal](https://portal.azure.com). If **Azure SQL** isn't in the list, select **All services**, then type Azure SQL in the search box. (Optional) Select the star next to **Azure SQL** to favorite it and add it as an item in the left-hand navigation. -1. Select the primary managed instance you want to add to the failover group. -1. Under **Settings**, navigate to **Instance Failover Groups** and then choose to **Add group** to open the **Instance Failover Group** page. - - ![Add a failover group](./media/auto-failover-group-configure-sql-mi/add-failover-group.png) - -1. On the **Instance Failover Group** page, type the name of your failover group and then choose the secondary managed instance from the drop-down. Select **Create** to create your failover group. - - ![Create failover group](./media/auto-failover-group-configure-sql-mi/create-failover-group.png) - -1. Once failover group deployment is complete, you'll be taken back to the **Failover group** page. - -# [PowerShell](#tab/azure-powershell) - -Create the failover group for your managed instances using PowerShell. - - ```powershell-interactive - $primaryResourceGroupName = "" - $failoverGroupName = "" - $primaryLocation = "" - $secondaryLocation = "" - $primaryManagedInstance = "" - $secondaryManagedInstance = "" - - # Create failover group - Write-host "Creating the failover group..." - $failoverGroup = New-AzSqlDatabaseInstanceFailoverGroup -Name $failoverGroupName ` - -Location $primaryLocation -ResourceGroupName $primaryResourceGroupName -PrimaryManagedInstanceName $primaryManagedInstance ` - -PartnerRegion $secondaryLocation -PartnerManagedInstanceName $secondaryManagedInstance ` - -FailoverPolicy Automatic -GracePeriodWithDataLossHours 1 - $failoverGroup - ``` - ---- - -## Test failover - -Test failover of your failover group using the Azure portal or PowerShell. 
- -# [Portal](#tab/azure-portal) - -Test failover of your failover group using the Azure portal. - -1. Navigate to your _secondary_ managed instance within the [Azure portal](https://portal.azure.com) and select **Instance Failover Groups** under settings. -1. Review which managed instance is the primary, and which managed instance is the secondary. -1. Select **Failover** and then select **Yes** on the warning about TDS sessions being disconnected. - - ![Fail over the failover group](./media/auto-failover-group-configure-sql-mi/failover-mi-failover-group.png) - -1. Review which manged instance is the primary and which instance is the secondary. If failover succeeded, the two instances should have switched roles. - - ![Managed instances have switched roles after failover](./media/auto-failover-group-configure-sql-mi/mi-switched-after-failover.png) - -1. Go to the new _secondary_ managed instance and select **Failover** once again to fail the primary instance back to the primary role. - -# [PowerShell](#tab/azure-powershell) - -Test failover of your failover group using PowerShell. 
- - ```powershell-interactive - $primaryResourceGroupName = "" - $secondaryResourceGroupName = "" - $failoverGroupName = "" - $primaryLocation = "" - $secondaryLocation = "" - $primaryManagedInstance = "" - $secondaryManagedInstance = "" - - # Verify the current primary role - Get-AzSqlDatabaseInstanceFailoverGroup -ResourceGroupName $primaryResourceGroupName ` - -Location $secondaryLocation -Name $failoverGroupName - - # Failover the primary managed instance to the secondary role - Write-host "Failing primary over to the secondary location" - Get-AzSqlDatabaseInstanceFailoverGroup -ResourceGroupName $secondaryResourceGroupName ` - -Location $secondaryLocation -Name $failoverGroupName | Switch-AzSqlDatabaseInstanceFailoverGroup - Write-host "Successfully failed failover group to secondary location" - - # Verify the current primary role - Get-AzSqlDatabaseInstanceFailoverGroup -ResourceGroupName $primaryResourceGroupName ` - -Location $secondaryLocation -Name $failoverGroupName - - # Fail primary managed instance back to primary role - Write-host "Failing primary back to primary role" - Get-AzSqlDatabaseInstanceFailoverGroup -ResourceGroupName $primaryResourceGroupName ` - -Location $primaryLocation -Name $failoverGroupName | Switch-AzSqlDatabaseInstanceFailoverGroup - Write-host "Successfully failed failover group to primary location" - - # Verify the current primary role - Get-AzSqlDatabaseInstanceFailoverGroup -ResourceGroupName $primaryResourceGroupName ` - -Location $secondaryLocation -Name $failoverGroupName - ``` - ---- - - - -## Locate listener endpoint - -Once your failover group is configured, update the connection string for your application to the listener endpoint. This will keep your application connected to the failover group listener, rather than the primary database, elastic pool, or instance database. 
That way, you don't have to manually update the connection string every time your database entity fails over, and traffic is routed to whichever entity is currently primary. - -The listener endpoint is in the form of `fog-name.database.windows.net`, and is visible in the Azure portal, when viewing the failover group: - -![Failover group connection string](./media/auto-failover-group-configure-sql-mi/find-failover-group-connection-string.png) - -## Create group between instances in different subscriptions - -You can create a failover group between SQL Managed Instances in two different subscriptions, as long as subscriptions are associated to the same [Azure Active Directory Tenant](../../active-directory/fundamentals/active-directory-whatis.md#terminology). When using PowerShell API, you can do it by specifying the `PartnerSubscriptionId` parameter for the secondary SQL Managed Instance. When using REST API, each instance ID included in the `properties.managedInstancePairs` parameter can have its own Subscription ID. - -> [!IMPORTANT] -> Azure portal does not support creation of failover groups across different subscriptions. Also, for the existing failover groups across different subscriptions and/or resource groups, failover can't be initiated manually via portal from the primary SQL Managed Instance. Initiate it from the geo-secondary instance instead. - -## Change the secondary region - -Let's assume that instance A is the primary instance, instance B is the existing secondary instance, and instance C is the new secondary instance in the third region. To make the transition, follow these steps: - -1. Create instance C with same size as A and in the same DNS zone. -2. Delete the failover group between instances A and B. At this point the logins will be failing because the SQL aliases for the failover group listeners have been deleted and the gateway won't recognize the failover group name. 
The secondary databases will be disconnected from the primaries and will become read-write databases. -3. Create a failover group with the same name between instance A and C. Follow the instructions in [failover group with SQL Managed Instance tutorial](failover-group-add-instance-tutorial.md). This is a size-of-data operation and will complete when all databases from instance A are seeded and synchronized. -4. Delete instance B if not needed to avoid unnecessary charges. - -> [!NOTE] -> After step 2 and until step 3 is completed the databases in instance A will remain unprotected from a catastrophic failure of instance A. - -## Change the primary region - -Let's assume instance A is the primary instance, instance B is the existing secondary instance, and instance C is the new primary instance in the third region. To make the transition, follow these steps: - -1. Create instance C with same size as B and in the same DNS zone. -2. Connect to instance B and manually failover to switch the primary instance to B. Instance A will become the new secondary instance automatically. -3. Delete the failover group between instances A and B. At this point login attempts using failover group endpoints will be failing. The secondary databases on A will be disconnected from the primaries and will become read-write databases. -4. Create a failover group with the same name between instance A and C. Follow the instructions in the [failover group with managed instance tutorial](failover-group-add-instance-tutorial.md). This is a size-of-data operation and will complete when all databases from instance A are seeded and synchronized. At this point login attempts will stop failing. -5. Delete instance A if not needed to avoid unnecessary charges. - -> [!CAUTION] -> After step 3 and until step 4 is completed the databases in instance A will remain unprotected from a catastrophic failure of instance A. 
- -> [!IMPORTANT] -> When the failover group is deleted, the DNS records for the listener endpoints are also deleted. At that point, there's a non-zero probability of somebody else creating a failover group with the same name. Because failover group names must be globally unique, this will prevent you from using the same name again. To minimize this risk, don't use generic failover group names. - -## Enabling geo-replication between MI virtual networks - -When you set up a failover group between primary and secondary SQL Managed Instances in two different regions, each instance is isolated using an independent virtual network. To allow replication traffic between these VNets ensure these prerequisites are met: - -- The two instances of SQL Managed Instance need to be in different Azure regions. -- The two instances of SQL Managed Instance need to be the same service tier, and have the same storage size. -- Your secondary instance of SQL Managed Instance must be empty (no user databases). -- The virtual networks used by the instances of SQL Managed Instance need to be connected through a [VPN Gateway](../../vpn-gateway/vpn-gateway-about-vpngateways.md) or [Express Route](../../expressroute/expressroute-howto-circuit-portal-resource-manager.md). When two virtual networks connect through an on-premises network, ensure there's no firewall rule blocking ports 5022, and 11000-11999. Global VNet Peering is supported with the limitation described in the note below. - - > [!IMPORTANT] - > [On 9/22/2020 support for global virtual network peering for newly created virtual clusters was announced](https://azure.microsoft.com/updates/global-virtual-network-peering-support-for-azure-sql-managed-instance-now-available/). It means that global virtual network peering is supported for SQL managed instances created in empty subnets after the announcement date, as well for all the subsequent managed instances created in those subnets. 
For all the other SQL managed instances peering support is limited to the networks in the same region due to the [constraints of global virtual network peering](../../virtual-network/virtual-network-manage-peering.md#requirements-and-constraints). See also the relevant section of the [Azure Virtual Networks frequently asked questions](../../virtual-network/virtual-networks-faq.md#what-are-the-constraints-related-to-global-vnet-peering-and-load-balancers) article for more details. To be able to use global virtual network peering for SQL managed instances from virtual clusters created before the announcement date, consider configuring non-default [maintenance window](../database/maintenance-window.md) on the instances, as it will move the instances into new virtual clusters that support global virtual network peering. - -- The two SQL Managed Instance VNets can't have overlapping IP addresses. -- You need to set up your Network Security Groups (NSG) such that ports 5022 and the range 11000~12000 are open inbound and outbound for connections from the subnet of the other managed instance. This is to allow replication traffic between the instances. - - > [!IMPORTANT] - > Misconfigured NSG security rules leads to stuck database seeding operations. - -- The secondary SQL Managed Instance is configured with the correct DNS zone ID. DNS zone is a property of a SQL Managed Instance and underlying virtual cluster, and its ID is included in the host name address. The zone ID is generated as a random string when the first SQL Managed Instance is created in each VNet and the same ID is assigned to all other instances in the same subnet. Once assigned, the DNS zone can't be modified. SQL Managed Instances included in the same failover group must share the DNS zone. You accomplish this by passing the primary instance's zone ID as the value of DnsZonePartner parameter when creating the secondary instance. 
- - > [!NOTE] - > For a detailed tutorial on configuring failover groups with SQL Managed Instance, see [add a SQL Managed Instance to a failover group](../managed-instance/failover-group-add-instance-tutorial.md). - -## Permissions - - - - -Permissions for a failover group are managed via [Azure role-based access control (Azure RBAC)](../../role-based-access-control/overview.md). - -Azure RBAC write access is necessary to create and manage failover groups. The [SQL Managed Instance Contributor](../../role-based-access-control/built-in-roles.md#sql-managed-instance-contributor) role has all the necessary permissions to manage failover groups. - -The following table lists specific permission scopes for Azure SQL Managed Instance: - -| **Action** | **Permission** | **Scope**| -| :---- | :---- | :---- | -|**Create failover group**| Azure RBAC write access | Primary managed instance
    Secondary managed instance| -| **Update failover group**| Azure RBAC write access | Failover group
    All databases within the managed instance| -| **Fail over failover group** | Azure RBAC write access | Failover group on new primary managed instance | - - - -## Next steps - -For detailed steps configuring a failover group, see the following tutorials: - -- [Add a single database to a failover group](../database/failover-group-add-single-database-tutorial.md) -- [Add an elastic pool to a failover group](../database/failover-group-add-elastic-pool-tutorial.md) -- [Add a managed instance to a failover group](../managed-instance/failover-group-add-instance-tutorial.md) - -For an overview of the feature, see [auto-failover groups](auto-failover-group-sql-mi.md). diff --git a/articles/azure-sql/managed-instance/auto-failover-group-sql-mi.md b/articles/azure-sql/managed-instance/auto-failover-group-sql-mi.md deleted file mode 100644 index 586be4f5d19f4..0000000000000 --- a/articles/azure-sql/managed-instance/auto-failover-group-sql-mi.md +++ /dev/null @@ -1,268 +0,0 @@ ---- -title: Auto-failover groups overview & best practices -description: Auto-failover groups let you manage geo-replication and automatic / coordinated failover of all user databases on a managed instance in Azure SQL Managed Instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: high-availability -ms.custom: sql-db-mi-split -ms.topic: conceptual -author: MladjoA -ms.author: mlandzic -ms.reviewer: kendralittle, mathoma -ms.date: 03/15/2022 ---- - -# Auto-failover groups overview & best practices (Azure SQL Managed Instance) -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -> [!div class="op_single_selector"] -> * [Azure SQL Database](../database/auto-failover-group-sql-db.md) -> * [Azure SQL Managed Instance](auto-failover-group-sql-mi.md) - -The auto-failover groups feature allows you to manage the replication and failover of all user databases in a managed instance to another Azure region. 
This article focuses on using the Auto-failover group feature with Azure SQL Managed Instance and some best practices. - -To get started, review [Configure auto-failover group](auto-failover-group-configure-sql-mi.md). For an end-to-end experience, see the [Auto-failover group tutorial](failover-group-add-instance-tutorial.md). - -> [!NOTE] -> This article covers auto-failover groups for Azure SQL Managed Instance. For Azure SQL Database, see [Auto-failover groups in SQL Database](../database/auto-failover-group-sql-db.md). - -## Overview - -[!INCLUDE [auto-failover-groups-overview](../includes/auto-failover-group-overview.md)] - - -## Terminology and capabilities - - - -- **Failover group (FOG)** - - A failover group allows for all user databases within a managed instance to fail over as a unit to another Azure region in case the primary managed instance becomes unavailable due to a primary region outage. Since failover groups for SQL Managed Instance contain all user databases within the instance, only one failover group can be configured on an instance. - - > [!IMPORTANT] - > The name of the failover group must be globally unique within the `.database.windows.net` domain. - -- **Primary** - - The managed instance that hosts the primary databases in the failover group. - -- **Secondary** - - The managed instance that hosts the secondary databases in the failover group. The secondary cannot be in the same Azure region as the primary. - -- **DNS zone** - - A unique ID that is automatically generated when a new SQL Managed Instance is created. A multi-domain (SAN) certificate for this instance is provisioned to authenticate the client connections to any instance in the same DNS zone. The two managed instances in the same failover group must share the DNS zone. - -- **Failover group read-write listener** - - A DNS CNAME record that points to the current primary. 
It is created automatically when the failover group is created and allows the read-write workload to transparently reconnect to the primary when the primary changes after failover. When the failover group is created on a SQL Managed Instance, the DNS CNAME record for the listener URL is formed as `..database.windows.net`. - -- **Failover group read-only listener** - - A DNS CNAME record that points to the current secondary. It is created automatically when the failover group is created and allows the read-only SQL workload to transparently connect to the secondary when the secondary changes after failover. When the failover group is created on a SQL Managed Instance, the DNS CNAME record for the listener URL is formed as `.secondary..database.windows.net`. - -[!INCLUDE [auto-failover-group-terminology](../includes/auto-failover-group-terminology.md)] - - -## Failover group architecture - -The auto-failover group must be configured on the primary instance and will connect it to the secondary instance in a different Azure region. All user databases in the instance will be replicated to the secondary instance. System databases like _master_ and _msdb_ will not be replicated. - -The following diagram illustrates a typical configuration of a geo-redundant cloud application using managed instance and auto-failover group: - -:::image type="content" source="media/auto-failover-group-sql-mi/auto-failover-group-mi.png" alt-text="Auto-failover group diagram for SQL MI"::: - -If your application uses SQL Managed Instance as the data tier, follow the general guidelines and best practices outlined in this article when designing for business continuity. - - -> [!IMPORTANT] -> If you deploy auto-failover groups in a hub-and-spoke network topology cross-region, replication traffic should go directly between the two managed instance subnets rather than directed through the hub networks. 
- -## Initial seeding - -When adding managed instances to a failover group, there is an initial seeding phase before data replication starts. The initial seeding phase is the longest and most expensive operation. Once initial seeding completes, data is synchronized, and then only subsequent data changes are replicated. The time it takes for the initial seeding to complete depends on the size of your data, number of replicated databases, the load on primary databases, and the speed of the link between the primary and secondary. Under normal circumstances, possible seeding speed is up to 360 GB an hour for SQL Managed Instance. Seeding is performed for all databases in parallel. - -For SQL Managed Instance, consider the speed of the Express Route link between the two instances when estimating the time of the initial seeding phase. If the speed of the link between the two instances is slower than what is necessary, the time to seed is likely to be noticeably impacted. You can use the stated seeding speed, number of databases, total size of data, and the link speed to estimate how long the initial seeding phase will take before data replication starts. For example, for a single 100 GB database, the initial seed phase would take about 1.2 hours if the link is capable of pushing 84 GB per hour, and if there are no other databases being seeded. If the link can only transfer 10 GB per hour, then seeding a 100 GB database will take about 10 hours. If there are multiple databases to replicate, seeding will be executed in parallel, and, when combined with a slow link speed, the initial seeding phase may take considerably longer, especially if the parallel seeding of data from all databases exceeds the available link bandwidth. If the network bandwidth between two instances is limited and you are adding multiple managed instances to a failover group, consider adding multiple managed instances to the failover group sequentially, one by one. 
Given an appropriately sized gateway SKU between the two managed instances, and if corporate network bandwidth allows it, it's possible to achieve speeds as high as 360 GB an hour. - - -## Creating the geo-secondary instance - -To ensure non-interrupted connectivity to the primary SQL Managed Instance after failover, both the primary and secondary instances must be in the same DNS zone. It will guarantee that the same multi-domain (SAN) certificate can be used to authenticate client connections to either of the two instances in the failover group. When your application is ready for production deployment, create a secondary SQL Managed Instance in a different region and make sure it shares the DNS zone with the primary SQL Managed Instance. You can do it by specifying an optional parameter during creation. If you are using PowerShell or the REST API, the name of the optional parameter is `DNSZonePartner`. The name of the corresponding optional field in the Azure portal is *Primary Managed Instance*. - -> [!IMPORTANT] -> The first managed instance created in the subnet determines DNS zone for all subsequent instances in the same subnet. This means that two instances from the same subnet cannot belong to different DNS zones. - -For more information about creating the secondary SQL Managed Instance in the same DNS zone as the primary instance, see [Create a secondary managed instance](../managed-instance/failover-group-add-instance-tutorial.md#create-a-secondary-managed-instance). - -## Use paired regions - -Deploy both managed instances to [paired regions](../../availability-zones/cross-region-replication-azure.md) for performance reasons. SQL Managed Instance failover groups in paired regions have better performance compared to unpaired regions. - -## Enable geo-replication traffic between two instances - -Because each managed instance is isolated in its own VNet, two-directional traffic between these VNets must be allowed. 
See [Azure VPN gateway](../../vpn-gateway/vpn-gateway-about-vpngateways.md) - - - -## Manage geo-failover to a geo-secondary instance - -The failover group will manage geo-failover of all databases on the primary managed instance. When a group is created, each database in the instance will be automatically geo-replicated to the geo-secondary instance. You cannot use failover groups to initiate a partial failover of a subset of databases. - -> [!IMPORTANT] -> If a database is dropped on the primary managed instance, it will also be dropped automatically on the geo-secondary managed instance. - -## Use the read-write listener (primary MI) - -For read-write workloads, use `<fog-name>.<zone_id>.database.windows.net` as the server name. Connections will be automatically directed to the primary. This name does not change after failover. The geo-failover involves updating the DNS record, so the client connections are redirected to the new primary only after the client DNS cache is refreshed. Because the secondary instance shares the DNS zone with the primary, the client application will be able to reconnect to it using the same server-side SAN certificate. The read-write listener and read-only listener cannot be reached via the [public endpoint for managed instance](public-endpoint-configure.md). - -## Use the read-only listener (secondary MI) - -If you have logically isolated read-only workloads that are tolerant to data latency, you can run them on the geo-secondary. To connect directly to the geo-secondary, use `<fog-name>.secondary.<zone_id>.database.windows.net` as the server name. - -In the Business Critical tier, SQL Managed Instance supports the use of [read-only replicas](../database/read-scale-out.md) to offload read-only query workloads, using the `ApplicationIntent=ReadOnly` parameter in the connection string.
When you have configured a geo-replicated secondary, you can use this capability to connect to either a read-only replica in the primary location or in the geo-replicated location: - -- To connect to a read-only replica in the primary location, use `ApplicationIntent=ReadOnly` and `<fog-name>.<zone_id>.database.windows.net`. -- To connect to a read-only replica in the secondary location, use `ApplicationIntent=ReadOnly` and `<fog-name>.secondary.<zone_id>.database.windows.net`. - -The read-write listener and read-only listener cannot be reached via [public endpoint for managed instance](public-endpoint-configure.md). - - -## Potential performance degradation after failover - -A typical Azure application uses multiple Azure services and consists of multiple components. The automatic geo-failover of the failover group is triggered based on the state of the Azure SQL components alone. Other Azure services in the primary region may not be affected by the outage and their components may still be available in that region. Once the primary databases switch to the secondary region, the latency between the dependent components may increase. To avoid the impact of higher latency on the application's performance, ensure the redundancy of all the application's components in the secondary region and fail over application components together with the database. - -## Potential data loss after failover - -If an outage occurs in the primary region, recent transactions may not be able to replicate to the geo-secondary. Failover is deferred for the period you specify using `GracePeriodWithDataLossHours`. If you configured the automatic failover policy, be prepared for data loss. In general, during outages, Azure favors availability. Setting `GracePeriodWithDataLossHours` to a larger number, such as 24 hours, or disabling automatic geo-failover lets you reduce the likelihood of data loss at the expense of database availability.
- -## DNS update - -The DNS update of the read-write listener will happen immediately after the failover is initiated. This operation will not result in data loss. However, the process of switching database roles can take up to 5 minutes under normal conditions. Until it is completed, some databases in the new primary instance will still be read-only. If a failover is initiated using PowerShell, the operation to switch the primary replica role is synchronous. If it is initiated using the Azure portal, the UI will indicate completion status. If it is initiated using the REST API, use standard Azure Resource Manager’s polling mechanism to monitor for completion. - -> [!IMPORTANT] -> Use manual planned failover to move the primary back to the original location once the outage that caused the geo-failover is mitigated. - - -## Enable scenarios dependent on objects from the system databases - -System databases are **not** replicated to the secondary instance in a failover group. To enable scenarios that depend on objects from the system databases, make sure to create the same objects on the secondary instance and keep them synchronized with the primary instance. - -For example, if you plan to use the same logins on the secondary instance, make sure to create them with the identical SID. - -```SQL --- Code to create login on the secondary instance -CREATE LOGIN foo WITH PASSWORD = '<password>', SID = <sid>; -``` - -To learn more, see [Replication of logins and agent jobs](https://techcommunity.microsoft.com/t5/modernization-best-practices-and/azure-sql-managed-instance-sync-agent-jobs-and-logins-in/ba-p/2860495). - -## Synchronize instance properties and retention policies between instances - -Instances in a failover group remain separate Azure resources, and no changes made to the configuration of the primary instance will be automatically replicated to the secondary instance. Make sure to perform all relevant changes both on primary _and_ secondary instance.
For example, if you change backup storage redundancy or long-term backup retention policy on primary instance, make sure to change it on secondary instance as well. - - -## Use failover groups and virtual network service endpoints - -If you are using [Virtual Network service endpoints and rules](../database/vnet-service-endpoint-rule-overview.md) to restrict access to your SQL Managed Instance, be aware that each virtual network service endpoint applies to only one Azure region. The endpoint does not enable other regions to accept communication from the subnet. Therefore, only the client applications deployed in the same region can connect to the primary database. - -## Prevent loss of critical data - - - -Due to the high latency of wide area networks, geo-replication uses an asynchronous replication mechanism. Asynchronous replication makes the possibility of data loss unavoidable if the primary fails. To protect critical transactions from data loss, an application developer can call the [sp_wait_for_database_copy_sync](/sql/relational-databases/system-stored-procedures/active-geo-replication-sp-wait-for-database-copy-sync) stored procedure immediately after committing the transaction. Calling `sp_wait_for_database_copy_sync` blocks the calling thread until the last committed transaction has been transmitted and hardened in the transaction log of the secondary database. However, it does not wait for the transmitted transactions to be replayed (redone) on the secondary. `sp_wait_for_database_copy_sync` is scoped to a specific geo-replication link. Any user with the connection rights to the primary database can call this procedure. - -> [!NOTE] -> `sp_wait_for_database_copy_sync` prevents data loss after geo-failover for specific transactions, but does not guarantee full synchronization for read access. 
The delay caused by a `sp_wait_for_database_copy_sync` procedure call can be significant and depends on the size of the not yet transmitted transaction log on the primary at the time of the call. - -## Failover group status -Auto-failover group reports its status describing the current state of the data replication: - -- Seeding - [Initial seeding](auto-failover-group-sql-mi.md#initial-seeding) is taking place after creation of the failover group, until all user databases are initialized on the secondary instance. Failover process cannot be initiated while auto-failover group is in the Seeding status, since user databases are not copied to secondary instance yet. -- Synchronizing - the usual status of auto-failover group. It means that data changes on the primary instance are being replicated asynchronously to the secondary instance. This status doesn't guarantee that the data is fully synchronized at every moment. There may be data changes from primary still to be replicated to the secondary due to asynchronous nature of the replication process between instances in the auto-failover group. Both automatic and manual failovers can be initiated while the auto-failover group is in the Synchronizing status. -- Failover in progress - this status indicates that either automatically or manually initiated failover process is in progress. No changes to the failover group or additional failovers can be initiated while the auto-failover group is in this status. - -## Permissions - - - -Permissions for a failover group are managed via [Azure role-based access control (Azure RBAC)](../../role-based-access-control/overview.md). - -Azure RBAC write access is necessary to create and manage failover groups. The [SQL Managed Instance Contributor](../../role-based-access-control/built-in-roles.md#sql-managed-instance-contributor) has all the necessary permissions to manage failover groups.
- -For specific permission scopes, review how to [configure auto-failover groups in Azure SQL Managed Instance](auto-failover-group-configure-sql-mi.md#permissions). - -## Limitations - -Be aware of the following limitations: - -- Failover groups cannot be created between two instances in the same Azure region. -- Failover groups cannot be renamed. You will need to delete the group and re-create it with a different name. -- Database rename is not supported for databases in failover group. You will need to temporarily delete failover group to be able to rename a database, or remove the database from the failover group. -- System databases are not replicated to the secondary instance in a failover group. Therefore, scenarios that depend on objects from the system databases such as Server Logins and Agent jobs, require objects to be manually created on the secondary instances and also manually kept in sync after any changes made on primary instance. The only exception is Service master Key (SMK) for SQL Managed Instance, that is replicated automatically to secondary instance during creation of failover group. Any subsequent changes of SMK on the primary instance however will not be replicated to secondary instance. To learn more, see how to [Enable scenarios dependent on objects from the system databases](#enable-scenarios-dependent-on-objects-from-the-system-databases). -- Failover groups cannot be created between instances if any of them are in an instance pool. - -## Programmatically manage failover groups - -Auto-failover groups can also be managed programmatically using Azure PowerShell, Azure CLI, and REST API. The following tables describe the set of commands available. Active geo-replication includes a set of Azure Resource Manager APIs for management, including the [Azure SQL Database REST API](/rest/api/sql/) and [Azure PowerShell cmdlets](/powershell/azure/). 
These APIs require the use of resource groups and support Azure role-based access control (Azure RBAC). For more information on how to implement access roles, see [Azure role-based access control (Azure RBAC)](../../role-based-access-control/overview.md). - - -# [PowerShell](#tab/azure-powershell) - -| Cmdlet | Description | -| --- | --- | -| [New-AzSqlDatabaseInstanceFailoverGroup](/powershell/module/az.sql/new-azsqldatabaseinstancefailovergroup) |This command creates a failover group and registers it on both primary and secondary instances| -| [Set-AzSqlDatabaseInstanceFailoverGroup](/powershell/module/az.sql/set-azsqldatabaseinstancefailovergroup) |Modifies configuration of a failover group| -| [Get-AzSqlDatabaseInstanceFailoverGroup](/powershell/module/az.sql/get-azsqldatabaseinstancefailovergroup) |Retrieves a failover group's configuration| -| [Switch-AzSqlDatabaseInstanceFailoverGroup](/powershell/module/az.sql/switch-azsqldatabaseinstancefailovergroup) |Triggers failover of a failover group to the secondary instance| -| [Remove-AzSqlDatabaseInstanceFailoverGroup](/powershell/module/az.sql/remove-azsqldatabaseinstancefailovergroup) | Removes a failover group| - - -# [Azure CLI](#tab/azure-cli) - -| Command | Description | -| --- | --- | -| [az sql failover-group create](/cli/azure/sql/failover-group#az-sql-failover-group-create) |This command creates a failover group and registers it on both primary and secondary servers| -| [az sql failover-group delete](/cli/azure/sql/failover-group#az-sql-failover-group-delete) | Removes a failover group from the server | -| [az sql failover-group show](/cli/azure/sql/failover-group#az-sql-failover-group-show) | Retrieves a failover group configuration | -| [az sql failover-group update](/cli/azure/sql/failover-group#az-sql-failover-group-update) |Modifies a failover group's configuration and/or adds one or more databases to a failover group| -| [az sql failover-group 
set-primary](/cli/azure/sql/failover-group#az-sql-failover-group-set-primary) | Triggers failover of a failover group to the secondary server | - -# [REST API](#tab/rest-api) - -| API | Description | -| --- | --- | -| [Create or Update Failover Group](/rest/api/sql/instancefailovergroups/createorupdate) | Creates or updates a failover group's configuration | -| [Delete Failover Group](/rest/api/sql/instancefailovergroups/delete) | Removes a failover group from the instance | -| [Failover (Planned)](/rest/api/sql/instancefailovergroups/failover) | Triggers failover from the current primary instance to this instance with full data synchronization. | -| [Force Failover Allow Data Loss](/rest/api/sql/instancefailovergroups/forcefailoverallowdataloss) | Triggers failover from the current primary instance to the secondary instance without synchronizing data. This operation may result in data loss. | -| [Get Failover Group](/rest/api/sql/instancefailovergroups/get) | retrieves a failover group's configuration. | -| [List Failover Groups - List By Location](/rest/api/sql/instancefailovergroups/listbylocation) | Lists the failover groups in a location. | - ---- - -## Next steps - -- For detailed tutorials, see - - [Add a SQL Managed Instance to a failover group](../managed-instance/failover-group-add-instance-tutorial.md) -- For a sample script, see: - - [Use PowerShell to create an auto-failover group on a SQL Managed Instance](scripts/add-to-failover-group-powershell.md) -- For a business continuity overview and scenarios, see [Business continuity overview](../database/business-continuity-high-availability-disaster-recover-hadr-overview.md) -- To learn about automated backups, see [SQL Database automated backups](../database/automated-backups-overview.md). -- To learn about using automated backups for recovery, see [Restore a database from the service-initiated backups](../database/recovery-using-backups.md). 
diff --git a/articles/azure-sql/managed-instance/backup-activity-monitor.md b/articles/azure-sql/managed-instance/backup-activity-monitor.md deleted file mode 100644 index 4e4273768d69f..0000000000000 --- a/articles/azure-sql/managed-instance/backup-activity-monitor.md +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: "Monitor backup activity" -titleSuffix: Azure SQL Managed Instance -description: Learn how to monitor Azure SQL Managed Instance backup activity using extended events. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: backup-restore -ms.custom: mode-other -ms.devlang: -ms.topic: quickstart -author: MilanMSFT -ms.author: mlazic -ms.reviewer: mathoma, nvraparl -ms.date: 12/14/2018 ---- -# Monitor backup activity for Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article teaches you to configure extended event (XEvent) sessions to monitor backup activity for [Azure SQL Managed Instance](sql-managed-instance-paas-overview.md). - -## Overview - -Azure SQL Managed Instance emits events (also known as [Extended Events or XEvents](../database/xevent-db-diff-from-svr.md)) during backup activity for the purpose of reporting. Configure an XEvent session to track information such as backup status, backup type, size, time, and location within the msdb database. This information can be integrated with backup monitoring software and also used for the purpose of Enterprise Audit. - -Enterprise Audits may require proof of successful backups, time of backup, and duration of the backup. - -## Configure XEvent session - -Use the extended event `backup_restore_progress_trace` to record the progress of your SQL Managed Instance back up. Modify the XEvent sessions as needed to track the information you're interested in for your business. These T-SQL snippets store the XEvent sessions in the ring buffer, but it's also possible to write to [Azure Blob Storage](../database/xevent-code-event-file.md). 
XEvent sessions storing data in the ring buffer have a limit of about 1000 messages so should only be used to track recent activity. Additionally, ring buffer data is lost upon failover. As such, for a historical record of backups, write to an event file instead. - -### Simple tracking - -Configure a simple XEvent session to capture simple events about complete full backups. This script collects the name of the database, the total number of bytes processed, and the time the backup completed. - -Use Transact-SQL (T-SQL) to configure the simple XEvent session: - - -```sql -CREATE EVENT SESSION [Simple backup trace] ON SERVER -ADD EVENT sqlserver.backup_restore_progress_trace( -WHERE operation_type = 0 -AND trace_message LIKE '%100 percent%') -ADD TARGET package0.ring_buffer -WITH(STARTUP_STATE=ON) -GO -ALTER EVENT SESSION [Simple backup trace] ON SERVER -STATE = start; -``` - - - -### Verbose tracking - -Configure a verbose XEvent session to track greater details about your backup activity. This script captures start and finish of both full, differential and log backups. Since this script is more verbose, it fills up the ring buffer faster, so entries may recycle faster than with the simple script. 
- -Use Transact-SQL (T-SQL) to configure the verbose XEvent session: - -```sql -CREATE EVENT SESSION [Verbose backup trace] ON SERVER -ADD EVENT sqlserver.backup_restore_progress_trace( - WHERE ( - [operation_type]=(0) AND ( - [trace_message] like '%100 percent%' OR - [trace_message] like '%BACKUP DATABASE%' OR [trace_message] like '%BACKUP LOG%')) - ) -ADD TARGET package0.ring_buffer -WITH (MAX_MEMORY=4096 KB,EVENT_RETENTION_MODE=ALLOW_SINGLE_EVENT_LOSS, - MAX_DISPATCH_LATENCY=30 SECONDS,MAX_EVENT_SIZE=0 KB,MEMORY_PARTITION_MODE=NONE, - TRACK_CAUSALITY=OFF,STARTUP_STATE=ON) - -ALTER EVENT SESSION [Verbose backup trace] ON SERVER -STATE = start; - -``` - -## Monitor backup progress - -After the XEvent session is created, you can use Transact-SQL (T-SQL) to query ring buffer results and monitor the progress of the backup. Once the XEvent starts, it collects all backup events so entries are added to the session roughly every 5-10 minutes. - -### Simple tracking - -The following Transact-SQL (T-SQL) code queries the simple XEvent session and returns the name of the database, the total number of bytes processed, and the time the backup completed: - -```sql -WITH -a AS (SELECT xed = CAST(xet.target_data AS xml) -FROM sys.dm_xe_session_targets AS xet -JOIN sys.dm_xe_sessions AS xe -ON (xe.address = xet.event_session_address) -WHERE xe.name = 'Backup trace'), -b AS(SELECT -d.n.value('(@timestamp)[1]', 'datetime2') AS [timestamp], -ISNULL(db.name, d.n.value('(data[@name="database_name"]/value)[1]', 'varchar(200)')) AS database_name, -d.n.value('(data[@name="trace_message"]/value)[1]', 'varchar(4000)') AS trace_message -FROM a -CROSS APPLY xed.nodes('/RingBufferTarget/event') d(n) -LEFT JOIN master.sys.databases db -ON db.physical_database_name = d.n.value('(data[@name="database_name"]/value)[1]', 'varchar(200)')) -SELECT * FROM b -``` - -The following screenshot shows an example of the output of the above query: - -![Screenshot of the xEvent 
output](./media/backup-activity-monitor/present-xevents-output.png) - -In this example, five databases were automatically backed up over the course of 2 hours and 30 minutes, and there are 130 entries in the XEvent session. - -### Verbose tracking - -The following Transact-SQL (T-SQL) code queries the verbose XEvent session and returns the name of the database, as well as the start and finish of both full, differential and log backups. - - -```sql -WITH -a AS (SELECT xed = CAST(xet.target_data AS xml) -FROM sys.dm_xe_session_targets AS xet -JOIN sys.dm_xe_sessions AS xe -ON (xe.address = xet.event_session_address) -WHERE xe.name = 'Verbose backup trace'), -b AS(SELECT -d.n.value('(@timestamp)[1]', 'datetime2') AS [timestamp], -ISNULL(db.name, d.n.value('(data[@name="database_name"]/value)[1]', 'varchar(200)')) AS database_name, -d.n.value('(data[@name="trace_message"]/value)[1]', 'varchar(4000)') AS trace_message -FROM a -CROSS APPLY xed.nodes('/RingBufferTarget/event') d(n) -LEFT JOIN master.sys.databases db -ON db.physical_database_name = d.n.value('(data[@name="database_name"]/value)[1]', 'varchar(200)')) -SELECT * FROM b -``` - -The following screenshot shows an example of a full backup in the XEvent session: - -:::image type="content" source="media/backup-activity-monitor/output-with-full.png" alt-text="XEvent output showing full backups"::: - -The following screenshot shows an example of an output of a differential backup in the XEvent session: - -:::image type="content" source="media/backup-activity-monitor/output-with-differential.png" alt-text="XEvent output showing differential backups"::: - - -## Next steps - -Once your backup has completed, you can then [restore to a point in time](point-in-time-restore.md) or [configure a long-term retention policy](long-term-backup-retention-configure.md). - -To learn more, see [automated backups](../database/automated-backups-overview.md). 
diff --git a/articles/azure-sql/managed-instance/bread/toc.yml b/articles/azure-sql/managed-instance/bread/toc.yml deleted file mode 100644 index fb8563a5ceaf0..0000000000000 --- a/articles/azure-sql/managed-instance/bread/toc.yml +++ /dev/null @@ -1,22 +0,0 @@ -- name: Azure - tocHref: /sql/ - topicHref: /azure/index - items: - - name: SQL Managed Instance - tocHref: /sql/ - topicHref: /azure/azure-sql/managed-instance - items: - - name: Machine Learning - tocHref: /sql/machine-learning/ - topicHref: /azure/azure-sql/managed-instance/machine-learning-services-overview -- name: Azure - tocHref: /azure/ - topicHref: /azure/index - items: - - name: SQL Managed Instance - tocHref: /azure/azure-sql-edge/ - topicHref: /azure/azure-sql/managed-instance - items: - - name: Machine Learning - tocHref: /azure/azure-sql-edge/ - topicHref: /azure/azure-sql/managed-instance/machine-learning-services-overview diff --git a/articles/azure-sql/managed-instance/connect-application-instance.md b/articles/azure-sql/managed-instance/connect-application-instance.md deleted file mode 100644 index 77a48fb8f821f..0000000000000 --- a/articles/azure-sql/managed-instance/connect-application-instance.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: Connect your application to SQL Managed Instance -titleSuffix: Azure SQL Managed Instance -description: This article discusses how to connect your application to Azure SQL Managed Instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: connect -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: mathoma, bonova, vanto -ms.date: 08/20/2021 ---- - -# Connect your application to Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Today you have multiple choices when deciding how and where you host your application. 
- -You may choose to host application in the cloud by using Azure App Service or some of Azure's virtual network integrated options like Azure App Service Environment, Azure Virtual Machines, and virtual machine scale sets. You could also take hybrid cloud approach and keep your applications on-premises. - -Whatever choice you make, you can connect it to Azure SQL Managed Instance. - -This article describes how to connect an application to Azure SQL Managed Instance in a number of different application scenarios from inside the virtual network. - -> [!IMPORTANT] -> You can also enable data access to your managed instance from outside a virtual network. You are able to access your managed instance from multi-tenant Azure services like Power BI, Azure App Service, or an on-premises network that are not connected to a VPN by using the public endpoint on a managed instance. You will need to enable public endpoint on the managed instance and allow public endpoint traffic on the network security group associated with the managed instance subnet. See more important details on [Configure public endpoint in Azure SQL Managed Instance](./public-endpoint-configure.md). - -![High availability](./media/connect-application-instance/application-deployment-topologies.png) - - -## Connect inside the same VNet - -Connecting an application inside the same virtual network as SQL Managed Instance is the simplest scenario. Virtual machines inside the virtual network can connect to each other directly even if they are inside different subnets. That means that all you need to connect an application inside App Service Environment or a virtual machine is to set the connection string appropriately. - -## Connect inside a different VNet - -Connecting an application when it resides within a different virtual network from SQL Managed Instance is a bit more complex because SQL Managed Instance has private IP addresses in its own virtual network. 
To connect, an application needs access to the virtual network where SQL Managed Instance is deployed. So you need to make a connection between the application and the SQL Managed Instance virtual network. The virtual networks don't have to be in the same subscription in order for this scenario to work. - -There are two options for connecting virtual networks: - -- [Azure VNet peering](../../virtual-network/virtual-network-peering-overview.md) -- VNet-to-VNet VPN gateway ([Azure portal](../../vpn-gateway/vpn-gateway-howto-vnet-vnet-resource-manager-portal.md), [PowerShell](../../vpn-gateway/vpn-gateway-vnet-vnet-rm-ps.md), [Azure CLI](../../vpn-gateway/vpn-gateway-howto-vnet-vnet-cli.md)) - -Peering is preferable because it uses the Microsoft backbone network, so from the connectivity perspective, there is no noticeable difference in latency between virtual machines in a peered virtual network and in the same virtual network. Virtual network peering is supported between the networks in the same region. Global virtual network peering is also supported with the limitation described in the note below. - -> [!IMPORTANT] -> [On 9/22/2020 support for global virtual network peering for newly created virtual clusters was announced](https://azure.microsoft.com/updates/global-virtual-network-peering-support-for-azure-sql-managed-instance-now-available/). It means that global virtual network peering is supported for SQL managed instances created in empty subnets after the announcement date, as well as for all the subsequent managed instances created in those subnets. For all the other SQL managed instances peering support is limited to the networks in the same region due to the [constraints of global virtual network peering](../../virtual-network/virtual-network-manage-peering.md#requirements-and-constraints).
See also the relevant section of the [Azure Virtual Networks frequently asked questions](../../virtual-network/virtual-networks-faq.md#what-are-the-constraints-related-to-global-vnet-peering-and-load-balancers) article for more details. To be able to use global virtual network peering for SQL managed instances from virtual clusters created before the announcement date, consider configuring [maintenance window](../database/maintenance-window.md) on the instances, as it will move the instances into new virtual clusters that support global virtual network peering. - -## Connect from on-premises - -You can also connect your on-premises application to SQL Managed Instance via virtual network (private IP address). In order to access it from on-premises, you need to make a site-to-site connection between the application and the SQL Managed Instance virtual network. For data access to your managed instance from outside a virtual network see [Configure public endpoint in Azure SQL Managed Instance](./public-endpoint-configure.md). - -There are two options for how to connect on-premises to an Azure virtual network: - -- Site-to-site VPN connection ([Azure portal](../../vpn-gateway/tutorial-site-to-site-portal.md), [PowerShell](../../vpn-gateway/vpn-gateway-create-site-to-site-rm-powershell.md), [Azure CLI](../../vpn-gateway/vpn-gateway-howto-site-to-site-resource-manager-cli.md)) -- [Azure ExpressRoute](../../expressroute/expressroute-introduction.md) connection - -If you've established an on-premises to Azure connection successfully and you can't establish a connection to SQL Managed Instance, check if your firewall has an open outbound connection on SQL port 1433 as well as the 11000-11999 range of ports for redirection. - -## Connect the developer box - -It is also possible to connect your developer box to SQL Managed Instance. 
In order to access it from your developer box via virtual network, you first need to make a connection between your developer box and the SQL Managed Instance virtual network. To do so, configure a point-to-site connection to a virtual network using native Azure certificate authentication. For more information, see [Configure a point-to-site connection to connect to Azure SQL Managed Instance from an on-premises computer](point-to-site-p2s-configure.md). - -For data access to your managed instance from outside a virtual network see [Configure public endpoint in Azure SQL Managed Instance](./public-endpoint-configure.md). - -## Connect with VNet peering - -Another scenario implemented by customers is where a VPN gateway is installed in a separate virtual network and subscription from the one hosting SQL Managed Instance. The two virtual networks are then peered. The following sample architecture diagram shows how this can be implemented. - -![Virtual network peering](./media/connect-application-instance/vnet-peering.png) - -Once you have the basic infrastructure set up, you need to modify some settings so that the VPN gateway can see the IP addresses in the virtual network that hosts SQL Managed Instance. To do so, make the following very specific changes under the **Peering settings**. - -1. In the virtual network that hosts the VPN gateway, go to **Peerings**, go to the peered virtual network connection for SQL Managed Instance, and then click **Allow Gateway Transit**. -2. In the virtual network that hosts SQL Managed Instance, go to **Peerings**, go to the peered virtual network connection for the VPN gateway, and then click **Use remote gateways**. - -## Connect Azure App Service - -You can also connect an application that's hosted by Azure App Service. In order to access it from Azure App Service via virtual network, you first need to make a connection between the application and the SQL Managed Instance virtual network. 
See [Integrate your app with an Azure virtual network](../../app-service/overview-vnet-integration.md). For data access to your managed instance from outside a virtual network see [Configure public endpoint in Azure SQL Managed Instance](./public-endpoint-configure.md). - -For troubleshooting Azure App Service access via virtual network, see [Troubleshooting virtual networks and applications](../../app-service/overview-vnet-integration.md#troubleshooting). - -A special case of connecting Azure App Service to SQL Managed Instance is when you integrate Azure App Service to a network peered to a SQL Managed Instance virtual network. That case requires the following configuration to be set up: - -- SQL Managed Instance virtual network must NOT have a gateway -- SQL Managed Instance virtual network must have the `Use remote gateways` option set -- Peered virtual network must have the `Allow gateway transit` option set - -This scenario is illustrated in the following diagram: - -![integrated app peering](./media/connect-application-instance/integrated-app-peering.png) - ->[!NOTE] ->The virtual network integration feature does not integrate an app with a virtual network that has an ExpressRoute gateway. Even if the ExpressRoute gateway is configured in coexistence mode, virtual network integration does not work. If you need to access resources through an ExpressRoute connection, then you can use App Service Environment, which runs in your virtual network. - -## Troubleshooting connectivity issues - -For troubleshooting connectivity issues, review the following: - -- If you are unable to connect to SQL Managed Instance from an Azure virtual machine within the same virtual network but a different subnet, check if you have a Network Security Group set on VM subnet that might be blocking access. Additionally, open outbound connection on SQL port 1433 as well as ports in the range 11000-11999, since those are needed for connecting via redirection inside the Azure boundary. 
-- Ensure that BGP Propagation is set to **Enabled** for the route table associated with the virtual network. -- If using P2S VPN, check the configuration in the Azure portal to see if you see **Ingress/Egress** numbers. Non-zero numbers indicate that Azure is routing traffic to/from on-premises. - - ![ingress/egress numbers](./media/connect-application-instance/ingress-egress-numbers.png) - -- Check that the client machine (that is running the VPN client) has route entries for all the virtual networks that you need to access. The routes are stored in -`%AppData%\Roaming\Microsoft\Network\Connections\Cm\\routes.txt`. - - ![route.txt](./media/connect-application-instance/route-txt.png) - - As shown in this image, there are two entries for each virtual network involved and a third entry for the VPN endpoint that is configured in the portal. - - Another way to check the routes is via the following command. The output shows the routes to the various subnets: - - ```cmd - C:\ >route print -4 - =========================================================================== - Interface List - 14...54 ee 75 67 6b 39 ......Intel(R) Ethernet Connection (3) I218-LM - 57...........................rndatavnet - 18...94 65 9c 7d e5 ce ......Intel(R) Dual Band Wireless-AC 7265 - 1...........................Software Loopback Interface 1 - Adapter=========================================================================== - - IPv4 Route Table - =========================================================================== - Active Routes: - Network Destination Netmask Gateway Interface Metric - 0.0.0.0 0.0.0.0 10.83.72.1 10.83.74.112 35 - 10.0.0.0 255.255.255.0 On-link 172.26.34.2 43 - 10.4.0.0 255.255.255.0 On-link 172.26.34.2 43 - =========================================================================== - Persistent Routes: - None - ``` - -- If you're using virtual network peering, ensure that you have followed the instructions for setting [Allow Gateway Transit and Use Remote 
Gateways](#connect-from-on-premises). - -- If you're using virtual network peering to connect an Azure App Service hosted application, and the SQL Managed Instance virtual network has a public IP address range, make sure that your hosted application settings allow your outbound traffic to be routed to public IP networks. Follow the instructions in [Regional virtual network integration](../../app-service/overview-vnet-integration.md#regional-virtual-network-integration). - -## Required versions of drivers and tools - -The following minimal versions of the tools and drivers are recommended if you want to connect to SQL Managed Instance: - -| Driver/tool | Version | -| --- | --- | -|.NET Framework | 4.6.1 (or .NET Core) | -|ODBC driver| v17 | -|PHP driver| 5.2.0 | -|JDBC driver| 6.4.0 | -|Node.js driver| 2.1.1 | -|OLEDB driver| 18.0.2.0 | -|SSMS| 18.0 or [higher](/sql/ssms/download-sql-server-management-studio-ssms) | -|[SMO](/sql/relational-databases/server-management-objects-smo/sql-server-management-objects-smo-programming-guide) | [150](https://www.nuget.org/packages/Microsoft.SqlServer.SqlManagementObjects) or higher | - -## Next steps - -- For information about SQL Managed Instance, see [What is SQL Managed Instance?](sql-managed-instance-paas-overview.md). -- For a tutorial showing you how to create a new managed instance, see [Create a managed instance](instance-create-quickstart.md). \ No newline at end of file diff --git a/articles/azure-sql/managed-instance/connect-vm-instance-configure.md b/articles/azure-sql/managed-instance/connect-vm-instance-configure.md deleted file mode 100644 index a3aee8f505bda..0000000000000 --- a/articles/azure-sql/managed-instance/connect-vm-instance-configure.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -title: Configure Azure VM connectivity -titleSuffix: Azure SQL Managed Instance -description: Connect to Azure SQL Managed Instance using SQL Server Management Studio from an Azure virtual machine. 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: connect -ms.custom: mode-other -ms.devlang: -ms.topic: quickstart -author: zoran-rilak-msft -ms.author: zoranrilak -ms.reviewer: mathoma, srbozovi, bonova -ms.date: 02/18/2019 ---- -# Quickstart: Configure an Azure VM to connect to Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This quickstart shows you how to configure an Azure virtual machine to connect to Azure SQL Managed Instance using SQL Server Management Studio (SSMS). - - -For a quickstart showing how to connect from an on-premises client computer using a point-to-site connection instead, see [Configure a point-to-site connection](point-to-site-p2s-configure.md). - -## Prerequisites - -This quickstart uses the resources created in [Create a managed instance](instance-create-quickstart.md) as its starting point. - -## Sign in to the Azure portal - -Sign in to the [Azure portal](https://portal.azure.com/). - -## Create a new subnet VNet - -The following steps create a new subnet in the SQL Managed Instance VNet so an Azure virtual machine can connect to the managed instance. The SQL Managed Instance subnet is dedicated to managed instances. You can't create any other resources, like Azure virtual machines, in that subnet. - -1. Open the resource group for the managed instance that you created in the [Create a managed instance](instance-create-quickstart.md) quickstart. Select the virtual network for your managed instance. - - ![SQL Managed Instance resources](./media/connect-vm-instance-configure/resources.png) - -2. Select **Subnets** and then select **+ Subnet** to create a new subnet. - - ![SQL Managed Instance subnets](./media/connect-vm-instance-configure/subnets.png) - -3. 
Fill out the form using the information in this table: - - | Setting| Suggested value | Description | - | ---------------- | ----------------- | ----------- | - | **Name** | Any valid name|For valid names, see [Naming rules and restrictions](/azure/architecture/best-practices/resource-naming).| - | **Address range (CIDR block)** | A valid range | The default value is good for this quickstart.| - | **Network security group** | None | The default value is good for this quickstart.| - | **Route table** | None | The default value is good for this quickstart.| - | **Service endpoints** | 0 selected | The default value is good for this quickstart.| - | **Subnet delegation** | None | The default value is good for this quickstart.| - - ![New SQL Managed Instance subnet for client VM](./media/connect-vm-instance-configure/new-subnet.png) - -4. Select **OK** to create this additional subnet in the SQL Managed Instance VNet. - -## Create a VM in the new subnet - -The following steps show you how to create a virtual machine in the new subnet to connect to SQL Managed Instance. - -## Prepare the Azure virtual machine - -Since SQL Managed Instance is placed in your private virtual network, you need to create an Azure VM with an installed SQL client tool, like SQL Server Management Studio or Azure Data Studio. This tool lets you connect to SQL Managed Instance and execute queries. This quickstart uses SQL Server Management Studio. - -The easiest way to create a client virtual machine with all necessary tools is to use the Azure Resource Manager templates. - -1. Make sure that you're signed in to the Azure portal in another browser tab. 
Then, select the following button to create a client virtual machine and install SQL Server Management Studio: - - [![Image showing a button labeled "Deploy to Azure".](../../media/template-deployments/deploy-to-azure.svg)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2Fjovanpop-msft%2Fazure-quickstart-templates%2Fsql-win-vm-w-tools%2F201-vm-win-vnet-sql-tools%2Fazuredeploy.json) - -2. Fill out the form using the information in the following table: - - | Setting| Suggested value | Description | - | ---------------- | ----------------- | ----------- | - | **Subscription** | A valid subscription | Must be a subscription in which you have permission to create new resources. | - | **Resource Group** |The resource group that you specified in the [Create SQL Managed Instance](instance-create-quickstart.md) quickstart|This resource group must be the one in which the VNet exists.| - | **Location** | The location for the resource group | This value is populated based on the resource group selected. | - | **Virtual machine name** | Any valid name | For valid names, see [Naming rules and restrictions](/azure/architecture/best-practices/resource-naming).| - |**Admin Username**|Any valid username|For valid names, see [Naming rules and restrictions](/azure/architecture/best-practices/resource-naming). Don't use "serveradmin" as that is a reserved server-level role.
    You use this username any time you [connect to the VM](#connect-to-the-virtual-machine).| - |**Password**|Any valid password|The password must be at least 12 characters long and meet the [defined complexity requirements](../../virtual-machines/windows/faq.yml#what-are-the-password-requirements-when-creating-a-vm-).
    You use this password any time you [connect to the VM](#connect-to-the-virtual-machine).| - | **Virtual Machine Size** | Any valid size | The default in this template of **Standard_B2s** is sufficient for this quickstart. | - | **Location**|[resourceGroup().location].| Don't change this value. | - | **Virtual Network Name**|The virtual network in which you created the managed instance| - | **Subnet name**|The name of the subnet that you created in the previous procedure| Don't choose the subnet in which you created the managed instance.| - | **artifacts Location** | [deployment().properties.templateLink.uri] | Don't change this value. | - | **artifacts Location Sas token** | Leave blank | Don't change this value. | - - ![create client VM](./media/connect-vm-instance-configure/create-client-sql-vm.png) - - If you used the suggested VNet name and the default subnet in [creating your SQL Managed Instance](instance-create-quickstart.md), you don't need to change last two parameters. Otherwise you should change these values to the values that you entered when you set up the network environment. - -3. Select the **I agree to the terms and conditions stated above** checkbox. -4. Select **Purchase** to deploy the Azure VM in your network. -5. Select the **Notifications** icon to view the status of deployment. - -> [!IMPORTANT] -> Do not continue until approximately 15 minutes after the virtual machine is created to give time for the post-creation scripts to install SQL Server Management Studio. - -## Connect to the virtual machine - -The following steps show you how to connect to your newly created virtual machine using a Remote Desktop connection. - -1. After deployment completes, go to the virtual machine resource. - - ![Screenshot shows the Azure portal with the Overview page for a virtual machine selected and Connect highlighted.](./media/connect-vm-instance-configure/vm.png) - -2. Select **Connect**. 
- - A Remote Desktop Protocol file (.rdp file) form appears with the public IP address and port number for the virtual machine. - - ![RDP form](./media/connect-vm-instance-configure/rdp.png) - -3. Select **Download RDP File**. - - > [!NOTE] - > You can also use SSH to connect to your VM. - -4. Close the **Connect to virtual machine** form. -5. To connect to your VM, open the downloaded RDP file. -6. When prompted, select **Connect**. On a Mac, you need an RDP client such as [this Remote Desktop Client](https://apps.apple.com/app/microsoft-remote-desktop-10/id1295203466?mt=12) from the Mac App Store. - -7. Enter the username and password you specified when creating the virtual machine, and then choose **OK**. - -8. You might receive a certificate warning during the sign-in process. Choose **Yes** or **Continue** to proceed with the connection. - -You're connected to your virtual machine in the Server Manager dashboard. - -## Connect to SQL Managed Instance - -1. In the virtual machine, open SQL Server Management Studio. - - It takes a few moments to open, as it needs to complete its configuration since this is the first time SSMS has been started. -2. In the **Connect to Server** dialog box, enter the fully qualified **host name** for your managed instance in the **Server name** box. Select **SQL Server Authentication**, provide your username and password, and then select **Connect**. - - ![SSMS connect](./media/connect-vm-instance-configure/ssms-connect.png) - -After you connect, you can view your system and user databases in the Databases node, and various objects in the Security, Server Objects, Replication, Management, SQL Server Agent, and XEvent Profiler nodes. - -## Next steps - -- For a quickstart showing how to connect from an on-premises client computer using a point-to-site connection, see [Configure a point-to-site connection](point-to-site-p2s-configure.md). 
-- For an overview of the connection options for applications, see [Connect your applications to SQL Managed Instance](connect-application-instance.md). -- To restore an existing SQL Server database from on-premises to a managed instance, you can use [Azure Database Migration Service for migration](../../dms/tutorial-sql-server-to-managed-instance.md) or the [T-SQL RESTORE command](restore-sample-database-quickstart.md) to restore from a database backup file. diff --git a/articles/azure-sql/managed-instance/connection-types-overview.md b/articles/azure-sql/managed-instance/connection-types-overview.md deleted file mode 100644 index b561c47cae1b2..0000000000000 --- a/articles/azure-sql/managed-instance/connection-types-overview.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: Connection types -titleSuffix: Azure SQL Managed Instance -description: Learn about Azure SQL Managed Instance connection types -services: sql-database -ms.service: sql-managed-instance -ms.subservice: connect -ms.topic: conceptual -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: vanto -ms.date: 12/01/2021 -ms.custom: devx-track-azurepowershell ---- - -# Azure SQL Managed Instance connection types -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article explains how clients connect to Azure SQL Managed Instance depending on the connection type. Script samples to change connection types are provided below, along with considerations related to changing the default connectivity settings. - -## Connection types - -Azure SQL Managed Instance supports the following two connection types: - -- **Redirect (recommended):** Clients establish connections directly to the node hosting the database. To enable connectivity using redirect, you must open firewalls and Network Security Groups (NSG) to allow access on ports 1433, and 11000-11999. Packets go directly to the database, and hence there are latency and throughput performance improvements using redirect over proxy. 
Impact of planned maintenance events of gateway component is also minimized with redirect connection type compared to proxy since connections, once established, have no dependency on gateway. -- **Proxy (default):** In this mode, all connections are using a proxy gateway component. To enable connectivity, only port 1433 for private networks and port 3342 for public connection need to be opened. Choosing this mode can result in higher latency and lower throughput, depending on nature of the workload. Also, planned maintenance events of gateway component break all live connections in proxy mode. We highly recommend the redirect connection policy over the proxy connection policy for the lowest latency, highest throughput, and minimized impact of planned maintenance. - -## Redirect connection type - -In the redirect connection type, after the TCP session is established to the SQL engine, the client session obtains the destination virtual IP of the virtual cluster node from the load balancer. Subsequent packets flow directly to the virtual cluster node, bypassing the gateway. The following diagram illustrates this traffic flow. - -![Diagram shows an on-premises network with redirect-find-db connected to a gateway in an Azure virtual network and a redirect-query connected to a database primary node in the virtual network.](./media/connection-types-overview/redirect.png) - -> [!IMPORTANT] -> The redirect connection type currently works only for a private endpoint. Regardless of the connection type setting, connections coming through the public endpoint would be through a proxy. - -## Proxy connection type - -In the proxy connection type, the TCP session is established using the gateway and all subsequent packets flow through it. The following diagram illustrates this traffic flow. 
- -![Diagram shows an on-premises network with a proxy connected to a gateway in an Azure virtual network, connect next to a database primary node in the virtual network.](./media/connection-types-overview/proxy.png) - -## Changing Connection Type - -- **Using the Portal:** -To change the Connection Type using the Azure portal,open the Virtual Network page and use the **Connection type** setting to change the connection type and save the changes. - -- **Script to change connection type settings using PowerShell:** - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] - -The following PowerShell script shows how to change the connection type for a managed instance to `Redirect`. - -```powershell -Install-Module -Name Az -Import-Module Az.Accounts -Import-Module Az.Sql - -Connect-AzAccount -# Get your SubscriptionId from the Get-AzSubscription command -Get-AzSubscription -# Use your SubscriptionId in place of {subscription-id} below -Select-AzSubscription -SubscriptionId {subscription-id} -# Replace {rg-name} with the resource group for your managed instance, and replace {mi-name} with the name of your managed instance -$mi = Get-AzSqlInstance -ResourceGroupName {rg-name} -Name {mi-name} -$mi = $mi | Set-AzSqlInstance -ProxyOverride "Redirect" -force -``` - -## Next steps - -- [Restore a database to SQL Managed Instance](restore-sample-database-quickstart.md) -- Learn how to [configure a public endpoint on SQL Managed Instance](public-endpoint-configure.md) -- Learn about [SQL Managed Instance connectivity architecture](connectivity-architecture-overview.md) diff --git a/articles/azure-sql/managed-instance/connectivity-architecture-overview.md b/articles/azure-sql/managed-instance/connectivity-architecture-overview.md deleted file mode 100644 index 6165466f9d846..0000000000000 --- a/articles/azure-sql/managed-instance/connectivity-architecture-overview.md +++ /dev/null @@ -1,181 +0,0 @@ ---- -title: Connectivity architecture -titleSuffix: Azure SQL 
Managed Instance -description: Learn about Azure SQL Managed Instance communication and connectivity architecture as well as how the components direct traffic to a managed instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: service-overview -ms.custom: fasttrack-edit -ms.devlang: -ms.topic: conceptual -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: mathoma, bonova -ms.date: 04/29/2021 ---- - -# Connectivity architecture for Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article explains communication in Azure SQL Managed Instance. It also describes connectivity architecture and how the components direct traffic to a managed instance. - -SQL Managed Instance is placed inside the Azure virtual network and the subnet that's dedicated to managed instances. This deployment provides: - -- A secure private IP address. -- The ability to connect an on-premises network to SQL Managed Instance. -- The ability to connect SQL Managed Instance to a linked server or another on-premises data store. -- The ability to connect SQL Managed Instance to Azure resources. - -## Communication overview - -The following diagram shows entities that connect to SQL Managed Instance. It also shows the resources that need to communicate with a managed instance. The communication process at the bottom of the diagram represents customer applications and tools that connect to SQL Managed Instance as data sources. - -![Entities in connectivity architecture](./media/connectivity-architecture-overview/connectivityarch001.png) - -SQL Managed Instance is a platform as a service (PaaS) offering. Azure uses automated agents (management, deployment, and maintenance) to manage this service based on telemetry data streams. Because Azure is responsible for management, customers can't access the SQL Managed Instance virtual cluster machines through Remote Desktop Protocol (RDP). 
- -Some operations started by end users or applications might require SQL Managed Instance to interact with the platform. One case is the creation of a SQL Managed Instance database. This resource is exposed through the Azure portal, PowerShell, Azure CLI, and the REST API. - -SQL Managed Instance depends on Azure services such as Azure Storage for backups, Azure Event Hubs for telemetry, Azure Active Directory (Azure AD) for authentication, Azure Key Vault for Transparent Data Encryption (TDE), and a couple of Azure platform services that provide security and supportability features. SQL Managed Instance makes connections to these services. - -All communications are encrypted and signed using certificates. To check the trustworthiness of communicating parties, SQL Managed Instance constantly verifies these certificates through certificate revocation lists. If the certificates are revoked, SQL Managed Instance closes the connections to protect the data. - -## High-level connectivity architecture - -At a high level, SQL Managed Instance is a set of service components. These components are hosted on a dedicated set of isolated virtual machines that run inside the customer's virtual network subnet. These machines form a virtual cluster. - -A virtual cluster can host multiple managed instances. If needed, the cluster automatically expands or contracts when the customer changes the number of provisioned instances in the subnet. - -Customer applications can connect to SQL Managed Instance and can query and update databases inside the virtual network, peered virtual network, or network connected by VPN or Azure ExpressRoute. This network must use an endpoint and a private IP address. - -![Connectivity architecture diagram](./media/connectivity-architecture-overview/connectivityarch002.png) - -Azure management and deployment services run outside the virtual network. SQL Managed Instance and Azure services connect over the endpoints that have public IP addresses. 
When SQL Managed Instance creates an outbound connection, on the receiving end Network Address Translation (NAT) makes the connection look like it's coming from this public IP address. - -Management traffic flows through the customer's virtual network. That means that elements of the virtual network's infrastructure can harm management traffic by making the instance fail and become unavailable. - -> [!IMPORTANT] -> To improve customer experience and service availability, Azure applies a network intent policy on Azure virtual network infrastructure elements. The policy can affect how SQL Managed Instance works. This platform mechanism transparently communicates networking requirements to users. The policy's main goal is to prevent network misconfiguration and to ensure normal SQL Managed Instance operations. When you delete a managed instance, the network intent policy is also removed. - -## Virtual cluster connectivity architecture - -Let's take a deeper dive into connectivity architecture for SQL Managed Instance. The following diagram shows the conceptual layout of the virtual cluster. - -![Connectivity architecture of the virtual cluster](./media/connectivity-architecture-overview/connectivityarch003.png) - -Clients connect to SQL Managed Instance by using a host name that has the form `..database.windows.net`. This host name resolves to a private IP address, although it's registered in a public Domain Name System (DNS) zone and is publicly resolvable. The `zone-id` is automatically generated when you create the cluster. If a newly created cluster hosts a secondary managed instance, it shares its zone ID with the primary cluster. For more information, see [Use auto failover groups to enable transparent and coordinated failover of multiple databases](auto-failover-group-configure-sql-mi.md#enabling-geo-replication-between-managed-instances-and-their-vnets). - -This private IP address belongs to the internal load balancer for SQL Managed Instance. 
The load balancer directs traffic to the SQL Managed Instance gateway. Because multiple managed instances can run inside the same cluster, the gateway uses the SQL Managed Instance host name to redirect traffic to the correct SQL engine service. - -Management and deployment services connect to SQL Managed Instance by using a [management endpoint](#management-endpoint) that maps to an external load balancer. Traffic is routed to the nodes only if it's received on a predefined set of ports that only the management components of SQL Managed Instance use. A built-in firewall on the nodes is set up to allow traffic only from Microsoft IP ranges. Certificates mutually authenticate all communication between management components and the management plane. - -## Management endpoint - -Azure manages SQL Managed Instance by using a management endpoint. This endpoint is inside an instance's virtual cluster. The management endpoint is protected by a built-in firewall on the network level. On the application level, it's protected by mutual certificate verification. To find the endpoint's IP address, see [Determine the management endpoint's IP address](management-endpoint-find-ip-address.md). - -When connections start inside SQL Managed Instance (as with backups and audit logs), traffic appears to start from the management endpoint's public IP address. You can limit access to public services from SQL Managed Instance by setting firewall rules to allow only the IP address for SQL Managed Instance. For more information, see [Verify the SQL Managed Instance built-in firewall](management-endpoint-verify-built-in-firewall.md). - -> [!NOTE] -> Traffic that goes to Azure services that are inside the SQL Managed Instance region is optimized and for that reason not NATed to the public IP address for the management endpoint. For that reason if you need to use IP-based firewall rules, most commonly for storage, the service needs to be in a different region from SQL Managed Instance. 
- -## Service-aided subnet configuration - -To address customer security and manageability requirements, SQL Managed Instance is transitioning from manual to service-aided subnet configuration. - -With service-aided subnet configuration, the customer is in full control of data (TDS) traffic, while SQL Managed Instance control plane takes responsibility to ensure uninterrupted flow of management traffic in order to fulfill an SLA. - -Service-aided subnet configuration builds on top of the virtual network [subnet delegation](../../virtual-network/subnet-delegation-overview.md) feature to provide automatic network configuration management and enable service endpoints. - -Service endpoints could be used to configure virtual network firewall rules on storage accounts that keep backups and audit logs. Even with service endpoints enabled, customers are encouraged to use [private link](../../private-link/private-link-overview.md) that provides additional security over service endpoints. - -> [!IMPORTANT] -> Due to control plane configuration specificities, service-aided subnet configuration would not enable service endpoints in national clouds. - -### Network requirements - -Deploy SQL Managed Instance in a dedicated subnet inside the virtual network. The subnet must have these characteristics: - -- **Dedicated subnet:** SQL Managed Instance's subnet can't contain any other cloud service that's associated with it, but other managed instances are allowed and it can't be a gateway subnet. The subnet can't contain any resource but the managed instance(s), and you can't later add other types of resources in the subnet. -- **Subnet delegation:** The SQL Managed Instance subnet needs to be delegated to the `Microsoft.Sql/managedInstances` resource provider. -- **Network security group (NSG):** An NSG needs to be associated with the SQL Managed Instance subnet. 
You can use an NSG to control access to the SQL Managed Instance data endpoint by filtering traffic on port 1433 and ports 11000-11999 when SQL Managed Instance is configured for redirect connections. The service will automatically provision and keep current [rules](#mandatory-inbound-security-rules-with-service-aided-subnet-configuration) required to allow uninterrupted flow of management traffic. -- **User defined route (UDR) table:** A UDR table needs to be associated with the SQL Managed Instance subnet. You can add entries to the route table to route traffic that has on-premises private IP ranges as a destination through the virtual network gateway or virtual network appliance (NVA). Service will automatically provision and keep current [entries](#mandatory-user-defined-routes-with-service-aided-subnet-configuration) required to allow uninterrupted flow of management traffic. -- **Sufficient IP addresses:** The SQL Managed Instance subnet must have at least 32 IP addresses. For more information, see [Determine the size of the subnet for SQL Managed Instance](vnet-subnet-determine-size.md). You can deploy managed instances in [the existing network](vnet-existing-add-subnet.md) after you configure it to satisfy [the networking requirements for SQL Managed Instance](#network-requirements). Otherwise, create a [new network and subnet](virtual-network-subnet-create-arm-template.md). -- **Allowed by Azure policies:** If you use [Azure Policy](../../governance/policy/overview.md) to deny the creation or modification of resources in the scope that includes SQL Managed Instance subnet/virtual network, such policies should not prevent Managed Instance from managing its internal resources. 
The following resources need to be excluded from deny effects to enable normal operation:
- - Resources of type Microsoft.Network/serviceEndpointPolicies, when resource name begins with \_e41f87a2\_
- - All resources of type Microsoft.Network/networkIntentPolicies
- - All resources of type Microsoft.Network/virtualNetworks/subnets/contextualServiceEndpointPolicies
-- **Locks on virtual network:** [Locks](../../azure-resource-manager/management/lock-resources.md) on the dedicated subnet's virtual network, its parent resource group, or subscription, may occasionally interfere with SQL Managed Instance's management and maintenance operations. Take special care when you use such locks.
-
-> [!IMPORTANT]
-> When you create a managed instance, a network intent policy is applied on the subnet to prevent noncompliant changes to networking setup. This policy is a hidden resource located in the virtual network of the resource group. After the last instance is removed from the subnet, the network intent policy is also removed. The rules below are for informational purposes only, and you should not deploy them using an ARM template / PowerShell / CLI. If you want to use the latest official template you can always [retrieve it from the portal](../../azure-resource-manager/templates/quickstart-create-templates-use-the-portal.md). Replication traffic for auto-failover groups between two SQL Managed Instances should be direct, and not through a hub network.
-
-### Mandatory inbound security rules with service-aided subnet configuration
-These rules are necessary to ensure inbound management traffic flow. See [paragraph above](#high-level-connectivity-architecture) for more information on connectivity architecture and management traffic.
- -| Name |Port |Protocol|Source |Destination|Action| -|------------|----------------------------|--------|-----------------|-----------|------| -|management |9000, 9003, 1438, 1440, 1452|TCP |SqlManagement |MI SUBNET |Allow | -| |9000, 9003 |TCP |CorpnetSaw |MI SUBNET |Allow | -| |9000, 9003 |TCP |CorpnetPublic |MI SUBNET |Allow | -|mi_subnet |Any |Any |MI SUBNET |MI SUBNET |Allow | -|health_probe|Any |Any |AzureLoadBalancer|MI SUBNET |Allow | - -### Mandatory outbound security rules with service-aided subnet configuration -These rules are necessary to ensure outbound management traffic flow. See [paragraph above](#high-level-connectivity-architecture) for more information on connectivity architecture and management traffic. - -| Name |Port |Protocol|Source |Destination|Action| -|------------|--------------|--------|-----------------|-----------|------| -|management |443, 12000 |TCP |MI SUBNET |AzureCloud |Allow | -|mi_subnet |Any |Any |MI SUBNET |MI SUBNET |Allow | - -### Mandatory user defined routes with service-aided subnet configuration -These routes are necessary to ensure that management traffic is routed directly to a destination. See [paragraph above](#high-level-connectivity-architecture) for more information on connectivity architecture and management traffic. 
- -|Name|Address prefix|Next hop| -|----|--------------|-------| -|subnet-to-vnetlocal|MI SUBNET|Virtual network| -|mi-azurecloud-REGION-internet|AzureCloud.REGION|Internet| -|mi-azurecloud-REGION_PAIR-internet|AzureCloud.REGION_PAIR|Internet| -|mi-azuremonitor-internet|AzureMonitor|Internet| -|mi-corpnetpublic-internet|CorpNetPublic|Internet| -|mi-corpnetsaw-internet|CorpNetSaw|Internet| -|mi-eventhub-REGION-internet|EventHub.REGION|Internet| -|mi-eventhub-REGION_PAIR-internet|EventHub.REGION_PAIR|Internet| -|mi-sqlmanagement-internet|SqlManagement|Internet| -|mi-storage-internet|Storage|Internet| -|mi-storage-REGION-internet|Storage.REGION|Internet| -|mi-storage-REGION_PAIR-internet|Storage.REGION_PAIR|Internet| -|mi-azureactivedirectory-internet|AzureActiveDirectory|Internet| - - -\* MI SUBNET refers to the IP address range for the subnet in the form x.x.x.x/y. You can find this information in the Azure portal, in subnet properties. - -\** If the destination address is for one of Azure's services, Azure routes the traffic directly to the service over Azure's backbone network, rather than routing the traffic to the Internet. Traffic between Azure services does not traverse the Internet, regardless of which Azure region the virtual network exists in, or which Azure region an instance of the Azure service is deployed in. For more details check [UDR documentation page](../../virtual-network/virtual-networks-udr-overview.md). - -In addition, you can add entries to the route table to route traffic that has on-premises private IP ranges as a destination through the virtual network gateway or virtual network appliance (NVA). - -If the virtual network includes a custom DNS, the custom DNS server must be able to resolve public DNS records. Using additional features like Azure AD Authentication might require resolving additional FQDNs. For more information, see [Set up a custom DNS](custom-dns-configure.md). 
- -### Networking constraints - -**TLS 1.2 is enforced on outbound connections**: In January 2020 Microsoft enforced TLS 1.2 for intra-service traffic in all Azure services. For Azure SQL Managed Instance, this resulted in TLS 1.2 being enforced on outbound connections used for replication and linked server connections to SQL Server. If you are using versions of SQL Server older than 2016 with SQL Managed Instance, please ensure that [TLS 1.2 specific updates](https://support.microsoft.com/help/3135244/tls-1-2-support-for-microsoft-sql-server) have been applied. - -The following virtual network features are currently *not supported* with SQL Managed Instance: - -- **Microsoft peering**: Enabling [Microsoft peering](../../expressroute/expressroute-faqs.md#microsoft-peering) on ExpressRoute circuits peered directly or transitively with a virtual network where SQL Managed Instance resides affects traffic flow between SQL Managed Instance components inside the virtual network and services it depends on, causing availability issues. SQL Managed Instance deployments to virtual network with Microsoft peering already enabled are expected to fail. -- **Global virtual network peering**: [Virtual network peering](../../virtual-network/virtual-network-peering-overview.md) connectivity across Azure regions doesn't work for SQL Managed Instances placed in subnets created before 9/22/2020. -- **AzurePlatformDNS**: Using the AzurePlatformDNS [service tag](../../virtual-network/service-tags-overview.md) to block platform DNS resolution would render SQL Managed Instance unavailable. Although SQL Managed Instance supports customer-defined DNS for DNS resolution inside the engine, there is a dependency on platform DNS for platform operations. -- **NAT gateway**: Using [Azure Virtual Network NAT](../../virtual-network/nat-gateway/nat-overview.md) to control outbound connectivity with a specific public IP address would render SQL Managed Instance unavailable. 
The SQL Managed Instance service is currently limited to use of basic load balancer that doesn't provide coexistence of inbound and outbound flows with Virtual Network NAT. -- **IPv6 for Azure Virtual Network**: Deploying SQL Managed Instance to [dual stack IPv4/IPv6 virtual networks](../../virtual-network/ip-services/ipv6-overview.md) is expected to fail. Associating network security group (NSG) or route table (UDR) containing IPv6 address prefixes to SQL Managed Instance subnet, or adding IPv6 address prefixes to NSG or UDR that is already associated with Managed instance subnet, would render SQL Managed Instance unavailable. SQL Managed Instance deployments to a subnet with NSG and UDR that already have IPv6 prefixes are expected to fail. -- **Azure DNS private zones with a name reserved for Microsoft services**: Following is the list of reserved names: windows.net, database.windows.net, core.windows.net, blob.core.windows.net, table.core.windows.net, management.core.windows.net, monitoring.core.windows.net, queue.core.windows.net, graph.windows.net, login.microsoftonline.com, login.windows.net, servicebus.windows.net, vault.azure.net. Deploying SQL Managed Instance to a virtual network with associated [Azure DNS private zone](../../dns/private-dns-privatednszone.md) with a name reserved for Microsoft services would fail. Associating Azure DNS private zone with reserved name with a virtual network containing Managed Instance, would render SQL Managed Instance unavailable. Please follow [Azure Private Endpoint DNS configuration](../../private-link/private-endpoint-dns.md) for the proper Private Link configuration. - -## Next steps - -- For an overview, see [What is Azure SQL Managed Instance?](sql-managed-instance-paas-overview.md). -- Learn how to [set up a new Azure virtual network](virtual-network-subnet-create-arm-template.md) or an [existing Azure virtual network](vnet-existing-add-subnet.md) where you can deploy SQL Managed Instance. 
-- [Calculate the size of the subnet](vnet-subnet-determine-size.md) where you want to deploy SQL Managed Instance. -- Learn how to create a managed instance: - - From the [Azure portal](instance-create-quickstart.md). - - By using [PowerShell](scripts/create-configure-managed-instance-powershell.md). - - By using [an Azure Resource Manager template](https://azure.microsoft.com/resources/templates/sqlmi-new-vnet/). - - By using [an Azure Resource Manager template (using JumpBox, with SSMS included)](https://azure.microsoft.com/resources/templates/sqlmi-new-vnet-w-jumpbox/). diff --git a/articles/azure-sql/managed-instance/context/ml-context.yml b/articles/azure-sql/managed-instance/context/ml-context.yml deleted file mode 100644 index 37675ca06f011..0000000000000 --- a/articles/azure-sql/managed-instance/context/ml-context.yml +++ /dev/null @@ -1,5 +0,0 @@ -### YamlMime:ContextObject -brand: azure -uhfHeaderId: azure -breadcrumb_path: ../bread/toc.yml -toc_rel: ../../toc.yml diff --git a/articles/azure-sql/managed-instance/create-configure-managed-instance-powershell-quickstart.md b/articles/azure-sql/managed-instance/create-configure-managed-instance-powershell-quickstart.md deleted file mode 100644 index cbeb50b51248c..0000000000000 --- a/articles/azure-sql/managed-instance/create-configure-managed-instance-powershell-quickstart.md +++ /dev/null @@ -1,151 +0,0 @@ ---- -title: Create Azure SQL Managed Instance - Quickstart -description: Create an instance of Azure SQL Managed Instance using Azure PowerShell. 
-services: sql-managed-instance
-ms.service: sql-managed-instance
-ms.subservice: deployment-configuration
-ms.custom: contperf-fy21q1, devx-track-azurecli, devx-track-azurepowershell, mode-api
-ms.topic: quickstart
-author: MashaMSFT
-ms.author: mathoma
-ms.reviewer: 
-ms.date: 06/25/2021
----
-# Quickstart: Create a managed instance using Azure PowerShell
-
-In this quickstart, learn to create an instance of [Azure SQL Managed Instance](sql-managed-instance-paas-overview.md) using Azure PowerShell.
-
-
-## Prerequisites
-
-- An active Azure subscription. If you don't have one, [create a free account](https://azure.microsoft.com/free/).
-- The latest version of [Azure PowerShell](/powershell/azure/install-az-ps).
-
-## Set variables
-
-Creating a SQL Managed Instance requires creating several resources within Azure, and as such, the Azure PowerShell commands rely on variables to simplify the experience. Define the variables, and then execute the cmdlets in each section within the same PowerShell session.
-
-```azurepowershell-interactive
-$NSnetworkModels = "Microsoft.Azure.Commands.Network.Models"
-$NScollections = "System.Collections.Generic"
-# The SubscriptionId in which to create these objects
-$SubscriptionId = ''
-# Set the resource group name and location for your managed instance
-$resourceGroupName = "myResourceGroup-$(Get-Random)"
-$location = "eastus2"
-# Set the networking values for your managed instance
-$vNetName = "myVnet-$(Get-Random)"
-$vNetAddressPrefix = "10.0.0.0/16"
-$miSubnetName = "myMISubnet-$(Get-Random)"
-$miSubnetAddressPrefix = "10.0.0.0/24"
-#Set the managed instance name for the new managed instance
-$instanceName = "myMIName-$(Get-Random)"
-# Set the admin login and password for your managed instance
-$miAdminSqlLogin = "SqlAdmin"
-$miAdminSqlPassword = "ChangeYourAdminPassword1"
-# Set the managed instance service tier, compute level, and license mode
-$edition = "General Purpose"
-$vCores = 4
-$maxStorage = 128
-$computeGeneration = "Gen5"
-$license = "LicenseIncluded" # Use "BasePrice" if you have a SQL Server license that can be used for the AHB discount, or "LicenseIncluded" if you don't
-```
-
-## Create resource group
-
-First, connect to Azure, set your subscription context, and create your resource group.
-
-To do so, execute this PowerShell script:
-
-```azurepowershell-interactive
-
-## Connect to Azure
-Connect-AzAccount
-
-# Set subscription context
-Set-AzContext -SubscriptionId $SubscriptionId
-
-# Create a resource group
-$resourceGroup = New-AzResourceGroup -Name $resourceGroupName -Location $location -Tag @{Owner="SQLDB-Samples"}
-```
-
-## Configure networking
-
-After your resource group is created, configure the networking resources such as the virtual network, subnets, network security group, and routing table.
This example demonstrates the use of the **Delegate subnet for Managed Instance deployment** script, which is available on GitHub as [delegate-subnet.ps1](https://github.com/microsoft/sql-server-samples/tree/master/samples/manage/azure-sql-db-managed-instance/delegate-subnet). - -To do so, execute this PowerShell script: - -```azurepowershell-interactive - -# Configure virtual network, subnets, network security group, and routing table -$virtualNetwork = New-AzVirtualNetwork ` - -ResourceGroupName $resourceGroupName ` - -Location $location ` - -Name $vNetName ` - -AddressPrefix $vNetAddressPrefix - - Add-AzVirtualNetworkSubnetConfig ` - -Name $miSubnetName ` - -VirtualNetwork $virtualNetwork ` - -AddressPrefix $miSubnetAddressPrefix | - Set-AzVirtualNetwork - -$scriptUrlBase = 'https://raw.githubusercontent.com/Microsoft/sql-server-samples/master/samples/manage/azure-sql-db-managed-instance/delegate-subnet' - -$parameters = @{ - subscriptionId = $SubscriptionId - resourceGroupName = $resourceGroupName - virtualNetworkName = $vNetName - subnetName = $miSubnetName - } - -Invoke-Command -ScriptBlock ([Scriptblock]::Create((iwr ($scriptUrlBase+'/delegateSubnet.ps1?t='+ [DateTime]::Now.Ticks)).Content)) -ArgumentList $parameters - -$virtualNetwork = Get-AzVirtualNetwork -Name $vNetName -ResourceGroupName $resourceGroupName -$miSubnet = Get-AzVirtualNetworkSubnetConfig -Name $miSubnetName -VirtualNetwork $virtualNetwork -$miSubnetConfigId = $miSubnet.Id -``` - -## Create managed instance - -For added security, create a complex and randomized password for your SQL Managed Instance credential: - -```azurepowershell-interactive -# Create credentials -$secpassword = ConvertTo-SecureString $miAdminSqlPassword -AsPlainText -Force -$credential = New-Object System.Management.Automation.PSCredential ($miAdminSqlLogin, $secpassword) -``` - -Then create your SQL Managed Instance: - -```azurepowershell-interactive -# Create managed instance -New-AzSqlInstance -Name $instanceName ` - 
-ResourceGroupName $resourceGroupName -Location $location -SubnetId $miSubnetConfigId `
- -AdministratorCredential $credential `
- -StorageSizeInGB $maxStorage -VCore $vCores -Edition $edition `
- -ComputeGeneration $computeGeneration -LicenseType $license
-```
-
-This operation may take some time to complete. To learn more, see [Management operations](management-operations-overview.md).
-
-
-## Clean up resources
-
-Keep the resource group and managed instance to go on to the next steps, and learn how to connect to your SQL Managed Instance using a client virtual machine.
-
-When you're finished using these resources, you can delete the resource group you created, which will also delete the managed instance within it.
-
-```azurepowershell-interactive
-# Clean up deployment
-Remove-AzResourceGroup -ResourceGroupName $resourceGroupName
-```
-
-
-## Next steps
-
-After your SQL Managed Instance is created, deploy a client VM to connect to your SQL Managed Instance, and restore a sample database.
-
-> [!div class="nextstepaction"]
-> [Create client VM](connect-vm-instance-configure.md)
-> [Restore database](restore-sample-database-quickstart.md)
diff --git a/articles/azure-sql/managed-instance/create-template-quickstart.md b/articles/azure-sql/managed-instance/create-template-quickstart.md
deleted file mode 100644
index 53fdaeac1a11d..0000000000000
--- a/articles/azure-sql/managed-instance/create-template-quickstart.md
+++ /dev/null
@@ -1,115 +0,0 @@
----
-title: "Azure Resource Manager: Create an Azure SQL Managed Instance"
-description: Learn how to create an Azure SQL Managed Instance by using an Azure Resource Manager template.
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.custom: subject-armqs, devx-track-azurepowershell, mode-arm -ms.topic: quickstart -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: mathoma -ms.date: 06/22/2020 ---- - -# Quickstart: Create an Azure SQL Managed Instance using an ARM template - -This quickstart focuses on the process of deploying an Azure Resource Manager template (ARM template) to create an Azure SQL Managed Instance and vNet. [Azure SQL Managed Instance](sql-managed-instance-paas-overview.md) is an intelligent, fully managed, scalable cloud database, with almost 100% feature parity with the SQL Server database engine. - -[!INCLUDE [About Azure Resource Manager](../../../includes/resource-manager-quickstart-introduction.md)] - -If your environment meets the prerequisites and you're familiar with using ARM templates, select the **Deploy to Azure** button. The template will open in the Azure portal. - -[![Deploy to Azure](../../media/template-deployments/deploy-to-azure.svg)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-quickstart-templates%2Fmaster%2Fquickstarts%2Fmicrosoft.sql%2Fsqlmi-new-vnet%2Fazuredeploy.json) - -## Prerequisites - -If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/). - -## Review the template - -The template used in this quickstart is from [Azure Quickstart Templates](https://azure.microsoft.com/resources/templates/sqlmi-new-vnet/). 
- -:::code language="json" source="~/quickstart-templates/quickstarts/microsoft.sql/sqlmi-new-vnet/azuredeploy.json"::: - -These resources are defined in the template: - -- [**Microsoft.Network/networkSecurityGroups**](/azure/templates/microsoft.Network/networkSecurityGroups) -- [**Microsoft.Network/routeTables**](/azure/templates/microsoft.Network/routeTables) -- [**Microsoft.Network/virtualNetworks**](/azure/templates/microsoft.Network/virtualNetworks) -- [**Microsoft.Sql/managedinstances**](/azure/templates/microsoft.sql/managedinstances) - -More template samples can be found in [Azure Quickstart Templates](https://azure.microsoft.com/resources/templates/?resourceType=Microsoft.Sql&pageNumber=1&sort=Popular). - -## Deploy the template - -Select **Try it** from the following PowerShell code block to open Azure Cloud Shell. - -> [!IMPORTANT] -> Deploying a managed instance is a long-running operation. Deployment of the first instance in the subnet typically takes much longer than deploying into a subnet with existing managed instances. For average provisioning times, see [SQL Managed Instance management operations](management-operations-overview.md#duration). - -# [PowerShell](#tab/azure-powershell) - -```azurepowershell-interactive -$projectName = Read-Host -Prompt "Enter a project name that is used for generating resource names" -$location = Read-Host -Prompt "Enter the location (i.e. centralus)" -$templateUri = "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/quickstarts/microsoft.sql/sqlmi-new-vnet/azuredeploy.json" - -$resourceGroupName = "${projectName}rg" - -New-AzResourceGroup -Name $resourceGroupName -Location $location -New-AzResourceGroupDeployment -ResourceGroupName $resourceGroupName -TemplateUri $templateUri - -Read-Host -Prompt "Press [ENTER] to continue ..." 
-``` - -# [Azure CLI](#tab/azure-cli) - -```azurecli-interactive -read -p "Enter a project name that is used for generating resource names:" projectName && -read -p "Enter the location (i.e. centralus):" location && -templateUri="https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/quickstarts/microsoft.sql/sqlmi-new-vnet/azuredeploy.json" && -resourceGroupName="${projectName}rg" && -az group create --name $resourceGroupName --location "$location" && -az deployment group create --resource-group $resourceGroupName --template-uri $templateUri && -echo "Press [ENTER] to continue ..." && -read -``` - ---- - -## Review deployed resources - -Visit the [Azure portal](https://portal.azure.com/#blade/HubsExtension/BrowseResourceGroups) and verify the managed instance is in your selected resource group. Because creating a managed instance can take some time, you might need to check the **Deployments** link on your resource group's **Overview** page. - -- For a quickstart that shows how to connect to SQL Managed Instance from an Azure virtual machine, see [Configure an Azure virtual machine connection](connect-vm-instance-configure.md). -- For a quickstart that shows how to connect to SQL Managed Instance from an on-premises client computer by using a point-to-site connection, see [Configure a point-to-site connection](point-to-site-p2s-configure.md). - -## Clean up resources - -Keep the managed instance if you want to go to the [Next steps](#next-steps), but delete the managed instance and related resources after completing any additional tutorials. After deleting a managed instance, see [Delete a subnet after deleting a managed instance](virtual-cluster-delete.md). 
- - -To delete the resource group: - -# [PowerShell](#tab/azure-powershell) - -```azurepowershell-interactive -$resourceGroupName = Read-Host -Prompt "Enter the Resource Group name" -Remove-AzResourceGroup -Name $resourceGroupName -``` - -# [Azure CLI](#tab/azure-cli) - -```azurecli -echo "Enter the Resource Group name:" && -read resourceGroupName && -az group delete --name $resourceGroupName -``` - ---- - -## Next steps - -> [!div class="nextstepaction"] -> [Configure an Azure VM to connect to Azure SQL Managed Instance](connect-vm-instance-configure.md) diff --git a/articles/azure-sql/managed-instance/custom-dns-configure.md b/articles/azure-sql/managed-instance/custom-dns-configure.md deleted file mode 100644 index 0b84c0738e814..0000000000000 --- a/articles/azure-sql/managed-instance/custom-dns-configure.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Custom DNS -titleSuffix: Azure SQL Managed Instance -description: This topic describes configuration options for a custom DNS with Azure SQL Managed Instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: mathoma, bonova -ms.date: 07/17/2019 ---- -# Configure a custom DNS for Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Azure SQL Managed Instance must be deployed within an Azure [virtual network (VNet)](../../virtual-network/virtual-networks-overview.md). There are a few scenarios (for example, db mail, linked servers to other SQL Server instances in your cloud or hybrid environment) that require private host names to be resolved from SQL Managed Instance. In this case, you need to configure a custom DNS inside Azure. - -Because SQL Managed Instance uses the same DNS for its inner workings, configure the custom DNS server so that it can resolve public domain names. 
- -> [!IMPORTANT] -> Always use a fully qualified domain name (FQDN) for the mail server, for the SQL Server instance, and for other services, even if they're within your private DNS zone. For example, use `smtp.contoso.com` for your mail server because `smtp` won't resolve correctly. Creating a linked server or replication that references SQL Server VMs inside the same virtual network also requires an FQDN and a default DNS suffix. For example, `SQLVM.internal.cloudapp.net`. For more information, see [Name resolution that uses your own DNS server](../../virtual-network/virtual-networks-name-resolution-for-vms-and-role-instances.md#name-resolution-that-uses-your-own-dns-server). - -> [!IMPORTANT] -> Updating virtual network DNS servers won't affect SQL Managed Instance immediately. See [how to synchronize virtual network DNS servers setting on SQL Managed Instance virtual cluster](synchronize-vnet-dns-servers-setting-on-virtual-cluster.md) for more details. - -## Next steps - -- For an overview, see [What is Azure SQL Managed Instance?](sql-managed-instance-paas-overview.md). -- For a tutorial showing you how to create a new managed instance, see [Create a managed instance](instance-create-quickstart.md). -- For information about configuring a VNet for a managed instance, see [VNet configuration for managed instances](connectivity-architecture-overview.md). 
diff --git a/articles/azure-sql/managed-instance/data-virtualization-overview.md b/articles/azure-sql/managed-instance/data-virtualization-overview.md deleted file mode 100644 index 3496a54cee6c2..0000000000000 --- a/articles/azure-sql/managed-instance/data-virtualization-overview.md +++ /dev/null @@ -1,393 +0,0 @@ ---- -title: Data virtualization -titleSuffix: Azure SQL Managed Instance -description: Learn about data virtualization capabilities of Azure SQL Managed Instance -services: sql-database -ms.service: sql-managed-instance -ms.subservice: service-overview -ms.custom: -ms.devlang: -ms.topic: conceptual -author: MladjoA -ms.author: mlandzic -ms.reviewer: mathoma, MashaMSFT -ms.date: 03/08/2022 ---- - -# Data virtualization with Azure SQL Managed Instance (Preview) -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Data virtualization with Azure SQL Managed Instance allows you to execute Transact-SQL (T-SQL) queries against data from files stored in Azure Data Lake Storage Gen2 or Azure Blob Storage, and combine it with locally stored relational data using joins. This way you can transparently access external data while keeping it in its original format and location - also known as data virtualization. - -Data virtualization is currently in preview for Azure SQL Managed Instance. - - -## Overview - -Data virtualization provides two ways of querying external files stored in Azure Data Lake Storage or Azure Blob Storage, intended for different scenarios: - -- OPENROWSET syntax – optimized for ad-hoc querying of files. Typically used to quickly explore the content and the structure of a new set of files. -- External tables – optimized for repetitive querying of files using identical syntax as if data were stored locally in the database. External tables require several preparation steps compared to the OPENROWSET syntax, but allow for more control over data access. External tables are typically used for analytical workloads and reporting. 
- -Parquet and delimited text (CSV) file formats are directly supported. The JSON file format is indirectly supported by specifying the CSV file format where queries return every document as a separate row. It's possible to parse rows further using `JSON_VALUE` and `OPENJSON`. - -## Getting started - -Use Transact-SQL (T-SQL) to explicitly enable the data virtualization feature before using it. - -To enable data virtualization capabilities, run the following command: - - -```sql -exec sp_configure 'polybase_enabled', 1; -go -reconfigure; -go -``` - -Provide the location of the file(s) you intend to query using the location prefix corresponding to the type of external source and endpoint/protocol, such as the following examples: - -```sql ---Blob Storage endpoint -abs://@.blob.core.windows.net//.parquet - ---Data Lake endpoint -adls://@.dfs.core.windows.net//.parquet - -``` - -> [!IMPORTANT] -> Using the generic `https://` prefix is discouraged and will be disabled in the future. Be sure to use endpoint-specific prefixes to avoid interruptions. - - - -If you're new to data virtualization and want to quickly test functionality, start by querying publicly available data sets available in [Azure Open Datasets](../../open-datasets/dataset-catalog.md), like the [Bing COVID-19 dataset](../../open-datasets/dataset-bing-covid-19.md?tabs=azure-storage) allowing anonymous access. - -Use the following endpoints to query the Bing COVID-19 data sets: - -- Parquet: `abs://public@pandemicdatalake.blob.core.windows.net/curated/covid-19/bing_covid-19_data/latest/bing_covid-19_data.parquet` -- CSV: `abs://public@pandemicdatalake.blob.core.windows.net/curated/covid-19/bing_covid-19_data/latest/bing_covid-19_data.csv` - -Once your public data set queries are executing successfully, consider switching to private data sets that require configuring specific rights and/or firewall rules. 
- -To access a private location, use a Shared Access Signature (SAS) with proper access permissions and validity period to authenticate to the storage account. Create a database-scoped credential using the SAS key, rather than providing it directly in each query. The credential is then used as a parameter to access the external data source. - - - -## External data source - -External data sources are abstractions intended to make it easier to manage file locations across multiple queries, and to reference authentication parameters that are encapsulated within database-scoped credentials. - -When accessing a public location, add the file location when querying the external data source: - - -```sql -CREATE EXTERNAL DATA SOURCE DemoPublicExternalDataSource -WITH ( - LOCATION = 'abs://public@pandemicdatalake.blob.core.windows.net/curated/covid-19/bing_covid-19_data/latest' --- LOCATION = 'abs://@.blob.core.windows.net/' -) -``` - -When accessing a private location, include the file path and credential when querying the external data source: - - -```sql --- Step0 (optional): Create master key if it doesn't exist in the database: --- CREATE MASTER KEY ENCRYPTION BY PASSWORD = '' --- GO - ---Step1: Create database-scoped credential (requires database master key to exist): -CREATE DATABASE SCOPED CREDENTIAL [DemoCredential] -WITH IDENTITY = 'SHARED ACCESS SIGNATURE', -SECRET = ''; -GO - ---Step2: Create external data source pointing to the file path, and referencing database-scoped credential: -CREATE EXTERNAL DATA SOURCE DemoPrivateExternalDataSource -WITH ( - LOCATION = 'abs://@.blob.core.windows.net/', - CREDENTIAL = [DemoCredential] -) -``` - -## Query data sources using OPENROWSET - -The [OPENROWSET](/sql/t-sql/functions/openrowset-transact-sql) syntax enables instant ad-hoc querying while only creating the minimal number of database objects necessary. 
-`OPENROWSET` only requires creating the external data source (and possibly the credential) as opposed to the external table approach which requires an external file format and the external table itself. - -The `DATA_SOURCE` parameter value is automatically prepended to the BULK parameter to form the full path to the file. - -When using `OPENROWSET` provide the format of the file, such as the following example, which queries a single file: - -```sql -SELECT TOP 10 * -FROM OPENROWSET( - BULK 'bing_covid-19_data.parquet', - DATA_SOURCE = 'DemoPublicExternalDataSource', - FORMAT = 'parquet' -) AS filerows -``` - -### Querying multiple files and folders - -The `OPENROWSET` command also allows querying multiple files or folders by using wildcards in the BULK path. - -The following example uses the [NYC yellow taxi trip records open data set](../../open-datasets/dataset-taxi-yellow.md): - -```sql ---Query all files with .parquet extension in folders matching name pattern: -SELECT TOP 10 * -FROM OPENROWSET( - BULK 'taxi/year=*/month=*/*.parquet', - DATA_SOURCE = 'NYCTaxiDemoDataSource',--You need to create the data source first - FORMAT = 'parquet' -) AS filerows - ``` - -When querying multiple files or folders, all files accessed with the single `OPENROWSET` must have the same structure (such as the same number of columns and data types). Folders can't be traversed recursively. - -### Schema inference - -Automatic schema inference helps you quickly write queries and explore data when you don't know file schemas. Schema inference only works with parquet format files. - -While convenient, the cost is that inferred data types may be larger than the actual data types. This can lead to poor query performance since there may not be enough information in the source files to ensure the appropriate data type is used. For example, parquet files don't contain metadata about maximum character column length, so the instance infers it as varchar(8000). 
- - -Use the [sp_describe_first_results_set](/sql/relational-databases/system-stored-procedures/sp-describe-first-result-set-transact-sql) stored procedure to check the resulting data types of your query, such as the following example: - -```sql -EXEC sp_describe_first_result_set N' - SELECT - vendor_id, pickup_datetime, passenger_count - FROM - OPENROWSET( - BULK ''taxi/*/*/*'', - DATA_SOURCE = ''NYCTaxiDemoDataSource'', - FORMAT=''parquet'' - ) AS nyc'; - ``` - -Once you know the data types, you can then specify them using the `WITH` clause to improve performance: - -```sql -SELECT TOP 100 - vendor_id, pickup_datetime, passenger_count -FROM -OPENROWSET( - BULK 'taxi/*/*/*', - DATA_SOURCE = 'NYCTaxiDemoDataSource', - FORMAT='PARQUET' - ) -WITH ( -vendor_id varchar(4), -- we're using length of 4 instead of the inferred 8000 -pickup_datetime datetime2, -passenger_count int -) AS nyc; -``` - -Since the schema of CSV files can't be automatically determined, explicitly specify columns using the `WITH` clause: - - -```sql -SELECT TOP 10 * -FROM OPENROWSET( - BULK 'population/population.csv', - DATA_SOURCE = 'PopulationDemoDataSourceCSV', - FORMAT = 'CSV') -WITH ( - [country_code] VARCHAR (5) COLLATE Latin1_General_BIN2, - [country_name] VARCHAR (100) COLLATE Latin1_General_BIN2, - [year] smallint, - [population] bigint -) AS filerows -``` - -### File metadata functions - - -When querying multiple files or folders, you can use `Filepath` and `Filename` functions to read file metadata and get part of the path or full path and name of the file that the row in the result set originates from: - - -```sql ---Query all files and project file path and file name information for each row: -SELECT TOP 10 filerows.filepath(1) as [Year_Folder], filerows.filepath(2) as [Month_Folder], -filerows.filename() as [File_name], filerows.filepath() as [Full_Path], * -FROM OPENROWSET( - BULK 'taxi/year=*/month=*/*.parquet', - DATA_SOURCE = 'NYCTaxiDemoDataSource', - FORMAT = 'parquet') AS 
filerows ---List all paths: -SELECT DISTINCT filerows.filepath(1) as [Year_Folder], filerows.filepath(2) as [Month_Folder] -FROM OPENROWSET( - BULK 'taxi/year=*/month=*/*.parquet', - DATA_SOURCE = 'NYCTaxiDemoDataSource', - FORMAT = 'parquet') AS filerows -``` - -When called without a parameter, the `Filepath` function returns the file path that the row originates from. When `DATA_SOURCE` is used in `OPENROWSET`, it returns the path relative to the `DATA_SOURCE`, otherwise it returns full file path. - -When called with a parameter, it returns part of the path that matches the wildcard on the position specified in the parameter. For example, parameter value 1 would return part of the path that matches the first wildcard. - -The `Filepath` function can also be used for filtering and aggregating rows: - -```sql -SELECT - r.filepath() AS filepath - ,r.filepath(1) AS [year] - ,r.filepath(2) AS [month] - ,COUNT_BIG(*) AS [rows] -FROM OPENROWSET( - BULK 'taxi/year=*/month=*/*.parquet', -DATA_SOURCE = 'NYCTaxiDemoDataSource', -FORMAT = 'parquet' - ) AS r -WHERE - r.filepath(1) IN ('2017') - AND r.filepath(2) IN ('10', '11', '12') -GROUP BY - r.filepath() - ,r.filepath(1) - ,r.filepath(2) -ORDER BY - filepath; -``` - -### Creating view on top of OPENROWSET - -You can create and use views to wrap OPENROWSET queries so that you can easily reuse the underlying query: - -```sql -CREATE VIEW TaxiRides AS -SELECT * -FROM OPENROWSET( - BULK 'taxi/year=*/month=*/*.parquet', - DATA_SOURCE = 'NYCTaxiDemoDataSource', - FORMAT = 'parquet' -) AS filerows -``` - -It's also convenient to add columns with the file location data to a view using the `Filepath` function for easier and more performant filtering. 
Using views can reduce the number of files and the amount of data the query on top of the view needs to read and process when filtered by any of those columns: - - -```sql -CREATE VIEW TaxiRides AS -SELECT * - ,filerows.filepath(1) AS [year] - ,filerows.filepath(2) AS [month] -FROM OPENROWSET( - BULK 'taxi/year=*/month=*/*.parquet', - DATA_SOURCE = 'NYCTaxiDemoDataSource', - FORMAT = 'parquet' -) AS filerows -``` - -Views also enable reporting and analytic tools like Power BI to consume results of `OPENROWSET`. - -## External tables - -External tables encapsulate access to files making the querying experience almost identical to querying local relational data stored in user tables. Creating an external table requires the external data source and external file format objects to exist: - -```sql ---Create external file format -CREATE EXTERNAL FILE FORMAT DemoFileFormat -WITH ( - FORMAT_TYPE=PARQUET -) -GO - ---Create external table: -CREATE EXTERNAL TABLE tbl_TaxiRides( - vendor_id VARCHAR(100) COLLATE Latin1_General_BIN2, - pickup_datetime DATETIME2, - dropoff_datetime DATETIME2, - passenger_count INT, - trip_distance FLOAT, - fare_amount FLOAT, - extra FLOAT, - mta_tax FLOAT, - tip_amount FLOAT, - tolls_amount FLOAT, - improvement_surcharge FLOAT, - total_amount FLOAT -) -WITH ( - LOCATION = 'taxi/year=*/month=*/*.parquet', - DATA_SOURCE = DemoDataSource, - FILE_FORMAT = DemoFileFormat -); -GO -``` - -Once the external table is created, you can query it just like any other table: - -```sql -SELECT TOP 10 * -FROM tbl_TaxiRides -``` - -Just like `OPENROWSET`, external tables allow querying multiple files and folders by using wildcards. Schema inference and filepath/filename functions aren't supported with external tables. - -## Performance considerations - -There's no hard limit in terms of number of files or amount of data that can be queried, but query performance depends on the amount of data, data format, and complexity of queries and joins. 
- -Collecting statistics on your external data is one of the most important things you can do for query optimization. The more the instance knows about your data, the faster it can execute queries. The SQL engine query optimizer is a cost-based optimizer. It compares the cost of various query plans, and then chooses the plan with the lowest cost. In most cases, it chooses the plan that will execute the fastest. - -### Automatic creation of statistics - -Managed Instance analyzes incoming user queries for missing statistics. If statistics are missing, the query optimizer automatically creates statistics on individual columns in the query predicate or join condition to improve cardinality estimates for the query plan. Automatic creation of statistics is done synchronously so you may incur slightly degraded query performance if your columns are missing statistics. The time to create statistics for a single column depends on the size of the files targeted. - -### OPENROWSET manual statistics - -Single-column statistics for the `OPENROWSET` path can be created using the `sp_create_openrowset_statistics` stored procedure, by passing the select query with a single column as a parameter: - -```sql -EXEC sys.sp_create_openrowset_statistics N' -SELECT pickup_datetime -FROM OPENROWSET( - BULK ''abs://public@pandemicdatalake.blob.core.windows.net/curated/covid-19/bing_covid-19_data/latest/*.parquet'', - FORMAT = ''parquet'') AS filerows -' -``` - -By default, the instance uses 100% of the data provided in the dataset to create statistics. You can optionally specify the sample size as a percentage using the `TABLESAMPLE` options. To create single-column statistics for multiple columns, execute the stored procedure for each of the columns. You can't create multi-column statistics for the `OPENROWSET` path. 
- -To update existing statistics, drop them first using the `sp_drop_openrowset_statistics` stored procedure, and then recreate them using the `sp_create_openrowset_statistics`: - -```sql -EXEC sys.sp_drop_openrowset_statistics N' -SELECT pickup_datetime -FROM OPENROWSET( - BULK ''abs://public@pandemicdatalake.blob.core.windows.net/curated/covid-19/bing_covid-19_data/latest/*.parquet'', - FORMAT = ''parquet'') AS filerows -' -``` - -### External table manual statistics - -The syntax for creating statistics on external tables resembles the one used for ordinary user tables. To create statistics on a column, provide a name for the statistics object and the name of the column: - -```sql -CREATE STATISTICS sVendor -ON tbl_TaxiRides (vendor_id) -WITH FULLSCAN, NORECOMPUTE -``` - -The `WITH` options are mandatory, and for the sample size, the allowed options are `FULLSCAN` and `SAMPLE n` percent. To create single-column statistics for multiple columns, execute the `CREATE STATISTICS` statement for each of the columns. Multi-column statistics are not supported. - -## Troubleshooting - -Issues with query execution are typically caused by managed instance not being able to access file location. The related error messages may report insufficient access rights, non-existing location or file path, file being used by another process, or that directory cannot be listed. In most cases this indicates that access to files is blocked by network traffic control policies or due to lack of access rights. This is what should be checked: - -- Wrong or mistyped location path. -- SAS key validity: it could be expired (that is, out of its validity period), contain a typo, or start with a question mark. -- SAS key permissions allowed: Read at minimum, and List if wildcards are used. -- Blocked inbound traffic on the storage account. 
Check [Managing virtual network rules for Azure Storage](../../storage/common/storage-network-security.md?tabs=azure-portal#managing-virtual-network-rules) for more details and make sure that access from managed instance VNet is allowed. -- Outbound traffic blocked on the managed instance using [storage endpoint policy](service-endpoint-policies-configure.md#configure-policies). Allow outbound traffic to the storage account. - -## Next steps - -- To learn more about syntax options available with OPENROWSET, see [OPENROWSET T-SQL](/sql/t-sql/functions/openrowset-transact-sql). -- For more information about creating external table in SQL Managed Instance, see [CREATE EXTERNAL TABLE](/sql/t-sql/statements/create-external-table-transact-sql). -- To learn more about creating external file format, see [CREATE EXTERNAL FILE FORMAT](/sql/t-sql/statements/create-external-file-format-transact-sql) \ No newline at end of file diff --git a/articles/azure-sql/managed-instance/doc-changes-updates-known-issues.md b/articles/azure-sql/managed-instance/doc-changes-updates-known-issues.md deleted file mode 100644 index 983afdd69b6eb..0000000000000 --- a/articles/azure-sql/managed-instance/doc-changes-updates-known-issues.md +++ /dev/null @@ -1,362 +0,0 @@ ---- -title: Known issues -titleSuffix: Azure SQL Managed Instance -description: Learn about the currently known issues with Azure SQL Managed Instance, and their possible workarounds or resolutions. 
-services: sql-database -author: MashaMSFT -ms.author: mathoma -ms.service: sql-managed-instance -ms.subservice: service-overview -ms.custom: references_regions -ms.devlang: -ms.topic: conceptual -ms.date: 03/17/2022 ---- -# Known issues with Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article lists the currently known issues with [Azure SQL Managed Instance](https://azure.microsoft.com/updates/?product=sql-database&query=sql%20managed%20instance), as well as their resolution date or possible workaround. To learn more about Azure SQL Managed Instance, see the [overview](sql-managed-instance-paas-overview.md), and [what's new](doc-changes-updates-release-notes-whats-new.md). - - -## Known issues - -|Issue |Date discovered |Status |Date resolved | -|---------|---------|---------|---------| -|[Querying external table fails with 'not supported' error message](#querying-external-table-fails-with-not-supported-error-message)|Jan 2022|Has Workaround|| -|[When using SQL Server authentication, usernames with '@' are not supported](#when-using-sql-server-authentication-usernames-with--are-not-supported)|Oct 2021|Resolved|Feb 2022| -|[Misleading error message on Azure portal suggesting recreation of the Service Principal](#misleading-error-message-on-azure-portal-suggesting-recreation-of-the-service-principal)|Sep 2021||Oct 2021| -|[Changing the connection type does not affect connections through the failover group endpoint](#changing-the-connection-type-does-not-affect-connections-through-the-failover-group-endpoint)|Jan 2021|Has Workaround|| -|[Procedure sp_send_dbmail may transiently fail when @query parameter is used](#procedure-sp_send_dbmail-may-transiently-fail-when--parameter-is-used)|Jan 2021|Has Workaround|| -|[Distributed transactions can be executed after removing managed instance from Server Trust Group](#distributed-transactions-can-be-executed-after-removing-managed-instance-from-server-trust-group)|Oct 
2020|Has Workaround|| -|[Distributed transactions cannot be executed after managed instance scaling operation](#distributed-transactions-cannot-be-executed-after-managed-instance-scaling-operation)|Oct 2020|Resolved|May 2021| -|[Cannot create SQL Managed Instance with the same name as logical server previously deleted](#cannot-create-sql-managed-instance-with-the-same-name-as-logical-server-previously-deleted)|Aug 2020|Has Workaround|| -|[Service Principal cannot access Azure AD and AKV](#service-principal-cannot-access-azure-ad-and-akv)|Aug 2020|Has Workaround|| -|[Restoring manual backup without CHECKSUM might fail](#restoring-manual-backup-without-checksum-might-fail)|May 2020|Resolved|June 2020| -|[Agent becomes unresponsive upon modifying, disabling, or enabling existing jobs](#agent-becomes-unresponsive-upon-modifying-disabling-or-enabling-existing-jobs)|May 2020|Resolved|June 2020| -|[Permissions on resource group not applied to SQL Managed Instance](#permissions-on-resource-group-not-applied-to-sql-managed-instance)|Feb 2020|Resolved|Nov 2020| -|[Limitation of manual failover via portal for failover groups](#limitation-of-manual-failover-via-portal-for-failover-groups)|Jan 2020|Has Workaround|| -|[SQL Agent roles need explicit EXECUTE permissions for non-sysadmin logins](#sql-agent-roles-need-explicit-execute-permissions-for-non-sysadmin-logins)|Dec 2019|Has Workaround|| -|[SQL Agent jobs can be interrupted by Agent process restart](#sql-agent-jobs-can-be-interrupted-by-agent-process-restart)|Dec 2019|Resolved|Mar 2020| -|[Azure AD logins and users are not supported in SSDT](#azure-ad-logins-and-users-are-not-supported-in-ssdt)|Nov 2019|No Workaround|| -|[In-memory OLTP memory limits are not applied](#in-memory-oltp-memory-limits-are-not-applied)|Oct 2019|Has Workaround|| -|[Wrong error returned while trying to remove a file that is not empty](#wrong-error-returned-while-trying-to-remove-a-file-that-is-not-empty)|Oct 2019|Has Workaround|| -|[Change service tier and create instance 
operations are blocked by ongoing database restore](#change-service-tier-and-create-instance-operations-are-blocked-by-ongoing-database-restore)|Sep 2019|Has Workaround|| -|[Resource Governor on Business Critical service tier might need to be reconfigured after failover](#resource-governor-on-business-critical-service-tier-might-need-to-be-reconfigured-after-failover)|Sep 2019|Has Workaround|| -|[Cross-database Service Broker dialogs must be reinitialized after service tier upgrade](#cross-database-service-broker-dialogs-must-be-reinitialized-after-service-tier-upgrade)|Aug 2019|Has Workaround|| -|[Impersonation of Azure AD login types is not supported](#impersonation-of-azure-ad-login-types-is-not-supported)|Jul 2019|No Workaround|| -|[@query parameter not supported in sp_send_db_mail](#-parameter-not-supported-in-sp_send_db_mail)|Apr 2019|Resolved|Jan 2021| -|[Transactional Replication must be reconfigured after geo-failover](#transactional-replication-must-be-reconfigured-after-geo-failover)|Mar 2019|No Workaround|| -|[Temporary database is used during RESTORE operation](#temporary-database-is-used-during-restore-operation)||Has Workaround|| -|[TEMPDB structure and content is re-created](#tempdb-structure-and-content-is-re-created)||No Workaround|| -|[Exceeding storage space with small database files](#exceeding-storage-space-with-small-database-files)||Has Workaround|| -|[GUID values shown instead of database names](#guid-values-shown-instead-of-database-names)||Has Workaround|| -|[Error logs aren't persisted](#error-logs-arent-persisted)||No Workaround|| -|[Transaction scope on two databases within the same instance isn't supported](#transaction-scope-on-two-databases-within-the-same-instance-isnt-supported)||Has Workaround|Mar 2020| -|[CLR modules and linked servers sometimes can't reference a local IP address](#clr-modules-and-linked-servers-sometimes-cant-reference-a-local-ip-address)||Has Workaround|| -|Database consistency not verified using DBCC CHECKDB 
after restore database from Azure Blob Storage.||Resolved|Nov 2019| -|Point-in-time database restore from Business Critical tier to General Purpose tier will not succeed if source database contains in-memory OLTP objects.||Resolved|Oct 2019| -|Database mail feature with external (non-Azure) mail servers using secure connection||Resolved|Oct 2019| -|Contained databases not supported in SQL Managed Instance||Resolved|Aug 2019| - -## Has workaround - -### Querying external table fails with not supported error message -Querying external table may fail with generic error message "_Queries over external tables are not supported with the current service tier or performance level of this database. Consider upgrading the service tier or performance level of the database_". The only type of external table supported in Azure SQL Managed Instance are PolyBase external tables (in preview). To allow queries on PolyBase external tables, you need to enable PolyBase on managed instance by running sp_configure command. - -External tables related to [Elastic Query](../database/elastic-query-overview.md) feature of Azure SQL Database are [not supported](../database/features-comparison.md#features-of-sql-database-and-sql-managed-instance) in SQL Managed Instance, but creating and querying them wasn't explicitly blocked. With support for PolyBase external tables, new checks have been introduced, blocking querying of _any_ type of external table in managed instance unless PolyBase is enabled. - -If you're using unsupported Elastic Query external tables to query data in Azure SQL Database or Azure Synapse from your managed instance, you should use Linked Server feature instead. To establish Linked Server connection from SQL Managed Instance to SQL Database, please follow instructions from [this article](https://techcommunity.microsoft.com/t5/azure-database-support-blog/lesson-learned-63-it-is-possible-to-create-linked-server-in/ba-p/369168). 
To establish Linked Server connection from SQL Managed Instance to SQL Synapse, check [step-by-step instructions](https://devblogs.microsoft.com/azure-sql/linked-server-to-synapse-sql-to-implement-polybase-like-scenarios-in-managed-instance/#how-to-use-linked-servers). Since configuring and testing Linked Server connection takes some time, you can use a workaround as a temporary solution to enable querying external tables related to Elastic Query feature: - -**Workaround**: Execute the following commands (once per instance) that will enable queries on external tables: - -```sql -sp_configure 'polybase enabled', 1 -go -reconfigure -go -``` - -### Changing the connection type does not affect connections through the failover group endpoint - -If an instance participates in an [auto-failover group](../database/auto-failover-group-overview.md), changing the instance's [connection type](../managed-instance/connection-types-overview.md) doesn't take effect for the connections established through the failover group listener endpoint. - -**Workaround**: Drop and recreate auto-failover group after changing the connection type. - -### Procedure sp_send_dbmail may transiently fail when @query parameter is used - -Procedure `sp_send_dbmail` may transiently fail when `@query` parameter is used. When this issue occurs, every second execution of procedure `sp_send_dbmail` fails with error `Msg 22050, Level 16, State 1` and message `Failed to initialize sqlcmd library with error number -2147467259`. To be able to see this error properly, the procedure should be called with default value 0 for the parameter `@exclude_query_output`, otherwise the error will not be propagated. - -This problem is caused by a known bug related to how `sp_send_dbmail` is using impersonation and connection pooling. - -To work around this issue wrap code for sending email into a retry logic that relies on output parameter `@mailitem_id`. 
If the execution fails, then the parameter value will be NULL, indicating `sp_send_dbmail` should be called one more time to successfully send an email. Here is an example of this retry logic. - -```sql -CREATE PROCEDURE send_dbmail_with_retry AS -BEGIN - DECLARE @miid INT - EXEC msdb.dbo.sp_send_dbmail - @recipients = 'name@mail.com', @subject = 'Subject', @query = 'select * from dbo.test_table', - @profile_name ='AzureManagedInstance_dbmail_profile', @execute_query_database = 'testdb', - @mailitem_id = @miid OUTPUT - - -- If sp_send_dbmail returned NULL @mailitem_id then retry sending email. - -- - IF (@miid is NULL) - EXEC msdb.dbo.sp_send_dbmail - @recipients = 'name@mail.com', @subject = 'Subject', @query = 'select * from dbo.test_table', - @profile_name ='AzureManagedInstance_dbmail_profile', @execute_query_database = 'testdb' -END -``` - -### Distributed transactions can be executed after removing managed instance from Server Trust Group - -[Server Trust Groups](../managed-instance/server-trust-group-overview.md) are used to establish trust between managed instances that is a prerequisite for executing [distributed transactions](../database/elastic-transactions-overview.md). After removing managed instance from Server Trust Group or deleting the group, you still might be able to execute distributed transactions. There's a workaround you can apply to be sure that distributed transactions are disabled and that is [user-initiated manual failover](../managed-instance/user-initiated-failover.md) on managed instance. - -### Distributed transactions cannot be executed after managed instance scaling operation - -SQL Managed Instance scaling operations that include changing service tier or number of vCores will reset Server Trust Group settings on the backend and disable running [distributed transactions](../database/elastic-transactions-overview.md). 
As a workaround, delete and create new [Server Trust Group](../managed-instance/server-trust-group-overview.md) on Azure portal. - -### Cannot create SQL Managed Instance with the same name as logical server previously deleted - -A DNS record of `.database.windows.com` is created when you create a [logical server in Azure](../database/logical-servers.md) for Azure SQL Database, and when you create a SQL Managed Instance. The DNS record must be unique. As such, if you create a logical server for SQL Database and then delete it, there's a threshold period of 7 days before the name is released from the records. In that period, a SQL Managed Instance cannot be created with the same name as the deleted logical server. As a workaround, use a different name for the SQL Managed Instance, or create a support ticket to release the logical server name. - -### Service Principal cannot access Azure AD and AKV - -In some circumstances, there might exist an issue with Service Principal used to access Azure AD and Azure Key Vault (AKV) services. As a result, this issue impacts usage of Azure AD authentication and Transparent Database Encryption (TDE) with SQL Managed Instance. This might be experienced as an intermittent connectivity issue, or not being able to run statements such are `CREATE LOGIN/USER FROM EXTERNAL PROVIDER` or `EXECUTE AS LOGIN/USER`. Setting up TDE with customer-managed key on a new Azure SQL Managed Instance might also not work in some circumstances. - -**Workaround**: To prevent this issue from occurring on your SQL Managed Instance before executing any update commands, or in case you have already experienced this issue after update commands, go to Azure portal, access SQL Managed Instance [Active Directory admin page](../database/authentication-aad-configure.md?tabs=azure-powershell#azure-portal). Verify if you can see the error message "Managed Instance needs a Service Principal to access Azure Active Directory. Click here to create a Service Principal". 
In case you've encountered this error message, click on it, and follow the step-by-step instructions provided until this error has been resolved. - -### Limitation of manual failover via portal for failover groups - -If a failover group spans across instances in different Azure subscriptions or resource groups, manual failover cannot be initiated from the primary instance in the failover group. - -**Workaround**: Initiate failover via the portal from the geo-secondary instance. - -### SQL Agent roles need explicit EXECUTE permissions for non-sysadmin logins - -If non-sysadmin logins are added to any [SQL Agent fixed database roles](/sql/ssms/agent/sql-server-agent-fixed-database-roles), there exists an issue in which explicit EXECUTE permissions need to be granted to three stored procedures in the master database for these logins to work. If this issue is encountered, the error message "The EXECUTE permission was denied on the object (Microsoft SQL Server, Error: 229)" will be shown. - -**Workaround**: Once you add logins to a SQL Agent fixed database role (SQLAgentUserRole, SQLAgentReaderRole, or SQLAgentOperatorRole), for each of the logins added to these roles, execute the below T-SQL script to explicitly grant EXECUTE permissions to the stored procedures listed. - -```sql -USE [master] -GO -CREATE USER [login_name] FOR LOGIN [login_name]; -GO -GRANT EXECUTE ON master.dbo.xp_sqlagent_enum_jobs TO [login_name]; -GRANT EXECUTE ON master.dbo.xp_sqlagent_is_starting TO [login_name]; -GRANT EXECUTE ON master.dbo.xp_sqlagent_notify TO [login_name]; -``` - -### In-memory OLTP memory limits are not applied - -The Business Critical service tier will not correctly apply [max memory limits for memory-optimized objects](../managed-instance/resource-limits.md#in-memory-oltp-available-space) in some cases. SQL Managed Instance may enable workload to use more memory for in-memory OLTP operations, which may affect availability and stability of the instance. 
In-memory OLTP queries that are reaching the limits might not fail immediately. This issue will be fixed soon. The queries that use more in-memory OLTP memory will fail sooner if they reach the [limits](../managed-instance/resource-limits.md#in-memory-oltp-available-space). - -**Workaround**: [Monitor in-memory OLTP storage usage](../in-memory-oltp-monitor-space.md) using [SQL Server Management Studio](/sql/relational-databases/in-memory-oltp/monitor-and-troubleshoot-memory-usage#bkmk_Monitoring) to ensure that the workload is not using more than the available memory. Increase the memory limits that depend on the number of vCores, or optimize your workload to use less memory. - -### Wrong error returned while trying to remove a file that is not empty - -SQL Server and SQL Managed Instance [don't allow a user to drop a file that is not empty](/sql/relational-databases/databases/delete-data-or-log-files-from-a-database#Prerequisites). If you try to remove a nonempty data file using an `ALTER DATABASE REMOVE FILE` statement, the error `Msg 5042 – The file '' cannot be removed because it is not empty` will not be immediately returned. SQL Managed Instance will keep trying to drop the file, and the operation will fail after 30 minutes with `Internal server error`. - -**Workaround**: Remove the contents of the file using the `DBCC SHRINKFILE (N'', EMPTYFILE)` command. If this is the only file in the file group you would need to delete data from the table or partition associated to this file group before you shrink the file, and optionally load this data into another table/partition. - -### Change service tier and create instance operations are blocked by ongoing database restore - -Ongoing `RESTORE` statement, Data Migration Service migration process, and built-in point-in-time restore will block updating a service tier or resize of the existing instance and creating new instances until the restore process finishes. 
- -The restore process will block these operations on the managed instances and instance pools in the same subnet where the restore process is running. The instances in instance pools are not affected. Create or change service tier operations will not fail or time out. They will proceed once the restore process is completed or canceled. - -**Workaround**: Wait until the restore process finishes, or cancel the restore process if the creation or update-service-tier operation has higher priority. - -### Resource Governor on Business Critical service tier might need to be reconfigured after failover - -The [Resource Governor](/sql/relational-databases/resource-governor/resource-governor) feature that enables you to limit the resources assigned to the user workload might incorrectly classify some user workload after failover or a user-initiated change of service tier (for example, the change of max vCore or max instance storage size). - -**Workaround**: Run `ALTER RESOURCE GOVERNOR RECONFIGURE` periodically or as part of a SQL Agent job that executes the SQL task when the instance starts if you are using -[Resource Governor](/sql/relational-databases/resource-governor/resource-governor). - -### Cross-database Service Broker dialogs must be reinitialized after service tier upgrade - -Cross-database Service Broker dialogs will stop delivering the messages to the services in other databases after change service tier operation. The messages are *not lost*, and they can be found in the sender queue. Any change of vCores or instance storage size in SQL Managed Instance will cause a `service_broker_guid` value in [sys.databases](/sql/relational-databases/system-catalog-views/sys-databases-transact-sql) view to be changed for all databases. Any `DIALOG` created using a [BEGIN DIALOG](/sql/t-sql/statements/begin-dialog-conversation-transact-sql) statement that references Service Brokers in other databases will stop delivering messages to the target service. 
- -**Workaround**: Stop any activity that uses cross-database Service Broker dialog conversations before updating a service tier, and reinitialize them afterward. If there are remaining messages that are undelivered after a service tier change, read the messages from the source queue and resend them to the target queue. - -### Temporary database is used during RESTORE operation - -When a database is restoring in SQL Managed Instance, the restore service will first create an empty database with the desired name to allocate the name on the instance. After some time, this database will be dropped, and restoring of the actual database will be started. - -The database that is in *Restoring* state will temporarily have a random GUID value instead of name. The temporary name will be changed to the desired name specified in the `RESTORE` statement once the restore process finishes. - -In the initial phase, a user can access the empty database and even create tables or load data in this database. This temporary database will be dropped when the restore service starts the second phase. - -**Workaround**: Do not access the database that you are restoring until you see that restore is completed. - -### Exceeding storage space with small database files - -`CREATE DATABASE`, `ALTER DATABASE ADD FILE`, and `RESTORE DATABASE` statements might fail because the instance can reach the Azure Storage limit. - -Each General Purpose instance of SQL Managed Instance has up to 35 TB of storage reserved for Azure Premium Disk space. Each database file is placed on a separate physical disk. Disk sizes can be 128 GB, 256 GB, 512 GB, 1 TB, or 4 TB. Unused space on the disk isn't charged, but the total sum of Azure Premium Disk sizes can't exceed 35 TB. In some cases, a managed instance that doesn't need 8 TB in total might exceed the 35 TB Azure limit on storage size due to internal fragmentation. 
- -For example, a General Purpose instance of SQL Managed Instance might have one large file that's 1.2 TB in size placed on a 4-TB disk. It also might have 248 files that are 1 GB each and that are placed on separate 128-GB disks. In this example: - -- The total allocated disk storage size is 1 x 4 TB + 248 x 128 GB = 35 TB. -- The total reserved space for databases on the instance is 1 x 1.2 TB + 248 x 1 GB = 1.4 TB. - -This example illustrates that under certain circumstances, due to a specific distribution of files, an instance of SQL Managed Instance might reach the 35-TB limit that's reserved for an attached Azure Premium Disk when you might not expect it to. - -In this example, existing databases continue to work and can grow without any problem as long as new files aren't added. New databases can't be created or restored because there isn't enough space for new disk drives, even if the total size of all databases doesn't reach the instance size limit. The error that's returned in that case isn't clear. - -You can [identify the number of remaining files](https://medium.com/azure-sqldb-managed-instance/how-many-files-you-can-create-in-general-purpose-azure-sql-managed-instance-e1c7c32886c1) by using system views. If you reach this limit, try to [empty and delete some of the smaller files by using the DBCC SHRINKFILE statement](/sql/t-sql/database-console-commands/dbcc-shrinkfile-transact-sql#d-emptying-a-file) or switch to the [Business Critical tier, which doesn't have this limit](../managed-instance/resource-limits.md#service-tier-characteristics). - - -### GUID values shown instead of database names - -Several system views, performance counters, error messages, XEvents, and error log entries display GUID database identifiers instead of the actual database names. Don't rely on these GUID identifiers because they're replaced with actual database names in the future. 
- -**Workaround**: Use `sys.databases` view to resolve the actual database name from the physical database name, specified in the form of GUID database identifiers: - -```tsql -SELECT name as ActualDatabaseName, physical_database_name as GUIDDatabaseIdentifier -FROM sys.databases -WHERE database_id > 4; -``` - -### Transaction scope on two databases within the same instance isn't supported - -**(Resolved in March 2020)** The `TransactionScope` class in .NET doesn't work if two queries are sent to two databases within the same instance under the same transaction scope: - -```csharp -using (var scope = new TransactionScope()) -{ - using (var conn1 = new SqlConnection("Server=quickstartbmi.neu15011648751ff.database.windows.net;Database=b;User ID=myuser;Password=mypassword;Encrypt=true")) - { - conn1.Open(); - SqlCommand cmd1 = conn1.CreateCommand(); - cmd1.CommandText = string.Format("insert into T1 values(1)"); - cmd1.ExecuteNonQuery(); - } - - using (var conn2 = new SqlConnection("Server=quickstartbmi.neu15011648751ff.database.windows.net;Database=b;User ID=myuser;Password=mypassword;Encrypt=true")) - { - conn2.Open(); - var cmd2 = conn2.CreateCommand(); - cmd2.CommandText = string.Format("insert into b.dbo.T2 values(2)"); cmd2.ExecuteNonQuery(); - } - - scope.Complete(); -} - -``` - -**Workaround (not needed since March 2020)**: Use [SqlConnection.ChangeDatabase(String)](/dotnet/api/system.data.sqlclient.sqlconnection.changedatabase) to use another database in a connection context instead of using two connections. - -### CLR modules and linked servers sometimes can't reference a local IP address - -CLR modules in SQL Managed Instance and linked servers or distributed queries that reference a current instance sometimes can't resolve the IP of a local instance. This error is a transient issue. 
- -### Transaction scope on two databases within the same instance isn't supported - -**(Resolved in March 2020)** The `TransactionScope` class in .NET doesn't work if two queries are sent to two databases within the same instance under the same transaction scope: - -```csharp -using (var scope = new TransactionScope()) -{ - using (var conn1 = new SqlConnection("Server=quickstartbmi.neu15011648751ff.database.windows.net;Database=b;User ID=myuser;Password=mypassword;Encrypt=true")) - { - conn1.Open(); - SqlCommand cmd1 = conn1.CreateCommand(); - cmd1.CommandText = string.Format("insert into T1 values(1)"); - cmd1.ExecuteNonQuery(); - } - - using (var conn2 = new SqlConnection("Server=quickstartbmi.neu15011648751ff.database.windows.net;Database=b;User ID=myuser;Password=mypassword;Encrypt=true")) - { - conn2.Open(); - var cmd2 = conn2.CreateCommand(); - cmd2.CommandText = string.Format("insert into b.dbo.T2 values(2)"); cmd2.ExecuteNonQuery(); - } - - scope.Complete(); -} - -``` - -**Workaround (not needed since March 2020)**: Use [SqlConnection.ChangeDatabase(String)](/dotnet/api/system.data.sqlclient.sqlconnection.changedatabase) to use another database in a connection context instead of using two connections. - - -## No resolution - -### Azure AD logins and users are not supported in SSDT - -SQL Server Data Tools don't fully support Azure AD logins and users. - -### Impersonation of Azure AD login types is not supported - -Impersonation using `EXECUTE AS USER` or `EXECUTE AS LOGIN` of the following Azure Active Directory (Azure AD) principals is not supported: -- Aliased Azure AD users. The following error is returned in this case: `15517`. -- Azure AD logins and users based on Azure AD applications or service principals. The following errors are returned in this case: `15517` and `15406`. 
- -### Transactional Replication must be reconfigured after geo-failover - -If Transactional Replication is enabled on a database in an auto-failover group, the SQL Managed Instance administrator must clean up all publications on the old primary and reconfigure them on the new primary after a failover to another region occurs. For more information, see [Replication](../managed-instance/transact-sql-tsql-differences-sql-server.md#replication). - -### TEMPDB structure and content is re-created - -The `tempdb` database is always split into 12 data files, and the file structure cannot be changed. The maximum size per file can't be changed, and new files cannot be added to `tempdb`. `Tempdb` is always re-created as an empty database when the instance starts or fails over, and any changes made in `tempdb` will not be preserved. - - -### Error logs aren't persisted - -Error logs that are available in SQL Managed Instance aren't persisted, and their size isn't included in the maximum storage limit. Error logs might be automatically erased if failover occurs. There might be gaps in the error log history because SQL Managed Instance was moved several times on several virtual machines. - -## Resolved - -### When using SQL Server authentication, usernames with '@' are not supported - -Usernames that contain the '@' symbol in the middle (e.g. 'abc@xy') are not able to log in using SQL Server authentication. - -### Restoring manual backup without CHECKSUM might fail - -In certain circumstances manual backup of databases that was made on a managed instance without CHECKSUM might fail to be restored. In such cases, retry restoring the backup until you're successful. - -**Workaround**: Take manual backups of databases on managed instances with CHECKSUM enabled. - -### Agent becomes unresponsive upon modifying, disabling, or enabling existing jobs - -In certain circumstances, modifying, disabling, or enabling an existing job can cause the agent to become unresponsive. 
The issue is automatically mitigated upon detection, resulting in a restart of the agent process. - -### Permissions on resource group not applied to SQL Managed Instance - -When the SQL Managed Instance Contributor Azure role is applied to a resource group (RG), it's not applied to SQL Managed Instance and has no effect. - -**Workaround**: Set up a SQL Managed Instance Contributor role for users at the subscription level. - -### SQL Agent jobs can be interrupted by Agent process restart - -**(Resolved in March 2020)** SQL Agent creates a new session each time a job is started, gradually increasing memory consumption. To avoid hitting the internal memory limit, which would block execution of scheduled jobs, Agent process will be restarted once its memory consumption reaches the threshold. It may result in interrupting execution of jobs running at the moment of restart. - -### @query parameter not supported in sp_send_db_mail - -The `@query` parameter in the [sp_send_db_mail](/sql/relational-databases/system-stored-procedures/sp-send-dbmail-transact-sql) procedure doesn't work. - -### Misleading error message on Azure portal suggesting recreation of the Service Principal - -_Active Directory admin_ blade of Azure portal for Azure SQL Managed Instance may be showing the following error message even though Service Principal already exists: - -"Managed Instance needs a Service Principal to access Azure Active Directory. Click here to create a Service Principal" - -You can ignore this error message if Service Principal for the managed instance already exists, and/or Azure Active Directory authentication on the managed instance works. - -To check whether Service Principal exists, navigate to the _Enterprise applications_ page on the Azure portal, choose _Managed Identities_ from the _Application type_ dropdown list, select _Apply_ and type the name of the managed instance in the search box. 
If the instance name shows up in the result list, Service Principal already exists and no further actions are needed. - -If you already followed the instructions from the error message and clicked the link from the error message, Service Principal of the managed instance has been recreated. In that case, please assign Azure AD read permissions to the newly created Service Principal in order for Azure AD authentication to work properly. This can be done via Azure PowerShell by following [instructions](../database/authentication-aad-configure.md?tabs=azure-powershell#powershell). - -## Contribute to content - -To contribute to the Azure SQL documentation, see the [Docs contributor guide](/contribute/). - -## Next steps - -For a list of SQL Managed Instance updates and improvements, see [SQL Managed Instance service updates](https://azure.microsoft.com/updates/?product=sql-database&query=sql%20managed%20instance). - -For updates and improvements to all Azure services, see [Service updates](https://azure.microsoft.com/updates). diff --git a/articles/azure-sql/managed-instance/doc-changes-updates-release-notes-whats-new.md b/articles/azure-sql/managed-instance/doc-changes-updates-release-notes-whats-new.md deleted file mode 100644 index 72033f514d792..0000000000000 --- a/articles/azure-sql/managed-instance/doc-changes-updates-release-notes-whats-new.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: What's new? -titleSuffix: Azure SQL Managed Instance -description: Learn about the new features and documentation improvements for Azure SQL Managed Instance. -services: sql-database -author: MashaMSFT -ms.author: mathoma -ms.service: sql-managed-instance -ms.subservice: service-overview -ms.custom: references_regions, ignite-fall-2021 -ms.devlang: -ms.topic: conceptual -ms.date: 04/06/2022 ---- -# What's new in Azure SQL Managed Instance? 
-[!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqlmi.md)] - -> [!div class="op_single_selector"] -> * [Azure SQL Database](../database/doc-changes-updates-release-notes-whats-new.md) -> * [Azure SQL Managed Instance](../managed-instance/doc-changes-updates-release-notes-whats-new.md) - -This article summarizes the documentation changes associated with new features and improvements in the recent releases of [Azure SQL Managed Instance](https://azure.microsoft.com/updates/?product=sql-database&query=sql%20managed%20instance). To learn more about Azure SQL Managed Instance, see the [overview](sql-managed-instance-paas-overview.md). - - -## Preview - -The following table lists the features of Azure SQL Managed Instance that are currently in preview: - -| Feature | Details | -| ---| --- | -| [16 TB support in Business Critical](resource-limits.md#service-tier-characteristics) | Support for allocation up to 16 TB of space on SQL Managed Instance in the Business Critical service tier using the new memory optimized premium-series hardware. | -| [Data virtualization](data-virtualization-overview.md) | Join locally stored relational data with data queried from external data sources, such as Azure Data Lake Storage Gen2 or Azure Blob Storage. | -|[Endpoint policies](/azure/azure-sql/managed-instance/service-endpoint-policies-configure) | Configure which Azure Storage accounts can be accessed from a SQL Managed Instance subnet. Grants an extra layer of protection against inadvertent or malicious data exfiltration.| -| [Instance pools](instance-pools-overview.md) | A convenient and cost-efficient way to migrate smaller SQL Server instances to the cloud. | -| [Managed Instance link](managed-instance-link-feature-overview.md)| Online replication of SQL Server databases hosted anywhere to Azure SQL Managed Instance. 
| -| [Maintenance window advance notifications](../database/advance-notifications.md)| Advance notifications (preview) for databases configured to use a non-default [maintenance window](../database/maintenance-window.md). Advance notifications are in preview for Azure SQL Managed Instance. | -| [Memory optimized premium-series hardware](resource-limits.md#service-tier-characteristics) | Deploy your SQL Managed Instance to the new memory optimized premium-series hardware to take advantage of the latest Intel Ice Lake CPUs. Memory optimized hardware offers higher memory to vCore ratio. | -| [Migrate with Log Replay Service](log-replay-service-migrate.md) | Migrate databases from SQL Server to SQL Managed Instance by using Log Replay Service. | -| [Premium-series hardware](resource-limits.md#service-tier-characteristics) | Deploy your SQL Managed Instance to the new premium-series hardware to take advantage of the latest Intel Ice Lake CPUs. | -| [Query Store hints](/sql/relational-databases/performance/query-store-hints?view=azuresqldb-mi-current&preserve-view=true) | Use query hints to optimize your query execution via the OPTION clause. | -| [Service Broker cross-instance message exchange](/sql/database-engine/configure-windows/sql-server-service-broker) | Support for cross-instance message exchange using Service Broker on Azure SQL Managed Instance. | -| [SQL Insights (preview)](../../azure-monitor/insights/sql-insights-overview.md) | SQL Insights (preview) is a comprehensive solution for monitoring any product in the Azure SQL family. SQL Insights uses dynamic management views to expose the data you need to monitor health, diagnose problems, and tune performance. | -| [Transactional Replication](replication-transactional-overview.md) | Replicate the changes from your tables into other databases in SQL Managed Instance, SQL Database, or SQL Server. Or update your tables when some rows are changed in other instances of SQL Managed Instance or SQL Server. 
For information, see [Configure replication in Azure SQL Managed Instance](replication-between-two-instances-configure-tutorial.md). | -| [Threat detection](threat-detection-configure.md) | Threat detection notifies you of security threats detected to your database. | -| [Windows Auth for Azure Active Directory principals](winauth-azuread-overview.md) | Kerberos authentication for Azure Active Directory (Azure AD) enables Windows Authentication access to Azure SQL Managed Instance. | - -## General availability (GA) - -The following table lists the features of Azure SQL Managed Instance that have transitioned from preview to general availability (GA) within the last 12 months: - -| Feature | GA Month | Details | -| ---| --- |--- | -|[Maintenance window](../database/maintenance-window.md)| March 2022 | The maintenance window feature allows you to configure maintenance schedule for your Azure SQL Managed Instance. [Maintenance window advance notifications](../database/advance-notifications.md), however, are in preview for Azure SQL Managed Instance.| -|[16 TB support in General Purpose](resource-limits.md)| November 2021 | Support for allocation up to 16 TB of space on SQL Managed Instance in the General Purpose service tier. | -[Azure Active Directory-only authentication](../database/authentication-azure-ad-only-authentication.md) | November 2021 | It's now possible to restrict authentication to your Azure SQL Managed Instance only to Azure Active Directory users. | -|[Distributed transactions](../database/elastic-transactions-overview.md) | November 2021 | Distributed database transactions for Azure SQL Managed Instance allow you to run distributed transactions that span several databases across instances. 
| -|[Linked server - managed identity Azure AD authentication](/sql/relational-databases/system-stored-procedures/sp-addlinkedserver-transact-sql#h-create-sql-managed-instance-linked-server-with-managed-identity-azure-ad-authentication) |November 2021 | Create a linked server with managed identity authentication for your Azure SQL Managed Instance.| -|[Linked server - pass-through Azure AD authentication](/sql/relational-databases/system-stored-procedures/sp-addlinkedserver-transact-sql#i-create-sql-managed-instance-linked-server-with-pass-through-azure-ad-authentication) |November 2021 | Create a linked server with pass-through Azure AD authentication for your Azure SQL Managed Instance. | -|[Long-term backup retention](long-term-backup-retention-configure.md) |November 2021 | Store full backups for a specific database with configured redundancy for up to 10 years in Azure Blob storage, restoring the database as a new database. | -|[Move instance to different subnet](vnet-subnet-move-instance.md)| November 2021 | Move SQL Managed Instance to a different subnet using the Azure portal, Azure PowerShell or the Azure CLI. | - -## Documentation changes - -Learn about significant changes to the Azure SQL Managed Instance documentation. - -### March 2022 - -| Changes | Details | -| --- | --- | -| **Data virtualization preview** | It's now possible to query data in external sources such as Azure Data Lake Storage Gen2 or Azure Blob Storage, joining it with locally stored relational data. This feature is currently in preview. To learn more, see [Data virtualization](data-virtualization-overview.md). 
| -| **Managed Instance link guidance** | We've published a number of guides for using the [Managed Instance link feature](managed-instance-link-feature-overview.md), including how to [prepare your environment](managed-instance-link-preparation.md), [configure replication by using SSMS](managed-instance-link-use-ssms-to-replicate-database.md), [configure replication via scripts](managed-instance-link-use-scripts-to-replicate-database.md), [fail over your database by using SSMS](managed-instance-link-use-ssms-to-failover-database.md), [fail over your database via scripts](managed-instance-link-use-scripts-to-failover-database.md) and some [best practices](managed-instance-link-best-practices.md) when using the link feature (currently in preview). | -| **Maintenance window GA, advance notifications preview** | The [maintenance window](../database/maintenance-window.md) feature is now generally available, allowing you to configure a maintenance schedule for your Azure SQL Managed Instance. It's also possible to receive advance notifications for planned maintenance events, which is currently in preview. Review [Maintenance window advance notifications (preview)](../database/advance-notifications.md) to learn more. | -| **Windows Auth for Azure Active Directory principals preview** | Windows Authentication for managed instances empowers customers to move existing services to the cloud while maintaining a seamless user experience, and provides the basis for infrastructure modernization. Learn more in [Windows Authentication for Azure Active Directory principals on Azure SQL Managed Instance](winauth-azuread-overview.md). | - - - -### 2021 - -| Changes | Details | -| --- | --- | -| **16 TB support for Business Critical preview** | The Business Critical service tier of SQL Managed Instance now provides increased maximum instance storage capacity of up to 16 TB with the new premium-series and memory optimized premium-series hardware, which are currently in preview. 
See [resource limits](resource-limits.md#service-tier-characteristics) to learn more. | -|**16 TB support for General Purpose GA** | Deploying a 16 TB instance to the General Purpose service tier is now generally available. See [resource limits](resource-limits.md) to learn more. | -| **Azure AD-only authentication GA** | Restricting authentication to your Azure SQL Managed Instance only to Azure Active Directory users is now generally available. To learn more, see [Azure AD-only authentication](../database/authentication-azure-ad-only-authentication.md). | -| **Distributed transactions GA** | The ability to execute distributed transactions across managed instances is now generally available. See [Distributed transactions](../database/elastic-transactions-overview.md) to learn more. | -|**Endpoint policies preview** | It's now possible to configure an endpoint policy to restrict access from a SQL Managed Instance subnet to an Azure Storage account. This grants an extra layer of protection against inadvertent or malicious data exfiltration. See [Endpoint policies](/azure/azure-sql/managed-instance/service-endpoint-policies-configure) to learn more. | -|**Link feature preview** | Use the link feature for SQL Managed Instance to replicate data from your SQL Server hosted anywhere to Azure SQL Managed Instance, leveraging the benefits of Azure without moving your data to Azure, to offload your workloads, for disaster recovery, or to migrate to the cloud. See the [Link feature for SQL Managed Instance](managed-instance-link-feature-overview.md) to learn more. The link feature is currently in limited public preview. | -|**Long-term backup retention GA** | Storing full backups for a specific database with configured redundancy for up to 10 years in Azure Blob storage is now generally available. To learn more, see [Long-term backup retention](long-term-backup-retention-configure.md). 
| -| **Move instance to different subnet GA** | It's now possible to move your SQL Managed Instance to a different subnet. See [Move instance to different subnet](vnet-subnet-move-instance.md) to learn more. | -|**New hardware preview** | There are now two new hardware configurations for SQL Managed Instance: premium-series, and a memory optimized premium-series. Both offerings take advantage of a new hardware powered by the latest Intel Ice Lake CPUs, and offer a higher memory to vCore ratio to support your most resource demanding database applications. As part of this announcement, the Gen5 hardware has been renamed to standard-series. The two new premium hardware offerings are currently in preview. See [resource limits](resource-limits.md#service-tier-characteristics) to learn more. | -|**Split what's new** | The previously-combined **What's new** article has been split by product - [What's new in SQL Database](../database/doc-changes-updates-release-notes-whats-new.md) and [What's new in SQL Managed Instance](doc-changes-updates-release-notes-whats-new.md), making it easier to identify what features are currently in preview, generally available, and significant documentation changes. Additionally, the [Known Issues in SQL Managed Instance](doc-changes-updates-known-issues.md) content has moved to its own page. | -|**16 TB support for General Purpose preview** | Support has been added for allocation of up to 16 TB of space for SQL Managed Instance in the General Purpose service tier. See [resource limits](resource-limits.md) to learn more. This instance offer is currently in preview. | -| **Parallel backup** | It's now possible to take backups in parallel for SQL Managed Instance in the general purpose tier, enabling faster backups. See the [Parallel backup for better performance](https://techcommunity.microsoft.com/t5/azure-sql/parallel-backup-for-better-performance-in-sql-managed-instance/ba-p/2421762) blog entry to learn more. 
| -| **Azure AD-only authentication preview** | It's now possible to restrict authentication to your Azure SQL Managed Instance only to Azure Active Directory users. This feature is currently in preview. To learn more, see [Azure AD-only authentication](../database/authentication-azure-ad-only-authentication.md). | -| **Resource Health monitor** | Use Resource Health to monitor the health status of your Azure SQL Managed Instance. See [Resource health](../database/resource-health-to-troubleshoot-connectivity.md) to learn more. | -| **Granular permissions for data masking GA** | Granular permissions for dynamic data masking for Azure SQL Managed Instance is now generally available (GA). To learn more, see [Dynamic data masking](../database/dynamic-data-masking-overview.md#permissions). | -| **User-defined routes (UDR) tables** | Service-aided subnet configuration for Azure SQL Managed Instance now makes use of service tags for user-defined routes (UDR) tables. See the [connectivity architecture](connectivity-architecture-overview.md) to learn more. | -| **Audit management operations** | The ability to audit SQL Managed Instance operations is now generally available (GA). | -| **Log Replay Service** | It's now possible to migrate databases from SQL Server to Azure SQL Managed Instance using the Log Replay Service. To learn more, see [Migrate with Log Replay Service](log-replay-service-migrate.md). This feature is currently in preview. | -| **Long-term backup retention** | Support for Long-term backup retention up to 10 years on Azure SQL Managed Instance. To learn more, see [Long-term backup retention](long-term-backup-retention-configure.md)| -| **Machine Learning Services GA** | The Machine Learning Services for Azure SQL Managed Instance are now generally available (GA). 
To learn more, see [Machine Learning Services for SQL Managed Instance](machine-learning-services-overview.md).| -| **Maintenance window** | The maintenance window feature allows you to configure a maintenance schedule for your Azure SQL Managed Instance. To learn more, see [maintenance window](../database/maintenance-window.md).| -| **Service Broker message exchange** | The Service Broker component of Azure SQL Managed Instance allows you to compose your applications from independent, self-contained services, by providing native support for reliable and secure message exchange between the databases attached to the service. Currently in preview. To learn more, see [Service Broker](/sql/database-engine/configure-windows/sql-server-service-broker). -| **SQL Insights (preview)** | SQL Insights (preview) is a comprehensive solution for monitoring any product in the Azure SQL family. SQL Insights uses dynamic management views to expose the data you need to monitor health, diagnose problems, and tune performance. To learn more, see [Azure Monitor SQL Insights (preview)](../../azure-monitor/insights/sql-insights-overview.md). | - -### 2020 - -The following changes were added to SQL Managed Instance and the documentation in 2020: - -| Changes | Details | -| --- | --- | -| **Audit support operations** | The auditing of Microsoft support operations capability enables you to audit Microsoft support operations when you need to access your servers and/or databases during a support request to your audit logs destination (Preview). To learn more, see [Audit support operations](../database/auditing-overview.md#auditing-of-microsoft-support-operations).| -| **Elastic transactions** | Elastic transactions allow for distributed database transactions spanning multiple databases across Azure SQL Database and Azure SQL Managed Instance. 
Elastic transactions have been added to enable frictionless migration of existing applications, as well as development of modern multi-tenant applications relying on vertically or horizontally partitioned database architecture (Preview). To learn more, see [Distributed transactions](../database/elastic-transactions-overview.md#transactions-for-sql-managed-instance). | -| **Configurable backup storage redundancy** | It's now possible to configure Locally redundant storage (LRS) and zone-redundant storage (ZRS) options for backup storage redundancy, providing more flexibility and choice. To learn more, see [Configure backup storage redundancy](../database/automated-backups-overview.md?tabs=managed-instance#configure-backup-storage-redundancy).| -| **TDE-encrypted backup performance improvements** | It's now possible to set the point-in-time restore (PITR) backup retention period, and automated compression of backups encrypted with transparent data encryption (TDE) are now 30 percent more efficient in consuming backup storage space, saving costs for the end user. See [Change PITR](../database/automated-backups-overview.md?tabs=managed-instance#change-the-short-term-retention-policy) to learn more. | -| **Azure AD authentication improvements** | Automate user creation using Azure AD applications and create individual Azure AD guest users (preview). To learn more, see [Directory readers in Azure AD](../database/authentication-aad-directory-readers-role.md)| -| **Global VNet peering support** | Global virtual network peering support has been added to SQL Managed Instance, improving the geo-replication experience. See [geo-replication between managed instances](auto-failover-group-configure-sql-mi.md#enabling-geo-replication-between-managed-instances-and-their-vnets). | -| **Hosting SSRS catalog databases** | SQL Managed Instance can now host catalog databases of SQL Server Reporting Services (SSRS) for versions 2017 and newer. 
| -| **Major performance improvements** | Introducing improvements to SQL Managed Instance performance, including improved transaction log write throughput, improved data and log IOPS for business critical instances, and improved TempDB performance. See the [improved performance](https://techcommunity.microsoft.com/t5/azure-sql/announcing-major-performance-improvements-for-azure-sql-database/ba-p/1701256) tech community blog to learn more. -| **Enhanced management experience** | Using the new [OPERATIONS API](/rest/api/sql/2021-02-01-preview/managed-instance-operations), it's now possible to check the progress of long-running instance operations. To learn more, see [Management operations](management-operations-overview.md?tabs=azure-portal). -| **Machine learning support** | Machine Learning Services with support for R and Python languages now include preview support on Azure SQL Managed Instance (Preview). To learn more, see [Machine learning with SQL Managed Instance](machine-learning-services-overview.md). | -| **User-initiated failover** | User-initiated failover is now generally available, providing you with the capability to manually initiate an automatic failover using PowerShell, CLI commands, and API calls, improving application resiliency. To learn more, see, [testing resiliency](../database/high-availability-sla.md#testing-application-fault-resiliency). | - - -## Known issues - -The known issues content has moved to a dedicated [known issues in SQL Managed Instance](doc-changes-updates-known-issues.md) article. - - -## Contribute to content - -To contribute to the Azure SQL documentation, see the [Docs contributor guide](/contribute/). 
diff --git a/articles/azure-sql/managed-instance/failover-group-add-instance-tutorial.md b/articles/azure-sql/managed-instance/failover-group-add-instance-tutorial.md deleted file mode 100644 index a17e0601ceaf2..0000000000000 --- a/articles/azure-sql/managed-instance/failover-group-add-instance-tutorial.md +++ /dev/null @@ -1,1176 +0,0 @@ ---- -title: "Tutorial: Add SQL Managed Instance to a failover group" -titleSuffix: Azure SQL Managed Instance -description: In this tutorial, learn to create a failover group between a primary and secondary Azure SQL Managed Instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: high-availability -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: -ms.topic: tutorial -author: emlisa -ms.author: emlisa -ms.reviewer: mathoma -ms.date: 08/27/2019 ---- -# Tutorial: Add SQL Managed Instance to a failover group -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -> [!div class="op_single_selector"] -> * [Azure SQL Database (single database)](../database/failover-group-add-single-database-tutorial.md) -> * [Azure SQL Database (elastic pool)](../database/failover-group-add-elastic-pool-tutorial.md) -> * [Azure SQL Managed Instance](failover-group-add-instance-tutorial.md) - -Add managed instances of Azure SQL Managed Instance to an [auto-failover group](auto-failover-group-sql-mi.md). - -In this tutorial, you will learn how to: - -> [!div class="checklist"] -> - Create a primary managed instance. -> - Create a secondary managed instance as part of a failover group. -> - Test failover. - - There are multiple ways to establish connectivity between managed instances in different virtual networks, including: - * [Azure ExpressRoute](../../expressroute/expressroute-howto-circuit-portal-resource-manager.md) - * [Virtual network peering](../../virtual-network/virtual-network-peering-overview.md) - * VPN gateways - -This tutorial provides steps for creating and connecting VPN gateways. 
If you prefer to use ExpressRoute or VNet peering, replace the gateway steps accordingly, or -skip ahead to [Step 7](#create-a-failover-group) if you already have ExpressRoute or global VNet peering configured. - - - > [!NOTE] - > - When going through this tutorial, ensure you are configuring your resources with the [prerequisites for setting up failover groups for SQL Managed Instance](../database/auto-failover-group-overview.md#enabling-geo-replication-between-managed-instances-and-their-vnets). - > - Creating a managed instance can take a significant amount of time. As a result, this tutorial may take several hours to complete. For more information on provisioning times, see [SQL Managed Instance management operations](sql-managed-instance-paas-overview.md#management-operations). - -## Prerequisites - -# [Portal](#tab/azure-portal) -To complete this tutorial, make sure you have: - -- An Azure subscription. [Create a free account](https://azure.microsoft.com/free/) if you don't already have one. - - -# [PowerShell](#tab/azure-powershell) -To complete the tutorial, make sure you have the following items: - -- An Azure subscription. [Create a free account](https://azure.microsoft.com/free/) if you don't already have one. -- [Azure PowerShell](/powershell/azure/) - ---- - - -## Create a resource group and primary managed instance - -In this step, you will create the resource group and the primary managed instance for your failover group using the Azure portal or PowerShell. - -Deploy both managed instances to [paired regions](../../availability-zones/cross-region-replication-azure.md) for performance reasons. Managed instances residing in geo-paired regions have much better performance compared to unpaired regions. - - -# [Portal](#tab/azure-portal) - -Create the resource group and your primary managed instance using the Azure portal. - -1. Select **Azure SQL** in the left-hand menu of the Azure portal. 
If **Azure SQL** is not in the list, select **All services**, and then type `Azure SQL` in the search box. (Optional) Select the star next to **Azure SQL** to favorite it and add it as an item in the left-hand navigation. -1. Select **+ Add** to open the **Select SQL deployment option** page. You can view additional information about the different databases by selecting **Show details** on the **Databases** tile. -1. Select **Create** on the **SQL Managed Instances** tile. - - ![Select SQL Managed Instance](./media/failover-group-add-instance-tutorial/select-managed-instance.png) - -1. On the **Create Azure SQL Managed Instance** page, on the **Basics** tab: - 1. Under **Project Details**, select your **Subscription** from the drop-down and then choose to **Create New** resource group. Type in a name for your resource group, such as `myResourceGroup`. - 1. Under **SQL Managed Instance Details**, provide the name of your managed instance, and the region where you would like to deploy your managed instance. Leave **Compute + storage** at default values. - 1. Under **Administrator Account**, provide an admin login, such as `azureuser`, and a complex admin password. - - ![Create primary managed instance](./media/failover-group-add-instance-tutorial/primary-sql-mi-values.png) - -1. Leave the rest of the settings at default values, and select **Review + create** to review your SQL Managed Instance settings. -1. Select **Create** to create your primary managed instance. - -# [PowerShell](#tab/azure-powershell) - -Create your resource group and the primary managed instance using PowerShell. 
- - ```powershell-interactive - # Connect-AzAccount - # The SubscriptionId in which to create these objects - $SubscriptionId = '' - # Create a random identifier to use as subscript for the different resource names - $randomIdentifier = $(Get-Random) - # Set the resource group name and location for SQL Managed Instance - $resourceGroupName = "myResourceGroup-$randomIdentifier" - $location = "eastus" - $drLocation = "eastus2" - - # Set the networking values for your primary managed instance - $primaryVNet = "primaryVNet-$randomIdentifier" - $primaryAddressPrefix = "10.0.0.0/16" - $primaryDefaultSubnet = "primaryDefaultSubnet-$randomIdentifier" - $primaryDefaultSubnetAddress = "10.0.0.0/24" - $primaryMiSubnetName = "primaryMISubnet-$randomIdentifier" - $primaryMiSubnetAddress = "10.0.0.0/24" - $primaryMiGwSubnetAddress = "10.0.255.0/27" - $primaryGWName = "primaryGateway-$randomIdentifier" - $primaryGWPublicIPAddress = $primaryGWName + "-ip" - $primaryGWIPConfig = $primaryGWName + "-ipc" - $primaryGWAsn = 61000 - $primaryGWConnection = $primaryGWName + "-connection" - - - # Set the networking values for your secondary managed instance - $secondaryVNet = "secondaryVNet-$randomIdentifier" - $secondaryAddressPrefix = "10.128.0.0/16" - $secondaryDefaultSubnet = "secondaryDefaultSubnet-$randomIdentifier" - $secondaryDefaultSubnetAddress = "10.128.0.0/24" - $secondaryMiSubnetName = "secondaryMISubnet-$randomIdentifier" - $secondaryMiSubnetAddress = "10.128.0.0/24" - $secondaryMiGwSubnetAddress = "10.128.255.0/27" - $secondaryGWName = "secondaryGateway-$randomIdentifier" - $secondaryGWPublicIPAddress = $secondaryGWName + "-IP" - $secondaryGWIPConfig = $secondaryGWName + "-ipc" - $secondaryGWAsn = 62000 - $secondaryGWConnection = $secondaryGWName + "-connection" - - - - # Set the SQL Managed Instance name for the new managed instances - $primaryInstance = "primary-mi-$randomIdentifier" - $secondaryInstance = "secondary-mi-$randomIdentifier" - - # Set the admin login and 
password for SQL Managed Instance - $secpasswd = "PWD27!"+(New-Guid).Guid | ConvertTo-SecureString -AsPlainText -Force - $mycreds = New-Object System.Management.Automation.PSCredential ("azureuser", $secpasswd) - - - # Set the SQL Managed Instance service tier, compute level, and license mode - $edition = "General Purpose" - $vCores = 8 - $maxStorage = 256 - $computeGeneration = "Gen5" - $license = "LicenseIncluded" #"BasePrice" or LicenseIncluded if you have don't have SQL Server license that can be used for AHB discount - - # Set failover group details - $vpnSharedKey = "mi1mi2psk" - $failoverGroupName = "failovergroup-$randomIdentifier" - - # Show randomized variables - Write-host "Resource group name is" $resourceGroupName - Write-host "Password is" $secpasswd - Write-host "Primary Virtual Network name is" $primaryVNet - Write-host "Primary default subnet name is" $primaryDefaultSubnet - Write-host "Primary SQL Managed Instance subnet name is" $primaryMiSubnetName - Write-host "Secondary Virtual Network name is" $secondaryVNet - Write-host "Secondary default subnet name is" $secondaryDefaultSubnet - Write-host "Secondary SQL Managed Instance subnet name is" $secondaryMiSubnetName - Write-host "Primary SQL Managed Instance name is" $primaryInstance - Write-host "Secondary SQL Managed Instance name is" $secondaryInstance - Write-host "Failover group name is" $failoverGroupName - - # Suppress networking breaking changes warning (https://aka.ms/azps-changewarnings - Set-Item Env:\SuppressAzurePowerShellBreakingChangeWarnings "true" - - # Set the subscription context - Set-AzContext -SubscriptionId $subscriptionId  - - # Create the resource group - Write-host "Creating resource group..." - $resourceGroup = New-AzResourceGroup -Name $resourceGroupName -Location $location -Tag @{Owner="SQLDB-Samples"} - $resourceGroup - - # Configure the primary virtual network - Write-host "Creating primary virtual network..." 
- $primaryVirtualNetwork = New-AzVirtualNetwork ` - -ResourceGroupName $resourceGroupName ` - -Location $location ` - -Name $primaryVNet ` - -AddressPrefix $primaryAddressPrefix - - Add-AzVirtualNetworkSubnetConfig ` - -Name $primaryMiSubnetName ` - -VirtualNetwork $primaryVirtualNetwork ` - -AddressPrefix $PrimaryMiSubnetAddress ` - | Set-AzVirtualNetwork - $primaryVirtualNetwork - - - # Configure the primary managed instance subnet - Write-host "Configuring primary MI subnet..." - $primaryVirtualNetwork = Get-AzVirtualNetwork -Name $primaryVNet -ResourceGroupName $resourceGroupName - - - $primaryMiSubnetConfig = Get-AzVirtualNetworkSubnetConfig ` - -Name $primaryMiSubnetName ` - -VirtualNetwork $primaryVirtualNetwork - $primaryMiSubnetConfig - - # Configure the network security group management service - Write-host "Configuring primary MI subnet..." - - $primaryMiSubnetConfigId = $primaryMiSubnetConfig.Id - - $primaryNSGMiManagementService = New-AzNetworkSecurityGroup ` - -Name 'primaryNSGMiManagementService' ` - -ResourceGroupName $resourceGroupName ` - -location $location - $primaryNSGMiManagementService - - # Configure the route table management service - Write-host "Configuring primary MI route table management service..." - - $primaryRouteTableMiManagementService = New-AzRouteTable ` - -Name 'primaryRouteTableMiManagementService' ` - -ResourceGroupName $resourceGroupName ` - -location $location - $primaryRouteTableMiManagementService - - # Configure the primary network security group - Write-host "Configuring primary network security group..." 
- Set-AzVirtualNetworkSubnetConfig ` - -VirtualNetwork $primaryVirtualNetwork ` - -Name $primaryMiSubnetName ` - -AddressPrefix $PrimaryMiSubnetAddress ` - -NetworkSecurityGroup $primaryNSGMiManagementService ` - -RouteTable $primaryRouteTableMiManagementService | ` - Set-AzVirtualNetwork - - Get-AzNetworkSecurityGroup ` - -ResourceGroupName $resourceGroupName ` - -Name "primaryNSGMiManagementService" ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 100 ` - -Name "allow_management_inbound" ` - -Access Allow ` - -Protocol Tcp ` - -Direction Inbound ` - -SourcePortRange * ` - -SourceAddressPrefix * ` - -DestinationPortRange 9000,9003,1438,1440,1452 ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 200 ` - -Name "allow_misubnet_inbound" ` - -Access Allow ` - -Protocol * ` - -Direction Inbound ` - -SourcePortRange * ` - -SourceAddressPrefix $PrimaryMiSubnetAddress ` - -DestinationPortRange * ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 300 ` - -Name "allow_health_probe_inbound" ` - -Access Allow ` - -Protocol * ` - -Direction Inbound ` - -SourcePortRange * ` - -SourceAddressPrefix AzureLoadBalancer ` - -DestinationPortRange * ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 1000 ` - -Name "allow_tds_inbound" ` - -Access Allow ` - -Protocol Tcp ` - -Direction Inbound ` - -SourcePortRange * ` - -SourceAddressPrefix VirtualNetwork ` - -DestinationPortRange 1433 ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 1100 ` - -Name "allow_redirect_inbound" ` - -Access Allow ` - -Protocol Tcp ` - -Direction Inbound ` - -SourcePortRange * ` - -SourceAddressPrefix VirtualNetwork ` - -DestinationPortRange 11000-11999 ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 1200 ` - -Name "allow_geodr_inbound" ` - -Access Allow ` - -Protocol Tcp ` - -Direction Inbound ` - -SourcePortRange * ` - -SourceAddressPrefix 
VirtualNetwork ` - -DestinationPortRange 5022 ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 4096 ` - -Name "deny_all_inbound" ` - -Access Deny ` - -Protocol * ` - -Direction Inbound ` - -SourcePortRange * ` - -SourceAddressPrefix * ` - -DestinationPortRange * ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 100 ` - -Name "allow_management_outbound" ` - -Access Allow ` - -Protocol Tcp ` - -Direction Outbound ` - -SourcePortRange * ` - -SourceAddressPrefix * ` - -DestinationPortRange 80,443,12000 ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 200 ` - -Name "allow_misubnet_outbound" ` - -Access Allow ` - -Protocol * ` - -Direction Outbound ` - -SourcePortRange * ` - -SourceAddressPrefix * ` - -DestinationPortRange * ` - -DestinationAddressPrefix $PrimaryMiSubnetAddress ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 1100 ` - -Name "allow_redirect_outbound" ` - -Access Allow ` - -Protocol Tcp ` - -Direction Outbound ` - -SourcePortRange * ` - -SourceAddressPrefix VirtualNetwork ` - -DestinationPortRange 11000-11999 ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 1200 ` - -Name "allow_geodr_outbound" ` - -Access Allow ` - -Protocol Tcp ` - -Direction Outbound ` - -SourcePortRange * ` - -SourceAddressPrefix VirtualNetwork ` - -DestinationPortRange 5022 ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 4096 ` - -Name "deny_all_outbound" ` - -Access Deny ` - -Protocol * ` - -Direction Outbound ` - -SourcePortRange * ` - -SourceAddressPrefix * ` - -DestinationPortRange * ` - -DestinationAddressPrefix * ` - | Set-AzNetworkSecurityGroup - Write-host "Primary network security group configured successfully." 
- - - Get-AzRouteTable ` - -ResourceGroupName $resourceGroupName ` - -Name "primaryRouteTableMiManagementService" ` - | Add-AzRouteConfig ` - -Name "primaryToMIManagementService" ` - -AddressPrefix 0.0.0.0/0 ` - -NextHopType Internet ` - | Add-AzRouteConfig ` - -Name "ToLocalClusterNode" ` - -AddressPrefix $PrimaryMiSubnetAddress ` - -NextHopType VnetLocal ` - | Set-AzRouteTable - Write-host "Primary network route table configured successfully." - - - # Create the primary managed instance - - Write-host "Creating primary SQL Managed Instance..." - Write-host "This will take some time, see https://docs.microsoft.com/azure/sql-database/sql-database-managed-instance#managed-instance-management-operations or more information." - New-AzSqlInstance -Name $primaryInstance ` - -ResourceGroupName $resourceGroupName ` - -Location $location ` - -SubnetId $primaryMiSubnetConfigId ` - -AdministratorCredential $mycreds ` - -StorageSizeInGB $maxStorage ` - -VCore $vCores ` - -Edition $edition ` - -ComputeGeneration $computeGeneration ` - -LicenseType $license - Write-host "Primary SQL Managed Instance created successfully." - ``` - -This portion of the tutorial uses the following PowerShell cmdlets: - -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates an Azure resource group. | -| [New-AzVirtualNetwork](/powershell/module/az.network/new-azvirtualnetwork) | Creates a virtual network. | -| [Add-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/add-azvirtualnetworksubnetconfig) | Adds a subnet configuration to a virtual network. | -| [Get-AzVirtualNetwork](/powershell/module/az.network/get-azvirtualnetwork) | Gets a virtual network in a resource group. | -| [Get-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/get-azvirtualnetworksubnetconfig) | Gets a subnet in a virtual network. 
| -| [New-AzNetworkSecurityGroup](/powershell/module/az.network/new-aznetworksecuritygroup) | Creates a network security group. | -| [New-AzRouteTable](/powershell/module/az.network/new-azroutetable) | Creates a route table. | -| [Set-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/set-azvirtualnetworksubnetconfig) | Updates a subnet configuration for a virtual network. | -| [Set-AzVirtualNetwork](/powershell/module/az.network/set-azvirtualnetwork) | Updates a virtual network. | -| [Get-AzNetworkSecurityGroup](/powershell/module/az.network/get-aznetworksecuritygroup) | Gets a network security group. | -| [Add-AzNetworkSecurityRuleConfig](/powershell/module/az.network/add-aznetworksecurityruleconfig)| Adds a network security rule configuration to a network security group. | -| [Set-AzNetworkSecurityGroup](/powershell/module/az.network/set-aznetworksecuritygroup) | Updates a network security group. | -| [Add-AzRouteConfig](/powershell/module/az.network/add-azrouteconfig) | Adds a route to a route table. | -| [Set-AzRouteTable](/powershell/module/az.network/set-azroutetable) | Updates a route table. | -| [New-AzSqlInstance](/powershell/module/az.sql/new-azsqlinstance) | Creates a managed instance. | - ---- - -## Create secondary virtual network - -If you're using the Azure portal to create your managed instance, you will need to create the virtual network separately because there is a requirement that the subnet of the primary and secondary managed instance do not have overlapping ranges. If you're using PowerShell to configure your managed instance, skip ahead to step 3. - -# [Portal](#tab/azure-portal) - -To verify the subnet range of your primary virtual network, follow these steps: - -1. In the [Azure portal](https://portal.azure.com), navigate to your resource group and select the virtual network for your primary instance. -2. Select **Subnets** under **Settings** and note the **Address range**. 
The subnet address range of the virtual network for the secondary managed instance cannot overlap this. - - - ![Primary subnet](./media/failover-group-add-instance-tutorial/verify-primary-subnet-range.png) - -To create a virtual network, follow these steps: - -1. In the [Azure portal](https://portal.azure.com), select **Create a resource** and search for *virtual network*. -1. Select the **Virtual Network** option published by Microsoft and then select **Create** on the next page. -1. Fill out the required fields to configure the virtual network for your secondary managed instance, and then select **Create**. - - The following table shows the values necessary for the secondary virtual network: - - | **Field** | Value | - | --- | --- | - | **Name** | The name for the virtual network to be used by the secondary managed instance, such as `vnet-sql-mi-secondary`. | - | **Address space** | The address space for your virtual network, such as `10.128.0.0/16`. | - | **Subscription** | The subscription where your primary managed instance and resource group reside. | - | **Region** | The location where you will deploy your secondary managed instance. | - | **Subnet** | The name for your subnet. `default` is provided for you by default. | - | **Address range**| The address range for your subnet. This must be different than the subnet address range used by the virtual network of your primary managed instance, such as `10.128.0.0/24`. | - - - ![Secondary virtual network values](./media/failover-group-add-instance-tutorial/secondary-virtual-network.png) - -# [PowerShell](#tab/azure-powershell) - -This step is only necessary if you're using the Azure portal to deploy SQL Managed Instance. Skip ahead to step 3 if you're using PowerShell. - ---- - -## Create a secondary managed instance -In this step, you will create a secondary managed instance in the Azure portal, which will also configure the networking between the two managed instances. 
- -Your second managed instance must: -- Be empty. -- Have a different subnet and IP range than the primary managed instance. - -# [Portal](#tab/azure-portal) - -Create the secondary managed instance using the Azure portal. - -1. Select **Azure SQL** in the left-hand menu of the Azure portal. If **Azure SQL** is not in the list, select **All services**, and then type `Azure SQL` in the search box. (Optional) Select the star next to **Azure SQL** to favorite it and add it as an item in the left-hand navigation. -1. Select **+ Add** to open the **Select SQL deployment option** page. You can view additional information about the different databases by selecting **Show details** on the **Databases** tile. -1. Select **Create** on the **SQL managed instances** tile. - - ![Select SQL Managed Instance](./media/failover-group-add-instance-tutorial/select-managed-instance.png) - -1. On the **Basics** tab of the **Create Azure SQL Managed Instance** page, fill out the required fields to configure your secondary managed instance. - - The following table shows the values necessary for the secondary managed instance: - - | **Field** | Value | - | --- | --- | - | **Subscription** | The subscription where your primary managed instance is. | - | **Resource group**| The resource group where your primary managed instance is. | - | **SQL Managed Instance name** | The name of your new secondary managed instance, such as `sql-mi-secondary`. | - | **Region**| The location for your secondary managed instance. | - | **SQL Managed Instance admin login** | The login you want to use for your new secondary managed instance, such as `azureuser`. | - | **Password** | A complex password that will be used by the admin login for the new secondary managed instance. | - - -1. Under the **Networking** tab, for the **Virtual Network**, select the virtual network you created for the secondary managed instance from the drop-down. 
- - ![Secondary MI networking](./media/failover-group-add-instance-tutorial/networking-settings-for-secondary-mi.png) - -1. Under the **Additional settings** tab, for **Geo-Replication**, choose **Yes** to _Use as failover secondary_. Select the primary managed instance from the drop-down. - - Be sure that the collation and time zone match that of the primary managed instance. The primary managed instance created in this tutorial used the default of `SQL_Latin1_General_CP1_CI_AS` collation and the `(UTC) Coordinated Universal Time` time zone. - - ![Secondary managed instance networking](./media/failover-group-add-instance-tutorial/secondary-mi-failover.png) - -1. Select **Review + create** to review the settings for your secondary managed instance. -1. Select **Create** to create your secondary managed instance. - -# [PowerShell](#tab/azure-powershell) - -Create the secondary managed instance using PowerShell. - - ```powershell-interactive - # Configure the secondary virtual network - Write-host "Configuring secondary virtual network..." - - $SecondaryVirtualNetwork = New-AzVirtualNetwork ` - -ResourceGroupName $resourceGroupName ` - -Location $drlocation ` - -Name $secondaryVNet ` - -AddressPrefix $secondaryAddressPrefix - - Add-AzVirtualNetworkSubnetConfig ` - -Name $secondaryMiSubnetName ` - -VirtualNetwork $SecondaryVirtualNetwork ` - -AddressPrefix $secondaryMiSubnetAddress ` - | Set-AzVirtualNetwork - $SecondaryVirtualNetwork - - # Configure the secondary managed instance subnet - Write-host "Configuring secondary MI subnet..." - - $SecondaryVirtualNetwork = Get-AzVirtualNetwork -Name $secondaryVNet ` - -ResourceGroupName $resourceGroupName - - $secondaryMiSubnetConfig = Get-AzVirtualNetworkSubnetConfig ` - -Name $secondaryMiSubnetName ` - -VirtualNetwork $SecondaryVirtualNetwork - $secondaryMiSubnetConfig - - # Configure the secondary network security group management service - Write-host "Configuring secondary network security group management service..." 
- - $secondaryMiSubnetConfigId = $secondaryMiSubnetConfig.Id - - $secondaryNSGMiManagementService = New-AzNetworkSecurityGroup ` - -Name 'secondaryToMIManagementService' ` - -ResourceGroupName $resourceGroupName ` - -location $drlocation - $secondaryNSGMiManagementService - - # Configure the secondary route table MI management service - Write-host "Configuring secondary route table MI management service..." - - $secondaryRouteTableMiManagementService = New-AzRouteTable ` - -Name 'secondaryRouteTableMiManagementService' ` - -ResourceGroupName $resourceGroupName ` - -location $drlocation - $secondaryRouteTableMiManagementService - - # Configure the secondary network security group - Write-host "Configuring secondary network security group..." - - Set-AzVirtualNetworkSubnetConfig ` - -VirtualNetwork $SecondaryVirtualNetwork ` - -Name $secondaryMiSubnetName ` - -AddressPrefix $secondaryMiSubnetAddress ` - -NetworkSecurityGroup $secondaryNSGMiManagementService ` - -RouteTable $secondaryRouteTableMiManagementService ` - | Set-AzVirtualNetwork - - Get-AzNetworkSecurityGroup ` - -ResourceGroupName $resourceGroupName ` - -Name "secondaryToMIManagementService" ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 100 ` - -Name "allow_management_inbound" ` - -Access Allow ` - -Protocol Tcp ` - -Direction Inbound ` - -SourcePortRange * ` - -SourceAddressPrefix * ` - -DestinationPortRange 9000,9003,1438,1440,1452 ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 200 ` - -Name "allow_misubnet_inbound" ` - -Access Allow ` - -Protocol * ` - -Direction Inbound ` - -SourcePortRange * ` - -SourceAddressPrefix $secondaryMiSubnetAddress ` - -DestinationPortRange * ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 300 ` - -Name "allow_health_probe_inbound" ` - -Access Allow ` - -Protocol * ` - -Direction Inbound ` - -SourcePortRange * ` - -SourceAddressPrefix AzureLoadBalancer ` - -DestinationPortRange * ` - 
-DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 1000 ` - -Name "allow_tds_inbound" ` - -Access Allow ` - -Protocol Tcp ` - -Direction Inbound ` - -SourcePortRange * ` - -SourceAddressPrefix VirtualNetwork ` - -DestinationPortRange 1433 ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 1100 ` - -Name "allow_redirect_inbound" ` - -Access Allow ` - -Protocol Tcp ` - -Direction Inbound ` - -SourcePortRange * ` - -SourceAddressPrefix VirtualNetwork ` - -DestinationPortRange 11000-11999 ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 1200 ` - -Name "allow_geodr_inbound" ` - -Access Allow ` - -Protocol Tcp ` - -Direction Inbound ` - -SourcePortRange * ` - -SourceAddressPrefix VirtualNetwork ` - -DestinationPortRange 5022 ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 4096 ` - -Name "deny_all_inbound" ` - -Access Deny ` - -Protocol * ` - -Direction Inbound ` - -SourcePortRange * ` - -SourceAddressPrefix * ` - -DestinationPortRange * ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 100 ` - -Name "allow_management_outbound" ` - -Access Allow ` - -Protocol Tcp ` - -Direction Outbound ` - -SourcePortRange * ` - -SourceAddressPrefix * ` - -DestinationPortRange 80,443,12000 ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 200 ` - -Name "allow_misubnet_outbound" ` - -Access Allow ` - -Protocol * ` - -Direction Outbound ` - -SourcePortRange * ` - -SourceAddressPrefix * ` - -DestinationPortRange * ` - -DestinationAddressPrefix $secondaryMiSubnetAddress ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 1100 ` - -Name "allow_redirect_outbound" ` - -Access Allow ` - -Protocol Tcp ` - -Direction Outbound ` - -SourcePortRange * ` - -SourceAddressPrefix VirtualNetwork ` - -DestinationPortRange 11000-11999 ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - 
-Priority 1200 ` - -Name "allow_geodr_outbound" ` - -Access Allow ` - -Protocol Tcp ` - -Direction Outbound ` - -SourcePortRange * ` - -SourceAddressPrefix VirtualNetwork ` - -DestinationPortRange 5022 ` - -DestinationAddressPrefix * ` - | Add-AzNetworkSecurityRuleConfig ` - -Priority 4096 ` - -Name "deny_all_outbound" ` - -Access Deny ` - -Protocol * ` - -Direction Outbound ` - -SourcePortRange * ` - -SourceAddressPrefix * ` - -DestinationPortRange * ` - -DestinationAddressPrefix * ` - | Set-AzNetworkSecurityGroup - - - Get-AzRouteTable ` - -ResourceGroupName $resourceGroupName ` - -Name "secondaryRouteTableMiManagementService" ` - | Add-AzRouteConfig ` - -Name "secondaryToMIManagementService" ` - -AddressPrefix 0.0.0.0/0 ` - -NextHopType Internet ` - | Add-AzRouteConfig ` - -Name "ToLocalClusterNode" ` - -AddressPrefix $secondaryMiSubnetAddress ` - -NextHopType VnetLocal ` - | Set-AzRouteTable - Write-host "Secondary network security group configured successfully." - - # Create the secondary managed instance - - $primaryManagedInstanceId = Get-AzSqlInstance -Name $primaryInstance -ResourceGroupName $resourceGroupName | Select-Object Id - - - Write-host "Creating secondary SQL Managed Instance..." - Write-host "This will take some time, see https://docs.microsoft.com/azure/sql-database/sql-database-managed-instance#managed-instance-management-operations or more information." - New-AzSqlInstance -Name $secondaryInstance ` - -ResourceGroupName $resourceGroupName ` - -Location $drLocation ` - -SubnetId $secondaryMiSubnetConfigId ` - -AdministratorCredential $mycreds ` - -StorageSizeInGB $maxStorage ` - -VCore $vCores ` - -Edition $edition ` - -ComputeGeneration $computeGeneration ` - -LicenseType $license ` - -DnsZonePartner $primaryManagedInstanceId.Id - Write-host "Secondary SQL Managed Instance created successfully." 
- ``` - -This portion of the tutorial uses the following PowerShell cmdlets: - -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates an Azure resource group. | -| [New-AzVirtualNetwork](/powershell/module/az.network/new-azvirtualnetwork) | Creates a virtual network. | -| [Add-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/add-azvirtualnetworksubnetconfig) | Adds a subnet configuration to a virtual network. | -| [Get-AzVirtualNetwork](/powershell/module/az.network/get-azvirtualnetwork) | Gets a virtual network in a resource group. | -| [Get-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/get-azvirtualnetworksubnetconfig) | Gets a subnet in a virtual network. | -| [New-AzNetworkSecurityGroup](/powershell/module/az.network/new-aznetworksecuritygroup) | Creates a network security group. | -| [New-AzRouteTable](/powershell/module/az.network/new-azroutetable) | Creates a route table. | -| [Set-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/set-azvirtualnetworksubnetconfig) | Updates a subnet configuration for a virtual network. | -| [Set-AzVirtualNetwork](/powershell/module/az.network/set-azvirtualnetwork) | Updates a virtual network. | -| [Get-AzNetworkSecurityGroup](/powershell/module/az.network/get-aznetworksecuritygroup) | Gets a network security group. | -| [Add-AzNetworkSecurityRuleConfig](/powershell/module/az.network/add-aznetworksecurityruleconfig)| Adds a network security rule configuration to a network security group. | -| [Set-AzNetworkSecurityGroup](/powershell/module/az.network/set-aznetworksecuritygroup) | Updates a network security group. | -| [Add-AzRouteConfig](/powershell/module/az.network/add-azrouteconfig) | Adds a route to a route table. | -| [Set-AzRouteTable](/powershell/module/az.network/set-azroutetable) | Updates a route table. | -| [New-AzSqlInstance](/powershell/module/az.sql/new-azsqlinstance) | Creates a managed instance. 
| - ---- - -## Create a primary gateway - -> [!NOTE] -> The SKU of the gateway affects throughput performance. This tutorial deploys a gateway with the most basic SKU (`HwGw1`). Deploy a higher SKU (example: `VpnGw3`) to achieve higher throughput. For all available options, see [Gateway SKUs](../../vpn-gateway/vpn-gateway-about-vpngateways.md#benchmark) - -# [Portal](#tab/azure-portal) - -Create the gateway for the virtual network of your primary managed instance using the Azure portal. - - -1. In the [Azure portal](https://portal.azure.com), go to your resource group and select the **Virtual network** resource for your primary managed instance. -1. Select **Subnets** under **Settings** and then select to add a new **Gateway subnet**. Leave the default values. - - ![Add gateway for primary managed instance](./media/failover-group-add-instance-tutorial/add-subnet-gateway-primary-vnet.png) - -1. Once the subnet gateway is created, select **Create a resource** from the left navigation pane and then type `Virtual network gateway` in the search box. Select the **Virtual network gateway** resource published by **Microsoft**. - - ![Create a new virtual network gateway](./media/failover-group-add-instance-tutorial/create-virtual-network-gateway.png) - -1. Fill out the required fields to configure the gateway for your primary managed instance. - - The following table shows the values necessary for the gateway for the primary managed instance: - - | **Field** | Value | - | --- | --- | - | **Subscription** | The subscription where your primary managed instance is. | - | **Name** | The name for your virtual network gateway, such as `primary-mi-gateway`. | - | **Region** | The region where your primary managed instance is. | - | **Gateway type** | Select **VPN**. | - | **VPN Type** | Select **Route-based**. | - | **SKU**| Leave default of `VpnGw1`. | - | **Virtual network**| Select the virtual network that was created in section 2, such as `vnet-sql-mi-primary`. 
| - | **Public IP address**| Select **Create new**. | - | **Public IP address name**| Enter a name for your IP address, such as `primary-gateway-IP`. | - - -1. Leave the other values as default, and then select **Review + create** to review the settings for your virtual network gateway. - - ![Primary gateway settings](./media/failover-group-add-instance-tutorial/settings-for-primary-gateway.png) - -1. Select **Create** to create your new virtual network gateway. - - -# [PowerShell](#tab/azure-powershell) - -Create the gateway for the virtual network of your primary managed instance using PowerShell. - - ```powershell-interactive - # Create the primary gateway - Write-host "Adding GatewaySubnet to primary VNet..." - Get-AzVirtualNetwork ` - -Name $primaryVNet ` - -ResourceGroupName $resourceGroupName ` - | Add-AzVirtualNetworkSubnetConfig ` - -Name "GatewaySubnet" ` - -AddressPrefix $primaryMiGwSubnetAddress ` - | Set-AzVirtualNetwork - - $primaryVirtualNetwork = Get-AzVirtualNetwork ` - -Name $primaryVNet ` - -ResourceGroupName $resourceGroupName - $primaryGatewaySubnet = Get-AzVirtualNetworkSubnetConfig ` - -Name "GatewaySubnet" ` - -VirtualNetwork $primaryVirtualNetwork - - Write-host "Creating primary gateway..." - Write-host "This will take some time." 
- $primaryGWPublicIP = New-AzPublicIpAddress -Name $primaryGWPublicIPAddress -ResourceGroupName $resourceGroupName ` - -Location $location -AllocationMethod Dynamic - $primaryGatewayIPConfig = New-AzVirtualNetworkGatewayIpConfig -Name $primaryGWIPConfig ` - -Subnet $primaryGatewaySubnet -PublicIpAddress $primaryGWPublicIP - - $primaryGateway = New-AzVirtualNetworkGateway -Name $primaryGWName -ResourceGroupName $resourceGroupName ` - -Location $location -IpConfigurations $primaryGatewayIPConfig -GatewayType Vpn ` - -VpnType RouteBased -GatewaySku VpnGw1 -EnableBgp $true -Asn $primaryGWAsn - $primaryGateway - ``` - -This portion of the tutorial uses the following PowerShell cmdlets: - -| Command | Notes | -|---|---| -| [Get-AzVirtualNetwork](/powershell/module/az.network/get-azvirtualnetwork) | Gets a virtual network in a resource group. | -| [Add-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/add-azvirtualnetworksubnetconfig) | Adds a subnet configuration to a virtual network. | -| [Set-AzVirtualNetwork](/powershell/module/az.network/set-azvirtualnetwork) | Updates a virtual network. | -| [Get-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/get-azvirtualnetworksubnetconfig) | Gets a subnet in a virtual network. | -| [New-AzPublicIpAddress](/powershell/module/az.network/new-azpublicipaddress) | Creates a public IP address. | -| [New-AzVirtualNetworkGatewayIpConfig](/powershell/module/az.network/new-azvirtualnetworkgatewayipconfig) | Creates an IP configuration for a virtual network gateway. | -| [New-AzVirtualNetworkGateway](/powershell/module/az.network/new-azvirtualnetworkgateway) | Creates a virtual network gateway. | - - ---- - - -## Create secondary gateway -In this step, create the gateway for the virtual network of your secondary managed instance using the Azure portal. 
- - -# [Portal](#tab/azure-portal) - -Using the Azure portal, repeat the steps in the previous section to create the virtual network subnet and gateway for the secondary managed instance. Fill out the required fields to configure the gateway for your secondary managed instance. - - The following table shows the values necessary for the gateway for the secondary managed instance: - - | **Field** | Value | - | --- | --- | - | **Subscription** | The subscription where your secondary managed instance is. | - | **Name** | The name for your virtual network gateway, such as `secondary-mi-gateway`. | - | **Region** | The region where your secondary managed instance is. | - | **Gateway type** | Select **VPN**. | - | **VPN Type** | Select **Route-based**. | - | **SKU**| Leave default of `VpnGw1`. | - | **Virtual network**| Select the virtual network for the secondary managed instance, such as `vnet-sql-mi-secondary`. | - | **Public IP address**| Select **Create new**. | - | **Public IP address name**| Enter a name for your IP address, such as `secondary-gateway-IP`. | - - - ![Secondary gateway settings](./media/failover-group-add-instance-tutorial/settings-for-secondary-gateway.png) - - -# [PowerShell](#tab/azure-powershell) - -Create the gateway for the virtual network of the secondary managed instance using PowerShell. - - ```powershell-interactive - # Create the secondary gateway - Write-host "Creating secondary gateway..." - - Write-host "Adding GatewaySubnet to secondary VNet..." 
- Get-AzVirtualNetwork ` - -Name $secondaryVNet ` - -ResourceGroupName $resourceGroupName ` - | Add-AzVirtualNetworkSubnetConfig ` - -Name "GatewaySubnet" ` - -AddressPrefix $secondaryMiGwSubnetAddress ` - | Set-AzVirtualNetwork - - $secondaryVirtualNetwork = Get-AzVirtualNetwork ` - -Name $secondaryVNet ` - -ResourceGroupName $resourceGroupName - $secondaryGatewaySubnet = Get-AzVirtualNetworkSubnetConfig ` - -Name "GatewaySubnet" ` - -VirtualNetwork $secondaryVirtualNetwork - $drLocation = $secondaryVirtualNetwork.Location - - Write-host "Creating secondary gateway..." - Write-host "This will take some time." - $secondaryGWPublicIP = New-AzPublicIpAddress -Name $secondaryGWPublicIPAddress -ResourceGroupName $resourceGroupName ` - -Location $drLocation -AllocationMethod Dynamic - $secondaryGatewayIPConfig = New-AzVirtualNetworkGatewayIpConfig -Name $secondaryGWIPConfig ` - -Subnet $secondaryGatewaySubnet -PublicIpAddress $secondaryGWPublicIP - - $secondaryGateway = New-AzVirtualNetworkGateway -Name $secondaryGWName -ResourceGroupName $resourceGroupName ` - -Location $drLocation -IpConfigurations $secondaryGatewayIPConfig -GatewayType Vpn ` - -VpnType RouteBased -GatewaySku VpnGw1 -EnableBgp $true -Asn $secondaryGWAsn - $secondaryGateway - ``` - -This portion of the tutorial uses the following PowerShell cmdlets: - -| Command | Notes | -|---|---| -| [Get-AzVirtualNetwork](/powershell/module/az.network/get-azvirtualnetwork) | Gets a virtual network in a resource group. | -| [Add-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/add-azvirtualnetworksubnetconfig) | Adds a subnet configuration to a virtual network. | -| [Set-AzVirtualNetwork](/powershell/module/az.network/set-azvirtualnetwork) | Updates a virtual network. | -| [Get-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/get-azvirtualnetworksubnetconfig) | Gets a subnet in a virtual network. 
| -| [New-AzPublicIpAddress](/powershell/module/az.network/new-azpublicipaddress) | Creates a public IP address. | -| [New-AzVirtualNetworkGatewayIpConfig](/powershell/module/az.network/new-azvirtualnetworkgatewayipconfig) | Creates an IP configuration for a virtual network gateway. | -| [New-AzVirtualNetworkGateway](/powershell/module/az.network/new-azvirtualnetworkgateway) | Creates a virtual network gateway. | - ---- - - -## Connect the gateways -In this step, create a bidirectional connection between the two gateways of the two virtual networks. - - -# [Portal](#tab/azure-portal) - -Connect the two gateways using the Azure portal. - - -1. Select **Create a resource** from the [Azure portal](https://portal.azure.com). -1. Type `connection` in the search box and then press enter to search, which takes you to the **Connection** resource, published by Microsoft. -1. Select **Create** to create your connection. -1. On the **Basics** page, select the following values and then select **OK**. - 1. Select `VNet-to-VNet` for the **Connection type**. - 1. Select your subscription from the drop-down. - 1. Select the resource group for SQL Managed Instance in the drop-down. - 1. Select the location of your primary managed instance from the drop-down. -1. On the **Settings** page, select or enter the following values and then select **OK**: - 1. Choose the primary network gateway for the **First virtual network gateway**, such as `primaryGateway`. - 1. Choose the secondary network gateway for the **Second virtual network gateway**, such as `secondaryGateway`. - 1. Select the checkbox next to **Establish bidirectional connectivity**. - 1. Either leave the default primary connection name, or rename it to a value of your choice. - 1. Provide a **Shared key (PSK)** for the connection, such as `mi1m2psk`. - 1. Select **OK** to save your settings. - - ![Create gateway connection](./media/failover-group-add-instance-tutorial/create-gateway-connection.png) - - - -1. 
On the **Review + create** page, review the settings for your bidirectional connection and then select **OK** to create your connection. - - -# [PowerShell](#tab/azure-powershell) - -Connect the two gateways using PowerShell. - - ```powershell-interactive - # Connect the primary to secondary gateway - Write-host "Connecting the primary gateway to secondary gateway..." - New-AzVirtualNetworkGatewayConnection -Name $primaryGWConnection -ResourceGroupName $resourceGroupName ` - -VirtualNetworkGateway1 $primaryGateway -VirtualNetworkGateway2 $secondaryGateway -Location $location ` - -ConnectionType Vnet2Vnet -SharedKey $vpnSharedKey -EnableBgp $true - $primaryGWConnection - - # Connect the secondary to primary gateway - Write-host "Connecting the secondary gateway to primary gateway..." - - New-AzVirtualNetworkGatewayConnection -Name $secondaryGWConnection -ResourceGroupName $resourceGroupName ` - -VirtualNetworkGateway1 $secondaryGateway -VirtualNetworkGateway2 $primaryGateway -Location $drLocation ` - -ConnectionType Vnet2Vnet -SharedKey $vpnSharedKey -EnableBgp $true - $secondaryGWConnection - ``` - -This portion of the tutorial uses the following PowerShell cmdlet: - -| Command | Notes | -|---|---| -| [New-AzVirtualNetworkGatewayConnection](/powershell/module/az.network/new-azvirtualnetworkgatewayconnection) | Creates a connection between the two virtual network gateways. | - ---- - - -## Create a failover group -In this step, you will create the failover group and add both managed instances to it. - - -# [Portal](#tab/azure-portal) -Create the failover group using the Azure portal. - - -1. Select **Azure SQL** in the left-hand menu of the [Azure portal](https://portal.azure.com). If **Azure SQL** is not in the list, select **All services**, and then type `Azure SQL` in the search box. (Optional) Select the star next to **Azure SQL** to favorite it and add it as an item in the left-hand navigation. -1. 
Select the primary managed instance you created in the first section, such as `sql-mi-primary`. -1. Under **Data management**, navigate to **Failover groups** and then choose **Add group** to open the **Instance Failover Group** page. - - ![Add a failover group](./media/failover-group-add-instance-tutorial/add-failover-group.png) - -1. On the **Instance Failover Group** page, type the name of your failover group, such as `failovergrouptutorial`. Then choose the secondary managed instance, such as `sql-mi-secondary`, from the drop-down. Select **Create** to create your failover group. - - ![Create failover group](./media/failover-group-add-instance-tutorial/create-failover-group.png) - -1. Once failover group deployment is complete, you will be taken back to the **Failover group** page. - - -# [PowerShell](#tab/azure-powershell) -Create the failover group using PowerShell. - - ```powershell-interactive - Write-host "Creating the failover group..." - $failoverGroup = New-AzSqlDatabaseInstanceFailoverGroup -Name $failoverGroupName ` - -Location $location -ResourceGroupName $resourceGroupName -PrimaryManagedInstanceName $primaryInstance ` - -PartnerRegion $drLocation -PartnerManagedInstanceName $secondaryInstance ` - -FailoverPolicy Automatic -GracePeriodWithDataLossHours 1 - $failoverGroup - ``` - -This portion of the tutorial uses the following PowerShell cmdlet: - -| Command | Notes | -|---|---| -| [New-AzSqlDatabaseInstanceFailoverGroup](/powershell/module/az.sql/new-azsqldatabaseinstancefailovergroup)| Creates a new Azure SQL Managed Instance failover group. | - - ---- - - -## Test failover -In this step, you will fail your failover group over to the secondary server, and then fail back using the Azure portal. - - -# [Portal](#tab/azure-portal) -Test failover using the Azure portal. - - -1. Navigate to your _secondary_ managed instance within the [Azure portal](https://portal.azure.com) and select **Instance Failover Groups** under settings. -1. 
Review which managed instance is the primary, and which managed instance is the secondary. -1. Select **Failover** and then select **Yes** on the warning about TDS sessions being disconnected. - - ![Fail over the failover group](./media/failover-group-add-instance-tutorial/failover-mi-failover-group.png) - -1. Review which managed instance is the primary and which managed instance is the secondary. If failover succeeded, the two instances should have switched roles. - - ![Managed instances have switched roles after failover](./media/failover-group-add-instance-tutorial/mi-switched-after-failover.png) - -1. Go to the new _secondary_ managed instance and select **Failover** once again to fail the primary instance back to the primary role. - - -# [PowerShell](#tab/azure-powershell) -Test failover using PowerShell. - - ```powershell-interactive - - # Verify the current primary role - Get-AzSqlDatabaseInstanceFailoverGroup -ResourceGroupName $resourceGroupName ` - -Location $location -Name $failoverGroupName - - # Fail over the primary managed instance to the secondary role - Write-host "Failing primary over to the secondary location" - Get-AzSqlDatabaseInstanceFailoverGroup -ResourceGroupName $resourceGroupName ` - -Location $drLocation -Name $failoverGroupName | Switch-AzSqlDatabaseInstanceFailoverGroup - Write-host "Successfully failed failover group to secondary location" - ``` - - -Revert the failover group back to the primary server: - - ```powershell-interactive - # Verify the current primary role - Get-AzSqlDatabaseInstanceFailoverGroup -ResourceGroupName $resourceGroupName ` - -Location $drLocation -Name $failoverGroupName - - # Fail the primary managed instance back to the primary role - Write-host "Failing primary back to primary role" - Get-AzSqlDatabaseInstanceFailoverGroup -ResourceGroupName $resourceGroupName ` - -Location $location -Name $failoverGroupName | Switch-AzSqlDatabaseInstanceFailoverGroup - Write-host "Successfully failed failover group to 
primary location" - - # Verify the current primary role - Get-AzSqlDatabaseInstanceFailoverGroup -ResourceGroupName $resourceGroupName ` - -Location $location -Name $failoverGroupName - ``` - -This portion of the tutorial uses the following PowerShell cmdlets: - -| Command | Notes | -|---|---| -| [Get-AzSqlDatabaseInstanceFailoverGroup](/powershell/module/az.sql/get-azsqldatabaseinstancefailovergroup) | Gets or lists SQL Managed Instance failover groups.| -| [Switch-AzSqlDatabaseInstanceFailoverGroup](/powershell/module/az.sql/switch-azsqldatabaseinstancefailovergroup) | Executes a failover of a SQL Managed Instance failover group. | - ---- - - - -## Clean up resources -Clean up resources by first deleting the managed instances, then the virtual cluster, then any remaining resources, and finally the resource group. - -# [Portal](#tab/azure-portal) -1. Navigate to your resource group in the [Azure portal](https://portal.azure.com). -1. Select the managed instance(s) and then select **Delete**. Type `yes` in the text box to confirm you want to delete the resource and then select **Delete**. This process may take some time to complete in the background, and until it's done, you will not be able to delete the *virtual cluster* or any other dependent resources. Monitor the deletion in the **Activity** tab to confirm your managed instance has been deleted. -1. Once the managed instance is deleted, delete the *virtual cluster* by selecting it in your resource group, and then choosing **Delete**. Type `yes` in the text box to confirm you want to delete the resource and then select **Delete**. -1. Delete any remaining resources. Type `yes` in the text box to confirm you want to delete the resource and then select **Delete**. -1. Delete the resource group by selecting **Delete resource group**, typing in the name of the resource group, `myResourceGroup`, and then selecting **Delete**. 
- -# [PowerShell](#tab/azure-powershell) - -You will need to remove the resource group twice. Removing the resource group the first time will remove the managed instances and virtual clusters but will then fail with the error message `Remove-AzResourceGroup : Long running operation failed with status 'Conflict'`. Run the Remove-AzResourceGroup command a second time to remove any residual resources as well as the resource group. - -```powershell-interactive -Remove-AzResourceGroup -ResourceGroupName $resourceGroupName -Write-host "Removing SQL Managed Instance and virtual cluster..." -Remove-AzResourceGroup -ResourceGroupName $resourceGroupName -Write-host "Removing residual resources and resource group..." -``` - -This portion of the tutorial uses the following PowerShell cmdlet: - -| Command | Notes | -|---|---| -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Removes a resource group. | - ---- - -## Full script - -# [PowerShell](#tab/azure-powershell) -[!code-powershell-interactive[main](../../../powershell_scripts/sql-database/failover-groups/add-managed-instance-to-failover-group-az-ps.ps1 "Add SQL Managed Instance to a failover group")] - -This script uses the following commands. Each command in the table links to command-specific documentation. - -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates an Azure resource group. | -| [New-AzVirtualNetwork](/powershell/module/az.network/new-azvirtualnetwork) | Creates a virtual network. | -| [Add-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/add-azvirtualnetworksubnetconfig) | Adds a subnet configuration to a virtual network. | -| [Get-AzVirtualNetwork](/powershell/module/az.network/get-azvirtualnetwork) | Gets a virtual network in a resource group. | -| [Get-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/get-azvirtualnetworksubnetconfig) | Gets a subnet in a virtual network. 
| -| [New-AzNetworkSecurityGroup](/powershell/module/az.network/new-aznetworksecuritygroup) | Creates a network security group. | -| [New-AzRouteTable](/powershell/module/az.network/new-azroutetable) | Creates a route table. | -| [Set-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/set-azvirtualnetworksubnetconfig) | Updates a subnet configuration for a virtual network. | -| [Set-AzVirtualNetwork](/powershell/module/az.network/set-azvirtualnetwork) | Updates a virtual network. | -| [Get-AzNetworkSecurityGroup](/powershell/module/az.network/get-aznetworksecuritygroup) | Gets a network security group. | -| [Add-AzNetworkSecurityRuleConfig](/powershell/module/az.network/add-aznetworksecurityruleconfig)| Adds a network security rule configuration to a network security group. | -| [Set-AzNetworkSecurityGroup](/powershell/module/az.network/set-aznetworksecuritygroup) | Updates a network security group. | -| [Add-AzRouteConfig](/powershell/module/az.network/add-azrouteconfig) | Adds a route to a route table. | -| [Set-AzRouteTable](/powershell/module/az.network/set-azroutetable) | Updates a route table. | -| [New-AzSqlInstance](/powershell/module/az.sql/new-azsqlinstance) | Creates a managed instance. | -| [Get-AzSqlInstance](/powershell/module/az.sql/get-azsqlinstance)| Returns information about Azure SQL Managed Instance. | -| [New-AzPublicIpAddress](/powershell/module/az.network/new-azpublicipaddress) | Creates a public IP address. | -| [New-AzVirtualNetworkGatewayIpConfig](/powershell/module/az.network/new-azvirtualnetworkgatewayipconfig) | Creates an IP configuration for a virtual network gateway. | -| [New-AzVirtualNetworkGateway](/powershell/module/az.network/new-azvirtualnetworkgateway) | Creates a virtual network gateway. | -| [New-AzVirtualNetworkGatewayConnection](/powershell/module/az.network/new-azvirtualnetworkgatewayconnection) | Creates a connection between the two virtual network gateways. 
| -| [New-AzSqlDatabaseInstanceFailoverGroup](/powershell/module/az.sql/new-azsqldatabaseinstancefailovergroup)| Creates a new SQL Managed Instance failover group. | -| [Get-AzSqlDatabaseInstanceFailoverGroup](/powershell/module/az.sql/get-azsqldatabaseinstancefailovergroup) | Gets or lists SQL Managed Instance failover groups.| -| [Switch-AzSqlDatabaseInstanceFailoverGroup](/powershell/module/az.sql/switch-azsqldatabaseinstancefailovergroup) | Executes a failover of a SQL Managed Instance failover group. | -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Removes a resource group. | - -# [Portal](#tab/azure-portal) - -There are no scripts available for the Azure portal. - ---- - -## Next steps - -In this tutorial, you configured a failover group between two managed instances. You learned how to: - -> [!div class="checklist"] -> - Create a primary managed instance. -> - Create a secondary managed instance as part of a [failover group](../database/auto-failover-group-overview.md). -> - Test failover. 
- -Advance to the next quickstart on how to connect to SQL Managed Instance, and how to restore a database to SQL Managed Instance: - -> [!div class="nextstepaction"] -> [Connect to SQL Managed Instance](connect-vm-instance-configure.md) -> [Restore a database to SQL Managed Instance](restore-sample-database-quickstart.md) - - diff --git a/articles/azure-sql/managed-instance/frequently-asked-questions-faq.yml b/articles/azure-sql/managed-instance/frequently-asked-questions-faq.yml deleted file mode 100644 index 8e7e182b8808f..0000000000000 --- a/articles/azure-sql/managed-instance/frequently-asked-questions-faq.yml +++ /dev/null @@ -1,599 +0,0 @@ -### YamlMime:FAQ -metadata: - title: Frequently asked questions (FAQ) - titleSuffix: Azure SQL Managed Instance - description: Azure SQL Managed Instance frequently asked questions (FAQ) - services: sql-database - ms.service: sql-managed-instance - ms.subservice: service-overview - ms.custom: sqldbrb=1 - ms.devlang: - ms.topic: faq - author: MashaMSFT - ms.author: mathoma - ms.reviewer: urmilano, danil, nnikolic, wiassaf - ms.date: 04/14/2022 -title: Azure SQL Managed Instance frequently asked questions (FAQ) -summary: | - [!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - - This article contains the most common questions about [Azure SQL Managed Instance](sql-managed-instance-paas-overview.md). - - -sections: - - name: Ignored - questions: - - question: | - Supported features - answer: | - ### Where can I find a list of features supported on SQL Managed Instance? - - For a list of supported features in SQL Managed Instance, see [Azure SQL Managed Instance features](../database/features-comparison.md). - - For differences in syntax and behavior between Azure SQL Managed Instance and SQL Server, see [T-SQL differences from SQL Server](transact-sql-tsql-differences-sql-server.md). 
- - - - question: | - Technical specification, resource limits and other limitations - answer: | - ### Where can I find technical characteristics and resource limits for SQL Managed Instance? - - For available hardware characteristics, see [Technical differences in hardware configurations](resource-limits.md#hardware-configuration-characteristics). - For available service tiers and their characteristics, see [Technical differences between service tiers](resource-limits.md#service-tier-characteristics). - - ### What service tier am I eligible for? - - Any customer is eligible for any service tier. However, if you want to exchange your existing licenses for discounted rates on Azure SQL Managed Instance by using [Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/), bear in mind that SQL Server Enterprise Edition customers with Software Assurance are eligible for the [General Purpose](../database/service-tier-general-purpose.md) or [Business Critical](../database/service-tier-business-critical.md) performance tiers and SQL Server Standard Edition customers with Software Assurance are eligible for the General Purpose performance tier only. For more information, see [Specific rights of the AHB](../azure-hybrid-benefit.md?tabs=azure-powershell#what-are-the-specific-rights-of-the-azure-hybrid-benefit-for-sql-server). - - ### What subscription types are supported for SQL Managed Instance? - - For the list of supported subscription types, see [Supported subscription types](resource-limits.md#supported-subscription-types). - - ### Which Azure regions are supported? - - Managed instances can be created in most of the Azure regions; see [Supported regions for SQL Managed Instance](https://azure.microsoft.com/global-infrastructure/services/?products=sql-database®ions=all). If you need managed instance in a region that is currently not supported, [send a support request via the Azure portal](../database/quota-increase-request.md). 
- - ### Are there any quota limitations for SQL Managed Instance deployments? - - Managed instance has two default limits: limit on the number of subnets you can use and a limit on the number of vCores you can provision. Limits vary across the subscription types and regions. For the list of regional resource limitations by subscription type, see table from [Regional resource limitation](resource-limits.md#regional-resource-limitations). These are soft limits that can be increased on demand. If you need to provision more managed instances in your current regions, send a support request to increase the quota using the Azure portal. For more information, see [Request quota increases for Azure SQL Database](../database/quota-increase-request.md). - - ### Can I increase the number of databases limit (100) on my managed instance on demand? - - No, and currently there are no committed plans to increase the number of databases on SQL Managed Instance. - - ### Where can I migrate if I have more than 16 TB of data? - You can consider migrating to other Azure flavors that suit your workload: [Azure SQL Database Hyperscale](../database/service-tier-hyperscale.md) or [SQL Server on Azure Virtual Machines](../virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md). - - ### Where can I migrate if I have specific hardware requirements such as larger RAM to vCore ratio or more CPUs? - You can consider migrating to [SQL Server on Azure Virtual Machines](../virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md) or [Azure SQL Database](../database/sql-database-paas-overview.md) memory/cpu optimized. - - - question: | - Known issues and defects - answer: | - ### Where can I find known issues and defects? - - For product defects and known issues, see [Known issues](doc-changes-updates-known-issues.md). - - - question: | - New features - answer: | - ### Where can I find latest features and the features in public preview? 
- - For new and preview features, see [Release notes](doc-changes-updates-release-notes-whats-new.md). - - - question: | - Create, update, delete or move SQL Managed Instance - answer: | - ### How can I provision Azure SQL Managed Instance? - - You can provision an instance from [Azure portal](instance-create-quickstart.md), [PowerShell](scripts/create-configure-managed-instance-powershell.md), [Azure CLI](https://techcommunity.microsoft.com/t5/azure-sql-database/create-azure-sql-managed-instance-using-azure-cli/ba-p/386281) and [ARM templates](/archive/blogs/sqlserverstorageengine/creating-azure-sql-managed-instance-using-arm-templates). - - ### Can I provision SQL Managed Instances in an existing subscription? - - Yes, you can provision a SQL Managed Instance in an existing subscription if that subscription belongs to the [Supported subscription types](resource-limits.md#supported-subscription-types). - - ### Why couldn't I provision a SQL Managed Instance in the subnet which name starts with a digit? - - This is a current limitation on underlying component that verifies subnet name against the regex ^[a-zA-Z_][^\\\/\:\*\?\"\<\>\|\`\'\^]*(? [!IMPORTANT] - > Azure platform can change policy requirements without notifying services relying on that policies. - - ### What are current Azure platform policies? - - Each login must set its password upon sign-in, and change its password after it reaches maximum age. - - | **Policy** | **Security Setting** | - | --- | --- | - | Maximum password age | 42 days | - | Minimum password age | One day | - | Minimum password length | 10 characters | - | Password must meet complexity requirements | Enabled | - - ### Is it possible to disable password complexity and expiration in SQL Managed Instance at the login level? - - Yes, it is possible to control CHECK_POLICY and CHECK_EXPIRATION fields at the login level. 
You can check current settings by executing the following T-SQL command: - - ```sql - SELECT * - FROM sys.sql_logins - ``` - - After that, you can modify specified login settings by executing: - - ```sql - ALTER LOGIN test WITH CHECK_POLICY = OFF; - ALTER LOGIN test WITH CHECK_EXPIRATION = OFF; - ``` - - (Replace 'test' with desired login name and adjust policy and expiration values.) - - - - question: | - Service updates - answer: | - ### What is the Root CA change for Azure SQL Database & SQL Managed Instance? - - See [Certificate rotation for Azure SQL Database & SQL Managed Instance](../updates/ssl-root-certificate-expiring.md). - - ### What is a planned maintenance event for SQL Managed Instance? - - See [Plan for Azure maintenance events in SQL Managed Instance](../database/planned-maintenance.md). - - - - question: | - Azure feedback and support - answer: | - ### Where can I leave my ideas for SQL Managed Instance improvements? - - You can vote for a new SQL Managed Instance feature or create a new improvement idea in the [SQL Managed Instance Feedback Forum](https://feedback.azure.com/d365community/forum/a99f7006-3425-ec11-b6e6-000d3a4f0f84). This way you can contribute to the product development and help us prioritize our potential improvements. - - ### How can I create an Azure support request? - - To learn how to create an Azure support request, see [How to create Azure support request](../../azure-portal/supportability/how-to-create-azure-support-request.md). 
diff --git a/articles/azure-sql/managed-instance/how-to-content-reference-guide.md b/articles/azure-sql/managed-instance/how-to-content-reference-guide.md deleted file mode 100644 index 7b7f73239fddf..0000000000000 --- a/articles/azure-sql/managed-instance/how-to-content-reference-guide.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: Configure & manage content reference -titleSuffix: Azure SQL Managed Instance -description: A reference guide of content that teaches you how to configure and manage Azure SQL Managed Instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.custom: sqldbrb=1, ignite-fall-2021 -ms.devlang: -ms.topic: guide -author: MashaMSFT -ms.author: mathoma -ms.reviewer: mathoma, danil -ms.date: 03/22/2022 ---- -# Azure SQL Managed Instance content reference -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -In this article you can find a content reference to various guides, scripts, and explanations that help you manage and configure Azure SQL Managed Instance. - -## Load data - -- [SQL Server to Azure SQL Managed Instance Guide](../migration-guides/managed-instance/sql-server-to-managed-instance-guide.md): Learn about the recommended migration process and tools for migration to Azure SQL Managed Instance. -- [Migrate TDE cert to Azure SQL Managed Instance](tde-certificate-migrate.md): If your SQL Server database is protected with transparent data encryption (TDE), you would need to migrate the certificate that SQL Managed Instance can use to decrypt the backup that you want to restore in Azure. 
-- [Import a DB from a BACPAC](../database/database-import.md) -- [Export a DB to BACPAC](../database/database-export.md) -- [Load data with BCP](../load-from-csv-with-bcp.md) -- [Load data with Azure Data Factory](../../data-factory/connector-azure-sql-database.md?toc=/azure/sql-database/toc.json) - -## Network configuration - -- [Determine subnet size](vnet-subnet-determine-size.md): - Since the subnet cannot be resized after SQL Managed Instance is deployed, you need to calculate what IP range of addresses is required for the number and types of managed instances you plan to deploy to the subnet. -- [Create a new VNet and subnet](virtual-network-subnet-create-arm-template.md): - Configure the virtual network and subnet according to the [network requirements](connectivity-architecture-overview.md#network-requirements). -- [Configure an existing VNet and subnet](vnet-existing-add-subnet.md): - Verify network requirements and configure your existing virtual network and subnet to deploy SQL Managed Instance. -- [Configure service endpoint policies for Azure Storage (Preview)](service-endpoint-policies-configure.md): - Secure your subnet against erroneous or malicious data exfiltration into unauthorized Azure Storage accounts. -- [Configure custom DNS](custom-dns-configure.md): - Configure custom DNS to grant external resource access to custom domains from SQL Managed Instance via a linked server of db mail profiles. -- [Find the management endpoint IP address](management-endpoint-find-ip-address.md): - Determine the public endpoint that SQL Managed Instance is using for management purposes. -- [Verify built-in firewall protection](management-endpoint-verify-built-in-firewall.md): - Verify that SQL Managed Instance allows traffic only on necessary ports, and other built-in firewall rules. -- [Connect applications](connect-application-instance.md): - Learn about different patterns for connecting the applications to SQL Managed Instance. 
- -## Feature configuration - -- [Configure Azure AD auth](../database/authentication-aad-configure.md) -- [Configure conditional access](../database/conditional-access-configure.md) -- [Multi-factor Azure AD auth](../database/authentication-mfa-ssms-overview.md) -- [Configure multi-factor auth](../database/authentication-mfa-ssms-configure.md) -- [Configure auto-failover group](auto-failover-group-configure-sql-mi.md) to automatically failover all databases on an instance to a secondary instance in another region in the event of a disaster. -- [Configure a temporal retention policy](../database/temporal-tables-retention-policy.md) -- [Configure In-Memory OLTP](../in-memory-oltp-configure.md) -- [Configure Azure Automation](../database/automation-manage.md) -- [Transactional replication](replication-between-two-instances-configure-tutorial.md) enables you to replicate your data between managed instances, or from SQL Server on-premises to SQL Managed Instance, and vice versa. -- [Configure threat detection](threat-detection-configure.md) – [threat detection](../database/threat-detection-overview.md) is a built-in Azure SQL Managed Instance feature that detects various potential attacks such as SQL injection or access from suspicious locations. -- [Creating alerts](alerts-create.md) enables you to set up alerts on monitored metrics such as CPU utilization, storage space consumption, IOPS and others for SQL Managed Instance. 
- -### Transparent Data Encryption - -- [Configure TDE with BYOK](../database/transparent-data-encryption-byok-configure.md) -- [Rotate TDE BYOK keys](../database/transparent-data-encryption-byok-key-rotation.md) -- [Remove a TDE protector](../database/transparent-data-encryption-byok-remove-tde-protector.md) - -### Managed Instance link feature - -- [Prepare environment for link feature](managed-instance-link-preparation.md) -- [Replicate database with link feature in SSMS](managed-instance-link-use-ssms-to-replicate-database.md) -- [Replicate database with Azure SQL Managed Instance link feature with T-SQL and PowerShell scripts](managed-instance-link-use-scripts-to-replicate-database.md) -- [Failover database with link feature in SSMS - Azure SQL Managed Instance](managed-instance-link-use-ssms-to-failover-database.md) -- [Failover (migrate) database with Azure SQL Managed Instance link feature with T-SQL and PowerShell scripts](managed-instance-link-use-scripts-to-failover-database.md) -- [Best practices with link feature for Azure SQL Managed Instance](managed-instance-link-best-practices.md) - - -## Monitoring and tuning - -- [Manual tuning](../database/performance-guidance.md) -- [Use DMVs to monitor performance](../database/monitoring-with-dmvs.md) -- [Use Query Store to monitor performance](/sql/relational-databases/performance/best-practice-with-the-query-store#Insight) -- [Troubleshoot performance with Intelligent Insights](../database/intelligent-insights-troubleshoot-performance.md) -- [Use the Intelligent Insights diagnostics log](../database/intelligent-insights-use-diagnostics-log.md) -- [Monitor In-Memory OLTP space](../in-memory-oltp-monitor-space.md) - -### Extended events - -- [Extended events](../database/xevent-db-diff-from-svr.md) -- [Store extended events into an event file](../database/xevent-code-event-file.md) -- [Store extended events into a ring buffer](../database/xevent-code-ring-buffer.md) - -### Alerting - -- [Create alerts on 
managed instance](alerts-create.md) - -## Operations - -- [User-initiated manual failover on SQL Managed Instance](user-initiated-failover.md) - -## Develop applications - -- [Connectivity](../database/connect-query-content-reference-guide.md#libraries) -- [Use Spark Connector](../../cosmos-db/create-sql-api-spark.md) -- [Authenticate an app](../database/application-authentication-get-client-id-keys.md) -- [Use batching for better performance](../performance-improve-use-batching.md) -- [Connectivity guidance](../database/troubleshoot-common-connectivity-issues.md) -- [DNS aliases](../database/dns-alias-overview.md) -- [Set up a DNS alias by using PowerShell](../database/dns-alias-powershell-create.md) -- [Ports - ADO.NET](../database/adonet-v12-develop-direct-route-ports.md) -- [C and C ++](../database/develop-cplusplus-simple.md) -- [Excel](../database/connect-excel.md) - -## Design applications - -- [Design for disaster recovery](../database/designing-cloud-solutions-for-disaster-recovery.md) -- [Design for elastic pools](../database/disaster-recovery-strategies-for-applications-with-elastic-pool.md) -- [Design for app upgrades](../database/manage-application-rolling-upgrade.md) - -### Design Multi-tenant SaaS applications - -- [SaaS design patterns](../database/saas-tenancy-app-design-patterns.md) -- [SaaS video indexer](../database/saas-tenancy-video-index-wingtip-brk3120-20171011.md) -- [SaaS app security](../database/saas-tenancy-elastic-tools-multi-tenant-row-level-security.md) - -## Next steps - -Get started by [deploying SQL Managed Instance](instance-create-quickstart.md). 
diff --git a/articles/azure-sql/managed-instance/index.yml b/articles/azure-sql/managed-instance/index.yml deleted file mode 100644 index b632a478954cc..0000000000000 --- a/articles/azure-sql/managed-instance/index.yml +++ /dev/null @@ -1,172 +0,0 @@ -### YamlMime:Landing - -title: Azure SQL Managed Instance documentation -summary: Find documentation about Azure SQL Managed Instance, a managed instance in Azure based on the latest stable version of Microsoft SQL Server. - -metadata: - title: Azure SQL Managed Instance documentation - description: Find documentation about Azure SQL Managed Instance, a managed instance in Azure based on the latest stable version of Microsoft SQL Server. - services: sql-database - ms.service: sql-managed-instance - ms.subservice: service-overview - ms.topic: landing-page - author: MashaMSFT - ms.author: mathoma - ms.reviewer: - ms.date: 05/27/2020 - -# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new - -landingContent: -# Cards and links should be based on top customer tasks or top subjects -# Start card title with a verb - - # Card - - title: Azure SQL Managed Instance - linkLists: - - linkListType: whats-new - links: - - text: What's new? - url: doc-changes-updates-release-notes-whats-new.md - - linkListType: quickstart - links: - - text: Create SQL Managed Instance - url: instance-create-quickstart.md - - linkListType: video - links: - - text: Azure SQL Managed Instance overview - url: /shows/Azure-SQL-for-Beginners/Azure-SQL-Managed-Instance-Overview-6-of-61 - - linkListType: concept - links: - - text: What is SQL Managed Instance? 
- url: sql-managed-instance-paas-overview.md - - text: vCore purchasing model - url: service-tiers-managed-instance-vcore.md - - text: Transactional replication - url: replication-transactional-overview.md - - text: Migrate from SQL Server - url: ../migration-guides/managed-instance/sql-server-to-managed-instance-guide.md - - text: T-SQL differences with SQL Server - url: transact-sql-tsql-differences-sql-server.md - - - # Card - - title: Advanced security - linkLists: - - linkListType: concept - links: - - text: Security capabilities - url: ../database/security-overview.md - - text: Security best practices - url: ../database/security-best-practice.md - - text: Logins, user accounts, roles, and permissions - url: ../database/logins-create-manage.md - - text: Azure Active Directory - url: ../database/authentication-aad-overview.md - - text: Auditing - url: auditing-configure.md - - text: Transparent Data Encryption (TDE) - url: ../database/transparent-data-encryption-tde-overview.md - - text: Dynamic Data Masking - url: ../database/dynamic-data-masking-overview.md - - text: Public endpoints - url: public-endpoint-overview.md - - - # Card - - title: Learn Azure SQL - linkLists: - - linkListType: learn - links: - - text: Azure SQL for beginners - url: https://aka.ms/azuresql4beginners - - text: Azure SQL fundamentals - url: /learn/paths/azure-sql-fundamentals/ - - text: Azure SQL hands-on labs - url: https://aka.ms/asqlworkshop - - text: Azure SQL bootcamp - url: https://aka.ms/azuresqlbootcamp - - text: Educational SQL resources - url: /sql/sql-server/educational-sql-resources - - # Card - - title: Reference - linkLists: - - linkListType: deploy - links: - - text: Azure CLI samples - url: ../database/az-cli-script-samples-content-guide.md - - text: PowerShell samples - url: ../database/powershell-script-content-guide.md - - text: ARM template samples - url: ../database/arm-templates-content-guide.md - - linkListType: download - links: - - text: SQL Server Management 
Studio (SSMS) - url: /sql/ssms/download-sql-server-management-studio-ssms - - text: Azure Data Studio - url: /sql/azure-data-studio/download-azure-data-studio - - text: SQL Server Data Tools - url: /sql/ssdt/download-sql-server-data-tools-ssdt - - text: Visual Studio 2019 - url: https://visualstudio.microsoft.com/downloads/ - - linkListType: reference - links: - - text: Migration guide - url: https://datamigration.microsoft.com/ - - text: Transact-SQL (T-SQL) - url: /sql/t-sql/language-reference - - text: Azure CLI - url: /cli/azure/azure-cli-reference-for-sql#sql-managed-instance-references - - text: PowerShell - url: /powershell/module/az.sql - - text: REST API - url: /rest/api/sql/ - - # Card - - title: Business continuity - linkLists: - - linkListType: how-to-guide - links: - - text: Business continuity - url: ../database/business-continuity-high-availability-disaster-recover-hadr-overview.md - - text: High availability - url: ../database/high-availability-sla.md - - text: Auto-failover groups - url: ../database/auto-failover-group-overview.md - - text: Automated backups - url: ../database/automated-backups-overview.md - - text: Recover with backup - url: ../database/recovery-using-backups.md - - text: Long-term backup retention - url: ../database/long-term-retention-overview.md - - # Card - - title: Instance pools - linkLists: - - linkListType: concept - links: - - text: What is an instance pool? 
- url: instance-pools-overview.md - - text: Configure - url: instance-pools-configure.md - - # Card - - title: Planned maintenance - linkLists: - - linkListType: concept - links: - - text: Plan for Azure maintenance events - url: ../database/planned-maintenance.md - - text: Maintenance window - url: ../database/maintenance-window.md - - text: Resource health - url: ../database/resource-health-to-troubleshoot-connectivity.md - - - linkListType: how-to-guide - links: - - text: Maintenance window - url: ../database/maintenance-window-configure.md - - text: Maintenance Window Notifications - url: ../database/advance-notifications.md - \ No newline at end of file diff --git a/articles/azure-sql/managed-instance/instance-create-quickstart.md b/articles/azure-sql/managed-instance/instance-create-quickstart.md deleted file mode 100644 index 843c6d0c7a224..0000000000000 --- a/articles/azure-sql/managed-instance/instance-create-quickstart.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: "Quickstart: Create an Azure SQL Managed Instance (portal)" -description: Create a managed instance, network environment, and client VM for access using the Azure portal in this quickstart. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.custom: mode-ui -ms.devlang: -ms.topic: quickstart -author: urosmil -ms.author: urmilano -ms.reviewer: mathoma -ms.date: 04/06/2022 ---- -# Quickstart: Create an Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This quickstart teaches you to create an [Azure SQL Managed Instance](sql-managed-instance-paas-overview.md) in the Azure portal. - -> [!IMPORTANT] -> For limitations, see [Supported regions](resource-limits.md#supported-regions) and [Supported subscription types](resource-limits.md#supported-subscription-types). 
- -## Create an Azure SQL Managed Instance - -To create a SQL Managed Instance, follow these steps: - -### Sign in to the Azure portal - -If you don't have an Azure subscription, [create a free account](https://azure.microsoft.com/free/). - -1. Sign in to the [Azure portal](https://portal.azure.com/). -1. Select **Azure SQL** on the left menu of the Azure portal. If **Azure SQL** is not in the list, select **All services**, and then enter **Azure SQL** in the search box. -1. Select **+Add** to open the **Select SQL deployment option** page. You can view additional information about Azure SQL Managed Instance by selecting **Show details** on the **SQL managed instances** tile. -1. Select **Create**. - - ![Create a managed instance](./media/instance-create-quickstart/create-azure-sql-managed-instance.png) - -4. Use the tabs on the **Create Azure SQL Managed Instance** provisioning form to add required and optional information. The following sections describe these tabs. - -### Basics tab - -- Fill out mandatory information required on the **Basics** tab. This is a minimum set of information required to provision a SQL Managed Instance. - - !["Basics" tab for creating a SQL Managed Instance](./media/instance-create-quickstart/azure-sql-managed-instance-create-tab-basics.png) - - Use the table below as a reference for information required at this tab. - - | Setting| Suggested value | Description | - | ------ | --------------- | ----------- | - | **Subscription** | Your subscription. | A subscription that gives you permission to create new resources. 
| - | **Resource group** | A new or existing resource group.|For valid resource group names, see [Naming rules and restrictions](/azure/architecture/best-practices/resource-naming).| - | **Managed instance name** | Any valid name.|For valid names, see [Naming rules and restrictions](/azure/architecture/best-practices/resource-naming).| - | **Region** |The region in which you want to create the managed instance.|For information about regions, see [Azure regions](https://azure.microsoft.com/regions/).| - | **Managed instance admin login** | Any valid username. | For valid names, see [Naming rules and restrictions](/azure/architecture/best-practices/resource-naming). Don't use "serveradmin" because that's a reserved server-level role.| - | **Password** | Any valid password.| The password must be at least 16 characters long and meet the [defined complexity requirements](../../virtual-machines/windows/faq.yml#what-are-the-password-requirements-when-creating-a-vm-).| - -- Select **Configure Managed Instance** to size compute and storage resources and to review the pricing tiers. Use the sliders or text boxes to specify the amount of storage and the number of virtual cores. When you're finished, select **Apply** to save your selection. - - ![Managed instance form](./media/instance-create-quickstart/azure-sql-managed-instance-create-tab-configure-performance.png) - -| Setting| Suggested value | Description | -| ------ | --------------- | ----------- | -| **Service Tier** | Select one of the options. | Based on your scenario, select one of the following options:
    • **General Purpose**: for most production workloads, and the default option.
    • **Business Critical**: designed for low-latency workloads with high resiliency to failures and fast failovers.

    For more information, review [service tiers](service-tiers-managed-instance-vcore.md) and [resource limits](/azure/azure-sql/managed-instance/resource-limits).| -| **Hardware Configuration** | Select one of the options. | Hardware configuration generally defines the compute and memory limits and other characteristics that impact the performance of the workload. **Gen5** is the default.| -| **vCore compute model** | Select an option. | vCores represent exact amount of compute resources that are always provisioned for your workload. **Eight vCores** is the default.| -| **Storage in GB** | Select an option. | Storage size in GB, select based on expected data size. If migrating existing data from on-premises or on various cloud platforms, see [Migration overview: SQL Server to SQL Managed Instance](/azure/azure-sql/migration-guides/managed-instance/sql-server-to-managed-instance-overview).| -| **Azure Hybrid Benefit** | Check option if applicable. | For leveraging an existing license for Azure. For more information, see [Azure Hybrid Benefit - Azure SQL Database & SQL Managed Instance](/azure/azure-sql/azure-hybrid-benefit). | -| **Backup storage redundancy** | Select **Geo-redundant backup storage**. | Storage redundancy inside Azure for backup storage. Note that this value cannot be changed later. Geo-redundant backup storage is default and recommended, though Zone and Local redundancy allow for more cost flexibility and single region data residency. For more information, see [Backup Storage redundancy](../database/automated-backups-overview.md?tabs=managed-instance#backup-storage-redundancy).| - - -- To review your choices before you create a SQL Managed Instance, you can select **Review + create**. Or, configure networking options by selecting **Next: Networking**. - -### Networking tab - -- Fill out optional information on the **Networking** tab. If you omit this information, the portal will apply default settings. 
- - !["Networking" tab for creating a managed instance](./media/instance-create-quickstart/azure-sql-managed-instance-create-tab-networking.png) - - Use the table below as a reference for information required at this tab. - - | Setting| Suggested value | Description | - | ------ | --------------- | ----------- | - | **Virtual network** | Select either **Create new virtual network** or a valid virtual network and subnet.| If a network or subnet is unavailable, it must be [modified to satisfy the network requirements](vnet-existing-add-subnet.md) before you select it as a target for the new managed instance. For information about the requirements for configuring the network environment for SQL Managed Instance, see [Configure a virtual network for SQL Managed Instance](connectivity-architecture-overview.md). | - | **Connection type** | Choose between a proxy and a redirect connection type.|For more information about connection types, see [Azure SQL Managed Instance connection type](../database/connectivity-architecture.md#connection-policy).| - | **Public endpoint** | Select **Disable**. | For a managed instance to be accessible through the public data endpoint, you need to enable this option. | - | **Allow access from** (if **Public endpoint** is enabled) | Select **No Access** |The portal experience enables configuring a security group with a public endpoint.

    Based on your scenario, select one of the following options:
    • **Azure services**: We recommend this option when you're connecting from Power BI or another multitenant service.
    • **Internet**: Use for test purposes when you want to quickly spin up a managed instance. We don't recommend it for production environments.
    • **No access**: This option creates a **Deny** security rule. Modify this rule to make a managed instance accessible through a public endpoint.

    For more information on public endpoint security, see [Using Azure SQL Managed Instance securely with a public endpoint](public-endpoint-overview.md).| - -- Select **Review + create** to review your choices before you create a managed instance. Or, configure more custom settings by selecting **Next: Additional settings**. - - -### Additional settings - -- Fill out optional information on the **Additional settings** tab. If you omit this information, the portal will apply default settings. - - !["Additional settings" tab for creating a managed instance](./media/instance-create-quickstart/azure-sql-managed-instance-create-tab-additional-settings.png) - - Use the table below as a reference for information required at this tab. - - | Setting| Suggested value | Description | - | ------ | --------------- | ----------- | - | **Collation** | Choose the collation that you want to use for your managed instance. If you migrate databases from SQL Server, check the source collation by using `SELECT SERVERPROPERTY(N'Collation')` and use that value.| For information about collations, see [Set or change the server collation](/sql/relational-databases/collations/set-or-change-the-server-collation).| - | **Time zone** | Select the time zone that managed instance will observe.|For more information, see [Time zones](timezones-overview.md).| - | **Use as failover secondary** | Select **Yes**. | Enable this option to use the managed instance as a failover group secondary.| - | **Primary SQL Managed Instance** (if **Use as failover secondary** is set to **Yes**) | Choose an existing primary managed instance that will be joined in the same DNS zone with the managed instance you're creating. | This step will enable post-creation configuration of the failover group. For more information, see [Tutorial: Add a managed instance to a failover group](failover-group-add-instance-tutorial.md).| - -- Select **Review + create** to review your choices before you create a managed instance. 
Or, configure Azure Tags by selecting **Next: Tags** (recommended). - -### Tags - -- Add tags to resources in your Azure Resource Manager template (ARM template). [Tags](../../azure-resource-manager/management/tag-resources.md) help you logically organize your resources. The tag values show up in cost reports and allow for other management activities by tag. - -- Consider at least tagging your new SQL Managed Instance with the Owner tag to identify who created, and the Environment tag to identify whether this system is Production, Development, etc. For more information, see [Develop your naming and tagging strategy for Azure resources](/azure/cloud-adoption-framework/ready/azure-best-practices/naming-and-tagging). - -- Select **Review + create** to proceed. - -## Review + create - -1. Select **Review + create** tab to review your choices before you create a managed instance. - - ![Tab for reviewing and creating a managed instance](./media/instance-create-quickstart/azure-sql-managed-instance-create-tab-review-create.png) - -1. Select **Create** to start provisioning the managed instance. - -> [!IMPORTANT] -> Deploying a managed instance is a long-running operation. Deployment of the first instance in the subnet typically takes much longer than deploying into a subnet with existing managed instances. For average provisioning times, see [Overview of Azure SQL Managed Instance management operations](management-operations-overview.md#duration). - -## Monitor deployment progress - -1. Select the **Notifications** icon to view the status of the deployment. - - ![Deployment progress of a SQL Managed Instance deployment](./media/instance-create-quickstart/azure-sql-managed-instance-create-deployment-in-progress.png) - -1. Select **Deployment in progress** in the notification to open the SQL Managed Instance window and further monitor the deployment progress. 
- -> [!TIP] -> - If you closed your web browser or moved away from the deployment progress screen, you can monitor the provisioning operation via the managed instance's **Overview** page, or via PowerShell or the Azure CLI. For more information, see [Monitor operations](management-operations-monitor.md#monitor-operations). -> - You can cancel the provisioning process through Azure portal, or via PowerShell or the Azure CLI or other tooling using the REST API. See [Canceling Azure SQL Managed Instance management operations](management-operations-cancel.md). - -> [!IMPORTANT] -> - Start of SQL Managed Instance creation could be delayed in cases when there exist other impacting operations, such are long-running restore or scaling operations on other Managed Instances in the same subnet. To learn more, see [Management operations cross-impact](management-operations-overview.md#management-operations-cross-impact). -> - In order to be able to get the status of managed instance creation, you need to have **read permissions** over the resource group. If you don't have this permission or revoke it while the managed instance is in creation process, this can cause SQL Managed Instance not to be visible in the list of resource group deployments. -> - -## View resources created - -Upon successful deployment of a managed instance, to view resources created: - -1. Open the resource group for your managed instance. - - ![SQL Managed Instance resources](./media/instance-create-quickstart/azure-sql-managed-instance-resources.png) - -## View and fine-tune network settings - -To optionally fine-tune networking settings, inspect the following: - -1. In the list of resources, select the route table to review the user-defined Route table (UDR) object that was created. - -2. In the route table, review the entries to route traffic from and within the SQL Managed Instance virtual network. 
If you create or configure your route table manually, create these entries in the SQL Managed Instance route table. - - ![Entry for a SQL Managed Instance subnet to local](./media/instance-create-quickstart/azure-sql-managed-instance-route-table-user-defined-route.png) - - To change or add routes, open the **Routes** in the Route table settings. - -3. Return to the resource group, and select the network security group (NSG) object that was created. - -4. Review the inbound and outbound security rules. - - ![Security rules](./media/instance-create-quickstart/azure-sql-managed-instance-security-rules.png) - - To change or add rules, open the **Inbound Security Rules** and **Outbound security rules** in the Network security group settings. - -> [!IMPORTANT] -> If you have configured a public endpoint for SQL Managed Instance, you need to open ports to allow network traffic allowing connections to SQL Managed Instance from the public internet. For more information, see [Configure a public endpoint for SQL Managed Instance](public-endpoint-configure.md#allow-public-endpoint-traffic-on-the-network-security-group). -> - -## Retrieve connection details to SQL Managed Instance - -To connect to SQL Managed Instance, follow these steps to retrieve the host name and fully qualified domain name (FQDN): - -1. Return to the resource group and select the SQL managed instance object that was created. - -2. On the **Overview** tab, locate the **Host** property. Copy the host name to your clipboard for the managed instance for use in the next quickstart by clicking the **Copy to clipboard** button. - - ![Host name](./media/instance-create-quickstart/azure-sql-managed-instance-host-name.png) - - The value copied represents a fully qualified domain name (FQDN) that can be used to connect to SQL Managed Instance. It is similar to the following address example: *your_host_name.a1b2c3d4e5f6.database.windows.net*. 
- -## Next steps - -To learn about how to connect to SQL Managed Instance: -- For an overview of the connection options for applications, see [Connect your applications to SQL Managed Instance](connect-application-instance.md). -- For a quickstart that shows how to connect to SQL Managed Instance from an Azure virtual machine, see [Configure an Azure virtual machine connection](connect-vm-instance-configure.md). -- For a quickstart that shows how to connect to SQL Managed Instance from an on-premises client computer by using a point-to-site connection, see [Configure a point-to-site connection](point-to-site-p2s-configure.md). - -To restore an existing SQL Server database from on-premises to SQL Managed Instance: -- Use the [Azure Database Migration Service for migration](../../dms/tutorial-sql-server-to-managed-instance.md) to restore from a database backup file. -- Use the [T-SQL RESTORE command](restore-sample-database-quickstart.md) to restore from a database backup file. - -For advanced monitoring of SQL Managed Instance database performance with built-in troubleshooting intelligence, see [Monitor Azure SQL Managed Instance by using Azure SQL Analytics](../../azure-monitor/insights/azure-sql.md). diff --git a/articles/azure-sql/managed-instance/instance-pools-configure.md b/articles/azure-sql/managed-instance/instance-pools-configure.md deleted file mode 100644 index 85a7504f7deb9..0000000000000 --- a/articles/azure-sql/managed-instance/instance-pools-configure.md +++ /dev/null @@ -1,307 +0,0 @@ ---- -title: Deploy SQL Managed Instance to an instance pool -titleSuffix: Azure SQL Managed Instance -description: This article describes how to create and manage Azure SQL Managed Instance pools (preview). 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.custom: devx-track-azurepowershell, devx-track-azurecli -ms.topic: how-to -author: urosmil -ms.author: urmilano -ms.reviewer: mathoma -ms.date: 09/05/2019 ---- -# Deploy Azure SQL Managed Instance to an instance pool -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article provides details on how to create an [instance pool](instance-pools-overview.md) and deploy Azure SQL Managed Instance to it. - -## Instance pool operations - -The following table shows the available operations related to instance pools and their availability in the Azure portal, PowerShell, and Azure CLI. - -|Command|Azure portal|PowerShell|Azure CLI| -|:---|:---|:---|:---| -|Create an instance pool|No|Yes|Yes| -|Update an instance pool (limited number of properties)|No |Yes | Yes| -|Check an instance pool usage and properties|No|Yes | Yes | -|Delete an instance pool|No|Yes|Yes| -|Create a managed instance inside an instance pool|No|Yes|No| -|Update resource usage for a managed instance|Yes |Yes|No| -|Check usage and properties for a managed instance|Yes|Yes|No| -|Delete a managed instance from the pool|Yes|Yes|No| -|Create a database in instance within the pool|Yes|Yes|No| -|Delete a database from SQL Managed Instance|Yes|Yes|No| - -# [PowerShell](#tab/powershell) - -To use PowerShell, [install the latest version of PowerShell Core](/powershell/scripting/install/installing-powershell#powershell), and follow instructions to [Install the Azure PowerShell module](/powershell/azure/install-az-ps). - -Available [PowerShell commands](/powershell/module/az.sql/): - -|Cmdlet |Description | -|:---|:---| -|[New-AzSqlInstancePool](/powershell/module/az.sql/new-azsqlinstancepool/) | Creates a SQL Managed Instance pool. | -|[Get-AzSqlInstancePool](/powershell/module/az.sql/get-azsqlinstancepool/) | Returns information about an instance pool. 
| -|[Set-AzSqlInstancePool](/powershell/module/az.sql/set-azsqlinstancepool/) | Sets properties for an instance pool in SQL Managed Instance. | -|[Remove-AzSqlInstancePool](/powershell/module/az.sql/remove-azsqlinstancepool/) | Removes an instance pool in SQL Managed Instance. | -|[Get-AzSqlInstancePoolUsage](/powershell/module/az.sql/get-azsqlinstancepoolusage/) | Returns information about SQL Managed Instance pool usage. | - -For operations related to instances both inside pools and single instances, use the standard [managed instance commands](api-references-create-manage-instance.md#powershell-create-and-configure-managed-instances), but the *instance pool name* property must be populated when using these commands for an instance in a pool. - -# [Azure CLI](#tab/azure-cli) - -Prepare your environment for the Azure CLI. - -[!INCLUDE [azure-cli-prepare-your-environment-no-header](../../../includes/azure-cli-prepare-your-environment-no-header.md)] - -Available [Azure CLI](/cli/azure/sql) commands: - -|Cmdlet |Description | -|:---|:---| -|[az sql instance-pool create](/cli/azure/sql/instance-pool#az-sql-instance-pool-create) | Creates a SQL Managed Instance pool. | -|[az sql instance-pool show](/cli/azure/sql/instance-pool#az-sql-instance-pool-show) | Returns information about an instance pool. | -|[az sql instance-pool update](/cli/azure/sql/instance-pool#az-sql-instance-pool-update) | Sets or updates properties for an instance pool in SQL Managed Instance. | -|[az sql instance-pool delete](/cli/azure/sql/instance-pool#az-sql-instance-pool-delete) | Removes an instance pool in SQL Managed Instance. | - ---- - -## Deployment process - -To deploy a managed instance into an instance pool, you must first deploy the instance pool, which is a one-time long-running operation where the duration is the same as deploying a [single instance created in an empty subnet](sql-managed-instance-paas-overview.md#management-operations). 
After that, you can deploy a managed instance into the pool, which is a relatively fast operation that typically takes up to five minutes. The instance pool parameter must be explicitly specified as part of this operation. - -In public preview, both actions are only supported using PowerShell and Azure Resource Manager templates. The Azure portal experience is not currently available. - -After a managed instance is deployed to a pool, you *can* use the Azure portal to change its properties on the pricing tier page. - -## Create a virtual network with a subnet - -To place multiple instance pools inside the same virtual network, see the following articles: - -- [Determine VNet subnet size for Azure SQL Managed Instance](vnet-subnet-determine-size.md). -- Create new virtual network and subnet using the [Azure portal template](virtual-network-subnet-create-arm-template.md) or follow the instructions for [preparing an existing virtual network](vnet-existing-add-subnet.md). - - -## Create an instance pool - -After completing the previous steps, you are ready to create an instance pool. - -The following restrictions apply to instance pools: - -- Only General Purpose and Gen5 are available in public preview. -- The pool name can contain only lowercase letters, numbers and hyphens, and can't start with a hyphen. -- If you want to use Azure Hybrid Benefit, it is applied at the instance pool level. You can set the license type during pool creation or update it anytime after creation. - -> [!IMPORTANT] -> Deploying an instance pool is a long running operation that takes approximately 4.5 hours. 
- -# [PowerShell](#tab/powershell) - -To get network parameters: - -```powershell -$virtualNetwork = Get-AzVirtualNetwork -Name "miPoolVirtualNetwork" -ResourceGroupName "myResourceGroup" -$subnet = Get-AzVirtualNetworkSubnetConfig -Name "miPoolSubnet" -VirtualNetwork $virtualNetwork -``` - -To create an instance pool: - -```powershell -$instancePool = New-AzSqlInstancePool ` - -ResourceGroupName "myResourceGroup" ` - -Name "mi-pool-name" ` - -SubnetId $subnet.Id ` - -LicenseType "LicenseIncluded" ` - -VCore 8 ` - -Edition "GeneralPurpose" ` - -ComputeGeneration "Gen5" ` - -Location "westeurope" -``` - -# [Azure CLI](#tab/azure-cli) - -To get the virtual network parameters: - -```azurecli -az network vnet show --resource-group MyResourceGroup --name miPoolVirtualNetwork -``` - -To get the virtual subnet parameters: - -```azurecli -az network vnet subnet show --resource group MyResourceGroup --name miPoolSubnet --vnet-name miPoolVirtualNetwork -``` - -To create an instance pool: - -```azurecli -az sql instance-pool create - --license-type LicenseIncluded - --location westeurope - --name mi-pool-name - --capacity 8 - --tier GeneralPurpose - --family Gen5 - --resource-group myResourceGroup - --subnet miPoolSubnet - --vnet-name miPoolVirtualNetwork -``` - ---- - -> [!IMPORTANT] -> Because deploying an instance pool is a long running operation, you need to wait until it completes before running any of the following steps in this article. - -## Create a managed instance - -After the successful deployment of the instance pool, it's time to create a managed instance inside it. - -To create a managed instance, execute the following command: - -```powershell -$instanceOne = $instancePool | New-AzSqlInstance -Name "mi-one-name" -VCore 2 -StorageSizeInGB 256 -``` - -Deploying an instance inside a pool takes a couple of minutes. 
After the first instance has been created, additional instances can be created: - -```powershell -$instanceTwo = $instancePool | New-AzSqlInstance -Name "mi-two-name" -VCore 4 -StorageSizeInGB 512 -``` - -## Create a database - -To create and manage databases in a managed instance that's inside a pool, use the single instance commands. - -To create a database inside a managed instance: - -```powershell -$poolinstancedb = New-AzSqlInstanceDatabase -Name "mipooldb1" -InstanceName "poolmi-001" -ResourceGroupName "myResourceGroup" -``` - - -## Get pool usage - -To get a list of instances inside a pool: - -```powershell -$instancePool | Get-AzSqlInstance -``` - - -To get pool resource usage: - -```powershell -$instancePool | Get-AzSqlInstancePoolUsage -``` - - -To get detailed usage overview of the pool and instances inside it: - -```powershell -$instancePool | Get-AzSqlInstancePoolUsage –ExpandChildren -``` - -To list the databases in an instance: - -```powershell -$databases = Get-AzSqlInstanceDatabase -InstanceName "pool-mi-001" -ResourceGroupName "resource-group-name" -``` - - -> [!NOTE] -> For checking limits on number of databases per instance pool and managed instance deployed inside the pool visit [Instance pool resource limits](instance-pools-overview.md#resource-limitations) section. - - -## Scale - - -After populating a managed instance with databases, you may hit instance limits regarding storage or performance. In that case, if pool usage has not been exceeded, you can scale your instance. -Scaling a managed instance inside a pool is an operation that takes a couple of minutes. The prerequisite for scaling is available vCores and storage on the instance pool level. 
- -To update the number of vCores and storage size: - -```powershell -$instanceOne | Set-AzSqlInstance -VCore 8 -StorageSizeInGB 512 -InstancePoolName "mi-pool-name" -``` - - -To update storage size only: - -```powershell -$instance | Set-AzSqlInstance -StorageSizeInGB 1024 -InstancePoolName "mi-pool-name" -``` - -## Connect - -To connect to a managed instance in a pool, the following two steps are required: - -1. [Enable the public endpoint for the instance](#enable-the-public-endpoint). -2. [Add an inbound rule to the network security group (NSG)](#add-an-inbound-rule-to-the-network-security-group). - -After both steps are complete, you can connect to the instance by using a public endpoint address, port, and credentials provided during instance creation. - -### Enable the public endpoint - -Enabling the public endpoint for an instance can be done through the Azure portal or by using the following PowerShell command: - - -```powershell -$instanceOne | Set-AzSqlInstance -InstancePoolName "pool-mi-001" -PublicDataEndpointEnabled $true -``` - -This parameter can be set during instance creation as well. - -### Add an inbound rule to the network security group - -This step can be done through the Azure portal or using PowerShell commands, and can be done anytime after the subnet is prepared for the managed instance. - -For details, see [Allow public endpoint traffic on the network security group](public-endpoint-configure.md#allow-public-endpoint-traffic-on-the-network-security-group). - - -## Move an existing single instance to a pool - -Moving instances in and out of a pool is one of the public preview limitations. A workaround relies on point-in-time restore of databases from an instance outside a pool to an instance that's already in a pool. - -Both instances must be in the same subscription and region. Cross-region and cross-subscription restore is not currently supported. - -This process does have a period of downtime. - -To move existing databases: - -1. 
Pause workloads on the managed instance you are migrating from. -2. Generate scripts to create system databases and execute them on the instance that's inside the instance pool. -3. Do a point-in-time restore of each database from the single instance to the instance in the pool. - - ```powershell - $resourceGroupName = "my resource group name" - $managedInstanceName = "my managed instance name" - $databaseName = "my source database name" - $pointInTime = "2019-08-21T08:51:39.3882806Z" - $targetDatabase = "name of the new database that will be created" - $targetResourceGroupName = "resource group of instance pool" - $targetInstanceName = "pool instance name" - - Restore-AzSqlInstanceDatabase -FromPointInTimeBackup ` - -ResourceGroupName $resourceGroupName ` - -InstanceName $managedInstanceName ` - -Name $databaseName ` - -PointInTime $pointInTime ` - -TargetInstanceDatabaseName $targetDatabase ` - -TargetResourceGroupName $targetResourceGroupName ` - -TargetInstanceName $targetInstanceName - ``` - -4. Point your application to the new instance and resume its workloads. - -If there are multiple databases, repeat the process for each database. - - -## Next steps - -- For a features and comparison list, see [SQL common features](../database/features-comparison.md). -- For more information about VNet configuration, see [SQL Managed Instance VNet configuration](connectivity-architecture-overview.md). -- For a quickstart that creates a managed instance and restores a database from a backup file, see [Create a managed instance](instance-create-quickstart.md). -- For a tutorial about using Azure Database Migration Service for migration, see [SQL Managed Instance migration using Database Migration Service](../../dms/tutorial-sql-server-to-managed-instance.md). 
-- For advanced monitoring of SQL Managed Instance database performance with built-in troubleshooting intelligence, see [Monitor Azure SQL Managed Instance using Azure SQL Analytics](../../azure-monitor/insights/azure-sql.md). -- For pricing information, see [SQL Managed Instance pricing](https://azure.microsoft.com/pricing/details/sql-database/managed/). diff --git a/articles/azure-sql/managed-instance/instance-pools-overview.md b/articles/azure-sql/managed-instance/instance-pools-overview.md deleted file mode 100644 index 3a0a5cc2d5f21..0000000000000 --- a/articles/azure-sql/managed-instance/instance-pools-overview.md +++ /dev/null @@ -1,160 +0,0 @@ ---- -title: "What is an Azure SQL Managed Instance pool?" -titleSuffix: Azure SQL Managed Instance -description: Learn about Azure SQL Managed Instance pools (preview), a feature that provides a convenient and cost-efficient way to migrate smaller SQL Server databases to the cloud at scale, and manage multiple managed instances. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: service-overview -ms.custom: -ms.devlang: -ms.topic: conceptual -author: urosmil -ms.author: urmilano -ms.reviewer: mathoma -ms.date: 10/25/2021 ---- -# What is an Azure SQL Managed Instance pool (preview)? -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Instance pools in Azure SQL Managed Instance provide a convenient and cost-efficient way to migrate smaller SQL Server instances to the cloud at scale. - -Instance pools allow you to pre-provision compute resources according to your total migration requirements. You can then deploy several individual managed instances up to your pre-provisioned compute level. For example, if you pre-provision 8 vCores you can deploy two 2-vCore and one 4-vCore instance, and then migrate databases into these instances. 
Prior to instance pools being available, smaller and less compute-intensive workloads would often have to be consolidated into a larger managed instance when migrating to the cloud. The need to migrate groups of databases to a large instance typically required careful capacity planning and resource governance, additional security considerations, and some extra data consolidation work at the instance level. - -Additionally, instance pools support native VNet integration so you can deploy multiple instance pools and multiple single instances in the same subnet. - -## Key capabilities - -Instance pools provide the following benefits: - -1. Ability to host 2-vCore instances. *\*Only for instances in the instance pools*. -2. Predictable and fast instance deployment time (up to 5 minutes). -3. Minimal IP address allocation. - -The following diagram illustrates an instance pool with multiple managed instances deployed within a virtual network subnet. - -![instance pool with multiple instances](./media/instance-pools-overview/instance-pools1.png) - -Instance pools enable deployment of multiple instances on the same virtual machine, where the virtual machine's compute size is based on the total number of vCores allocated for the pool. This architecture allows *partitioning* of the virtual machine into multiple instances, which can be any supported size, including 2 vCores (2-vCore instances are only available for instances in pools). - -After initial deployment, management operations on instances in a pool are much faster. This is because the deployment or extension of a [virtual cluster](connectivity-architecture-overview.md#high-level-connectivity-architecture) (dedicated set of virtual machines) is not part of provisioning the managed instance. - -Because all instances in a pool share the same virtual machine, the total IP allocation does not depend on the number of instances deployed, which is convenient for deployment in subnets with a narrow IP range. 
- -Each pool has a fixed IP allocation of only nine IP addresses (not including the five IP addresses in the subnet that are reserved for its own needs). For details, see the [subnet size requirements for single instances](vnet-subnet-determine-size.md). - -## Application scenarios - -The following list provides the main use cases where instance pools should be considered: - -- Migration of *a group of SQL Server instances* at the same time, where the majority is a smaller size (for example 2 or 4 vCores). -- Scenarios where *predictable and short instance creation or scaling* is important. For example, deployment of a new tenant in a multi-tenant SaaS application environment that requires instance-level capabilities. -- Scenarios where having a *fixed cost* or *spending limit* is important. For example, running shared dev-test or demo environments of a fixed (or infrequently changing) size, where you periodically deploy managed instances when needed. -- Scenarios where *minimal IP address allocation* in a VNet subnet is important. All instances in a pool are sharing a virtual machine, so the number of allocated IP addresses is lower than in the case of single instances. - -## Architecture - -Instance pools have a similar architecture to regular (*single*) managed instances. To support [deployments within Azure virtual networks](../../virtual-network/virtual-network-for-azure-services.md) and to provide isolation and security for customers, instance pools also rely on [virtual clusters](connectivity-architecture-overview.md#high-level-connectivity-architecture). Virtual clusters represent a dedicated set of isolated virtual machines deployed inside the customer's virtual network subnet. 
- -The main difference between the two deployment models is that instance pools allow multiple SQL Server process deployments on the same virtual machine node, which are resource governed using [Windows job objects](/windows/desktop/ProcThread/job-objects), while single instances are always alone on a virtual machine node. - -The following diagram shows an instance pool and two individual instances deployed in the same subnet and illustrates the main architectural details for both deployment models: - -![Instance pool and two individual instances](./media/instance-pools-overview/instance-pools2.png) - -Every instance pool creates a separate virtual cluster underneath. Instances within a pool and single instances deployed in the same subnet do not share compute resources allocated to SQL Server processes and gateway components, which ensures performance predictability. - -## Resource limitations - -There are several resource limitations regarding instance pools and instances inside pools: - -- Instance pools are available only on Gen5 hardware. -- Managed instances within a pool have dedicated CPU and RAM, so the aggregated number of vCores across all instances must be less than or equal to the number of vCores allocated to the pool. -- All [instance-level limits](resource-limits.md#service-tier-characteristics) apply to instances created within a pool. -- In addition to instance-level limits, there are also two limits imposed *at the instance pool level*: - - Total storage size per pool (8 TB). - - Total number of user databases per pool. This limit depends on the pool vCores value: - - 8 vCores pool supports up to 200 databases, - - 16 vCores pool supports up to 400 databases, - - 24 and larger vCores pool supports up to 500 databases. -- Azure AD authentication can be used after creating or setting a managed instance with the `-AssignIdentity` flag. 
For more information, see [New-AzSqlInstance](/powershell/module/az.sql/new-azsqlinstance) and [Set-AzSqlInstance](/powershell/module/az.sql/set-azsqlinstance). Users can then set an Azure AD admin for the instance by following [Provision Azure AD admin (SQL Managed Instance)](../database/authentication-aad-configure.md#provision-azure-ad-admin-sql-managed-instance). - -Total storage allocation and number of databases across all instances must be lower than or equal to the limits exposed by instance pools. - -- Instance pools support 8, 16, 24, 32, 40, 64, and 80 vCores. -- Managed instances inside pools support 2, 4, 8, 16, 24, 32, 40, 64, and 80 vCores. -- Managed instances inside pools support storage sizes between 32 GB and 8 TB, except: - - 2 vCore instances support sizes between 32 GB and 640 GB, - - 4 vCore instances support sizes between 32 GB and 2 TB. -- Managed instances inside pools have limit of up to 100 user databases per instance, except 2 vCore instances that support up to 50 user databases per instance. - -The [service tier property](resource-limits.md#service-tier-characteristics) is associated with the instance pool resource, so all instances in a pool must be the same service tier as the service tier of the pool. At this time, only the General Purpose service tier is available (see the following section on limitations in the current preview). - -### Public preview limitations - -The public preview has the following limitations: - -- Currently, only the General Purpose service tier is available. -- Instance pools cannot be scaled during the public preview, so careful capacity planning before deployment is important. -- Azure portal support for instance pool creation and configuration is not yet available. All operations on instance pools are supported through PowerShell only. Initial instance deployment in a pre-created pool is also supported through PowerShell only. 
Once deployed into a pool, managed instances can be updated using the Azure portal. -- Managed instances created outside of the pool cannot be moved into an existing pool, and instances created inside a pool cannot be moved outside as a single instance or to another pool. -- [Reserve capacity](../database/reserved-capacity-overview.md) instance pricing is not available. -- Failover groups are not supported for instances in the pool. - -## SQL features supported - -Managed instances created in pools support the same [compatibility levels and features supported in single managed instances](sql-managed-instance-paas-overview.md#sql-features-supported). - -Every managed instance deployed in a pool has a separate instance of SQL Agent. - -Optional features or features that require you to choose specific values (such as instance-level collation, time zone, public endpoint for data traffic, failover groups) are configured at the instance level and can be different for each instance in a pool. - -## Performance considerations - -Although managed instances within pools do have dedicated vCore and RAM, they share local disk (for tempdb usage) and network resources. It's not likely, but it is possible to experience the *noisy neighbor* effect if multiple instances in the pool have high resource consumption at the same time. If you observe this behavior, consider deploying these instances to a bigger pool or as single instances. - -## Security considerations - -Because instances deployed in a pool share the same virtual machine, you may want to consider disabling features that introduce higher security risks, or to firmly control access permissions to these features. For example, CLR integration, native backup and restore, database email, etc. - -## Instance pool support requests - -Create and manage support requests for instance pools in the [Azure portal](https://portal.azure.com). 
- -If you are experiencing issues related to instance pool deployment (creation or deletion), make sure that you specify **Instance Pools** in the **Problem subtype** field. - -![Instance pools support request](./media/instance-pools-overview/support-request.png) - -If you are experiencing issues related to a single managed instance or database within a pool, you should create a regular support ticket for Azure SQL Managed Instance. - -To create larger SQL Managed Instance deployments (with or without instance pools), you may need to obtain a larger regional quota. For more information, see [Request quota increases for Azure SQL Database](../database/quota-increase-request.md). The deployment logic for instance pools compares total vCore consumption *at the pool level* against your quota to determine whether you are allowed to create new resources without further increasing your quota. - -## Instance pool billing - -Instance pools allow scaling compute and storage independently. Customers pay for compute associated with the pool resource measured in vCores, and storage associated with every instance measured in gigabytes (the first 32 GB are free of charge for every instance). - -vCore price for a pool is charged regardless of how many instances are deployed in that pool. - -For the compute price (measured in vCores), two pricing options are available: - - 1. *License included*: Price of SQL Server licenses is included. This is for the customers who choose not to apply existing SQL Server licenses with Software Assurance. - 2. *Azure Hybrid Benefit*: A reduced price that includes Azure Hybrid Benefit for SQL Server. Customers can opt into this price by using their existing SQL Server licenses with Software Assurance. For eligibility and other details, see [Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/). - -Setting different pricing options is not possible for individual instances in a pool. 
All instances in the parent pool must be either at License Included price or Azure Hybrid Benefit price. The license model for the pool can be altered after the pool is created. - -> [!IMPORTANT] -> If you specify a license model for the instance that is different than in the pool, the pool price is used and the instance level value is ignored. - -If you create instance pools on [subscriptions eligible for dev-test benefit](https://azure.microsoft.com/pricing/dev-test/), you automatically receive discounted rates of up to 55 percent on Azure SQL Managed Instance. - -For full details on instance pool pricing, refer to the *instance pools* section on the [SQL Managed Instance pricing page](https://azure.microsoft.com/pricing/details/sql-database/managed/). - -## Next steps - -- To get started with instance pools, see [SQL Managed Instance pools how-to guide](instance-pools-configure.md). -- To learn how to create your first managed instance, see [Quickstart guide](instance-create-quickstart.md). -- For a features and comparison list, see [Azure SQL common features](../database/features-comparison.md). -- For more information about VNet configuration, see [SQL Managed Instance VNet configuration](connectivity-architecture-overview.md). -- For a quickstart that creates a managed instance and restores a database from a backup file, see [Create a managed instance](instance-create-quickstart.md). -- For a tutorial about using Azure Database Migration Service for migration, see [SQL Managed Instance migration using Database Migration Service](../../dms/tutorial-sql-server-to-managed-instance.md). -- For advanced monitoring of SQL Managed Instance database performance with built-in troubleshooting intelligence, see [Monitor Azure SQL Managed Instance using Azure SQL Analytics](../../azure-monitor/insights/azure-sql.md). -- For pricing information, see [SQL Managed Instance pricing](https://azure.microsoft.com/pricing/details/sql-database/managed/). 
\ No newline at end of file diff --git a/articles/azure-sql/managed-instance/job-automation-managed-instance.md b/articles/azure-sql/managed-instance/job-automation-managed-instance.md deleted file mode 100644 index 112b2e7cf7d6e..0000000000000 --- a/articles/azure-sql/managed-instance/job-automation-managed-instance.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -title: Job automation with SQL Agent jobs -titleSuffix: Azure SQL Managed Instance -description: 'Automation options to run Transact-SQL (T-SQL) scripts in Azure SQL Managed Instance' -services: sql-database -ms.service: sql-db-mi -ms.subservice: deployment-configuration -ms.custom: sqldbrb=1 -dev_langs: - - TSQL -ms.topic: conceptual -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: -ms.date: 04/19/2022 ---- -# Automate management tasks using SQL Agent jobs in Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Using [SQL Server Agent](/sql/ssms/agent/sql-server-agent) in SQL Server and [SQL Managed Instance](sql-managed-instance-paas-overview.md), you can create and schedule jobs that could be periodically executed against one or many databases to run Transact-SQL (T-SQL) queries and perform maintenance tasks. This article covers the use of SQL Agent for SQL Managed Instance. - -> [!Note] -> SQL Agent is not available in Azure SQL Database or Azure Synapse Analytics. Instead, we recommend [Job automation with Elastic Jobs](../database/job-automation-overview.md). - -### SQL Agent job limitations in SQL Managed Instance - -It is worth noting the differences between SQL Agent available in SQL Server and as part of SQL Managed Instance. For more on the supported feature differences between SQL Server and SQL Managed Instance, see [Azure SQL Managed Instance T-SQL differences from SQL Server](/azure/azure-sql/managed-instance/transact-sql-tsql-differences-sql-server#sql-server-agent). 
- -Some of the SQL Agent features that are available in SQL Server are not supported in SQL Managed Instance: - -- SQL Agent settings are read only. - - The system stored procedure `sp_set_agent_properties` is not supported. -- Enabling/disabling SQL Agent is currently not supported. SQL Agent is always running. -- Notifications are partially supported: - - Pager is not supported. - - NetSend is not supported. - - Alerts are not supported. -- Proxies are not supported. -- Eventlog is not supported. -- Job schedule trigger based on an idle CPU is not supported. - -## When to use SQL Agent jobs - -There are several scenarios when you could use SQL Agent jobs: - -- Automate management tasks and schedule them to run every weekday, after hours, etc. - - Deploy schema changes, credentials management, performance data collection or tenant (customer) telemetry collection. - - Update reference data (information common across all databases), load data from Azure Blob storage. Microsoft recommends using [SHARED ACCESS SIGNATURE authentication to authenticate to Azure Blob storage](/sql/t-sql/statements/bulk-insert-transact-sql#f-importing-data-from-a-file-in-azure-blob-storage). - - Common maintenance tasks including `DBCC CHECKDB` to ensure data integrity or index maintenance to improve query performance. Configure jobs to execute across a collection of databases on a recurring basis, such as during off-peak hours. - - Collect query results from a set of databases into a central table on an on-going basis. Performance queries can be continually executed and configured to trigger additional tasks to be executed. -- Collect data for reporting - - Aggregate data from a collection of databases into a single destination table. - - Execute longer running data processing queries across a large set of databases, for example the collection of customer telemetry. Results are collected into a single destination table for further analysis. 
-- Data movements - - Create jobs that replicate changes made in your databases to other databases or collect updates made in remote databases and apply changes in the database. - - Create jobs that load data from or to your databases using SQL Server Integration Services (SSIS). - -## SQL Agent jobs in SQL Managed Instance - -SQL Agent Jobs are executed by the SQL Agent service that continues to be used for task automation in SQL Server and SQL Managed Instance. - -SQL Agent Jobs are a specified series of T-SQL scripts against your database. Use jobs to define an administrative task that can be run one or more times and monitored for success or failure. - -A job can run on one local server or on multiple remote servers. SQL Agent Jobs are an internal Database Engine component that is executed within the SQL Managed Instance service. - -There are several key concepts in SQL Agent Jobs: - -- **Job steps** set of one or many steps that should be executed within the job. For every job step you can define retry strategy and the action that should happen if the job step succeeds or fails. -- **Schedules** define when the job should be executed. -- **Notifications** enable you to define rules that will be used to notify operators via email once the job completes. - -### SQL Agent job steps - -SQL Agent Job steps are sequences of actions that SQL Agent should execute. Every step has the following step that should be executed if the step succeeds or fails, number of retries in a case of failure. 
- -SQL Agent enables you to create different types of job steps, such as Transact-SQL job steps that execute a single Transact-SQL batch against the database, or OS command/PowerShell steps that can execute custom OS script, [SSIS job steps](../../data-factory/how-to-invoke-ssis-package-managed-instance-agent.md) that enable you to load data using SSIS runtime, or [replication](../managed-instance/replication-transactional-overview.md) steps that can publish changes from your database to other databases. - -> [!Note] -> For more information on leveraging the Azure SSIS Integration Runtime with SSISDB hosted by SQL Managed Instance, see [Use Azure SQL Managed Instance with SQL Server Integration Services (SSIS) in Azure Data Factory](../../data-factory/how-to-use-sql-managed-instance-with-ir.md). - -[Transactional replication](../managed-instance/replication-transactional-overview.md) can replicate the changes from your tables into other databases in SQL Managed Instance, Azure SQL Database, or SQL Server. For information, see [Configure replication in Azure SQL Managed Instance](/azure/azure-sql/managed-instance/replication-between-two-instances-configure-tutorial). - -Other types of job steps are not currently supported in SQL Managed Instance, including: - -- Merge replication job step is not supported. -- Queue Reader is not supported. -- Analysis Services are not supported - -### SQL Agent job schedules - -A schedule specifies when a job runs. More than one job can run on the same schedule, and more than one schedule can apply to the same job. - -A schedule can define the following conditions for the time when a job runs: - -- Whenever SQL Server Agent starts. Job is activated after every failover. -- One time, at a specific date and time, which is useful for delayed execution of some job. -- On a recurring schedule. - -For more information on scheduling a SQL Agent job, see [Schedule a Job](/sql/ssms/agent/schedule-a-job). 
- -> [!Note] -> Azure SQL Managed Instance currently does not enable you to start a job when the CPU is idle. - -### SQL Agent job notifications - -SQL Agent jobs enable you to get notifications when the job finishes successfully or fails. You can receive notifications via email. - -If it isn't already enabled, first you would need to configure [the Database Mail feature](/sql/relational-databases/database-mail/database-mail) on SQL Managed Instance: - -```sql -GO -EXEC sp_configure 'show advanced options', 1; -GO -RECONFIGURE; -GO -EXEC sp_configure 'Database Mail XPs', 1; -GO -RECONFIGURE -``` - -As an example exercise, set up the email account that will be used to send the email notifications. Assign the account to the email profile called `AzureManagedInstance_dbmail_profile`. To send e-mail using SQL Agent jobs in SQL Managed Instance, there should be a profile that must be called `AzureManagedInstance_dbmail_profile`. Otherwise, SQL Managed Instance will be unable to send emails via SQL Agent. - -> [!NOTE] -> For the mail server, we recommend you use authenticated SMTP relay services to send email. These relay services typically connect through TCP ports 25 or 587 for connections over TLS, or port 465 for SSL connections, however Database Mail can be configured to use any port. These ports require a new outbound rule in your managed instance's network security group. These services are used to maintain IP and domain reputation to minimize the possibility that external domains reject your messages or put them to the SPAM folder. Consider an authenticated SMTP relay service already in your on-premises servers. In Azure, [SendGrid](https://sendgrid.com/partners/azure/) is one such SMTP relay service, but there are others. 
- -Use the following sample script to create a Database Mail account and profile, then associate them together: - -```sql --- Create a Database Mail account -EXECUTE msdb.dbo.sysmail_add_account_sp - @account_name = 'SQL Agent Account', - @description = 'Mail account for Azure SQL Managed Instance SQL Agent system.', - @email_address = '$(loginEmail)', - @display_name = 'SQL Agent Account', - @mailserver_name = '$(mailserver)' , - @username = '$(loginEmail)' , - @password = '$(password)'; - --- Create a Database Mail profile -EXECUTE msdb.dbo.sysmail_add_profile_sp - @profile_name = 'AzureManagedInstance_dbmail_profile', - @description = 'E-mail profile used for messages sent by Managed Instance SQL Agent.'; - --- Add the account to the profile -EXECUTE msdb.dbo.sysmail_add_profileaccount_sp - @profile_name = 'AzureManagedInstance_dbmail_profile', - @account_name = 'SQL Agent Account', - @sequence_number = 1; -``` - -Test the Database Mail configuration via T-SQL using the [sp_send_db_mail](/sql/relational-databases/system-stored-procedures/sp-send-dbmail-transact-sql) system stored procedure: - -```sql -DECLARE @body VARCHAR(4000) = 'The email is sent from ' + @@SERVERNAME; -EXEC msdb.dbo.sp_send_dbmail - @profile_name = 'AzureManagedInstance_dbmail_profile', - @recipients = 'ADD YOUR EMAIL HERE', - @body = 'Add some text', - @subject = 'Azure SQL Instance - test email'; -``` - -You can notify the operator that something happened with your SQL Agent jobs. An operator defines contact information for an individual responsible for the maintenance of one or more instances in SQL Managed Instance. Sometimes, operator responsibilities are assigned to one individual. - -In systems with multiple instances in SQL Managed Instance or SQL Server, many individuals can share operator responsibilities. An operator does not contain security information, and does not define a security principal. 
Ideally, an operator is not an individual whose responsibilities may change, but an email distribution group. - -You can [create operators](/sql/relational-databases/system-stored-procedures/sp-add-operator-transact-sql) using SQL Server Management Studio (SSMS) or the Transact-SQL script shown in the following example: - -```sql -EXEC msdb.dbo.sp_add_operator - @name=N'AzureSQLTeam', - @enabled=1, - @email_address=N'AzureSQLTeamn@contoso.com'; -``` - -Confirm the email's success or failure via the [Database Mail Log](/sql/relational-databases/database-mail/database-mail-log-and-audits) in SSMS. - -You can then [modify any SQL Agent job](/sql/relational-databases/system-stored-procedures/sp-update-job-transact-sql) and assign operators that will be notified via email if the job completes, fails, or succeeds using SSMS or the following T-SQL script: - -```sql -EXEC msdb.dbo.sp_update_job @job_name=N'Load data using SSIS', - @notify_level_email=3, -- Options are: 1 on succeed, 2 on failure, 3 on complete - @notify_email_operator_name=N'AzureSQLTeam'; -``` - -### SQL Agent job history - -SQL Managed Instance currently doesn't allow you to change any SQL Agent properties because they are stored in the underlying registry values. This means options for adjusting the Agent retention policy for job history records are fixed at the default of 1000 total records and max 100 history records per job. - -For more information, see [View SQL Agent job history](/sql/ssms/agent/view-the-job-history). - -### SQL Agent fixed database role membership - -If users linked to non-sysadmin logins are added to any of the three SQL Agent fixed database roles in the `msdb` system database, there exists an issue in which explicit EXECUTE permissions need to be granted to three system stored procedures in the master database. If this issue is encountered, the error message "The EXECUTE permission was denied on the object (Microsoft SQL Server, Error: 229)" will be shown. 
- -Once you add users to a SQL Agent fixed database role (SQLAgentUserRole, SQLAgentReaderRole, or SQLAgentOperatorRole) in `msdb`, for each of the user's logins added to these roles, execute the below T-SQL script to explicitly grant EXECUTE permissions to the system stored procedures listed. This example assumes that the user name and login name are the same: - -```sql -USE [master] -GO -CREATE USER [login_name] FOR LOGIN [login_name]; -GO -GRANT EXECUTE ON master.dbo.xp_sqlagent_enum_jobs TO [login_name]; -GRANT EXECUTE ON master.dbo.xp_sqlagent_is_starting TO [login_name]; -GRANT EXECUTE ON master.dbo.xp_sqlagent_notify TO [login_name]; -``` - -## Learn more - -- [What is Azure SQL Managed Instance?](../managed-instance/sql-managed-instance-paas-overview.md) -- [What's new in Azure SQL Managed Instance?](doc-changes-updates-release-notes-whats-new.md) -- [Azure SQL Managed Instance T-SQL differences from SQL Server](/azure/azure-sql/managed-instance/transact-sql-tsql-differences-sql-server#sql-server-agent) -- [Features comparison: Azure SQL Database and Azure SQL Managed Instance](/azure/azure-sql/database/features-comparison) - - -## Next steps - -- [Configure Database Mail](/sql/relational-databases/database-mail/configure-database-mail) -- [Troubleshoot outbound SMTP connectivity problems in Azure](/azure/virtual-network/troubleshoot-outbound-smtp-connectivity) diff --git a/articles/azure-sql/managed-instance/log-replay-service-migrate.md b/articles/azure-sql/managed-instance/log-replay-service-migrate.md deleted file mode 100644 index c694042c22fcb..0000000000000 --- a/articles/azure-sql/managed-instance/log-replay-service-migrate.md +++ /dev/null @@ -1,489 +0,0 @@ ---- -title: Migrate databases to SQL Managed Instance using Log Replay Service -description: Learn how to migrate databases from SQL Server to SQL Managed Instance by using Log Replay Service (LRS). 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: migration -ms.custom: devx-track-azurecli, devx-track-azurepowershell -ms.topic: how-to -author: danimir -ms.author: danil -ms.reviewer: mathoma -ms.date: 03/29/2022 ---- - -# Migrate databases from SQL Server to SQL Managed Instance by using Log Replay Service (Preview) -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article explains how to manually configure database migration from SQL Server 2008-2019 to Azure SQL Managed Instance by using Log Replay Service (LRS), currently in public preview. LRS is a free of charge cloud service enabled for Azure SQL Managed Instance based on SQL Server log-shipping technology. - -[Azure Database Migration Service](../../dms/tutorial-sql-server-to-managed-instance.md) and LRS use the same underlying migration technology and APIs. LRS further enables complex custom migrations and hybrid architectures between on-premises SQL Server and SQL Managed Instance. - -## When to use Log Replay Service - -When you can't use Azure Database Migration Service for migration, you can use LRS directly with PowerShell, Azure CLI cmdlets, or APIs to manually build and orchestrate database migrations to SQL Managed Instance. - -Consider using LRS in the following cases: -- You need more control for your database migration project. -- There's little tolerance for downtime during migration cutover. -- The Database Migration Service executable file can't be installed to your environment. -- The Database Migration Service executable file doesn't have file access to your database backups. -- No access to the host OS is available, or there are no administrator privileges. -- You can't open network ports from your environment to Azure. -- Network throttling, or proxy blocking issues exist in your environment. -- Backups are stored directly to Azure Blob Storage through the `TO URL` option. -- You need to use differential backups. 
- -> [!NOTE] -> We recommend automating the migration of databases from SQL Server to SQL Managed Instance by using Database Migration Service. This service uses the same LRS cloud service at the back end, with log shipping in `NORECOVERY` mode. Consider manually using LRS to orchestrate migrations when Database Migration Service doesn't fully support your scenarios. - -## How it works - -Building a custom solution to migrate databases to the cloud with LRS requires several orchestration steps, as shown in the diagram and a table later in this section. - -Migration consists of making database backups on SQL Server with `CHECKSUM` enabled, and copying backup files to Azure Blob Storage. Full, log, and differential backups are supported. LRS cloud service is used to restore backup files from Azure Blob Storage to SQL Managed Instance. Blob Storage is intermediary storage between SQL Server and SQL Managed Instance. - -LRS monitors Blob Storage for any new differential or log backups added after the full backup has been restored. LRS then automatically restores these new files. You can use the service to monitor the progress of backup files being restored to SQL Managed Instance, and stop the process if necessary. - -LRS does not require a specific naming convention for backup files. It scans all files placed on Blob Storage and constructs the backup chain from reading the file headers only. Databases are in a **restoring** state during the migration process. Databases are restored in [NORECOVERY](/sql/t-sql/statements/restore-statements-transact-sql#comparison-of-recovery-and-norecovery) mode, so they can't be used for read or write workloads until the migration process completes. - -If you're migrating several databases, you need to: - -- Place backup files for each database in a separate folder on Azure Blob Storage in a flat-file structure. For example, use separate database folders: `bolbcontainer/database1/files`, `blobcontainer/database2/files`, etc. 
-- Don't use nested folders inside database folders as this structure is not supported. For example, do not use subfolders: `blobcontainer/database1/subfolder/files`. -- Start LRS separately for each database. -- Specify different URI paths to separate database folders on Azure Blob Storage. - -You can start LRS in either *autocomplete* or *continuous* mode. When you start it in autocomplete mode, the migration will complete automatically when the last of the specified backup files have been restored. When you start LRS in continuous mode, the service will continuously restore any new backup files added, and the migration completes during manual cutover only. - -We recommend that you manually cut over after the final log-tail backup is shown as restored on SQL Managed Instance. The final cutover step makes the database come online and available for read and write use on SQL Managed Instance. - -After LRS is stopped, either automatically through autocomplete, or manually through cutover, you can't resume the restore process for a database that was brought online on SQL Managed Instance. For example, once migration completes, you are no longer able to restore additional differential backups for an online database. To restore more backup files after migration completes, you need to delete the database from the managed instance and restart the migration from the beginning. - -:::image type="content" source="./media/log-replay-service-migrate/log-replay-service-conceptual.png" alt-text="Diagram that explains the Log Replay Service orchestration steps for SQL Managed Instance." border="false"::: - -| Operation | Details | -| :----------------------------- | :------------------------- | -| **1. Copy database backups from SQL Server to Blob Storage**. 
| Copy full, differential, and log backups from SQL Server to a Blob Storage container by using [AzCopy](../../storage/common/storage-use-azcopy-v10.md) or [Azure Storage Explorer](https://azure.microsoft.com/features/storage-explorer/).

    Use any file names. LRS doesn't require a specific file-naming convention.

    Use a separate folder for each database when migrating several databases. | -| **2. Start LRS in the cloud**. | You can start the service with PowerShell ([start-azsqlinstancedatabaselogreplay](/powershell/module/az.sql/start-azsqlinstancedatabaselogreplay)) or the Azure CLI ([az_sql_midb_log_replay_start cmdlets](/cli/azure/sql/midb/log-replay#az-sql-midb-log-replay-start)).

    Start LRS separately for each database that points to a backup folder on Blob Storage.

    After the service starts, it will take backups from the Blob Storage container and start restoring them to SQL Managed Instance.

    When started in continuous mode, LRS restores all the backups initially uploaded and then watches for any new files uploaded to the folder. The service will continuously apply logs based on the log sequence number (LSN) chain until it's stopped manually. | -| **2.1. Monitor the operation's progress**. | You can monitor progress of the restore operation with PowerShell ([get-azsqlinstancedatabaselogreplay](/powershell/module/az.sql/get-azsqlinstancedatabaselogreplay)) or the Azure CLI ([az_sql_midb_log_replay_show cmdlets](/cli/azure/sql/midb/log-replay#az-sql-midb-log-replay-show)). | -| **2.2. Stop the operation if needed**. | If you need to stop the migration process, use PowerShell ([stop-azsqlinstancedatabaselogreplay](/powershell/module/az.sql/stop-azsqlinstancedatabaselogreplay)) or the Azure CLI ([az_sql_midb_log_replay_stop](/cli/azure/sql/midb/log-replay#az-sql-midb-log-replay-stop)).

    Stopping the operation deletes the database that you're restoring to SQL Managed Instance. After you stop an operation, you can't resume LRS for a database. You need to restart the migration process from the beginning. | -| **3. Cut over to the cloud when you're ready**. | Stop the application and workload. Take the last log-tail backup and upload it to Azure Blob Storage.

    Complete the cutover by initiating an LRS `complete` operation with PowerShell ([complete-azsqlinstancedatabaselogreplay](/powershell/module/az.sql/complete-azsqlinstancedatabaselogreplay)) or the Azure CLI [az_sql_midb_log_replay_complete](/cli/azure/sql/midb/log-replay#az-sql-midb-log-replay-complete). This operation stops LRS and brings the database online for read and write workloads on SQL Managed Instance.

    Repoint the application connection string from SQL Server to SQL Managed Instance. You will need to orchestrate this step yourself, either through a manual connection string change in your application, or automatically (for example, if your application can read the connection string from a property, or a database). | - -## Getting started - -Consider the requirements in this section to get started with using LRS to migrate. - -### SQL Server - -Make sure you have the following requirements for SQL Server: - -- SQL Server versions 2008 to 2019 -- Full backup of databases (one or multiple files) -- Differential backup (one or multiple files) -- Log backup (not split for a transaction log file) -- `CHECKSUM` enabled for backups (mandatory) - -### Azure - -Make sure you have the following requirements for Azure: - -- PowerShell Az.SQL module version 2.16.0 or later ([installed](https://www.powershellgallery.com/packages/Az.Sql/) or accessed through [Azure Cloud Shell](/azure/cloud-shell/)) -- Azure CLI version 2.19.0 or later ([installed](/cli/azure/install-azure-cli)) -- Azure Blob Storage container provisioned -- Shared access signature (SAS) security token with read and list permissions generated for the Blob Storage container - -### Azure RBAC permissions - -Running LRS through the provided clients requires one of the following Azure roles: -- Subscription Owner role -- [SQL Managed Instance Contributor](../../role-based-access-control/built-in-roles.md#sql-managed-instance-contributor) role -- Custom role with the following permission: `Microsoft.Sql/managedInstances/databases/*` - -## Requirements - -Please ensure the following requirements are met: -- Use the full recovery model on SQL Server (mandatory). -- Use `CHECKSUM` for backups on SQL Server (mandatory). -- Place backup files for an individual database inside a separate folder in a flat-file structure (mandatory). Nested folders inside database folders are not supported. 
-- Plan to complete the migration within 36 hours after you start LRS (mandatory). This is a grace period during which system-managed software patches are postponed. - -## Best practices - -We recommend the following best practices: -- Run [Data Migration Assistant](/sql/dma/dma-overview) to validate that your databases are ready to be migrated to SQL Managed Instance. -- Split full and differential backups into multiple files, instead of using a single file. -- Enable backup compression to help the network transfer speeds. -- Use Cloud Shell to run PowerShell or CLI scripts, because it will always be updated to the latest cmdlets released. - -> [!IMPORTANT] -> - You can't use databases being restored through LRS until the migration process completes. -> - LRS doesn't support read-only access to databases during the migration. -> - After the migration completes, the migration process is finalized and cannot be resumed with additional differential backups. - -## Steps to migrate - -To migrate using LRS, follow the steps in this section. - -### Make database backups on SQL Server - -You can make database backups on SQL Server by using either of the following options: - -- Back up to the local disk storage, and then upload files to Azure Blob Storage, if your environment restricts direct backups to Blob Storage. -- Back up directly to Blob Storage with the `TO URL` option in Transact-SQL (T-SQL), if your environment and security procedures allow it. - -Set databases that you want to migrate to the full recovery model to allow log backups. - -```SQL --- To permit log backups, before the full database backup, modify the database to use the full recovery -USE master -ALTER DATABASE SampleDB -SET RECOVERY FULL -GO -``` - -To manually make full, differential, and log backups of your database to local storage, use the following sample T-SQL scripts. Ensure the `CHECKSUM` option is enabled, as it's mandatory for LRS. 
- - -The following example takes a full database backup to the local disk: - -```SQL --- Take full database backup to local disk -BACKUP DATABASE [SampleDB] -TO DISK='C:\BACKUP\SampleDB_full.bak' -WITH INIT, COMPRESSION, CHECKSUM -GO -``` - -The following example takes a differential backup to the local disk: - -```sql --- Take differential database backup to local disk -BACKUP DATABASE [SampleDB] -TO DISK='C:\BACKUP\SampleDB_diff.bak' -WITH DIFFERENTIAL, COMPRESSION, CHECKSUM -GO -``` - -The following example takes a transaction log backup to the local disk: - -```sql --- Take transactional log backup to local disk -BACKUP LOG [SampleDB] -TO DISK='C:\BACKUP\SampleDB_log.trn' -WITH COMPRESSION, CHECKSUM -GO -``` - -### Create a storage account - -Azure Blob Storage is used as intermediary storage for backup files between SQL Server and SQL Managed Instance. To create a new storage account and a blob container inside the storage account, follow these steps: - -1. [Create a storage account](../../storage/common/storage-account-create.md?tabs=azure-portal). -2. [Crete a blob container](../../storage/blobs/storage-quickstart-blobs-portal.md) inside the storage account. - -### Copy backups from SQL Server to Blob Storage - -When migrating databases to a managed instance by using LRS, you can use the following approaches to upload backups to Blob Storage: - -- SQL Server native [BACKUP TO URL](/sql/relational-databases/backup-restore/sql-server-backup-to-url) -- [AzCopy](../../storage/common/storage-use-azcopy-v10.md) or [Azure Storage Explorer](https://azure.microsoft.com/features/storage-explorer) to upload backups to a blob container -- Storage Explorer in the Azure portal - -> [!NOTE] -> To migrate multiple databases using the same Azure Blob Storage container, place all backup files of an individual database into a separate folder inside the container. Use flat-file structure for each database folder, as nested folders are not supported. 
-> - -### Make backups from SQL Server directly to Blob Storage - -If your corporate and network policies allow it, take backups from SQL Server directly to Blob Storage by using the SQL Server native [BACKUP TO URL](/sql/relational-databases/backup-restore/sql-server-backup-to-url) option. If you can use this option, you don't need to take backups to local storage and upload them to Blob Storage. - -As the first step, this operation requires you to generate an SAS authentication token for Blob Storage and then import the token to SQL Server. The second step is to make backups with the `TO URL` option in T-SQL. Ensure that all backups are made with the `CHEKSUM` option enabled. - -For reference, the following sample code makes backups to Blob Storage. This example does not include instructions on how to import the SAS token. You can find detailed instructions, including how to generate and import the SAS token to SQL Server, in the tutorial [Use Azure Blob Storage with SQL Server](/sql/relational-databases/tutorial-use-azure-blob-storage-service-with-sql-server-2016#1---create-stored-access-policy-and-shared-access-storage). 
- -The following example takes a full database backup to a URL: - -```SQL --- Take a full database backup to a URL -BACKUP DATABASE [SampleDB] -TO URL = 'https://.blob.core.windows.net///SampleDB_full.bak' -WITH INIT, COMPRESSION, CHECKSUM -GO -``` - -The following example takes a differential database backup to a URL: - -```sql --- Take a differential database backup to a URL -BACKUP DATABASE [SampleDB] -TO URL = 'https://.blob.core.windows.net///SampleDB_diff.bak' -WITH DIFFERENTIAL, COMPRESSION, CHECKSUM -GO -``` - -The following example takes a transaction log backup to a URL: - -```sql --- Take a transactional log backup to a URL -BACKUP LOG [SampleDB] -TO URL = 'https://.blob.core.windows.net///SampleDB_log.trn' -WITH COMPRESSION, CHECKSUM -``` - -### Migration of multiple databases - -If migrating multiple databases using the same Azure Blob Storage container, you must place backup files for different databases in separate folders inside the container. All backup files for a single database must be placed in a flat-file structure inside a database folder, and the folders cannot be nested, as it's not supported. - -Below is an example of folder structure inside Azure Blob Storage container required to migrate multiple databases using LRS. - -```URI --- Place all backup files for database 1 in a separate "database1" folder in a flat-file structure. --- Do not use nested folders inside database1 folder. -https://.blob.core.windows.net/// - --- Place all backup files for database 2 in a separate "database2" folder in a flat-file structure. --- Do not use nested folders inside database2 folder. -https://.blob.core.windows.net/// - --- Place all backup files for database 3 in a separate "database3" folder in a flat-file structure. --- Do not use nested folders inside database3 folder. 
-https://.blob.core.windows.net/// -``` - -### Generate a Blob Storage SAS authentication token for LRS - -Azure Blob Storage is used as intermediary storage for backup files between SQL Server and SQL Managed Instance. Generate an SAS authentication token for LRS with only list and read permissions. The token enables LRS to access Blob Storage and uses the backup files to restore them to SQL Managed Instance. - -Follow these steps to generate the token: - -1. Open **Storage Explorer** from the Azure portal. -2. Expand **Blob Containers**. -3. Right-click the blob container and select **Get Shared Access Signature**. - - :::image type="content" source="./media/log-replay-service-migrate/lrs-sas-token-01.png" alt-text="Screenshot that shows selections for generating an S A S authentication token."::: - -4. Select the time frame for token expiration. Ensure the token is valid for the duration of your migration. -5. Select the time zone for the token: UTC or your local time. - - > [!IMPORTANT] - > The time zone of the token and your managed instance might mismatch. Ensure that the SAS token has the appropriate time validity, taking time zones into consideration. To account for time zone differences, set the validity time frame **FROM** well before your migration window starts, and the **TO** time frame well after you expect your migration to complete. - -6. Select **Read** and **List** permissions only. - - > [!IMPORTANT] - > Don't select any other permissions. If you do, LRS won't start. This security requirement is by-design. - -7. Select **Create**. - - :::image type="content" source="./media/log-replay-service-migrate/lrs-sas-token-02.png" alt-text="Screenshot that shows selections for S A S token expiration, time zone, and permissions, along with the Create button."::: - -The SAS authentication is generated with the time validity that you specified. You need the URI version of the token, as shown in the following screenshot. 
- -:::image type="content" source="./media/log-replay-service-migrate/lrs-generated-uri-token.png" alt-text="Screenshot that shows an example of the U R I version of an S A S token."::: - - > [!NOTE] - > Using SAS tokens created with permissions set through defining a [stored access policy](/rest/api/storageservices/define-stored-access-policy) is not supported at this time. Follow the instructions in this article to manually specify **Read** and **List** permissions for the SAS token. - -### Copy parameters from the SAS token - -Before you use the SAS token to start LRS, you need to understand its structure. The URI of the generated SAS token consists of two parts separated with a question mark (`?`), as shown in this example: - -:::image type="content" source="./media/log-replay-service-migrate/lrs-token-structure.png" alt-text="Example U R I for a generated S A S token for Log Replay Service." border="false"::: - -The first part, starting with `https://` until the question mark (`?`), is used for the `StorageContainerURI` parameter that's fed as the input to LRS. It gives LRS information about the folder where the database backup files are stored. - -The second part, starting after the question mark (`?`) and going all the way until the end of the string, is the `StorageContainerSasToken` parameter. This part is the actual signed authentication token, which is valid for the duration of the specified time. This part does not necessarily need to start with `sp=` as shown in the example. Your case may differ. - -Copy the parameters as follows: - -1. Copy the first part of the token, starting from `https://` all the way until the question mark (`?`). Use it as the `StorageContainerUri` parameter in PowerShell or the Azure CLI when starting LRS. - - :::image type="content" source="./media/log-replay-service-migrate/lrs-token-uri-copy-part-01.png" alt-text="Screenshot that shows copying the first part of the token."::: - -2. 
Copy the second part of the token, starting after the question mark (`?`) all the way until the end of the string. Use it as the `StorageContainerSasToken` parameter in PowerShell or the Azure CLI when starting LRS. - - :::image type="content" source="./media/log-replay-service-migrate/lrs-token-uri-copy-part-02.png" alt-text="Screenshot that shows copying the second part of the token."::: - -> [!NOTE] -> Don't include the question mark (`?`) when you copy either part of the token. -> - -### Log in to Azure and select a subscription - -Use the following PowerShell cmdlet to log in to Azure: - -```powershell -Login-AzAccount -``` - -Select the appropriate subscription where your managed instance resides by using the following PowerShell cmdlet: - -```powershell -Select-AzSubscription -SubscriptionId -``` - -## Start the migration - -You start the migration by starting LRS. You can start the service in either autocomplete or continuous mode. - -When you use autocomplete mode, the migration completes automatically when the last of the specified backup files have been restored. This option requires the start command to specify the filename of the last backup file. - -When you use continuous mode, the service continuously restores any new backup files that were added. The migration only completes during manual cutover. - -> [!NOTE] -> When migrating multiple databases, LRS must be started separately for each database pointing to the full URI path of Azure Blob storage container and the individual database folder. -> - -### Start LRS in autocomplete mode - -To start LRS in autocomplete mode, use PowerShell or Azure CLI commands. Specify the last backup file name by using the `-LastBackupName` parameter. Upon restoring the last of the specified backup files, the service automatically initiates a cutover. 
- -The following PowerShell example starts LRS in autocomplete mode: - -```PowerShell -Start-AzSqlInstanceDatabaseLogReplay -ResourceGroupName "ResourceGroup01" ` - -InstanceName "ManagedInstance01" ` - -Name "ManagedDatabaseName" ` - -Collation "SQL_Latin1_General_CP1_CI_AS" ` - -StorageContainerUri "https://.blob.core.windows.net//" ` - -StorageContainerSasToken "sv=2019-02-02&ss=b&srt=sco&sp=rl&se=2023-12-02T00:09:14Z&st=2019-11-25T16:09:14Z&spr=https&sig=92kAe4QYmXaht%2Fgjocqwerqwer41s%3D" ` - -AutoCompleteRestore ` - -LastBackupName "last_backup.bak" -``` - -The following Azure CLI example starts LRS in autocomplete mode: - -```CLI -az sql midb log-replay start -g mygroup --mi myinstance -n mymanageddb -a --last-bn "backup.bak" - --storage-uri "https://.blob.core.windows.net//" - --storage-sas "sv=2019-02-02&ss=b&srt=sco&sp=rl&se=2023-12-02T00:09:14Z&st=2019-11-25T16:09:14Z&spr=https&sig=92kAe4QYmXaht%2Fgjocqwerqwer41s%3D" -``` - -### Start LRS in continuous mode - -The following PowerShell example starts LRS in continuous mode: - -```PowerShell -Start-AzSqlInstanceDatabaseLogReplay -ResourceGroupName "ResourceGroup01" ` - -InstanceName "ManagedInstance01" ` - -Name "ManagedDatabaseName" ` - -Collation "SQL_Latin1_General_CP1_CI_AS" -StorageContainerUri "https://.blob.core.windows.net//" ` - -StorageContainerSasToken "sv=2019-02-02&ss=b&srt=sco&sp=rl&se=2023-12-02T00:09:14Z&st=2019-11-25T16:09:14Z&spr=https&sig=92kAe4QYmXaht%2Fgjocqwerqwer41s%3D" -``` - -The following Azure CLI example starts LRS in continuous mode: - -```CLI -az sql midb log-replay start -g mygroup --mi myinstance -n mymanageddb - --storage-uri "https://.blob.core.windows.net//" - --storage-sas "sv=2019-02-02&ss=b&srt=sco&sp=rl&se=2023-12-02T00:09:14Z&st=2019-11-25T16:09:14Z&spr=https&sig=92kAe4QYmXaht%2Fgjocqwerqwer41s%3D" -``` - -PowerShell and CLI clients that start LRS in continuous mode are synchronous. 
This means the client waits for the API response to report on success or failure to start the job. - -During this wait, the command won't return control to the command prompt. If you're scripting the migration experience, and you need the LRS start command to give back control immediately to continue with rest of the script, you can run PowerShell as a background job with the `-AsJob` switch. For example: - -```PowerShell -$lrsjob = Start-AzSqlInstanceDatabaseLogReplay -AsJob -``` - -When you start a background job, a job object returns immediately, even if the job takes an extended time to complete. You can continue to work in the session without interruption while the job runs. For details on running PowerShell as a background job, see the [PowerShell Start-Job](/powershell/module/microsoft.powershell.core/start-job#description) documentation. - -Similarly, to start an Azure CLI command on Linux as a background process, use the ampersand (`&`) at the end of the LRS start command: - -```CLI -az sql midb log-replay start & -``` - -> [!IMPORTANT] -> After you start LRS, any system-managed software patches are halted for 36 hours. After this window, the next automated software patch will automatically stop LRS. If that happens, you can't resume migration and need to restart it from the beginning. - -## Monitor migration progress - -To monitor migration progress through PowerShell, use the following command: - -```PowerShell -Get-AzSqlInstanceDatabaseLogReplay -ResourceGroupName "ResourceGroup01" ` - -InstanceName "ManagedInstance01" ` - -Name "ManagedDatabaseName" -``` - -To monitor migration progress through the Azure CLI, use the following command: - -```CLI -az sql midb log-replay show -g mygroup --mi myinstance -n mymanageddb -``` - -## Stop the migration - -If you need to stop the migration, use PowerShell or the Azure CLI. Stopping the migration deletes the restoring database on SQL Managed Instance, so resuming the migration won't be possible. 
- -To stop the migration process through PowerShell, use the following command: - -```PowerShell -Stop-AzSqlInstanceDatabaseLogReplay -ResourceGroupName "ResourceGroup01" ` - -InstanceName "ManagedInstance01" ` - -Name "ManagedDatabaseName" -``` - -To stop the migration process through the Azure CLI, use the following command: - -```CLI -az sql midb log-replay stop -g mygroup --mi myinstance -n mymanageddb -``` - -## Complete the migration (continuous mode) - -If you started LRS in continuous mode, after you've ensured that all backups have been restored, initiating the cutover will complete the migration. After the cutover, the database is migrated and ready for read and write access. - -To complete the migration process in LRS continuous mode through PowerShell, use the following command: - -```PowerShell -Complete-AzSqlInstanceDatabaseLogReplay -ResourceGroupName "ResourceGroup01" ` --InstanceName "ManagedInstance01" ` --Name "ManagedDatabaseName" ` --LastBackupName "last_backup.bak" -``` - -To complete the migration process in LRS continuous mode through the Azure CLI, use the following command: - -```CLI -az sql midb log-replay complete -g mygroup --mi myinstance -n mymanageddb --last-backup-name "backup.bak" -``` - -## Limitations - -Consider the following limitations of LRS: - -- During the migration process, databases being migrated cannot be used for read-only access on SQL Managed Instance. -- System-managed software patches are blocked for 36 hours once the LRS has been started. After this time window expires, the next software maintenance update stops LRS. You will need to restart the LRS migration from the beginning. -- LRS requires databases on SQL Server to be backed up with the `CHECKSUM` option enabled. -- The SAS token that LRS uses must be generated for the entire Azure Blob Storage container, and it must have **Read** and **List** permissions only. 
For example, if you grant **Read**, **List** and **Write** permissions, LRS will not be able to start because of the extra **Write** permission. -- Using SAS tokens created with permissions set through defining a [stored access policy](/rest/api/storageservices/define-stored-access-policy) is not supported at this time. Follow the instructions in this article to manually specify **Read** and **List** permissions for the SAS token. -- Backup files containing % and $ characters in the file name cannot be consumed by LRS. Consider renaming such file names. -- Backup files for different databases must be placed in separate folders on Blob Storage in a flat-file structure. Nested folders inside individual database folders are not supported. -- LRS must be started separately for each database pointing to the full URI path containing an individual database folder. -- LRS can support up to 100 simultaneous restore processes per single managed instance. - -> [!NOTE] -> If you require the database to be R/O accessible during the migration, and if you require a migration window larger than 36 hours, please consider the [link feature for Managed Instance](managed-instance-link-feature-overview.md) as an alternative migration solution. - -## Troubleshooting - -After you start LRS, use the monitoring cmdlet (PowerShell: `Get-AzSqlInstanceDatabaseLogReplay` or Azure CLI: `az sql midb log-replay show`) to see the status of the operation. If LRS fails to start after some time and you get an error, check for the most common issues: - -- Does an existing database on SQL Managed Instance have the same name as the one you're trying to migrate from SQL Server? Resolve this conflict by renaming one of the databases. -- Was the database backup on SQL Server made via the `CHECKSUM` option? -- Are the permissions granted for the SAS token **Read** and **List** _only_? -- Did you copy the SAS token for LRS after the question mark (`?`), with content starting like this: `sv=2020-02-10...`?  
-- Is the SAS token validity time applicable for the time window of starting and completing the migration? There might be mismatches due to the different time zones used for SQL Managed Instance and the SAS token. Try regenerating the SAS token and extending the token validity of the time window before and after the current date. -- Are the database name, resource group name, and managed instance name spelled correctly? -- If you started LRS in autocomplete mode, was a valid filename for the last backup file specified? - -## Next steps - -- Learn more about [migrating to SQL Managed Instance using the link feature](managed-instance-link-feature-overview.md). -- Learn more about [migrating from SQL Server to SQL Managed instance](../migration-guides/managed-instance/sql-server-to-managed-instance-guide.md). -- Learn more about [differences between SQL Server and SQL Managed Instance](transact-sql-tsql-differences-sql-server.md). -- Learn more about [best practices to cost and size workloads migrated to Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/migrate-best-practices-costs). diff --git a/articles/azure-sql/managed-instance/long-term-backup-retention-configure.md b/articles/azure-sql/managed-instance/long-term-backup-retention-configure.md deleted file mode 100644 index 3366ecd7b1515..0000000000000 --- a/articles/azure-sql/managed-instance/long-term-backup-retention-configure.md +++ /dev/null @@ -1,404 +0,0 @@ ---- -title: Azure SQL Managed Instance long-term backup retention -description: Learn how to store and restore automated backups on separate Azure Blob storage containers for an Azure SQL Managed Instance using the Azure portal and PowerShell. 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: backup-restore -ms.custom: devx-track-azurepowershell, devx-track-azurecli -ms.topic: how-to -author: SudhirRaparla -ms.author: nvraparl -ms.reviewer: mathoma -ms.date: 09/12/2021 ---- -# Manage Azure SQL Managed Instance long-term backup retention -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -In Azure SQL Managed Instance, you can configure a [long-term backup retention](../database/long-term-retention-overview.md) policy (LTR). This allows you to automatically retain database backups in separate Azure Blob storage containers for up to 10 years. You can then recover a database using these backups with the Azure portal and PowerShell. - -The following sections show you how to use the Azure portal, PowerShell, and Azure CLI to configure the long-term backup retention, view backups in Azure SQL storage, and restore from a backup in Azure SQL storage. - -## Prerequisites - -# [Portal](#tab/portal) - -An active Azure subscription. - -# [Azure CLI](#tab/azure-cli) - -Prepare your environment for the Azure CLI. - -[!INCLUDE[azure-cli-prepare-your-environment-no-header](../../../includes/azure-cli-prepare-your-environment-no-header.md)] - -# [PowerShell](#tab/powershell) - -Prepare your environment for PowerShell. - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] - -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported by Azure SQL Database, however, future development will be done in the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. 
- -For **Get-AzSqlInstanceDatabaseLongTermRetentionBackup** and **Restore-AzSqlInstanceDatabase**, you will need to have one of the following roles: - -- Subscription Owner role or -- Managed Instance Contributor role or -- Custom role with the following permissions: - - `Microsoft.Sql/locations/longTermRetentionManagedInstanceBackups/read` - - `Microsoft.Sql/locations/longTermRetentionManagedInstances/longTermRetentionManagedInstanceBackups/read` - - `Microsoft.Sql/locations/longTermRetentionManagedInstances/longTermRetentionDatabases/longTermRetentionManagedInstanceBackups/read` - -For **Remove-AzSqlInstanceDatabaseLongTermRetentionBackup**, you will need to have one of the following roles: - -- Subscription Owner role or -- Custom role with the following permission: - - `Microsoft.Sql/locations/longTermRetentionManagedInstances/longTermRetentionDatabases/longTermRetentionManagedInstanceBackups/delete` - -> [!NOTE] -> The Managed Instance Contributor role does not have permission to delete LTR backups. - -Azure RBAC permissions could be granted in either *subscription* or *resource group* scope. However, to access LTR backups that belong to a dropped instance, the permission must be granted in the *subscription* scope of that instance. - -- `Microsoft.Sql/locations/longTermRetentionManagedInstances/longTermRetentionDatabases/longTermRetentionManagedInstanceBackups/delete` - ---- - -## Create long-term retention policies - -You can configure SQL Managed Instance to [retain automated backups](../database/long-term-retention-overview.md) for a period longer than the retention period for your service tier. - -# [Portal](#tab/portal) - -1. In the Azure portal, select your managed instance and then click **Backups**. On the **Retention policies** tab, select the database(s) on which you want to set or modify long-term backup retention policies. Changes will not apply to any databases left unselected. 
- - ![manage backups link](./media/long-term-backup-retention-configure/ltr-configure-ltr.png) - -2. In the **Configure policies** pane, specify your desired retention period for weekly, monthly, or yearly backups. Choose a retention period of '0' to indicate that no long-term backup retention should be set. - - ![configure policies](./media/long-term-backup-retention-configure/ltr-configure-policies.png) - -3. When complete, click **Apply**. - -> [!IMPORTANT] -> When you enable a long-term backup retention policy, it may take up to 7 days for the first backup to become visible and available to restore. For details of the LTR backup cadence, see [long-term backup retention](../database/long-term-retention-overview.md). - -# [Azure CLI](#tab/azure-cli) - -1. Run the [az sql midb show](/cli/azure/sql/midb#az-sql-midb-show) command to get the details for the Managed Instance database. - - ```azurecli - az sql midb show \ - --resource-group mygroup \ - --managed-instance myinstance \ - --name mymanageddb \ - --subscription mysubscription - ``` - -2. Run the [az sql midb ltr-policy set](/cli/azure/sql/midb/ltr-policy#az-sql-midb-ltr-policy-set) command to create an LTR policy. The following example sets a long-term retention policy for 12 weeks for the weekly backup. - - ```azurecli - az sql midb ltr-policy set \ - --resource-group mygroup \ - --managed-instance myinstance \ - --name mymanageddb \ - --weekly-retention "P12W" - ``` - - This example sets a retention policy for 12 weeks for the weekly backup, 5 years for the yearly backup, and the week of April 15 in which to take the yearly LTR backup. 
- - ```azurecli - az sql midb ltr-policy set \ - --resource-group mygroup \ - --managed-instance myinstance \ - --name mymanageddb \ - --weekly-retention "P12W" \ - --yearly-retention "P5Y" \ - --week-of-year 16 - ``` - -# [PowerShell](#tab/powershell) - -```powershell -# get the Managed Instance -$subId = "" -$instanceName = "" -$resourceGroup = "" -$dbName = "" - -Connect-AzAccount - -Select-AzSubscription -SubscriptionId $subId - -$instance = Get-AzSqlInstance -Name $instanceName -ResourceGroupName $resourceGroup - -# create LTR policy with WeeklyRetention = 12 weeks. MonthlyRetention and YearlyRetention = 0 by default. -$LTRPolicy = @{ - InstanceName = $instanceName - DatabaseName = $dbName - ResourceGroupName = $resourceGroup - WeeklyRetention = 'P12W' -} -Set-AzSqlInstanceDatabaseBackupLongTermRetentionPolicy @LTRPolicy - -# create LTR policy with WeeklyRetention = 12 weeks, YearlyRetention = 5 years and WeekOfYear = 16 (week of April 15). MonthlyRetention = 0 by default. -$LTRPolicy = @{ - InstanceName = $instanceName - DatabaseName = $dbName - ResourceGroupName = $resourceGroup - WeeklyRetention = 'P12W' - YearlyRetention = 'P5Y' - WeekOfYear = '16' -} -Set-AzSqlInstanceDatabaseBackupLongTermRetentionPolicy @LTRPolicy -``` - ---- - -## View backups and restore from a backup - -# [Portal](#tab/portal) - -View the backups that are retained for a specific database with an LTR policy, and restore from those backups. - -1. In the Azure portal, select your managed instance and then click **Backups**. On the **Available backups** tab, select the database for which you want to see available backups. Click **Manage**. - - ![select database](./media/long-term-backup-retention-configure/ltr-available-backups-select-database.png) - -1. In the **Manage backups** pane, review the available backups. - - ![view backups](./media/long-term-backup-retention-configure/ltr-available-backups.png) - -1. 
Select the backup from which you want to restore, click **Restore**, then on the restore page specify the new database name. The backup and source will be pre-populated on this page. - - ![select backup for restore](./media/long-term-backup-retention-configure/ltr-available-backups-restore.png) - - ![restore](./media/long-term-backup-retention-configure/ltr-restore.png) - -1. Click **Review + Create** to review your Restore details. Then click **Create** to restore your database from the chosen backup. - -1. On the toolbar, click the notification icon to view the status of the restore job. - - ![restore job progress](./media/long-term-backup-retention-configure/restore-job-progress-long-term.png) - -1. When the restore job is completed, open the **Managed Instance Overview** page to view the newly restored database. - -> [!NOTE] -> From here, you can connect to the restored database using SQL Server Management Studio to perform needed tasks, such as to [extract a bit of data from the restored database to copy into the existing database or to delete the existing database and rename the restored database to the existing database name](../database/recovery-using-backups.md#point-in-time-restore). - -# [Azure CLI](#tab/azure-cli) - -### View LTR policies - -Run the [az sql midb ltr-policy show](/cli/azure/sql/midb/ltr-policy#az-sql-midb-ltr-policy-show) command to view the LTR policy for a single database within an instance. - -```azurecli -az sql midb ltr-policy show \ - --resource-group mygroup \ - --managed-instance myinstance \ - --name mymanageddb -``` - -### View LTR backups - -Use the [az sql midb ltr-backup list](/cli/azure/sql/midb/ltr-backup#az-sql-midb-ltr-backup-list) command to view the LTR backups within an instance. 
- -```azurecli -az sql midb ltr-backup list \ - --resource-group mygroup \ - --location eastus2 \ - --managed-instance myinstance -``` - -### Delete LTR backups - -Run the [az sql midb ltr-backup delete](/cli/azure/sql/midb/ltr-backup#az-sql-midb-ltr-backup-delete) command to remove an LTR backup. You can run [az sql midb ltr-backup list](/cli/azure/sql/midb/ltr-backup#az-sql-midb-ltr-backup-list) to get the backup `name`. - -```azurecli -az sql midb ltr-backup delete \ - --location eastus2 \ - --managed-instance myinstance \ - --database mymanageddb \ - --name "3214b3fb-fba9-43e7-96a3-09e35ffcb336;132292152080000000" -``` - -> [!IMPORTANT] -> Deleting LTR backup is non-reversible. To delete an LTR backup after the instance has been deleted you must have Subscription scope permission. You can set up notifications about each delete in Azure Monitor by filtering for operation 'Deletes a long term retention backup'. The activity log contains information on who and when made the request. See [Create activity log alerts](../../azure-monitor/alerts/alerts-activity-log.md) for detailed instructions. - -### Restore from LTR backups - -Run the [az sql midb ltr-backup restore](/cli/azure/sql/midb/ltr-backup#az-sql-midb-ltr-backup-restore) command to restore your database from an LTR backup. You can run [az sql midb ltr-backup show](/cli/azure/sql/midb/ltr-backup#az-sql-midb-ltr-backup-show) to get the `backup-id`. - -1. Create a variable for the `backup-id` with the command `az sql midb ltr-backup show` for future use. - - ```azurecli - get_backup_id=$(az sql midb ltr-backup show \ - --location eastus2 \ - --managed-instance myinstance \ - --database mydb \ - --name "3214b3fb-fba9-43e7-96a3-09e35ffcb336;132292152080000000" \ - --query 'id' \ - --output tsv) - ``` -2. 
Restore your database from an LTR backup - - ```azurecli - az sql midb ltr-backup restore \ - --dest-database targetmidb \ - --dest-mi myinstance \ - --dest-resource-group mygroup \ - --backup-id $get_backup_id - ``` - -> [!IMPORTANT] -> To restore from an LTR backup after the instance has been deleted, you must have permissions scoped to the subscription of the instance and that subscription must be active. - -> [!NOTE] -> From here, you can connect to the restored database using SQL Server Management Studio to perform needed tasks, such as to extract a bit of data from the restored database to copy into the existing database or to delete the existing database and rename the restored database to the existing database name. See [point in time restore](../database/recovery-using-backups.md#point-in-time-restore). - -# [PowerShell](#tab/powershell) - -### View LTR policies - -This example shows how to list the LTR policies within an instance for a single database. - -```powershell -# gets the current version of LTR policy for a database -$LTRPolicy = @{ - InstanceName = $instanceName - DatabaseName = $dbName - ResourceGroupName = $resourceGroup -} -Get-AzSqlInstanceDatabaseBackupLongTermRetentionPolicy @LTRPolicy -``` - -This example shows how to list the LTR policies for all of the databases on an instance. - -```powershell -# gets the current version of LTR policy for all of the databases on an instance - -$Databases = Get-AzSqlInstanceDatabase -ResourceGroupName $resourceGroup -InstanceName $instanceName - -$LTRParams = @{ - InstanceName = $instanceName - ResourceGroupName = $resourceGroup -} - -foreach($database in $Databases.Name){ - Get-AzSqlInstanceDatabaseBackupLongTermRetentionPolicy @LTRParams -DatabaseName $database - } -``` - -### Clear an LTR policy - -This example shows how to clear an LTR policy from a database. 
- -```powershell -# remove the LTR policy from a database -$LTRPolicy = @{ - InstanceName = $instanceName - DatabaseName = $dbName - ResourceGroupName = $resourceGroup - RemovePolicy = $true -} -Set-AzSqlInstanceDatabaseBackupLongTermRetentionPolicy @LTRPolicy -``` - -### View LTR backups - -This example shows how to list the LTR backups within an instance. - -```powershell - -$instance = Get-AzSqlInstance -Name $instanceName -ResourceGroupName $resourceGroup - -# get the list of all LTR backups in a specific Azure region -# backups are grouped by the logical database id, within each group they are ordered by the timestamp, the earliest backup first -Get-AzSqlInstanceDatabaseLongTermRetentionBackup -Location $instance.Location - -# get the list of LTR backups from the Azure region under the given managed instance -$LTRBackupParam = @{ - Location = $instance.Location - InstanceName = $instanceName -} -Get-AzSqlInstanceDatabaseLongTermRetentionBackup @LTRBackupParam - -# get the LTR backups for a specific database from the Azure region under the given managed instance -$LTRBackupParam = @{ - Location = $instance.Location - InstanceName = $instanceName - DatabaseName = $dbName -} -Get-AzSqlInstanceDatabaseLongTermRetentionBackup @LTRBackupParam - -# list LTR backups only from live databases (you have option to choose All/Live/Deleted) -$LTRBackupParam = @{ - Location = $instance.Location - DatabaseState = 'Live' -} -Get-AzSqlInstanceDatabaseLongTermRetentionBackup @LTRBackupParam - -# only list the latest LTR backup for each database -$LTRBackupParam = @{ - Location = $instance.Location - InstanceName = $instanceName - OnlyLatestPerDatabase = $true -} -Get-AzSqlInstanceDatabaseLongTermRetentionBackup @LTRBackupParam -``` - -### Delete LTR backups - -This example shows how to delete an LTR backup from the list of backups. 
- -```powershell -# remove the earliest backup -# get the LTR backups for a specific database from the Azure region under the given managed instance -$LTRBackupParam = @{ - Location = $instance.Location - InstanceName = $instanceName - DatabaseName = $dbName -} -$ltrBackups = Get-AzSqlInstanceDatabaseLongTermRetentionBackup @LTRBackupParam -$ltrBackup = $ltrBackups[0] -Remove-AzSqlInstanceDatabaseLongTermRetentionBackup -ResourceId $ltrBackup.ResourceId -``` - -> [!IMPORTANT] -> Deleting LTR backup is non-reversible. To delete an LTR backup after the instance has been deleted you must have Subscription scope permission. You can set up notifications about each delete in Azure Monitor by filtering for operation 'Deletes a long term retention backup'. The activity log contains information on who and when made the request. See [Create activity log alerts](../../azure-monitor/alerts/alerts-activity-log.md) for detailed instructions. - -### Restore from LTR backups - -This example shows how to restore from an LTR backup. Note, this interface did not change but the resource ID parameter now requires the LTR backup resource ID. - -```powershell -# restore a specific LTR backup as an P1 database on the instance $instanceName of the resource group $resourceGroup -$LTRBackupParam = @{ - Location = $instance.Location - InstanceName = $instanceName - DatabaseName = $dbname - OnlyLatestPerDatabase = $true -} -$ltrBackup = Get-AzSqlInstanceDatabaseLongTermRetentionBackup @LTRBackupParam - -$RestoreLTRParam = @{ - TargetInstanceName = $instanceName - TargetResourceGroupName = $resourceGroup - TargetInstanceDatabaseName = $dbName - FromLongTermRetentionBackup = $true - ResourceId = $ltrBackup.ResourceId -} -Restore-AzSqlInstanceDatabase @RestoreLTRParam -``` - -> [!IMPORTANT] -> To restore from an LTR backup after the instance has been deleted, you must have permissions scoped to the subscription of the instance and that subscription must be active. 
You must also omit the optional -ResourceGroupName parameter. - -> [!NOTE] -> From here, you can connect to the restored database using SQL Server Management Studio to perform needed tasks, such as to extract a bit of data from the restored database to copy into the existing database or to delete the existing database and rename the restored database to the existing database name. See [point in time restore](../database/recovery-using-backups.md#point-in-time-restore). - ---- - -## Next steps - -- To learn about service-generated automatic backups, see [automatic backups](../database/automated-backups-overview.md) -- To learn about long-term backup retention, see [long-term backup retention](../database/long-term-retention-overview.md) diff --git a/articles/azure-sql/managed-instance/machine-learning-services-differences.md b/articles/azure-sql/managed-instance/machine-learning-services-differences.md deleted file mode 100644 index 2cbaf17a8dc65..0000000000000 --- a/articles/azure-sql/managed-instance/machine-learning-services-differences.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: Key differences for Machine Learning Services -description: This article describes key differences between Machine Learning Services in Azure SQL Managed Instance and SQL Server Machine Learning Services. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: machine-learning -ms.custom: -ms.devlang: -ms.topic: conceptual -author: MashaMSFT -ms.author: mathoma -ms.reviewer: mathoma, davidph -manager: cgronlun -ms.date: 03/17/2021 ---- - -# Key differences between Machine Learning Services in Azure SQL Managed Instance and SQL Server - -This article describes the few, key differences in functionality between [Machine Learning Services in Azure SQL Managed Instance](machine-learning-services-overview.md) and [SQL Server Machine Learning Services](/sql/advanced-analytics/what-is-sql-server-machine-learning). 
- -## Language support - -Machine Learning Services in both SQL Managed Instance and SQL Server support the Python and R [extensibility framework](/sql/machine-learning/concepts/extensibility-framework). The key differences in SQL Managed Instance are: - -- Only Python and R are supported. External languages such as Java cannot be added. - -- The initial versions of Python and R are different: - - | Platform | Python runtime version | R runtime versions | - |----------------------------|----------------------------------|--------------------------------------| - | Azure SQL Managed Instance | 3.7.2 | 3.5.2 | - | SQL Server 2019 | 3.7.1 | 3.5.2 | - | SQL Server 2017 | 3.5.2 and 3.7.2 (CU22 and later) | 3.3.3 and 3.5.2 (CU22 and later) | - | SQL Server 2016 | Not available | 3.2.2 and 3.5.2 (SP2 CU14 and later) | - -## Python and R Packages - -There is no support in SQL Managed Instance for packages that depend on external runtimes (like Java) or need access to OS APIs for installation or usage. - -For more information about managing Python and R packages, see: - -- [Get Python package information](/sql/machine-learning/package-management/python-package-information?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current) -- [Get R package information](/sql/machine-learning/package-management/r-package-information?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current) - -## Resource governance - -In SQL Managed Instance, it's not possible to limit R resources through [Resource Governor](/sql/relational-databases/resource-governor/resource-governor?view=azuresqldb-mi-current&preserve-view=true), and external resource pools are not supported. - -By default, R resources are set to a maximum of 20% of the available SQL Managed Instance resources when extensibility is enabled. 
To change this default percentage, create an Azure support ticket at [https://azure.microsoft.com/support/create-ticket/](https://azure.microsoft.com/support/create-ticket/). - -Extensibility is enabled with the following SQL commands (SQL Managed Instance will restart and be unavailable for a few seconds): - -```sql -sp_configure 'external scripts enabled', 1; -RECONFIGURE WITH OVERRIDE; -``` - -To disable extensibility and restore 100% of memory and CPU resources to SQL Server, use the following commands: - -```sql -sp_configure 'external scripts enabled', 0; -RECONFIGURE WITH OVERRIDE; -``` - -The total resources available to SQL Managed Instance depend on which service tier you choose. For more information, see [Azure SQL Database purchasing models](../database/purchasing-models.md). - -### Insufficient memory error - -Memory usage depends on how much is used in your R scripts and the number of parallel queries being executed. If there is insufficient memory available for R, you'll get an error message. Common error messages are: - -- `Unable to communicate with the runtime for 'R' script for request id: *******. Please check the requirements of 'R' runtime` -- `'R' script error occurred during execution of 'sp_execute_external_script' with HRESULT 0x80004004. ...an external script error occurred: "..could not allocate memory (0 Mb) in C function 'R_AllocStringBuffer'"` -- `An external script error occurred: Error: cannot allocate vector of size.` - -If you receive one of these errors, you can resolve it by scaling your database to a higher service tier. - -If you encounter out of memory errors in Azure SQL Managed Instance, review [sys.dm_os_out_of_memory_events](/sql/relational-databases/system-dynamic-management-views/sys-dm-os-out-of-memory-events). - -## SQL Managed Instance pools - -Machine Learning Services is currently not supported on [Azure SQL Managed Instance pools (preview)](instance-pools-overview.md). 
- -## Next steps - -- See the overview, [Machine Learning Services in Azure SQL Managed Instance](machine-learning-services-overview.md). -- To learn how to use Python in Machine Learning Services, see [Run Python scripts](/sql/machine-learning/tutorials/quickstart-python-create-script?context=/azure/azure-sql/managed-instance/context/ml-context&view=azuresqldb-mi-current&preserve-view=true). -- To learn how to use R in Machine Learning Services, see [Run R scripts](/sql/machine-learning/tutorials/quickstart-r-create-script?context=/azure/azure-sql/managed-instance/context/ml-context&view=azuresqldb-mi-current&preserve-view=true). \ No newline at end of file diff --git a/articles/azure-sql/managed-instance/machine-learning-services-overview.md b/articles/azure-sql/managed-instance/machine-learning-services-overview.md deleted file mode 100644 index baaa3abd40f0a..0000000000000 --- a/articles/azure-sql/managed-instance/machine-learning-services-overview.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Machine Learning Services in Azure SQL Managed Instance -description: This article provides an overview of Machine Learning Services in Azure SQL Managed Instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: machine-learning -ms.custom: -ms.devlang: -ms.topic: conceptual -author: MashaMSFT -ms.author: mathoma -ms.reviewer: mathoma, davidph -manager: cgronlun -ms.date: 03/17/2021 ---- - -# Machine Learning Services in Azure SQL Managed Instance - -Machine Learning Services is a feature of Azure SQL Managed Instance that provides in-database machine learning, supporting both Python and R scripts. The feature includes Microsoft Python and R packages for high-performance predictive analytics and machine learning. The relational data can be used in scripts through stored procedures, T-SQL script containing Python or R statements, or Python or R code containing T-SQL. - -## What is Machine Learning Services? 
- -Machine Learning Services in Azure SQL Managed Instance lets you execute Python and R scripts in-database. You can use it to prepare and clean data, do feature engineering, and train, evaluate, and deploy machine learning models within a database. The feature runs your scripts where the data resides and eliminates transfer of the data across the network to another server. - -Use Machine Learning Services with R/Python support in Azure SQL Managed Instance to: - -- **Run R and Python scripts to do data preparation and general purpose data processing** - You can now bring your R/Python scripts to Azure SQL Managed Instance where your data lives, instead of having to move data out to some other server to run R and Python scripts. You can eliminate the need for data movement and associated problems related to latency, security, and compliance. - -- **Train machine learning models in database** - You can train models using any open source algorithms. You can easily scale your training to the entire dataset rather than relying on sample datasets pulled out of the database. - -- **Deploy your models and scripts into production in stored procedures** - The scripts and trained models can be operationalized simply by embedding them in T-SQL stored procedures. Apps connecting to Azure SQL Managed Instance can benefit from predictions and intelligence in these models by just calling a stored procedure. You can also use the native T-SQL PREDICT function to operationalize models for fast scoring in highly concurrent real-time scoring scenarios. - -Base distributions of Python and R are included in Machine Learning Services. 
You can install and use open-source packages and frameworks, such as PyTorch, TensorFlow, and scikit-learn, in addition to the Microsoft packages -[revoscalepy](/sql/machine-learning/python/ref-py-revoscalepy) and -[microsoftml](/sql/machine-learning/python/ref-py-microsoftml) for Python, and - [RevoScaleR](/sql/machine-learning/r/ref-r-revoscaler), -[MicrosoftML](/sql/machine-learning/r/ref-r-microsoftml), - [olapR](/sql/machine-learning/r/ref-r-olapr), and - [sqlrutils](/sql/machine-learning/r/ref-r-sqlrutils) for R. - -## How to enable Machine Learning Services - -You can enable Machine Learning Services in Azure SQL Managed Instance by enabling extensibility with the following SQL commands (SQL Managed Instance will restart and be unavailable for a few seconds): - -```sql -sp_configure 'external scripts enabled', 1; -RECONFIGURE WITH OVERRIDE; -``` - -For details on how this command affects SQL Managed Instance resources, see [Resource Governance](machine-learning-services-differences.md#resource-governance). - -### Enable Machine Learning Services in a failover group - -In a [failover group](failover-group-add-instance-tutorial.md), system databases are not replicated to the secondary instance (see [Limitations of failover groups](auto-failover-group-sql-mi.md#limitations) for more information). - -If the SQL Managed Instance you're using is part of a failover group, do the following: - -- Run the `sp_configure` and `RECONFIGURE` commands on each instance of the failover group to enable Machine Learning Services. - -- Install the R/Python libraries on a user database rather than the master database. - -## Next steps - -- See the [key differences from SQL Server Machine Learning Services](machine-learning-services-differences.md). 
-- To learn how to use Python in Machine Learning Services, see [Run Python scripts](/sql/machine-learning/tutorials/quickstart-python-create-script?context=/azure/azure-sql/managed-instance/context/ml-context&view=azuresqldb-mi-current&preserve-view=true). -- To learn how to use R in Machine Learning Services, see [Run R scripts](/sql/machine-learning/tutorials/quickstart-r-create-script?context=/azure/azure-sql/managed-instance/context/ml-context&view=azuresqldb-mi-current&preserve-view=true). -- For more information about machine learning on other SQL platforms, see the [SQL machine learning documentation](/sql/machine-learning/index). diff --git a/articles/azure-sql/managed-instance/managed-instance-link-best-practices.md b/articles/azure-sql/managed-instance/managed-instance-link-best-practices.md deleted file mode 100644 index 7a9a6841b625f..0000000000000 --- a/articles/azure-sql/managed-instance/managed-instance-link-best-practices.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: The link feature best practices -titleSuffix: Azure SQL Managed Instance -description: Learn about best practices when using the link feature for Azure SQL Managed Instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: data-movement -ms.custom: -ms.devlang: -ms.topic: guide -author: danimir -ms.author: danil -ms.reviewer: mathoma, danil -ms.date: 03/28/2022 ---- -# Best practices with link feature for Azure SQL Managed Instance (preview) -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article outlines best practices when using the link feature for Azure SQL Managed Instance. The link feature for Azure SQL Managed Instance connects your SQL Servers hosted anywhere to SQL Managed Instance, providing near real-time data replication to the cloud. - -> [!NOTE] -> The link feature for Azure SQL Managed Instance is currently in preview. 
- -## Take log backups regularly - -The link feature replicates data using the [Distributed availability groups](/sql/database-engine/availability-groups/windows/distributed-availability-groups) concept based on the Always On availability groups technology stack. Data replication with distributed availability groups is based on replicating transaction log records. No transaction log records can be truncated from the database on the primary instance until they're replicated to the database on the secondary instance. If transaction log record replication is slow or blocked due to network connection issues, the log file keeps growing on the primary instance. Growth speed depends on the intensity of the workload and the network speed. If there's a prolonged network connection outage and a heavy workload on the primary instance, the log file may take all available storage space. - -To minimize the risk of running out of space on your primary instance due to log file growth, make sure to **take database log backups regularly**. By taking log backups regularly, you make your database more resilient to unplanned log growth events. Consider scheduling daily log backup tasks using a SQL Server Agent job. - -You can use a Transact-SQL (T-SQL) script to back up the log file, such as the sample provided in this section. Replace the placeholders in the sample script with the name of your database, the name and path of the backup file, and a description. 
- -To back up your transaction log, use the following sample Transact-SQL (T-SQL) script on SQL Server: - -```sql --- Execute on SQL Server -USE [] ---Set current database inside job step or script ---Check that you are executing the script on the primary instance -if (SELECT role - FROM sys.dm_hadr_availability_replica_states AS a - JOIN sys.availability_replicas AS b - ON b.replica_id = a.replica_id -WHERE b.replica_server_name = @@SERVERNAME) = 1 -BEGIN --- Take log backup -BACKUP LOG [] -TO DISK = N'' -WITH NOFORMAT, NOINIT, -NAME = N'', SKIP, NOREWIND, NOUNLOAD, COMPRESSION, STATS = 1 -END -``` - -Use the following Transact-SQL (T-SQL) command to check the log spaced used by your database on SQL Server: - -```sql --- Execute on SQL Server -DBCC SQLPERF(LOGSPACE); -``` - -The query output looks like the following example below for sample database **tpcc**: - -:::image type="content" source="./media/managed-instance-link-best-practices/database-log-file-size.png" alt-text="Screenshot with results of the command showing log file size and space used"::: - -In this example, the database has used 76% of the available log, with an absolute log file size of approximately 27 GB (27,971 MB). The thresholds for action may vary based on your workload, but it's typically an indication that you should take a log backup to truncate the log file and free up some space. - -## Add startup trace flags - -There are two trace flags (`-T1800` and `-T9567`) that, when added as start up parameters, can optimize the performance of data replication through the link. See [Enable startup trace flags](managed-instance-link-preparation.md#enable-startup-trace-flags) to learn more. - -## Next steps - -To get started with the link feature, [prepare your environment for replication](managed-instance-link-preparation.md). 
- -For more information on the link feature, see the following articles: - -- [Managed Instance link – overview](managed-instance-link-feature-overview.md) -- [Managed Instance link – connecting SQL Server to Azure reimagined](https://aka.ms/mi-link-techblog) diff --git a/articles/azure-sql/managed-instance/managed-instance-link-feature-overview.md b/articles/azure-sql/managed-instance/managed-instance-link-feature-overview.md deleted file mode 100644 index 5dd31f9b3a06b..0000000000000 --- a/articles/azure-sql/managed-instance/managed-instance-link-feature-overview.md +++ /dev/null @@ -1,143 +0,0 @@ ---- -title: The link feature -titleSuffix: Azure SQL Managed Instance -description: Learn about the link feature for Azure SQL Managed Instance to continuously replicate data from SQL Server to the cloud, or migrate your SQL Server databases with the best possible minimum downtime. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: data-movement -ms.custom: ignite-fall-2021 -ms.devlang: -ms.topic: conceptual -author: danimir -ms.author: danil -ms.reviewer: mathoma, danil -ms.date: 03/28/2022 ---- -# Link feature for Azure SQL Managed Instance (preview) -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -The new link feature in Azure SQL Managed Instance connects your SQL Servers hosted anywhere to SQL Managed Instance, providing hybrid flexibility and database mobility. With an approach that uses near real-time data replication to the cloud, you can offload workloads to a read-only secondary in Azure to take advantage of Azure-only features, performance, and scale. - -After a disastrous event, you can continue running your read-only workloads on SQL Managed Instance in Azure. You can also choose to migrate one or more applications from SQL Server to SQL Managed Instance at the same time, at your own pace, and with the best possible minimum downtime compared to other solutions in Azure today. 
- -To use the link feature, you'll need: - -- SQL Server 2019 Enterprise Edition or Developer Edition with [CU15 (or above)](https://support.microsoft.com/en-us/topic/kb5008996-cumulative-update-15-for-sql-server-2019-4b6a8ee9-1c61-482d-914f-36e429901fb6) installed on-premises, or on an Azure VM. -- Network connectivity between your SQL Server and managed instance is required. If your SQL Server is running on-premises, use a VPN link or Express route. If your SQL Server is running on an Azure VM, either deploy your VM to the same subnet as your managed instance, or use global VNet peering to connect two separate subnets. -- Azure SQL Managed Instance provisioned on any service tier. - -> [!NOTE] -> SQL Managed Instance link feature is available in all public Azure regions. -> National clouds are currently not supported. - -## Overview - -The underlying technology of near real-time data replication between SQL Server and SQL Managed Instance is based on distributed availability groups, part of the well-known and proven Always On availability group technology stack. Extend your SQL Server on-premises availability group to SQL Managed Instance in Azure in a safe and secure manner. - -There's no need to have an existing availability group or multiple nodes. The link supports single node SQL Server instances without existing availability groups, and also multiple-node SQL Server instances with existing availability groups. Through the link, you can use the modern benefits of Azure without migrating your entire SQL Server data estate to the cloud. - -You can keep running the link for as long as you need it, for months and even years at a time. And for your modernization journey, if or when you're ready to migrate to Azure, the link enables a considerably-improved migration experience with the minimum possible downtime compared to all other options available today, providing a true online migration to SQL Managed Instance. 
- -## Supported scenarios - -Data replicated through the link feature from SQL Server to Azure SQL Managed Instance can be used with several scenarios, such as: - -- **Use Azure services without migrating to the cloud** -- **Offload read-only workloads to Azure** -- **Migrate to Azure** - -![Managed Instance link main scenario](./media/managed-instance-link-feature-overview/mi-link-main-scenario.png) - -### Use Azure services - -Use the link feature to leverage Azure services using SQL Server data without migrating to the cloud. Examples include reporting, analytics, backups, machine learning, and other jobs that send data to Azure. - -### Offload workloads to Azure - -You can also use the link feature to offload workloads to Azure. For example, an application could use SQL Server for read-write workloads, while offloading read-only workloads to SQL Managed Instance in any of Azure's 60+ regions worldwide. Once the link is established, the primary database on SQL Server is read/write accessible, while replicated data to SQL Managed Instance in Azure is read-only accessible. This allows for various scenarios where replicated databases on SQL Managed Instance can be used for read scale-out and offloading read-only workloads to Azure. SQL Managed Instance, in parallel, can also host independent read/write databases. This allows for copying the replicated database to another read/write database on the same managed instance for further data processing. - -The link is database scoped (one link per one database), allowing for consolidation and deconsolidation of workloads in Azure. For example, you can replicate databases from multiple SQL Servers to a single SQL Managed Instance in Azure (consolidation), or replicate databases from a single SQL Server to multiple managed instances via a 1 to 1 relationship between a database and a managed instance - to any of Azure's regions worldwide (deconsolidation). 
The latter provides you with an efficient way to quickly bring your workloads closer to your customers in any region worldwide, which you can use as read-only replicas. - -### Migrate to Azure - -The link feature also facilitates migrating from SQL Server to SQL Managed Instance, enabling: - -- The most performant minimum downtime migration compared to all other solutions available today -- True online migration to SQL Managed Instance in any service tier - -Since the link feature enables minimum downtime migration, you can migrate to your managed instance while maintaining your primary workload online. While online migration was possible to achieve previously with other solutions when migrating to the general purpose service tier, the link feature now also allows for true online migrations to the business critical service tier as well. - -## How it works - -The underlying technology behind the link feature for SQL Managed Instance is distributed availability groups. The solution supports single-node systems without existing availability groups, or multiple node systems with existing availability groups. - -![How does the link feature for SQL Managed Instance work](./media/managed-instance-link-feature-overview/mi-link-ag-dag.png) - -Secure connectivity, such as VPN or Express Route is used between an on-premises network and Azure. If SQL Server is hosted on an Azure VM, the internal Azure backbone can be used between the VM and managed instance – such as, for example, global VNet peering. The trust between the two systems is established using certificate-based authentication, in which SQL Server and SQL Managed Instance exchange their public keys. - -There could exist up to 100 links from the same, or various SQL Server sources to a single SQL Managed Instance. This limit is governed by the number of databases that could be hosted on a managed instance at this time. 
Likewise, a single SQL Server can establish multiple parallel database replication links with several managed instances in different Azure regions in a 1 to 1 relationship between a database and a managed instance. The feature requires CU15 or higher to be installed on SQL Server 2019. - -## Use the link feature - -To help with the initial environment setup, we have prepared the following online guide on how to set up your SQL Server environment to use with the link feature for Managed Instance: - -* [Prepare environment for the link](managed-instance-link-preparation.md) - -Once you have ensured the prerequisites have been met, you can create the link using the automated wizard in SSMS, or you can choose to set up the link manually using scripts. Create the link using one of the following instructions: - -* [Replicate database with link feature in SSMS](managed-instance-link-use-ssms-to-replicate-database.md), or alternatively -* [Replicate database with Azure SQL Managed Instance link feature with T-SQL and PowerShell scripts](managed-instance-link-use-scripts-to-replicate-database.md) - -Once the link has been created, ensure that you follow the best practices for maintaining the link, by following the instructions described on this page: - -* [Best practices with link feature for Azure SQL Managed Instance](managed-instance-link-best-practices.md) - -If and when you are ready to migrate a database to Azure with minimum downtime, you can do this using an automated wizard in SSMS, or you can choose to do this manually with scripts. 
Migrate the database to Azure using one of the following instructions: - -* [Failover database with link feature in SSMS](managed-instance-link-use-ssms-to-failover-database.md), or alternatively -* [Failover (migrate) database with Azure SQL Managed Instance link feature with T-SQL and PowerShell scripts](managed-instance-link-use-scripts-to-failover-database.md) - -## Limitations - -This section describes the product’s functional limitations. - -### General functional limitations - -Managed Instance link has a set of general limitations, and those are listed in this section. Listed limitations are of a technical nature and are unlikely to be addressed in the foreseeable future. - -- Only user databases can be replicated. Replication of system databases isn't supported. -- The solution doesn't replicate server level objects, agent jobs, nor user logins from SQL Server to Managed Instance. -- Only one database can be placed into a single Availability Group per one Distributed Availability Group link. -- Link can't be established between SQL Server and Managed Instance if functionality used on SQL Server isn't supported on Managed Instance. - - File tables and file streams aren't supported for replication, as Managed Instance doesn't support this. - - Replicating Databases using Hekaton (In-Memory OLTP) isn't supported on Managed Instance General Purpose service tier. Hekaton is only supported on Managed Instance Business Critical service tier. - - For the full list of differences between SQL Server and Managed Instance, see [this article](./transact-sql-tsql-differences-sql-server.md). -- In case Change data capture (CDC), log shipping, or service broker are used with the database replicated on the SQL Server, and in case of database migration to Managed Instance, on failover to Azure, clients will need to connect using the instance name of the current global primary replica. You'll need to manually re-configure these settings. 
-- In case Transactional Replication is used with the database replicated on the SQL Server, and in case of a migration scenario, on failover to Azure, transactional replication on Azure SQL Managed Instance won't continue. You'll need to manually re-configure Transactional Replication. -- In case distributed transactions are used with the database replicated from the SQL Server, and in case of a migration scenario, on the cutover to the cloud, the DTC capabilities won't be transferred. There will be no possibility for the migrated database to get involved in distributed transactions with SQL Server, as Managed Instance doesn't support distributed transactions with SQL Server at this time. For reference, Managed Instance today supports distributed transactions only between other Managed Instances, see [this article](../database/elastic-transactions-overview.md#transactions-for-sql-managed-instance). -- Managed Instance link can replicate a database of any size if it fits into the chosen storage size of the target Managed Instance. - -### Preview limitations - -Some Managed Instance link features and capabilities are limited **at this time**. Details can be found in the following list. -- SQL Server 2019, Enterprise Edition or Developer Edition, CU15 (or higher) on Windows or Linux host OS is supported. -- Private endpoint (VPN/VNET) is supported to connect Distributed Availability Groups to Managed Instance. Public endpoint can't be used to connect to Managed Instance. -- Managed Instance Link authentication between SQL Server instance and Managed Instance is certificate-based, available only through exchange of certificates. Windows authentication between instances isn't supported. -- Replication of user databases from SQL Server to Managed Instance is one-way. User databases from Managed Instance can't be replicated back to SQL Server. 
-- [Auto failover groups](auto-failover-group-sql-mi.md) replication to secondary Managed Instance can't be used in parallel while operating the Managed Instance link with SQL Server. -- Replicated R/O databases aren't part of auto-backup process on SQL Managed Instance. - -## Next steps - -If you're interested in using Link feature for Azure SQL Managed Instance with versions and editions that are currently not supported, sign-up [here](https://aka.ms/mi-link-signup). - -For more information on the link feature, see the following: - -- [Managed Instance link – connecting SQL Server to Azure reimagined](https://aka.ms/mi-link-techblog). -- [Prepare for SQL Managed Instance link](./managed-instance-link-preparation.md). -- [Use SQL Managed Instance link via SSMS to replicate database](./managed-instance-link-use-ssms-to-replicate-database.md). -- [Use SQL Managed Instance link via SSMS to migrate database](./managed-instance-link-use-ssms-to-failover-database.md). - -For other replication scenarios, consider: - -- [Transactional replication with Azure SQL Managed Instance (Preview)](replication-transactional-overview.md) diff --git a/articles/azure-sql/managed-instance/managed-instance-link-preparation.md b/articles/azure-sql/managed-instance/managed-instance-link-preparation.md deleted file mode 100644 index 878f5bca7d57b..0000000000000 --- a/articles/azure-sql/managed-instance/managed-instance-link-preparation.md +++ /dev/null @@ -1,366 +0,0 @@ ---- -title: Prepare environment for Managed Instance link -titleSuffix: Azure SQL Managed Instance -description: Learn how to prepare your environment for using a Managed Instance link to replicate and fail over your database to SQL Managed Instance. 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: data-movement -ms.custom: -ms.devlang: -ms.topic: guide -author: sasapopo -ms.author: sasapopo -ms.reviewer: mathoma, danil -ms.date: 04/02/2022 ---- - -# Prepare your environment for a link - Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article teaches you how to prepare your environment for a [Managed Instance link](managed-instance-link-feature-overview.md) so that you can replicate databases from SQL Server to Azure SQL Managed Instance. - -> [!NOTE] -> The link is a feature of Azure SQL Managed Instance and is currently in preview. - -## Prerequisites - -To use the link with Azure SQL Managed Instance, you need the following prerequisites: - -- An active Azure subscription. If you don't have one, [create a free account](https://azure.microsoft.com/free/). -- [SQL Server 2019 Enterprise or Developer edition](https://www.microsoft.com/en-us/evalcenter/evaluate-sql-server-2019?filetype=EXE), starting with [CU15 (15.0.4198.2)](https://support.microsoft.com/topic/kb5008996-cumulative-update-15-for-sql-server-2019-4b6a8ee9-1c61-482d-914f-36e429901fb6). -- Azure SQL Managed Instance. [Get started](instance-create-quickstart.md) if you don't have it. - -## Prepare your SQL Server instance - -To prepare your SQL Server instance, you need to validate that: - -- You're on the minimum supported version. -- You've enabled the availability groups feature. -- You've added the proper trace flags at startup. -- Your databases are in full recovery mode and backed up. - -You'll need to restart SQL Server for these changes to take effect. - -### Install CU15 (or later) - -The link feature for SQL Managed Instance was introduced in CU15 of SQL Server 2019. 
- -To check your SQL Server version, run the following Transact-SQL (T-SQL) script on SQL Server: - -```sql --- Run on SQL Server --- Shows the version and CU of the SQL Server -SELECT @@VERSION -``` - -If your SQL Server version is earlier than CU15 (15.0.4198.2), install [CU15](https://support.microsoft.com/topic/kb5008996-cumulative-update-15-for-sql-server-2019-4b6a8ee9-1c61-482d-914f-36e429901fb6) or the latest cumulative update. You must restart your SQL Server instance during the update. - -### Create a database master key in the master database - -Create database master key in the master database by running the following T-SQL script on SQL Server: - -```sql --- Run on SQL Server --- Create a master key -USE MASTER -CREATE MASTER KEY ENCRYPTION BY PASSWORD = '' -``` - -To make sure that you have the database master key, use the following T-SQL script on SQL Server: - -```sql --- Run on SQL Server -SELECT * FROM sys.symmetric_keys WHERE name LIKE '%DatabaseMasterKey%' -``` - -### Enable availability groups - -The link feature for SQL Managed Instance relies on the Always On availability groups feature, which isn't enabled by default. To learn more, review [Enable the Always On availability groups feature](/sql/database-engine/availability-groups/windows/enable-and-disable-always-on-availability-groups-sql-server). - -To confirm that the Always On availability groups feature is enabled, run the following T-SQL script on SQL Server: - -```sql --- Run on SQL Server --- Is Always On enabled on this SQL Server instance? -declare @IsHadrEnabled sql_variant = (select SERVERPROPERTY('IsHadrEnabled')) -select - @IsHadrEnabled as IsHadrEnabled, - case @IsHadrEnabled - when 0 then 'The Always On availability groups is disabled.' - when 1 then 'The Always On availability groups is enabled.' - else 'Unknown status.' - end as 'HadrStatus' -``` - -If the availability groups feature isn't enabled, follow these steps to enable it: - -1. Open SQL Server Configuration Manager. 
-1. Select **SQL Server Services** from the left pane. -1. Right-click the SQL Server service, and then select **Properties**. - - :::image type="content" source="./media/managed-instance-link-preparation/sql-server-configuration-manager-sql-server-properties.png" alt-text="Screenshot that shows SQL Server Configuration Manager, with selections for opening properties for the service."::: - -1. Go to the **Always On Availability Groups** tab. -1. Select the **Always On Availability Groups** checkbox, and then select **OK**. - - :::image type="content" source="./media/managed-instance-link-preparation/always-on-availability-groups-properties.png" alt-text="Screenshot that shows the properties for Always On availability groups."::: - -1. Select **OK** in the dialog to restart the SQL Server service. - -### Enable startup trace flags - -To optimize the performance of your SQL Managed Instance link, we recommend enabling the following trace flags at startup: - -- `-T1800`: This trace flag optimizes performance when the log files for the primary and secondary replicas in an availability group are hosted on disks with different sector sizes, such as 512 bytes and 4K. If both primary and secondary replicas have a disk sector size of 4K, this trace flag isn't required. To learn more, review [KB3009974](https://support.microsoft.com/topic/kb3009974-fix-slow-synchronization-when-disks-have-different-sector-sizes-for-primary-and-secondary-replica-log-files-in-sql-server-ag-and-logshipping-environments-ed181bf3-ce80-b6d0-f268-34135711043c). -- `-T9567`: This trace flag enables compression of the data stream for availability groups during automatic seeding. The compression increases the load on the processor but can significantly reduce transfer time during seeding. - -To enable these trace flags at startup, use the following steps: - -1. Open SQL Server Configuration Manager. -1. Select **SQL Server Services** from the left pane. -1. 
Right-click the SQL Server service, and then select **Properties**. - - :::image type="content" source="./media/managed-instance-link-preparation/sql-server-configuration-manager-sql-server-properties.png" alt-text="Screenshot that shows SQL Server Configuration Manager."::: - -1. Go to the **Startup Parameters** tab. In **Specify a startup parameter**, enter `-T1800` and select **Add** to add the startup parameter. Then enter `-T9567` and select **Add** to add the other trace flag. Select **Apply** to save your changes. - - :::image type="content" source="./media/managed-instance-link-preparation/startup-parameters-properties.png" alt-text="Screenshot that shows startup parameter properties."::: - -1. Select **OK** to close the **Properties** window. - -To learn more, review the [syntax for enabling trace flags](/sql/t-sql/database-console-commands/dbcc-traceon-transact-sql). - -### Restart SQL Server and validate the configuration - -After you've ensured that you're on a supported version of SQL Server, enabled the Always On availability groups feature, and added your startup trace flags, restart your SQL Server instance to apply all of these changes: - -1. Open **SQL Server Configuration Manager**. -1. Select **SQL Server Services** from the left pane. -1. Right-click the SQL Server service, and then select **Restart**. 
- - :::image type="content" source="./media/managed-instance-link-preparation/sql-server-configuration-manager-sql-server-restart.png" alt-text="Screenshot that shows the SQL Server restart command call."::: - -After the restart, run the following T-SQL script on SQL Server to validate the configuration of your SQL Server instance: - -```sql --- Run on SQL Server --- Shows the version and CU of SQL Server -SELECT @@VERSION - --- Shows if the Always On availability groups feature is enabled -SELECT SERVERPROPERTY ('IsHadrEnabled') - --- Lists all trace flags enabled on SQL Server -DBCC TRACESTATUS -``` - -Your SQL Server version should be 15.0.4198.2 or later, the Always On availability groups feature should be enabled, and you should have the trace flags `-T1800` and `-T9567` enabled. The following screenshot is an example of the expected outcome for a SQL Server instance that has been properly configured: - -:::image type="content" source="./media/managed-instance-link-preparation/ssms-results-expected-outcome.png" alt-text="Screenshot that shows the expected outcome in S S M S."::: - -### Set up database recovery and backup - -All databases that will be replicated via the link must be in full recovery mode and have at least one backup. Run the following code on SQL Server: - -```sql --- Run on SQL Server --- Set full recovery mode for all databases you want to replicate. -ALTER DATABASE [] SET RECOVERY FULL -GO - --- Execute backup for all databases you want to replicate. -BACKUP DATABASE [] TO DISK = N'' -GO -``` - -## Configure network connectivity - -For the link to work, you must have network connectivity between SQL Server and SQL Managed Instance. The network option that you choose depends on where your SQL Server instance resides - whether it's on-premises or on a virtual machine (VM). 
- -### SQL Server on Azure Virtual Machines - -Deploying SQL Server on Azure Virtual Machines in the same Azure virtual network that hosts SQL Managed Instance is the simplest method, because network connectivity will automatically exist between the two instances. To learn more, see the detailed tutorial [Deploy and configure an Azure VM to connect to Azure SQL Managed Instance](./connect-vm-instance-configure.md). - -If your SQL Server on Azure Virtual Machines instance is in a different virtual network from your managed instance, either connect the two Azure virtual networks by using [global virtual network peering](https://techcommunity.microsoft.com/t5/azure-sql/new-feature-global-vnet-peering-support-for-azure-sql-managed/ba-p/1746913) or configure [VPN gateways](../../vpn-gateway/tutorial-create-gateway-portal.md). - ->[!NOTE] -> Global virtual network peering is enabled by default on managed instances provisioned after November 2020. [Raise a support ticket](../database/quota-increase-request.md) to enable global virtual network peering on older instances. - - -### SQL Server outside Azure - -If your SQL Server instance is hosted outside Azure, establish a VPN connection between SQL Server and SQL Managed Instance by using either of these options: - -- [Site-to-site VPN connection](/office365/enterprise/connect-an-on-premises-network-to-a-microsoft-azure-virtual-network) -- [Azure ExpressRoute connection](../../expressroute/expressroute-introduction.md) - -> [!TIP] -> We recommend ExpressRoute for the best network performance when you're replicating data. Provision a gateway with enough bandwidth for your use case. - -### Network ports between the environments - -Port 5022 needs to allow inbound and outbound traffic between SQL Server and SQL Managed Instance. Port 5022 is the standard database mirroring endpoint port for availability groups. It can't be changed or customized. 
- -The following table describes port actions for each environment: - -|Environment|What to do| -|:---|:-----| -|SQL Server (in Azure) | Open both inbound and outbound traffic on port 5022 for the network firewall to the entire subnet IP range of SQL Managed Instance. If necessary, do the same on the SQL Server host OS (Windows/Linux) firewall. Create a network security group (NSG) rule in the virtual network that hosts the VM to allow communication on port 5022. | -|SQL Server (outside Azure) | Open both inbound and outbound traffic on port 5022 for the network firewall to the entire subnet IP range of SQL Managed Instance. If necessary, do the same on the SQL Server host OS (Windows/Linux) firewall. | -|SQL Managed Instance |[Create an NSG rule](../../virtual-network/manage-network-security-group.md#create-a-security-rule) in the Azure portal to allow inbound and outbound traffic from the IP address of SQL Server on port 5022 to the virtual network that hosts SQL Managed Instance. | - -Use the following PowerShell script on the Windows host of the SQL Server instance to open ports in the Windows firewall: - -```powershell -New-NetFirewallRule -DisplayName "Allow TCP port 5022 inbound" -Direction inbound -Profile Any -Action Allow -LocalPort 5022 -Protocol TCP -New-NetFirewallRule -DisplayName "Allow TCP port 5022 outbound" -Direction outbound -Profile Any -Action Allow -LocalPort 5022 -Protocol TCP -``` - -## Test bidirectional network connectivity - -Bidirectional network connectivity between SQL Server and SQL Managed Instance is necessary for the link to work. After you open ports on the SQL Server side and configure an NSG rule on the SQL Managed Instance side, test connectivity. - -### Test the connection from SQL Server to SQL Managed Instance - -To check if SQL Server can reach SQL Managed Instance, use the following `tnc` command in PowerShell from the SQL Server host machine. Replace `` with the fully qualified domain name (FQDN) of the managed instance. 
You can copy the FQDN from the managed instance's overview page in the Azure portal. - -```powershell -tnc -port 5022 -``` - -A successful test shows `TcpTestSucceeded : True`. - -:::image type="content" source="./media/managed-instance-link-preparation/powershell-output-tnc-command.png" alt-text="Screenshot that shows the output of the command for testing a network connection in PowerShell."::: - -If the response is unsuccessful, verify the following network settings: -- There are rules in both the network firewall *and* the SQL Server host OS (Windows/Linux) firewall that allow traffic to the entire *subnet IP range* of SQL Managed Instance. -- There's an NSG rule that allows communication on port 5022 for the virtual network that hosts SQL Managed Instance. - - -### Test the connection from SQL Managed Instance to SQL Server - -To check that SQL Managed Instance can reach SQL Server, you first create a test endpoint. Then you use the SQL Agent to run a PowerShell script with the `tnc` command pinging SQL Server on port 5022 from the managed instance. - -To create a test endpoint, connect to SQL Server and run the following T-SQL script: - -```sql --- Run on SQL Server --- Create the certificate needed for the test endpoint -USE MASTER -CREATE CERTIFICATE TEST_CERT -WITH SUBJECT = N'Certificate for SQL Server', -EXPIRY_DATE = N'3/30/2051' -GO - --- Create the test endpoint on SQL Server -USE MASTER -CREATE ENDPOINT TEST_ENDPOINT - STATE=STARTED - AS TCP (LISTENER_PORT=5022, LISTENER_IP = ALL) - FOR DATABASE_MIRRORING ( - ROLE=ALL, - AUTHENTICATION = CERTIFICATE TEST_CERT, - ENCRYPTION = REQUIRED ALGORITHM AES - ) -``` - -To verify that the SQL Server endpoint is receiving connections on port 5022, run the following PowerShell command on the host operating system of your SQL Server instance: - -```powershell -tnc localhost -port 5022 -``` - -A successful test shows `TcpTestSucceeded : True`. 
You can then proceed to creating a SQL Agent job on the managed instance to try testing the SQL Server test endpoint on port 5022 from the managed instance. - -Next, create a SQL Agent job on the managed instance called `NetHelper` by using the public IP address or DNS name that can be resolved from the managed instance for `SQL_SERVER_ADDRESS`. Run the following T-SQL script on the managed instance: - -```sql --- Run on the managed instance --- SQL_SERVER_ADDRESS should be a public IP address, or the DNS name that can be resolved from the SQL Managed Instance host machine. -DECLARE @SQLServerIpAddress NVARCHAR(MAX) = '' -DECLARE @tncCommand NVARCHAR(MAX) = 'tnc ' + @SQLServerIpAddress + ' -port 5022 -InformationLevel Quiet' -DECLARE @jobId BINARY(16) - -EXEC msdb.dbo.sp_add_job @job_name=N'NetHelper', - @enabled=1, - @description=N'Test Managed Instance to SQL Server network connectivity on port 5022.', - @category_name=N'[Uncategorized (Local)]', - @owner_login_name=N'cloudSA', @job_id = @jobId OUTPUT - -EXEC msdb.dbo.sp_add_jobstep @job_id=@jobId, @step_name=N'tnc step', - @step_id=1, - @os_run_priority=0, @subsystem=N'PowerShell', - @command = @tncCommand, - @database_name=N'master', - @flags=40 - -EXEC msdb.dbo.sp_update_job @job_id = @jobId, @start_step_id = 1 - -EXEC msdb.dbo.sp_add_jobserver @job_id = @jobId, @server_name = N'(local)' - -EXEC msdb.dbo.sp_start_job @job_name = N'NetHelper' -``` - - -Run the SQL Agent job by running the following T-SQL command on the managed instance: - -```sql --- Run on the managed instance -EXEC msdb.dbo.sp_start_job @job_name = N'NetHelper' -``` - -Run the following query on the managed instance to show the log of the SQL Agent job: - -```sql --- Run on the managed instance -SELECT - sj.name JobName, sjs.step_id, sjs.step_name, sjsl.log, sjsl.date_modified -FROM - msdb.dbo.sysjobs sj - LEFT OUTER JOIN msdb.dbo.sysjobsteps sjs - ON sj.job_id = sjs.job_id - LEFT OUTER JOIN msdb.dbo.sysjobstepslogs sjsl - ON sjs.step_uid = 
sjsl.step_uid -WHERE - sj.name = 'NetHelper' -``` - -If the connection is successful, the log will show `True`. If the connection is unsuccessful, the log will show `False`. - -:::image type="content" source="./media/managed-instance-link-preparation/ssms-output-tnchelper.png" alt-text="Screenshot that shows the expected output of the NetHelper SQL Agent job."::: - -Finally, drop the test endpoint and certificate on SQL Server by using the following T-SQL commands: - -```sql --- Run on SQL Server -DROP ENDPOINT TEST_ENDPOINT -GO -DROP CERTIFICATE TEST_CERT -GO -``` - -If the connection is unsuccessful, verify the following items: - -- The firewall on the host SQL Server instance allows inbound and outbound communication on port 5022. -- An NSG rule for the virtual network that hosts SQL Managed Instance allows communication on port 5022. -- If your SQL Server instance is on an Azure VM, an NSG rule allows communication on port 5022 on the virtual network that hosts the VM. -- SQL Server is running. - -> [!CAUTION] -> Proceed with the next steps only if you've validated network connectivity between your source and target environments. Otherwise, troubleshoot network connectivity issues before proceeding. - -## Migrate a certificate of a TDE-protected database - -If you're migrating a SQL Server database protected by Transparent Data Encryption to a managed instance, you must migrate the corresponding encryption certificate from the on-premises or Azure VM SQL Server instance to the managed instance before using the link. For detailed steps, see [Migrate a TDE certificate to a managed instance](tde-certificate-migrate.md). - -## Install SSMS - -SQL Server Management Studio (SSMS) v18.11.1 is the easiest way to use a SQL Managed Instance link. [Download SSMS version 18.11.1 or later](/sql/ssms/download-sql-server-management-studio-ssms) and install it to your client machine. - -After installation finishes, open SSMS and connect to your supported SQL Server instance. 
Right-click a user database and validate that the **Azure SQL Managed Instance link** option appears on the menu. - -:::image type="content" source="./media/managed-instance-link-preparation/ssms-database-context-menu-managed-instance-link.png" alt-text="Screenshot that shows the Azure SQL Managed Instance link option on the context menu."::: - -## Next steps - -After you've prepared your environment, you're ready to start [replicating your database](managed-instance-link-use-ssms-to-replicate-database.md). To learn more, review [Link feature for Azure SQL Managed Instance](link-feature.md). diff --git a/articles/azure-sql/managed-instance/managed-instance-link-use-scripts-to-failover-database.md b/articles/azure-sql/managed-instance/managed-instance-link-use-scripts-to-failover-database.md deleted file mode 100644 index 9fa0617b0b0cd..0000000000000 --- a/articles/azure-sql/managed-instance/managed-instance-link-use-scripts-to-failover-database.md +++ /dev/null @@ -1,286 +0,0 @@ ---- -title: Fail over a database with the link via T-SQL & PowerShell scripts -titleSuffix: Azure SQL Managed Instance -description: Learn how to use Transact-SQL and PowerShell scripts to fail over a database from SQL Server to SQL Managed Instance by using the Managed Instance link. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: data-movement -ms.custom: -ms.devlang: -ms.topic: guide -author: sasapopo -ms.author: sasapopo -ms.reviewer: mathoma, danil -ms.date: 03/15/2022 ---- - -# Fail over (migrate) a database with a link via T-SQL and PowerShell scripts - Azure SQL Managed Instance - -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article teaches you how to use Transact-SQL (T-SQL) and PowerShell scripts and a [Managed Instance link](managed-instance-link-feature-overview.md) to fail over (migrate) your database from SQL Server to SQL Managed Instance. 
- -> [!NOTE] -> - The link is a feature of Azure SQL Managed Instance and is currently in preview. You can also use a [SQL Server Management Studio (SSMS) wizard](managed-instance-link-use-ssms-to-failover-database.md) to fail over a database with the link. -> - The PowerShell scripts in this article make REST API calls on the SQL Managed Instance side. - - -Database failover from SQL Server to SQL Managed Instance breaks the link between the two databases. Failover stops replication and leaves both databases in an independent state, ready for individual read/write workloads. - -To start migrating your database to SQL Managed Instance, first stop any application workloads on SQL Server during your maintenance hours. This enables SQL Managed Instance to catch up with database replication and migrate to Azure while mitigating data loss. - -While the primary database is a part of an Always On availability group, you can't set it to read-only mode. You need to ensure that your applications aren't committing transactions to SQL Server. - -## Switch the replication mode - -The replication between SQL Server and SQL Managed Instance is asynchronous by default. Before you migrate your database to Azure, switch the link to synchronous mode. Synchronous replication across large network distances might slow down transactions on the primary SQL Server instance. - -Switching from async to sync mode requires a replication mode change on SQL Managed Instance and SQL Server. - -### Switch replication mode (SQL Managed Instance) - -Use the following PowerShell script to call a REST API that changes the replication mode from asynchronous to synchronous on SQL Managed Instance. We suggest that you make the REST API call by using Azure Cloud Shell in the Azure portal. In the script, replace: - -- `` with your subscription ID. -- `` with the name of your managed instance. -- `` with the name of the distributed availability group whose replication mode you want to change. 
- -```powershell -# Run in Azure Cloud Shell -# ==================================================================================== -# POWERSHELL SCRIPT TO SWITCH REPLICATION MODE SYNC-ASYNC ON MANAGED INSTANCE -# USER CONFIGURABLE VALUES -# (C) 2021-2022 SQL Managed Instance product group -# ==================================================================================== -# Enter your Azure subscription ID -$SubscriptionID = "" -# Enter your managed instance name – for example, "sqlmi1" -$ManagedInstanceName = "" -# Enter the distributed availability group name (the link name) -$DAGName = "" - -# ==================================================================================== -# INVOKING THE API CALL -- THIS PART IS NOT USER CONFIGURABLE -# ==================================================================================== -# Log in and select a subscription if needed -if ((Get-AzContext ) -eq $null) -{ - echo "Logging to Azure subscription" - Login-AzAccount -} -Select-AzSubscription -SubscriptionName $SubscriptionID - -# Build a URI for the API call -# -$miRG = (Get-AzSqlInstance -InstanceName $ManagedInstanceName).ResourceGroupName -$uriFull = "https://management.azure.com/subscriptions/" + $SubscriptionID + "/resourceGroups/" + $miRG+ "/providers/Microsoft.Sql/managedInstances/" + $ManagedInstanceName + "/distributedAvailabilityGroups/" + $DAGName + "?api-version=2021-05-01-preview" -echo $uriFull - -# Build the API request body -# - -$bodyFull = "{`"properties`":{`"ReplicationMode`":`"sync`"}}" - -echo $bodyFull - -# Get an authentication token and build the header -# -$azProfile = [Microsoft.Azure.Commands.Common.Authentication.Abstractions.AzureRmProfileProvider]::Instance.Profile -$currentAzureContext = Get-AzContext -$profileClient = New-Object Microsoft.Azure.Commands.ResourceManager.Common.RMProfileClient($azProfile) -$token = $profileClient.AcquireAccessToken($currentAzureContext.Tenant.TenantId) -$authToken = $token.AccessToken -$headers = 
@{} -$headers.Add("Authorization", "Bearer "+"$authToken") - -# Invoke the API call -# -echo "Invoking API call switch Async-Sync replication mode on Managed Instance" -Invoke-WebRequest -Method PATCH -Headers $headers -Uri $uriFull -ContentType "application/json" -Body $bodyFull -``` - -### Switch replication mode (SQL Server) - -Use the following T-SQL script on SQL Server to change the replication mode of the distributed availability group on SQL Server from async to sync. Replace: - -- `` with the name of the distributed availability group. -- `` with the name of the availability group created on SQL Server. -- `` with the name of your managed instance. - -```sql --- Run on SQL Server --- Sets the distributed availability group to a synchronous commit. --- ManagedInstanceName example: 'sqlmi1' -USE master -GO -ALTER AVAILABILITY GROUP [] -MODIFY -AVAILABILITY GROUP ON - '' WITH - (AVAILABILITY_MODE = SYNCHRONOUS_COMMIT), - '' WITH - (AVAILABILITY_MODE = SYNCHRONOUS_COMMIT); -``` - -To confirm that you've changed the link's replication mode successfully, use the following dynamic management view. Results indicate the `SYNCHRONOUS_COMMIT` state. - -```sql --- Run on SQL Server --- Verifies the state of the distributed availability group -SELECT - ag.name, ag.is_distributed, ar.replica_server_name, - ar.availability_mode_desc, ars.connected_state_desc, ars.role_desc, - ars.operational_state_desc, ars.synchronization_health_desc -FROM - sys.availability_groups ag - join sys.availability_replicas ar - on ag.group_id=ar.group_id - left join sys.dm_hadr_availability_replica_states ars - on ars.replica_id=ar.replica_id -WHERE - ag.is_distributed=1 -``` - -Now that you've switched both SQL Managed Instance and SQL Server to sync mode, the replication between the two entities is synchronous. If you need to reverse this state, follow the same steps and set the async state for both SQL Server and SQL Managed Instance. 
- -## Check LSN values on both SQL Server and SQL Managed Instance - -To complete the migration, confirm that replication has finished. For this, ensure that the log sequence numbers (LSNs) indicating the log records written for both SQL Server and SQL Managed Instance are the same. - -Initially, it's expected that the SQL Server LSN will be higher than the SQL Managed Instance LSN. Network latency might cause SQL Managed Instance to lag somewhat behind the primary SQL Server instance. Because the workload has been stopped on SQL Server, you should expect the LSNs to match and stop changing after some time. - -Use the following T-SQL query on SQL Server to read the LSN of the last recorded transaction log. Replace `` with your database name and look for the last hardened LSN number. - -```sql --- Run on SQL Server --- Obtain the last hardened LSN for the database on SQL Server. -SELECT - ag.name AS [Replication group], - db.name AS [Database name], - drs.database_id AS [Database ID], - drs.group_id, - drs.replica_id, - drs.synchronization_state_desc AS [Sync state], - drs.end_of_log_lsn AS [End of log LSN], - drs.last_hardened_lsn AS [Last hardened LSN] -FROM - sys.dm_hadr_database_replica_states drs - inner join sys.databases db on db.database_id = drs.database_id - inner join sys.availability_groups ag on drs.group_id = ag.group_id -WHERE - ag.is_distributed = 1 and db.name = '' -``` - -Use the following T-SQL query on SQL Managed Instance to read the last hardened LSN for your database. Replace `` with your database name. - -This query will work on a General Purpose managed instance. For a Business Critical managed instance, you need to uncomment `and drs.is_primary_replica = 1` at the end of the script. On Business Critical, this filter ensures that only primary replica details are read. - -```sql --- Run on a managed instance --- Obtain the LSN for the database on SQL Managed Instance. 
-SELECT - db.name AS [Database name], - drs.database_id AS [Database ID], - drs.group_id, - drs.replica_id, - drs.synchronization_state_desc AS [Sync state], - drs.end_of_log_lsn AS [End of log LSN], - drs.last_hardened_lsn AS [Last hardened LSN] -FROM - sys.dm_hadr_database_replica_states drs - inner join sys.databases db on db.database_id = drs.database_id -WHERE - db.name = '' - -- for Business Critical, add the following as well - -- AND drs.is_primary_replica = 1 -``` - -Verify once again that your workload is stopped on SQL Server. Check that LSNs on both SQL Server and SQL Managed Instance match, and that they remain matched and unchanged for some time. Stable LSNs on both instances indicate that the tail log has been replicated to SQL Managed Instance and the workload is effectively stopped. - -## Start database failover and migration to Azure - -Invoke a REST API call to fail over your database over the link and finalize your migration to Azure. The REST API call breaks the link and ends replication to SQL Managed Instance. The replicated database becomes read/write on the managed instance. - -Use the following API to start database failover to Azure. Replace: - -- `` with your Azure subscription ID. -- `` with the resource group where your managed instance is deployed. -- `` with the name of your managed instance. -- `` with the name of the distributed availability group made on SQL Server. 
- -```PowerShell -# Run in Azure Cloud Shell -# ==================================================================================== -# POWERSHELL SCRIPT TO FAIL OVER AND MIGRATE DATABASE WITH SQL MANAGED INSTANCE LINK -# USER CONFIGURABLE VALUES -# (C) 2021-2022 SQL Managed Instance product group -# ==================================================================================== -# Enter your Azure subscription ID -$SubscriptionID = "" -# Enter your managed instance name – for example, "sqlmi1" -$ManagedInstanceName = "" -# Enter the distributed availability group link name -$DAGName = "" - -# ==================================================================================== -# INVOKING THE API CALL -- THIS PART IS NOT USER CONFIGURABLE. -# ==================================================================================== -# Log in and select a subscription if needed -if ((Get-AzContext ) -eq $null) -{ - echo "Logging to Azure subscription" - Login-AzAccount -} -Select-AzSubscription -SubscriptionName $SubscriptionID - -# Build a URI for the API call -# -$miRG = (Get-AzSqlInstance -InstanceName $ManagedInstanceName).ResourceGroupName -$uriFull = "https://management.azure.com/subscriptions/" + $SubscriptionID + "/resourceGroups/" + $miRG+ "/providers/Microsoft.Sql/managedInstances/" + $ManagedInstanceName + "/distributedAvailabilityGroups/" + $DAGName + "?api-version=2021-05-01-preview" -echo $uriFull - -# Get an authentication token and build the header -# -$azProfile = [Microsoft.Azure.Commands.Common.Authentication.Abstractions.AzureRmProfileProvider]::Instance.Profile -$currentAzureContext = Get-AzContext -$profileClient = New-Object Microsoft.Azure.Commands.ResourceManager.Common.RMProfileClient($azProfile) -$token = $profileClient.AcquireAccessToken($currentAzureContext.Tenant.TenantId) -$authToken = $token.AccessToken -$headers = @{} -$headers.Add("Authorization", "Bearer "+"$authToken") - -# Invoke the API call -# -Invoke-WebRequest -Method DELETE 
-Headers $headers -Uri $uriFull -ContentType "application/json" -``` - -## Clean up availability groups - -After you break the link and migrate a database to Azure SQL Managed Instance, consider cleaning up the availability group and distributed availability group resources from SQL Server if they're no longer necessary. - -In the following code, replace: - -- `` with the name of the distributed availability group on SQL Server. -- `` with the name of the availability group on SQL Server. - -``` sql --- Run on SQL Server -USE MASTER -GO -DROP AVAILABILITY GROUP -GO -DROP AVAILABILITY GROUP -GO -``` - -With this step, you've finished the migration of the database from SQL Server to SQL Managed Instance. - -## Next steps - -For more information on the link feature, see the following resources: - -- [Managed Instance link – connecting SQL Server to Azure reimagined](https://aka.ms/mi-link-techblog) -- [Prepare your environment for Managed Instance link](./managed-instance-link-preparation.md) -- [Use a Managed Instance link with scripts to replicate a database](./managed-instance-link-use-scripts-to-replicate-database.md) -- [Use a Managed Instance link via SSMS to replicate a database](./managed-instance-link-use-ssms-to-replicate-database.md) -- [Use a Managed Instance link via SSMS to migrate a database](./managed-instance-link-use-ssms-to-failover-database.md) diff --git a/articles/azure-sql/managed-instance/managed-instance-link-use-scripts-to-replicate-database.md b/articles/azure-sql/managed-instance/managed-instance-link-use-scripts-to-replicate-database.md deleted file mode 100644 index b44014644ae57..0000000000000 --- a/articles/azure-sql/managed-instance/managed-instance-link-use-scripts-to-replicate-database.md +++ /dev/null @@ -1,546 +0,0 @@ ---- -title: Replicate a database with the link via T-SQL & PowerShell scripts -titleSuffix: Azure SQL Managed Instance -description: Learn how to use a Managed Instance link with T-SQL and PowerShell scripts to 
replicate a database from SQL Server to Azure SQL Managed Instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: data-movement -ms.custom: -ms.devlang: -ms.topic: guide -author: sasapopo -ms.author: sasapopo -ms.reviewer: mathoma, danil -ms.date: 03/22/2022 ---- - -# Replicate a database with the link feature via T-SQL and PowerShell scripts - Azure SQL Managed Instance - -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article teaches you how to use Transact-SQL (T-SQL) and PowerShell scripts to replicate your database from SQL Server to Azure SQL Managed Instance by using a [Managed Instance link](managed-instance-link-feature-overview.md). - -> [!NOTE] -> - The link is a feature of Azure SQL Managed Instance and is currently in preview. You can also use a [SQL Server Management Studio (SSMS) wizard](managed-instance-link-use-ssms-to-replicate-database.md) to set up the link to replicate your database. -> - The PowerShell scripts in this article call SQL Managed Instance REST APIs. - - -## Prerequisites - -To replicate your databases to SQL Managed Instance, you need the following prerequisites: - -- An active Azure subscription. If you don't have one, [create a free account](https://azure.microsoft.com/free/). -- [SQL Server 2019 Enterprise or Developer edition](https://www.microsoft.com/en-us/evalcenter/evaluate-sql-server-2019), starting with [CU15 (15.0.4198.2)](https://support.microsoft.com/topic/kb5008996-cumulative-update-15-for-sql-server-2019-4b6a8ee9-1c61-482d-914f-36e429901fb6). -- Azure SQL Managed Instance. [Get started](instance-create-quickstart.md) if you don't have it. -- [SQL Server Management Studio v18.11.1 or later](/sql/ssms/download-sql-server-management-studio-ssms). -- A properly [prepared environment](managed-instance-link-preparation.md). - -## Replicate a database - -Use the following instructions to manually set up the link between your SQL Server instance and managed instance. 
After the link is created, your source database gets a read-only replica copy on your target managed instance. - -> [!NOTE] -> The link supports replication of user databases only. Replication of system databases is not supported. To replicate instance-level objects (stored in master or msdb databases), we recommend that you script them out and run T-SQL scripts on the destination instance. - -## Terminology and naming conventions - -As you run scripts from this user guide, it's important not to mistake SQL Server and SQL Managed Instance names for their fully qualified domain names (FQDNs). The following table explains what the various names exactly represent and how to obtain their values: - -| Terminology | Description | How to find out | -| :----| :------------- | :------------- | -| SQL Server name | Also called a short SQL Server name. For example: *sqlserver1*. This isn't a fully qualified domain name. | Run `SELECT @@SERVERNAME` from T-SQL. | -| SQL Server FQDN | Fully qualified domain name of your SQL Server instance. For example: *sqlserver1.domain.com*. | See your network (DNS) configuration on-premises, or the server name if you're using an Azure virtual machine (VM). | -| SQL Managed Instance name | Also called a short SQL Managed Instance name. For example: *managedinstance1*. | See the name of your managed instance in the Azure portal. | -| SQL Managed Instance FQDN | Fully qualified domain name of your SQL Managed Instance name. For example: *managedinstance1.6d710bcf372b.database.windows.net*. | See the host name on the SQL Managed Instance overview page in the Azure portal. | -| Resolvable domain name | DNS name that can be resolved to an IP address. For example, running *nslookup sqlserver1.domain.com* should return an IP address such as 10.0.1.100. | Use nslookup from the command prompt. 
| - -## Establish trust between instances - -The first step in setting up a link is to establish trust between the two instances and secure the endpoints that are used to communicate and encrypt data across the network. Distributed availability groups use the existing availability group database mirroring endpoint, rather than having their own dedicated endpoint. This is why security and trust need to be configured between the two entities through the availability group database mirroring endpoint. - -Certificate-based trust is the only supported way to secure database mirroring endpoints on SQL Server and SQL Managed Instance. If you have existing availability groups that use Windows authentication, you need to add certificate-based trust to the existing mirroring endpoint as a secondary authentication option. You can do this by using the `ALTER ENDPOINT` statement. - -> [!IMPORTANT] -> Certificates are generated with an expiration date and time. They must be rotated before they expire. - -Here's an overview of the process to secure database mirroring endpoints for both SQL Server and SQL Managed Instance: - -1. Generate a certificate on SQL Server and obtain its public key. -1. Obtain a public key of the SQL Managed Instance certificate. -1. Exchange the public keys between SQL Server and SQL Managed Instance. - -The following sections describe these steps in detail. 
- -### Create a certificate on SQL Server and import its public key to SQL Managed Instance - -First, create a master key on SQL Server and generate an authentication certificate: - -```sql --- Run on SQL Server --- Create a master key encryption password --- Keep the password confidential and in a secure place -USE MASTER -CREATE MASTER KEY ENCRYPTION BY PASSWORD = '' -GO - --- Create the SQL Server certificate for the instance link -USE MASTER -GO - -DECLARE @sqlserver_certificate_name NVARCHAR(MAX) = N'Cert_' + @@servername + N'_endpoint' -DECLARE @sqlserver_certificate_subject NVARCHAR(MAX) = N'Certificate for ' + @sqlserver_certificate_name -DECLARE @create_sqlserver_certificate_command NVARCHAR(MAX) = N'CREATE CERTIFICATE [' + @sqlserver_certificate_name + '] WITH SUBJECT = ''' + @sqlserver_certificate_subject + ''', EXPIRY_DATE = ''03/30/2025''' -EXEC sp_executesql @stmt = @create_sqlserver_certificate_command -GO -``` - -Then, use the following T-SQL query on SQL Server to verify that the certificate has been created: - -```sql --- Run on SQL Server -USE MASTER -GO -SELECT * FROM sys.certificates -``` - -In the query results, you'll see that the certificate has been encrypted with the master key. - -Now you can get the public key of the generated certificate on SQL Server: - -```sql --- Run on SQL Server --- Show the public key of the generated SQL Server certificate -USE MASTER -GO -DECLARE @sqlserver_certificate_name NVARCHAR(MAX) = N'Cert_' + @@servername + N'_endpoint' -DECLARE @PUBLICKEYENC VARBINARY(MAX) = CERTENCODED(CERT_ID(@sqlserver_certificate_name)); -SELECT @PUBLICKEYENC AS PublicKeyEncoded; -``` - -Save the value of `PublicKeyEncoded` from the output, because you'll need it for the next step. - -For the next step, use PowerShell with the installed [Az.Sql module](https://www.powershellgallery.com/packages/Az.Sql/3.7.1), version 3.5.1 or later. 
Or use Azure Cloud Shell online to run the commands, because it's always updated with the latest module versions. - -Run the following PowerShell script. (If you use Cloud Shell, fill out necessary user information, copy it, paste it into Cloud Shell, and then run the script.) Replace: - -- `` with your Azure subscription ID. -- `` with the short name of your managed instance. -- `` with the public portion of the SQL Server certificate in binary format, which you generated in the previous step. It's a long string value that starts with `0x`. - -```powershell -# Run in Azure Cloud Shell -# =============================================================================== -# POWERSHELL SCRIPT TO IMPORT SQL SERVER CERTIFICATE TO MANAGED INSTANCE -# USER CONFIGURABLE VALUES -# (C) 2021-2022 SQL Managed Instance product group -# =============================================================================== -# Enter your Azure subscription ID -$SubscriptionID = "" - -# Enter your managed instance name – for example, "sqlmi1" -$ManagedInstanceName = "" - -# Enter the name for the server trust certificate – for example, "Cert_sqlserver1_endpoint" -$certificateName = "" - -# Insert the certificate public key blob that you got from SQL Server – for example, "0x1234567..." - -$PublicKeyEncoded = "" - -# =============================================================================== -# INVOKING THE API CALL -- REST OF THE SCRIPT IS NOT USER CONFIGURABLE -# =============================================================================== -# Log in and select a subscription if needed. -# -if ((Get-AzContext ) -eq $null) -{ - echo "Logging to Azure subscription" - Login-AzAccount -} -Select-AzSubscription -SubscriptionName $SubscriptionID - -# Build the URI for the API call. 
-# -$miRG = (Get-AzSqlInstance -InstanceName $ManagedInstanceName).ResourceGroupName -$uriFull = "https://management.azure.com/subscriptions/" + $SubscriptionID + "/resourceGroups/" + $miRG+ "/providers/Microsoft.Sql/managedInstances/" + $ManagedInstanceName + "/serverTrustCertificates/" + $certificateName + "?api-version=2021-08-01-preview" -echo $uriFull - -# Build the API request body. -# -$bodyFull = "{ `"properties`":{ `"PublicBlob`":`"$PublicKeyEncoded`" } }" - -echo $bodyFull - -# Get auth token and build the HTTP request header. -# -$azProfile = [Microsoft.Azure.Commands.Common.Authentication.Abstractions.AzureRmProfileProvider]::Instance.Profile -$currentAzureContext = Get-AzContext -$profileClient = New-Object Microsoft.Azure.Commands.ResourceManager.Common.RMProfileClient($azProfile) -$token = $profileClient.AcquireAccessToken($currentAzureContext.Tenant.TenantId) -$authToken = $token.AccessToken -$headers = @{} -$headers.Add("Authorization", "Bearer "+"$authToken") - -# Invoke API call -# -Invoke-WebRequest -Method PUT -Headers $headers -Uri $uriFull -ContentType "application/json" -Body $bodyFull -``` - -The result of this operation will be a time stamp of the successful upload of the SQL Server certificate private key to SQL Managed Instance. - -### Get the certificate public key from SQL Managed Instance and import it to SQL Server - -The certificate for securing the endpoint for a link is automatically generated. This section describes how to get the certificate public key from SQL Managed Instance, and how to import it to SQL Server. - -Use SSMS to connect to SQL Managed Instance. Run the stored procedure [sp_get_endpoint_certificate](/sql/relational-databases/system-stored-procedures/sp-get-endpoint-certificate-transact-sql) to get the certificate public key: - -```sql --- Run on a managed instance -EXEC sp_get_endpoint_certificate @endpoint_type = 4 -``` - -Copy the entire public key (which starts with `0x`) from SQL Managed Instance. 
Run the following query on SQL Server by replacing `<PublicKeyEncoded>` with the key value. You don't need to use quotation marks. - -> [!IMPORTANT] -> The name of the certificate must be the SQL Managed Instance FQDN. - -```sql --- Run on SQL Server -USE MASTER -CREATE CERTIFICATE [<ManagedInstanceFQDN>] -FROM BINARY = <PublicKeyEncoded> -``` - -Finally, verify all created certificates by using the following dynamic management view (DMV): - -```sql --- Run on SQL Server -SELECT * FROM sys.certificates -``` - -## Create a mirroring endpoint on SQL Server - -If you don't have an existing availability group or a mirroring endpoint on SQL Server, the next step is to create a mirroring endpoint on SQL Server and secure it with the certificate. If you do have an existing availability group or mirroring endpoint, go straight to the next section, [Alter an existing endpoint](#alter-an-existing-endpoint). - -To verify that you don't have an existing database mirroring endpoint created, use the following script: - -```sql --- Run on SQL Server --- View database mirroring endpoints on SQL Server -SELECT * FROM sys.database_mirroring_endpoints WHERE type_desc = 'DATABASE_MIRRORING' -``` - -If the preceding query doesn't show an existing database mirroring endpoint, run the following script on SQL Server. It creates a new database mirroring endpoint on port 5022 and secures the endpoint with a certificate. 
- -```sql --- Run on SQL Server --- Create a connection endpoint listener on SQL Server -USE MASTER -CREATE ENDPOINT database_mirroring_endpoint - STATE=STARTED - AS TCP (LISTENER_PORT=5022, LISTENER_IP = ALL) - FOR DATABASE_MIRRORING ( - ROLE=ALL, - AUTHENTICATION = CERTIFICATE <certificate_name>, - ENCRYPTION = REQUIRED ALGORITHM AES - ) -GO -``` - -Validate that the mirroring endpoint was created by running the following script on SQL Server: - -```sql --- Run on SQL Server --- View database mirroring endpoints on SQL Server -SELECT - name, type_desc, state_desc, role_desc, - connection_auth_desc, is_encryption_enabled, encryption_algorithm_desc -FROM - sys.database_mirroring_endpoints -``` - -A new mirroring endpoint was created with certificate authentication and AES encryption enabled. - -### Alter an existing endpoint - -> [!NOTE] -> Skip this step if you've just created a new mirroring endpoint. Use this step only if you're using existing availability groups with an existing database mirroring endpoint. - -If you're using existing availability groups for the link, or if there's an existing database mirroring endpoint, first validate that it satisfies the following mandatory conditions for the link: - -- Type must be `DATABASE_MIRRORING`. -- Connection authentication must be `CERTIFICATE`. -- Encryption must be enabled. -- Encryption algorithm must be `AES`. - -Run the following query on SQL Server to view details for an existing database mirroring endpoint: - -```sql --- Run on SQL Server --- View database mirroring endpoints on SQL Server -SELECT - name, type_desc, state_desc, role_desc, connection_auth_desc, - is_encryption_enabled, encryption_algorithm_desc -FROM - sys.database_mirroring_endpoints -``` - -If the output shows that the existing `DATABASE_MIRRORING` endpoint `connection_auth_desc` isn't `CERTIFICATE`, or `encryption_algorithm_desc` isn't `AES`, the *endpoint needs to be altered to meet the requirements*. 
- -On SQL Server, the same database mirroring endpoint is used for both availability groups and distributed availability groups. If your `connection_auth_desc` endpoint is `NTLM` (Windows authentication) or `KERBEROS`, and you need Windows authentication for an existing availability group, it's possible to alter the endpoint to use multiple authentication methods by switching the authentication option to `NEGOTIATE CERTIFICATE`. This change will allow the existing availability group to use Windows authentication, while using certificate authentication for SQL Managed Instance. - -Similarly, if encryption doesn't include AES and you need RC4 encryption, it's possible to alter the endpoint to use both algorithms. For details about possible options for altering endpoints, see the [documentation page for sys.database_mirroring_endpoints](/sql/relational-databases/system-catalog-views/sys-database-mirroring-endpoints-transact-sql). - -The following script is an example of how to alter your existing database mirroring endpoint on SQL Server. Replace: - -- `<endpoint_name>` with your existing endpoint name. -- `<certificate_name>` with the name of the generated SQL Server certificate. - -Depending on your specific configuration, you might need to customize the script further. You can also use `SELECT * FROM sys.certificates` to get the name of the created certificate on SQL Server. 
- -```sql --- Run on SQL Server --- Alter the existing database mirroring endpoint to use CERTIFICATE for authentication and AES for encryption -USE MASTER -ALTER ENDPOINT - STATE=STARTED - AS TCP (LISTENER_PORT=5022, LISTENER_IP = ALL) - FOR DATABASE_MIRRORING ( - ROLE=ALL, - AUTHENTICATION = WINDOWS NEGOTIATE CERTIFICATE , - ENCRYPTION = REQUIRED ALGORITHM AES - ) -GO -``` - -After you run the `ALTER` endpoint query and set the dual authentication mode to Windows and certificate, use this query again on SQL Server to show details for the database mirroring endpoint: - -```sql --- Run on SQL Server --- View database mirroring endpoints on SQL Server -SELECT - name, type_desc, state_desc, role_desc, connection_auth_desc, - is_encryption_enabled, encryption_algorithm_desc -FROM - sys.database_mirroring_endpoints -``` - -You've successfully modified your database mirroring endpoint for a SQL Managed Instance link. - -## Create an availability group on SQL Server - -If you don't have an existing availability group, the next step is to create one on SQL Server. Create an availability group with the following parameters for a link: - -- SQL Server name -- Database name -- A failover mode of `MANUAL` -- A seeding mode of `AUTOMATIC` - -First, find out your SQL Server name by running the following T-SQL statement: - -```sql --- Run on SQL Server -SELECT @@SERVERNAME AS SQLServerName -``` - -Then, use the following script to create the availability group on SQL Server. Replace: - -- `` with the name of your SQL Server instance. -- `` with the name of your availability group. For multiple databases, you'll need to create multiple availability groups. A Managed Instance link requires one database per availability group. Consider naming each availability group so that its name reflects the corresponding database - for example, `AG_`. - - > [!NOTE] - > The link feature supports one database per link. 
To replicate multiple databases on an instance, create a link for each individual database. For example, to replicate 10 databases to SQL Managed Instance, create 10 individual links. -- `<DatabaseName>` with the name of the database that you want to replicate. -- `<SQLServerIP>` with the SQL Server IP address. You can use a resolvable SQL Server host machine name as an alternative, but you need to make sure that the name is resolvable from the SQL Managed Instance virtual network. - -```sql --- Run on SQL Server --- Create the primary availability group on SQL Server -USE MASTER -CREATE AVAILABILITY GROUP [<AGName>] -WITH (CLUSTER_TYPE = NONE) - FOR database [<DatabaseName>] - REPLICA ON - '<SQLServerName>' WITH - ( - ENDPOINT_URL = 'TCP://<SQLServerIP>:5022', - AVAILABILITY_MODE = SYNCHRONOUS_COMMIT, - FAILOVER_MODE = MANUAL, - SEEDING_MODE = AUTOMATIC - ); -GO -``` - -Consider the following: - -- The link currently supports replicating one database per availability group. You can replicate multiple databases to SQL Managed Instance by setting up multiple links. -- Collation between SQL Server and SQL Managed Instance should be the same. A mismatch in collation could cause a mismatch in server name casing and prevent a successful connection from SQL Server to SQL Managed Instance. -- Error 1475 indicates that you need to start a new backup chain by creating a full backup without the `COPY ONLY` option. - -In the following code, replace: - -- `<DAGName>` with the name of your distributed availability group. When you're replicating several databases, you need one availability group and one distributed availability group for each database. Consider naming each item accordingly - for example, `DAG_<DatabaseName>`. -- `<AGName>` with the name of the availability group that you created in the previous step. -- `<SQLServerIP>` with the IP address of SQL Server from the previous step. You can use a resolvable SQL Server host machine name as an alternative, but make sure that the name is resolvable from the SQL Managed Instance virtual network. 
-- `` with the short name of your managed instance. -- `` with the fully qualified domain name of your managed instance. - -```sql --- Run on SQL Server --- Create a distributed availability group for the availability group and database --- ManagedInstanceName example: 'sqlmi1' --- ManagedInstanceFQDN example: 'sqlmi1.73d19f36a420a.database.windows.net' -USE MASTER -CREATE AVAILABILITY GROUP [] - WITH (DISTRIBUTED) - AVAILABILITY GROUP ON - '' WITH - ( - LISTENER_URL = 'TCP://:5022', - AVAILABILITY_MODE = ASYNCHRONOUS_COMMIT, - FAILOVER_MODE = MANUAL, - SEEDING_MODE = AUTOMATIC, - SESSION_TIMEOUT = 20 - ), - '' WITH - ( - LISTENER_URL = 'tcp://:5022;Server=[]', - AVAILABILITY_MODE = ASYNCHRONOUS_COMMIT, - FAILOVER_MODE = MANUAL, - SEEDING_MODE = AUTOMATIC - ); -GO -``` - -### Verify availability groups - -Use the following script to list all availability groups and distributed availability groups on the SQL Server instance. At this point, the state of your availability group needs to be `connected`, and the state of your distributed availability groups needs to be `disconnected`. The state of the distributed availability group will move to `connected` only when it has been joined with SQL Managed Instance. - -```sql --- Run on SQL Server --- This will show that the availability group and distributed availability group have been created on SQL Server. -SELECT * FROM sys.availability_groups -``` - -Alternatively, you can use SSMS Object Explorer to find availability groups and distributed availability groups. Expand the **Always On High Availability** folder and then the **Availability Groups** folder. - -## Create a link - -The final step of the setup process is to create the link. At this time, you accomplish this by making a REST API call. - -You can invoke direct API calls to Azure by using various API clients. For simplicity of the process, sign in to the Azure portal and run the following PowerShell script from Azure Cloud Shell. 
Replace: - -- `` with your Azure subscription ID. -- `` with the short name of your managed instance. -- `` with the name of the availability group created on SQL Server. -- `` with the name of the distributed availability group created on SQL Server. -- `` with the database replicated in the availability group on SQL Server. -- `` with the address of the SQL Server instance. This can be a DNS name, a public IP address, or even a private IP address. The provided address must be resolvable from the back-end node that hosts the managed instance. - -```powershell -# Run in Azure Cloud Shell -# ============================================================================= -# POWERSHELL SCRIPT FOR CREATING MANAGED INSTANCE LINK -# USER CONFIGURABLE VALUES -# (C) 2021-2022 SQL Managed Instance product group -# ============================================================================= -# Enter your Azure subscription ID -$SubscriptionID = "" -# Enter your managed instance name – for example, "sqlmi1" -$ManagedInstanceName = "" -# Enter the availability group name that was created on SQL Server -$AGName = "" -# Enter the distributed availability group name that was created on SQL Server -$DAGName = "" -# Enter the database name that was placed in the availability group for replication -$DatabaseName = "" -# Enter the SQL Server address -$SQLServerAddress = "" - -# ============================================================================= -# INVOKING THE API CALL -- THIS PART IS NOT USER CONFIGURABLE -# ============================================================================= -# Log in to the subscription if needed -if ((Get-AzContext ) -eq $null) -{ - echo "Logging to Azure subscription" - Login-AzAccount -} -Select-AzSubscription -SubscriptionName $SubscriptionID -# ----------------------------------- -# Build the URI for the API call -# ----------------------------------- -echo "Building API URI" -$miRG = (Get-AzSqlInstance -InstanceName 
$ManagedInstanceName).ResourceGroupName -$uriFull = "https://management.azure.com/subscriptions/" + $SubscriptionID + "/resourceGroups/" + $miRG + "/providers/Microsoft.Sql/managedInstances/" + $ManagedInstanceName + "/distributedAvailabilityGroups/" + $DAGName + "?api-version=2021-05-01-preview" -echo $uriFull -# ----------------------------------- -# Build the API request body -# ----------------------------------- -echo "Building API request body" -$bodyFull = @" -{ - "properties":{ - "TargetDatabase":"$DatabaseName", - "SourceEndpoint":"TCP://$SQLServerAddress`:5022", - "PrimaryAvailabilityGroupName":"$AGName", - "SecondaryAvailabilityGroupName":"$ManagedInstanceName" - } -} -"@ -echo $bodyFull -# ----------------------------------- -# Get the authentication token and build the header -# ----------------------------------- -$azProfile = [Microsoft.Azure.Commands.Common.Authentication.Abstractions.AzureRmProfileProvider]::Instance.Profile -$currentAzureContext = Get-AzContext -$profileClient = New-Object Microsoft.Azure.Commands.ResourceManager.Common.RMProfileClient($azProfile) -$token = $profileClient.AcquireAccessToken($currentAzureContext.Tenant.TenantId) -$authToken = $token.AccessToken -$headers = @{} -$headers.Add("Authorization", "Bearer "+"$authToken") -# ----------------------------------- -# Invoke the API call -# ----------------------------------- -echo "Invoking API call to have Managed Instance join DAG on SQL Server" -$response = Invoke-WebRequest -Method PUT -Headers $headers -Uri $uriFull -ContentType "application/json" -Body $bodyFull -echo $response -``` - -The result of this operation will be a time stamp of the successful execution of the request to create a link. - -## Verify the link - -To verify that the connection has been made between SQL Managed Instance and SQL Server, run the following query on SQL Server. The connection will not be instantaneous after you make the API call. 
It can take up to a minute for the DMV to start showing a successful connection. Keep refreshing the DMV until the connection appears as `CONNECTED` for the SQL Managed Instance replica. - -```sql --- Run on SQL Server -SELECT - r.replica_server_name AS [Replica], - r.endpoint_url AS [Endpoint], - rs.connected_state_desc AS [Connected state], - rs.last_connect_error_description AS [Last connection error], - rs.last_connect_error_number AS [Last connection error No], - rs.last_connect_error_timestamp AS [Last error timestamp] -FROM - sys.dm_hadr_availability_replica_states rs - JOIN sys.availability_replicas r - ON rs.replica_id = r.replica_id -``` - -After the connection is established, the **Managed Instance Databases** view in SSMS initially shows the replicated databases in a **Restoring** state as the initial seeding phase moves and restores the full backup of the database. After the database is restored, replication has to catch up to bring the two databases to a synchronized state. The database will no longer be in **Restoring** after the initial seeding finishes. Seeding small databases might be fast enough that you won't see the initial **Restoring** state in SSMS. - -> [!IMPORTANT] -> - The link won't work unless network connectivity exists between SQL Server and SQL Managed Instance. To troubleshoot network connectivity, follow the steps in [Test bidirectional network connectivity](managed-instance-link-preparation.md#test-bidirectional-network-connectivity). -> - Take regular backups of the log file on SQL Server. If the used log space reaches 100 percent, replication to SQL Managed Instance stops until space use is reduced. We highly recommend that you automate log backups by setting up a daily job. For details, see [Back up log files on SQL Server](managed-instance-link-best-practices.md#take-log-backups-regularly). 
- - -## Next steps - -For more information on the link feature, see the following resources: - -- [Managed Instance link – connecting SQL Server to Azure reimagined](https://aka.ms/mi-link-techblog) -- [Prepare your environment for a Managed Instance link](./managed-instance-link-preparation.md) -- [Use a Managed Instance link with scripts to migrate a database](./managed-instance-link-use-scripts-to-failover-database.md) -- [Use a Managed Instance link via SSMS to replicate a database](./managed-instance-link-use-ssms-to-replicate-database.md) -- [Use a Managed Instance link via SSMS to migrate a database](./managed-instance-link-use-ssms-to-failover-database.md) diff --git a/articles/azure-sql/managed-instance/managed-instance-link-use-ssms-to-failover-database.md b/articles/azure-sql/managed-instance/managed-instance-link-use-ssms-to-failover-database.md deleted file mode 100644 index ffec3631ceda0..0000000000000 --- a/articles/azure-sql/managed-instance/managed-instance-link-use-ssms-to-failover-database.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: Fail over a database by using the link in SSMS -titleSuffix: Azure SQL Managed Instance -description: Learn how to use the link feature in SQL Server Management Studio (SSMS) to fail over a database from SQL Server to Azure SQL Managed Instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: data-movement -ms.custom: -ms.devlang: -ms.topic: guide -author: sasapopo -ms.author: sasapopo -ms.reviewer: mathoma, danil -ms.date: 03/10/2022 ---- -# Fail over a database by using the link in SSMS - Azure SQL Managed Instance - -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article teaches you how to fail over a database from SQL Server to Azure SQL Managed Instance by using [the link feature](managed-instance-link-feature-overview.md) in SQL Server Management Studio (SSMS). 
- -Failing over your database from SQL Server to SQL Managed Instance breaks the link between the two databases. It stops replication and leaves both databases in an independent state, ready for individual read/write workloads. - -> [!NOTE] -> The link is a feature of Azure SQL Managed Instance and is currently in preview. - -## Prerequisites - -To fail over your databases to SQL Managed Instance, you need the following prerequisites: - -- An active Azure subscription. If you don't have one, [create a free account](https://azure.microsoft.com/free/). -- [SQL Server 2019 Enterprise or Developer edition](https://www.microsoft.com/en-us/evalcenter/evaluate-sql-server-2019), starting with [CU15 (15.0.4198.2)](https://support.microsoft.com/topic/kb5008996-cumulative-update-15-for-sql-server-2019-4b6a8ee9-1c61-482d-914f-36e429901fb6). -- Azure SQL Managed Instance. [Get started](instance-create-quickstart.md) if you don't have it. -- [SQL Server Management Studio v18.11.1 or later](/sql/ssms/download-sql-server-management-studio-ssms). -- [An environment that's prepared for replication](managed-instance-link-preparation.md). -- [Setup of the link feature and replication of your database to your managed instance in Azure](managed-instance-link-use-ssms-to-replicate-database.md). - -## Fail over a database - -In the following steps, you use the **Failover database to Managed Instance** wizard in SSMS to fail over your database from SQL Server to SQL Managed Instance. The wizard takes you through failing over your database, breaking the link between the two instances in the process. - -> [!CAUTION] -> If you're performing a planned manual failover, stop the workload on the source SQL Server database to allow the SQL Managed Instance replicated database to completely catch up and fail over without data loss. If you're performing a forced failover, you might lose data. - -1. Open SSMS and connect to your SQL Server instance. -1. 
In Object Explorer, right-click your database, hover over **Azure SQL Managed Instance link**, and select **Failover database** to open the **Failover database to Managed Instance** wizard. - - :::image type="content" source="./media/managed-instance-link-use-ssms-to-failover-database/link-failover-ssms-database-context-failover-database.png" alt-text="Screenshot that shows a database's context menu option for failover."::: - -1. On the **Introduction** page of the **Failover database to Managed Instance** wizard, select **Next**. - - :::image type="content" source="./media/managed-instance-link-use-ssms-to-failover-database/link-failover-introduction.png" alt-text="Screenshot that shows the Introduction page."::: - - -3. On the **Log in to Azure** page, select **Sign-in** to provide your credentials and sign in to your Azure account. Select the subscription that's hosting SQL Managed Instance from the dropdown list, and then select **Next**. - - :::image type="content" source="./media/managed-instance-link-use-ssms-to-failover-database/link-failover-login-to-azure.png" alt-text="Screenshot that shows the page for signing in to Azure."::: - -4. On the **Failover Type** page, choose the type of failover you're performing. Select the box to confirm that you've stopped the workload for a planned failover, or you understand that you might lose data if using a forced failover. Select **Next**. - - :::image type="content" source="./media/managed-instance-link-use-ssms-to-failover-database/link-failover-failover-type.png" alt-text="Screenshot that shows the Failover Type page."::: - -1. On the **Clean-up (optional)** page, choose to drop the availability group if you created it solely for the purpose of migrating your database to Azure and you no longer need it. If you want to keep the availability group, leave the boxes cleared. Select **Next**. 
- - - :::image type="content" source="./media/managed-instance-link-use-ssms-to-failover-database/link-failover-cleanup-optional.png" alt-text="Screenshot that shows the page for the option of deleting an availability group."::: - -1. On the **Summary** page, review the actions that will be performed for your failover. Optionally, select **Script** to create a script that you can run at a later time. When you're ready to proceed with the failover, select **Finish**. - - :::image type="content" source="./media/managed-instance-link-use-ssms-to-failover-database/link-failover-summary.png" alt-text="Screenshot that shows the Summary page."::: - -7. The **Executing actions** page displays the progress of each action. - - :::image type="content" source="./media/managed-instance-link-use-ssms-to-failover-database/link-failover-executing-actions.png" alt-text="Screenshot that shows the page for executing actions."::: - -8. After all steps finish, the **Results** page shows check marks next to the successfully completed actions. You can now close the window. - - :::image type="content" source="./media/managed-instance-link-use-ssms-to-failover-database/link-failover-results.png" alt-text="Screenshot that shows the Results page with completed status."::: - -## View the failed-over database - -During the failover process, the link is dropped and no longer exists. The source SQL Server database and the target SQL Managed Instance database can both execute a read/write workload. They're completely independent. - -You can validate that the link has been dropped by reviewing the database on SQL Server. - -:::image type="content" source="./media/managed-instance-link-use-ssms-to-failover-database/link-failover-ssms-sql-server-database.png" alt-text="Screenshot that shows a database on SQL Server in S S M S."::: - -Then, review the database on SQL Managed Instance. 
- -:::image type="content" source="./media/managed-instance-link-use-ssms-to-failover-database/link-failover-ssms-managed-instance-database.png" alt-text="Screenshot that shows a database on SQL Managed Instance in S S M S."::: - -## Next steps - -To learn more, see [Link feature for Azure SQL Managed Instance](managed-instance-link-feature-overview.md). diff --git a/articles/azure-sql/managed-instance/managed-instance-link-use-ssms-to-replicate-database.md b/articles/azure-sql/managed-instance/managed-instance-link-use-ssms-to-replicate-database.md deleted file mode 100644 index 87c6d0cc43c9a..0000000000000 --- a/articles/azure-sql/managed-instance/managed-instance-link-use-ssms-to-replicate-database.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: Replicate a database by using the link in SSMS -titleSuffix: Azure SQL Managed Instance -description: Learn how to use a link feature in SQL Server Management Studio (SSMS) to replicate a database from SQL Server to Azure SQL Managed Instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: data-movement -ms.custom: -ms.devlang: -ms.topic: guide -author: sasapopo -ms.author: sasapopo -ms.reviewer: mathoma, danil -ms.date: 03/22/2022 ---- -# Replicate a database by using the link feature in SSMS - Azure SQL Managed Instance - -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article teaches you how to replicate your database from SQL Server to Azure SQL Managed Instance by using [the link feature](managed-instance-link-feature-overview.md) in SQL Server Management Studio (SSMS). - -> [!NOTE] -> The link is a feature of Azure SQL Managed Instance and is currently in preview. - -## Prerequisites - -To replicate your databases to SQL Managed Instance through the link, you need the following prerequisites: - -- An active Azure subscription. If you don't have one, [create a free account](https://azure.microsoft.com/free/). 
-- [SQL Server 2019 Enterprise or Developer edition](https://www.microsoft.com/en-us/evalcenter/evaluate-sql-server-2019), starting with [CU15 (15.0.4198.2)](https://support.microsoft.com/topic/kb5008996-cumulative-update-15-for-sql-server-2019-4b6a8ee9-1c61-482d-914f-36e429901fb6). -- Azure SQL Managed Instance. [Get started](instance-create-quickstart.md) if you don't have it. -- [SQL Server Management Studio v18.11.1 or later](/sql/ssms/download-sql-server-management-studio-ssms). -- A properly [prepared environment](managed-instance-link-preparation.md). - - -## Replicate a database - -In the following steps, you use the **New Managed Instance link** wizard in SSMS to create the link between SQL Server and SQL Managed Instance. After you create the link, your source database gets a read-only replica copy on your target managed instance. - -> [!NOTE] -> The link supports replication of user databases only. Replication of system databases is not supported. To replicate instance-level objects (stored in master or msdb databases), we recommend that you script them out and run T-SQL scripts on the destination instance. - -1. Open SSMS and connect to your SQL Server instance. -1. In Object Explorer, right-click your database, hover over **Azure SQL Managed Instance link**, and select **Replicate database** to open the **New Managed Instance link** wizard. If your SQL Server version isn't supported, this option won't be available on the context menu. - - :::image type="content" source="./media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-ssms-database-context-replicate-database.png" alt-text="Screenshot that shows a database's context menu option for replication."::: - -1. On the **Introduction** page of the wizard, select **Next**. 
- - :::image type="content" source="./media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-introduction.png" alt-text="Screenshot that shows the Introduction page of the wizard for creating a new Managed Instance link."::: - -1. On the **SQL Server requirements** page, the wizard validates requirements to establish a link to SQL Managed Instance. Select **Next** after all the requirements are validated. - - :::image type="content" source="./media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-sql-server-requirements.png" alt-text="Screenshot that shows the Requirements page for a Managed Instance link."::: - -1. On the **Select Databases** page, choose one or more databases that you want to replicate to SQL Managed Instance via the link feature. Then select **Next**. - - :::image type="content" source="./media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-select-databases.png" alt-text="Screenshot that shows the Select Databases page."::: - -1. On the **Login to Azure and select Managed Instance** page, select **Sign In** to sign in to Microsoft Azure. - - :::image type="content" source="./media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-login-to-azure.png" alt-text="Screenshot that shows the area for signing in to Azure."::: - -1. On the **Login to Azure and select Managed Instance** page, choose the subscription, resource group, and target managed instance from the dropdown lists. Select **Login** and provide login details for SQL Managed Instance. After you've provided all necessary information, select **Next**. - - :::image type="content" source="./media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-login-to-azure-populated.png" alt-text="Screenshot that shows the populated page for selecting a managed instance."::: - -1. Review the prepopulated values on the **Specify Distributed AG Options** page, and change any that need customization. 
When you're ready, select **Next**. - - :::image type="content" source="./media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-distributed-ag-options.png" alt-text="Screenshot that shows the Specify Distributed A G Options page."::: - -1. Review the actions on the **Summary** page. Optionally, select **Script** to create a script that you can run at a later time. When you're ready, select **Finish**. - - :::image type="content" source="./media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-summary.png" alt-text="Screenshot that shows the Summary page."::: - -1. The **Executing actions** page displays the progress of each action. - - :::image type="content" source="./media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-executing-actions.png" alt-text="Screenshot that shows the page for executing actions."::: - -1. After all steps finish, the **Results** page shows check marks next to the successfully completed actions. You can now close the window. - - :::image type="content" source="./media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-results.png" alt-text="Screenshot that shows the Results page with completed status."::: - -## View a replicated database - -After the link is created, the selected databases are replicated to the managed instance. - -Use Object Explorer on your SQL Server instance to view the **Synchronized** status of the replicated database. Expand **Always On High Availability** and **Availability Groups** to view the distributed availability group that's created for the link. - -:::image type="content" source="./media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-ssms-sql-server-database.png" alt-text="Screenshot that shows the state of the SQL Server database and availability group, and the distributed availability group in S S M S."::: - -Connect to your managed instance and use Object Explorer to view your replicated database. 
Depending on the database size and network speed, the database might initially be in a **Restoring** state. After initial seeding finishes, the database is restored to the managed instance and ready for read-only workloads. - -:::image type="content" source="./media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-ssms-managed-instance-database.png" alt-text="Screenshot that shows the state of the SQL Managed Instance database."::: - -## Next steps - -To break the link and fail over your database to SQL Managed Instance, see [Fail over a database](managed-instance-link-use-ssms-to-failover-database.md). To learn more, see [Link feature for Azure SQL Managed Instance](managed-instance-link-feature-overview.md). diff --git a/articles/azure-sql/managed-instance/management-endpoint-find-ip-address.md b/articles/azure-sql/managed-instance/management-endpoint-find-ip-address.md deleted file mode 100644 index 2ce94e8e51512..0000000000000 --- a/articles/azure-sql/managed-instance/management-endpoint-find-ip-address.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Discover management endpoint IP address -titleSuffix: Azure SQL Managed Instance -description: Learn how to get the Azure SQL Managed Instance management endpoint public IP address and verify its built-in firewall protection -services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: mathoma -ms.date: 12/04/2018 ---- -# Determine the management endpoint IP address - Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -The Azure SQL Managed Instance virtual cluster contains a management endpoint that Azure uses for management operations. The management endpoint is protected with a built-in firewall on the network level and mutual certificate verification on the application level. 
You can determine the IP address of the management endpoint, but you can't access this endpoint. - -To determine the management IP address, do a [DNS lookup](/windows-server/administration/windows-commands/nslookup) on your SQL Managed Instance FQDN: `mi-name.zone_id.database.windows.net`. This will return a DNS entry that's like `trx.region-a.worker.vnet.database.windows.net`. You can then do a DNS lookup on this FQDN with ".vnet" removed. This will return the management IP address. - -This PowerShell code will do it all for you if you replace `<MI FQDN>` with the DNS entry of SQL Managed Instance: `mi-name.zone_id.database.windows.net`: - -``` powershell - $MIFQDN = "" - resolve-dnsname $MIFQDN | select -first 1 | %{ resolve-dnsname $_.NameHost.Replace(".vnet","")} -``` - -For more information about SQL Managed Instance and connectivity, see [Azure SQL Managed Instance connectivity architecture](connectivity-architecture-overview.md). diff --git a/articles/azure-sql/managed-instance/management-endpoint-verify-built-in-firewall.md b/articles/azure-sql/managed-instance/management-endpoint-verify-built-in-firewall.md deleted file mode 100644 index 6770162c3879c..0000000000000 --- a/articles/azure-sql/managed-instance/management-endpoint-verify-built-in-firewall.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Verify port security in the built-in firewall -description: Learn how to verify built-in firewall protection in Azure SQL Managed Instance. 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: security -ms.custom: -ms.devlang: -ms.topic: how-to -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: mathoma -ms.date: 12/04/2018 ---- -# Verify the Azure SQL Managed Instance built-in firewall -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -The Azure SQL Managed Instance [mandatory inbound security rules](connectivity-architecture-overview.md#mandatory-inbound-security-rules-with-service-aided-subnet-configuration) require management ports 9000, 9003, 1438, 1440, and 1452 to be open from **Any source** on the Network Security Group (NSG) that protects SQL Managed Instance. Although these ports are open at the NSG level, they are protected at the network level by the built-in firewall. - -## Verify firewall - -To verify these ports, use any security scanner tool to test these ports. The following screenshot shows how to use one of these tools. - -![Verifying built-in firewall](./media/management-endpoint-verify-built-in-firewall/03_verify_firewall.png) - -## Next steps - -For more information about SQL Managed Instance and connectivity, see [Azure SQL Managed Instance connectivity architecture](connectivity-architecture-overview.md). diff --git a/articles/azure-sql/managed-instance/management-operations-cancel.md b/articles/azure-sql/managed-instance/management-operations-cancel.md deleted file mode 100644 index 700ebb87a4d49..0000000000000 --- a/articles/azure-sql/managed-instance/management-operations-cancel.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -title: Cancel management operations -titleSuffix: Azure SQL Managed Instance -description: Learn how to cancel Azure SQL Managed Instance management operations. 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.custom: devx-track-azurepowershell -ms.devlang: -ms.topic: how-to -author: urosmil -ms.author: urmilano -ms.reviewer: mathoma, bonova, MashaMSFT -ms.date: 09/03/2020 ---- - -# Canceling Azure SQL Managed Instance management operations -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Azure SQL Managed Instance provides the ability to cancel some [management operations](management-operations-overview.md), such as when you deploy a new managed instance or update instance properties. - -## Overview - - All management operations can be categorized as follows: - -- Instance deployment (new instance creation). -- Instance update (changing instance properties, such as vCores or reserved storage). -- Instance deletion. - -You can [monitor progress and status of management operations](management-operations-monitor.md) and cancel some of them if necessary. - -The following table summarizes management operations, whether or not you can cancel them, and their typical overall duration: - -Category |Operation |Cancelable |Estimated cancel duration | -|---------|---------|---------|---------| -|Deployment |Instance creation |Yes |90% of operations finish in 5 minutes. | -|Update |Instance storage scaling up/down (General Purpose) |No | | -|Update |Instance storage scaling up/down (Business Critical) |Yes |90% of operations finish in 5 minutes. | -|Update |Instance compute (vCores) scaling up and down (General Purpose) |Yes |90% of operations finish in 5 minutes. | -|Update |Instance compute (vCores) scaling up and down (Business Critical) |Yes |90% of operations finish in 5 minutes. | -|Update |Instance service tier change (General Purpose to Business Critical and vice versa) |Yes |90% of operations finish in 5 minutes. 
| -|Delete |Instance deletion |No | | -|Delete |Virtual cluster deletion (as user-initiated operation) |No | | - -## Cancel management operation - -# [Portal](#tab/azure-portal) - -To cancel management operations using the Azure portal, follow these steps: - -1. Go to the [Azure portal](https://portal.azure.com). -1. Go to the **Overview** blade of your SQL Managed Instance. -1. Select the **Notification** box next to the ongoing operation to open the **Ongoing Operation** page. - - :::image type="content" source="media/management-operations-cancel/open-ongoing-operation.png" alt-text="Select the ongoing operation box to open the ongoing operation page."::: - -1. Select **Cancel the operation** at the bottom of the page. - - :::image type="content" source="media/management-operations-cancel/cancel-operation.png" alt-text="Select cancel to cancel the operation."::: - -1. Confirm that you want to cancel the operation. - - -If the cancel request succeeds, the management operation is canceled and results in a failure. You will get a notification that the cancellation succeeds or fails. - -![Canceling operation result](./media/management-operations-cancel/canceling-operation-result.png) - - -If the cancel request fails or the cancel button is not active, it means that the management operation has entered a non-cancelable state and will finish shortly. The management operation will continue its execution until it is completed. - -# [PowerShell](#tab/azure-powershell) - -If you don't already have Azure PowerShell installed, see [Install the Azure PowerShell module](/powershell/azure/install-az-ps). - -To cancel a management operation, you need to specify the management operation name. Therefore, first use the get command to retrieve the operation list, and then cancel the specific operation. 
- -```powershell-interactive -$managedInstance = "" -$resourceGroup = "" - -$managementOperations = Get-AzSqlInstanceOperation -ManagedInstanceName $managedInstance -ResourceGroupName $resourceGroup - -foreach ($mo in $managementOperations ) { - if($mo.State -eq "InProgress" -and $mo.IsCancellable){ - $cancelRequest = Stop-AzSqlInstanceOperation -ResourceGroupName $resourceGroup -ManagedInstanceName $managedInstance -Name $mo.Name - Get-AzSqlInstanceOperation -ManagedInstanceName $managedInstance -ResourceGroupName $resourceGroup -Name $mo.Name - } -} -``` - -For detailed commands explanation, see [Get-AzSqlInstanceOperation](/powershell/module/az.sql/get-azsqlinstanceoperation) and [Stop-AzSqlInstanceOperation](/powershell/module/az.sql/stop-azsqlinstanceoperation). - -# [Azure CLI](#tab/azure-cli) - -If you don't already have the Azure CLI installed, see [Install the Azure CLI](/cli/azure/install-azure-cli). - -To cancel the management operation, you need to specify the management operation name. Therefore, first use the get command to retrieve the operation list, and then cancel the specific operation. - -```azurecli-interactive -az sql mi op list -g yourResourceGroupName --mi yourInstanceName | - --query "[?state=='InProgress' && isCancellable].{Name: name}" -o tsv | -while read -r operationName; do - -az sql mi op cancel -g yourResourceGroupName --mi yourInstanceName -n $operationName -done -``` - -For detailed commands explanation, see [az sql mi op](/cli/azure/sql/mi/op). - ---- - -## Canceled deployment request - -With API version 2020-02-02, as soon as the instance creation request is accepted, the instance starts to exist as a resource, no matter the progress of the deployment process (managed instance status is **Provisioning**). If you cancel the instance deployment request (new instance creation), the managed instance will go from the **Provisioning** state to **FailedToCreate**. 
- -Instances that have failed to create are still present as a resource and: - -- Are not charged -- Do not count towards resource limits (subnet or vCore quota) - - -> [!NOTE] -> To minimize noise in the list of resources or managed instances, delete instances that have failed to deploy or instances with canceled deployments. - - -## Next steps - -- To learn how to create your first managed instance, see [Quickstart guide](instance-create-quickstart.md). -- For a features and comparison list, see [Common SQL features](../database/features-comparison.md). -- For more information about VNet configuration, see [SQL Managed Instance VNet configuration](connectivity-architecture-overview.md). -- For a quickstart that creates a managed instance and restores a database from a backup file, see [Create a managed instance](instance-create-quickstart.md). -- For a tutorial about using Azure Database Migration Service for migration, see [SQL Managed Instance migration using Database Migration Service](../../dms/tutorial-sql-server-to-managed-instance.md). diff --git a/articles/azure-sql/managed-instance/management-operations-monitor.md b/articles/azure-sql/managed-instance/management-operations-monitor.md deleted file mode 100644 index 9fc15fe202188..0000000000000 --- a/articles/azure-sql/managed-instance/management-operations-monitor.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Monitor management operations -titleSuffix: Azure SQL Managed Instance -description: Learn about different ways to monitor Azure SQL Managed Instance management operations. 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.custom: devx-track-azurepowershell -ms.devlang: -ms.topic: how-to -author: urosmil -ms.author: urmilano -ms.reviewer: mathoma, bonova, MashaMSFT -ms.date: 09/03/2020 ---- - -# Monitoring Azure SQL Managed Instance management operations -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Azure SQL Managed Instance provides monitoring of [management operations](management-operations-overview.md) that you use to deploy new managed instances, update instance properties, or delete instances when no longer needed. - -## Overview - -All management operations can be categorized as follows: - -- Instance deployment (new instance creation). -- Instance update (changing instance properties, such as vCores or reserved storage). -- Instance deletion. - -Most management operations are [long running operations](management-operations-overview.md#duration). Therefore there is a need to monitor the status or follow the progress of operation steps. - -There are several ways to monitor managed instance management operations: - -- [Resource group deployments](../../azure-resource-manager/templates/deployment-history.md) -- [Activity log](../../azure-monitor/essentials/activity-log.md) -- [Managed instance operations API](#managed-instance-operations-api) - - -The following table compares management operation monitoring options: - -| Option | Retention | Supports cancel | Create | Update | Delete | Cancel | Steps | -| --- | --- | --- | --- | --- | --- | --- | --- | -| Resource group deployments | Infinite1 | No2 | Visible | Visible | Not visible | Visible | Not visible | -| Activity log | 90 days | No | Visible | Visible | Visible | Visible | Not visible | -| Managed instance operations API | 24 hours | [Yes](management-operations-cancel.md) | Visible | Visible | Visible | Visible | Visible | - - -1 The deployment history for a resource group is limited to 800 deployments. 
- -2 Resource group deployments support the cancel operation. However, due to cancel logic, only an operation scheduled for deployment after the cancel action is performed will be canceled. Ongoing deployment is not canceled when the resource group deployment is canceled. Since managed instance deployment consists of one long running step (from the Azure Resource Manager perspective), canceling resource group deployment will not cancel managed instance deployment and the operation will complete. - -## Managed instance operations API - -Management operations APIs are specially designed to monitor operations. Monitoring managed instance operations can provide insights on operation parameters and operation steps, as well as [cancel specific operations](management-operations-cancel.md). Besides operation details and the cancel command, this API can be used in automation scripts with multi-resource deployments - based on the progress step, you can kick off some dependent resource deployment. - -These are the APIs: - -| Command | Description | -| --- | --- | -|[Managed Instance Operations - Get](/rest/api/sql/managedinstanceoperations/get)|Gets a management operation on a managed instance.| -|[Managed Instance Operations - Cancel](/rest/api/sql/managedinstanceoperations/cancel)|Cancels the asynchronous operation on the managed instance.| -|[Managed Instance Operations - List By Managed Instance](/rest/api/sql/managedinstanceoperations/listbymanagedinstance)|Gets a list of operations performed on the managed instance.| - -> [!NOTE] -> Use API version 2020-02-02 to see the managed instance create operation in the list of operations. This is the default version used in the Azure portal and the latest PowerShell and Azure CLI packages. - -## Monitor operations - -# [Portal](#tab/azure-portal) - -In the Azure portal, use the managed instance **Overview** page to monitor managed instance operations. 
- -For example, the **Create operation** is visible at the start of the creation process on the **Overview** page: - -![Managed instance create progress](./media/management-operations-monitor/monitoring-create-operation.png) - -Select **Ongoing operation** to open the **Ongoing operation** page and view **Create** or **Update** operations. You can also [Cancel](management-operations-cancel.md) operations from this page as well. - -![Managed instance operation details](./media/management-operations-monitor/monitoring-operation-details.png) - -> [!NOTE] -> Create operations submitted through Azure portal, PowerShell, Azure CLI or other tooling using REST API version 2020-02-02 [can be canceled](management-operations-cancel.md). REST API versions older than 2020-02-02 used to submit a create operation will start the instance deployment, but the deployment won't be listed in the Operations API and can't be cancelled. - -# [PowerShell](#tab/azure-powershell) - -The Get-AzSqlInstanceOperation cmdlet gets information about the operations on a managed instance. You can view all operations on a managed instance or view a specific operation by providing the operation name. - -```powershell-interactive -$managedInstance = "yourInstanceName" -$resourceGroup = "yourResourceGroupName" - -$managementOperations = Get-AzSqlInstanceOperation ` - -ManagedInstanceName $managedInstance -ResourceGroupName $resourceGroup -``` - -For detailed commands explanation, see [Get-AzSqlInstanceOperation](/powershell/module/az.sql/get-azsqlinstanceoperation). - -# [Azure CLI](#tab/azure-cli) - -The az sql mi op list gets a list of operations performed on the managed instance. If you don't already have the Azure CLI installed, see [Install the Azure CLI](/cli/azure/install-azure-cli). - -```azurecli-interactive -az sql mi op list -g yourResourceGroupName --mi yourInstanceName -``` - -For detailed commands explanation, see [az sql mi op](/cli/azure/sql/mi/op). 
- ---- - -## Next steps - -- To learn how to create your first managed instance, see [Quickstart guide](instance-create-quickstart.md). -- For a features and comparison list, see [common SQL features](../database/features-comparison.md). -- For more information about VNet configuration, see [SQL Managed Instance VNet configuration](connectivity-architecture-overview.md). -- For a quickstart that creates a managed instance and restores a database from a backup file, see [Create a managed instance](instance-create-quickstart.md). -- For a tutorial about using Azure Database Migration Service for migration, see [SQL Managed Instance migration using Database Migration Service](../../dms/tutorial-sql-server-to-managed-instance.md). diff --git a/articles/azure-sql/managed-instance/management-operations-overview.md b/articles/azure-sql/managed-instance/management-operations-overview.md deleted file mode 100644 index 3a8feb7896f42..0000000000000 --- a/articles/azure-sql/managed-instance/management-operations-overview.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -title: Management operations overview -titleSuffix: Azure SQL Managed Instance -description: Learn about Azure SQL Managed Instance management operations duration and best practices. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.custom: ignite-fall-2021 -ms.devlang: -ms.topic: overview -author: urosmil -ms.author: urmilano -ms.reviewer: mathoma -ms.date: 08/20/2021 ---- - -# Overview of Azure SQL Managed Instance management operations -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Azure SQL Managed Instance provides management operations that you can use to automatically deploy new managed instances, update instance properties, and delete instances when no longer needed. - -## What are management operations? 
- -All management operations can be categorized as follows: - -- Instance deployment (new instance creation) -- Instance update (changing instance properties, such as vCores or reserved storage) -- Instance deletion - -To support [deployments within Azure virtual networks](../../virtual-network/virtual-network-for-azure-services.md) and provide isolation and security for customers, SQL Managed Instance relies on [virtual clusters](connectivity-architecture-overview.md#high-level-connectivity-architecture). The virtual cluster represents a dedicated set of isolated virtual machines deployed inside the customer's virtual network subnet. Essentially, every managed instance deployed to an empty subnet results in a new virtual cluster buildout. - -Subsequent management operations on managed instances may impact the underlying virtual cluster. Changes that impact the underlying virtual cluster may affect the duration of management operations, as deploying additional virtual machines comes with an overhead that you need to consider when you plan new deployments or updates to existing managed instances. - - -## Duration - -The duration of operations on the virtual cluster can vary, but typically have the longest duration. - -The following table lists the long running steps that can be triggered as part of the create, update, or delete operation. 
Table also lists the durations that you can typically expect, based on existing service telemetry data: - -|Step|Description|Estimated duration| -|---------|---------|---------| -|**Virtual cluster creation**|Creation is a synchronous step in instance management operations.|**90% of operations finish in 4 hours**| -|**Virtual cluster resizing (expansion or shrinking)**|Expansion is a synchronous step, while shrinking is performed asynchronously (without impact on the duration of instance management operations).|**90% of cluster expansions finish in less than 2.5 hours**| -|**Virtual cluster deletion**|Virtual cluster deletion can be synchronous and asynchronous. Asynchronous deletion is performed in the background and it is triggered in case of multiple virtual clusters inside the same subnet, when last instance in the non-last cluster in the subnet is deleted. Synchronous deletion of the virtual cluster is triggered as part of the very last instance deletion in the subnet.|**90% of cluster deletions finish in 1.5 hours**| -|**Seeding database files**1|A synchronous step, triggered during compute (vCores), or storage scaling in the Business Critical service tier as well as in changing the service tier from General Purpose to Business Critical (or vice versa). Duration of this operation is proportional to the total database size as well as current database activity (number of active transactions). Database activity when updating an instance can introduce significant variance to the total duration.|**90% of these operations execute at 220 GB/hour or higher**| - -1 When scaling compute (vCores) or storage in Business Critical service tier, or switching service tier from General Purpose to Business Critical, seeding also includes Always On availability group seeding. - -> [!IMPORTANT] -> Scaling storage up or down in the General Purpose service tier consists of updating meta data and propagating response for submitted request. 
It is a fast operation that completes in up to 5 minutes, without a downtime and failover. - -### Management operations long running segments - -The following tables summarize operations and typical overall durations, based on the category of the operation: - -**Category: Deployment** - -|Operation |Long-running segment |Estimated duration | -|---------|---------|---------| -|First instance in an empty subnet|Virtual cluster creation|90% of operations finish in 4 hours.| -|First instance of another hardware or maintenance window in a non-empty subnet (for example, first Premium-series instance in a subnet with Standard-series instances)|Virtual cluster creation1|90% of operations finish in 4 hours.| -|Subsequent instance creation within the non-empty subnet (2nd, 3rd, etc. instance)|Virtual cluster resizing|90% of operations finish in 2.5 hours.| - - -1 A separate virtual cluster is created for each hardware configuration and for each maintenance window configuration. - -**Category: Update** - -|Operation |Long-running segment |Estimated duration | -|---------|---------|---------| -|Instance property change (admin password, Azure AD login, Azure Hybrid Benefit flag)|N/A|Up to 1 minute.| -|Instance storage scaling up/down (General Purpose)|No long-running segment|99% of operations finish in 5 minutes.| -|Instance storage scaling up/down (Business Critical)|- Virtual cluster resizing
    - Always On availability group seeding|90% of operations finish in 2.5 hours + time to seed all databases (220 GB/hour).| -|Instance compute (vCores) scaling up and down (General Purpose)|- Virtual cluster resizing|90% of operations finish in 2.5 hours.| -|Instance compute (vCores) scaling up and down (Business Critical)|- Virtual cluster resizing
    - Always On availability group seeding|90% of operations finish in 2.5 hours + time to seed all databases (220 GB/hour).| -|Instance service tier change (General Purpose to Business Critical and vice versa)|- Virtual cluster resizing
    - Always On availability group seeding|90% of operations finish in 2.5 hours + time to seed all databases (220 GB/hour).| -|Instance hardware or maintenance window change (General Purpose)|- Virtual cluster creation or resizing1|90% of operations finish in 4 hours (creation) or 2.5 hours (resizing).| -|Instance hardware or maintenance window change (Business Critical)|- Virtual cluster creation or resizing1
    - Always On availability group seeding|90% of operations finish in 4 hours (creation) or 2.5 hours (resizing) + time to seed all databases (220 GB/hour).| - - -1 Managed instance must be placed in a virtual cluster with the corresponding hardware and maintenance window. If there is no such virtual cluster in the subnet, a new one must be created first to accommodate the instance. - -**Category: Delete** - -|Operation |Long-running segment |Estimated duration | -|---------|---------|---------| -|Non-last instance deletion|Log tail backup for all databases|90% of operations finish in up to 1 minute.1| -|Last instance deletion |- Log tail backup for all databases
    - Virtual cluster deletion|90% of operations finish in up to 1.5 hours.2| - - -1 In case of multiple virtual clusters in the subnet, if the last instance in the virtual cluster is deleted, this operation will immediately trigger **asynchronous** deletion of the virtual cluster. - -2 Deletion of last instance in the subnet immediately triggers **synchronous** deletion of the virtual cluster. - -> [!IMPORTANT] -> As soon as delete operation is triggered, billing for SQL Managed Instance is disabled. Duration of the delete operation will not impact the billing. - -## Instance availability - -SQL Managed Instance **is available during update operations**, except a short downtime caused by the failover that happens at the end of the update. It typically lasts up to 10 seconds even in case of interrupted long-running transactions, thanks to [accelerated database recovery](../accelerated-database-recovery.md). - -> [!NOTE] -> Scaling General Purpose managed instance storage will not cause a failover at the end of update. - -SQL Managed Instance is not available to client applications during deployment and deletion operations. - -> [!IMPORTANT] -> It's not recommended to scale compute or storage of Azure SQL Managed Instance or to change the service tier at the same time as long-running transactions (data import, data processing jobs, index rebuild, etc.). The failover of the database at the end of the operation cancels all ongoing transactions. - -## Management operations steps - -Management operations consist of multiple steps. With [Operations API introduced](management-operations-monitor.md) these steps are exposed for subset of operations (deployment and update). Deployment operation consists of three steps while update operation is performed in six steps. For details on operations duration, see the [management operations duration](#duration) section. Steps are listed by order of execution. 
- -### Managed instance deployment steps - -|Step name |Step description | -|----|---------| -|Request validation |Submitted parameters are validated. In case of misconfiguration operation will fail with an error. | -|Virtual cluster resizing / creation |Depending on the state of subnet, virtual cluster goes into creation or resizing. | -|New SQL instance startup |SQL process is started on deployed virtual cluster. | - -### Managed instance update steps - -|Step name |Step description | -|----|---------| -|Request validation |Submitted parameters are validated. In case of misconfiguration operation will fail with an error. | -|Virtual cluster resizing / creation |Depending on the state of subnet, virtual cluster goes into creation or resizing. | -|New SQL instance startup |SQL process is started on deployed virtual cluster. | -|Seeding database files / attaching database files |Depending on the type of the update operation, either database seeding or attaching database files is performed. | -|Preparing failover and failover |After data has been seeded or database files reattached, system is being prepared for the failover. When everything is set, failover is performed **with a short downtime**. | -|Old SQL instance cleanup |Removing old SQL process from the virtual cluster | - -### Managed instance delete steps -|Step name |Step description | -|----|---------| -|Request validation |Submitted parameters are validated. In case of misconfiguration operation will fail with an error. | -|SQL instance cleanup |Removing SQL process from the virtual cluster | -|Virtual cluster deletion |Depending if the instance being deleted is last in the subnet, virtual cluster is synchronously deleted as last step. | - -> [!NOTE] -> As a result of scaling instances, underlying virtual cluster will go through process of releasing unused capacity and possible capacity defragmentation, which could impact instances that did not participate in creation / scaling operations. 
- - -## Management operations cross-impact - -Management operations on a managed instance can affect other management operations of the instances placed inside the same virtual cluster: - -- **Long-running restore operations** in a virtual cluster will put other instance creation or scaling operations in the same subnet on hold.
    **Example:** If there is a long-running restore operation and there is a create or scale request in the same subnet, this request will take longer to complete as it waits for the restore operation to complete before it continues. - -- **A subsequent instance creation or scaling** operation is put on hold by a previously initiated instance creation or instance scale that initiated a resize of the virtual cluster.
    **Example:** If there are multiple create and/or scale requests in the same subnet under the same virtual cluster, and one of them initiates a virtual cluster resize, all requests that were submitted 5+ minutes after the initial operation request will last longer than expected, as these requests will have to wait for the resize to complete before resuming. - -- **Create/scale operations submitted in a 5-minute window** will be batched and executed in parallel.
    **Example:** Only one virtual cluster resize will be performed for all operations submitted in a 5-minute window (measuring from the moment of executing the first operation request). If another request is submitted more than 5 minutes after the first one is submitted, it will wait for the virtual cluster resize to complete before execution starts. - -> [!IMPORTANT] -> Management operations that are put on hold because of another operation that is in progress will automatically be resumed once conditions to proceed are met. No user action is necessary to resume the temporarily paused management operations. - -## Monitoring management operations - -To learn how to monitor management operation progress and status, see [Monitoring management operations](management-operations-monitor.md). - -## Canceling management operations - -To learn how to cancel management operation, see [Canceling management operations](management-operations-cancel.md). - - -## Next steps - -- To learn how to create your first managed instance, see [Quickstart guide](instance-create-quickstart.md). -- For a features and comparison list, see [Common SQL features](../database/features-comparison.md). -- For more information about VNet configuration, see [SQL Managed Instance VNet configuration](connectivity-architecture-overview.md). -- For a quickstart that creates a managed instance and restores a database from a backup file, see [Create a managed instance](instance-create-quickstart.md). -- For a tutorial about using Azure Database Migration Service for migration, see [SQL Managed Instance migration using Database Migration Service](../../dms/tutorial-sql-server-to-managed-instance.md). 
diff --git a/articles/azure-sql/managed-instance/media/aad-security-configure-tutorial/mfa-login-prompt.png b/articles/azure-sql/managed-instance/media/aad-security-configure-tutorial/mfa-login-prompt.png deleted file mode 100644 index 3af8826a690a1..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/aad-security-configure-tutorial/mfa-login-prompt.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/aad-security-configure-tutorial/native-login.png b/articles/azure-sql/managed-instance/media/aad-security-configure-tutorial/native-login.png deleted file mode 100644 index 9c8a67c619b9a..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/aad-security-configure-tutorial/native-login.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/aad-security-configure-tutorial/ssms-db-not-accessible.png b/articles/azure-sql/managed-instance/media/aad-security-configure-tutorial/ssms-db-not-accessible.png deleted file mode 100644 index b2dfe30c78b47..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/aad-security-configure-tutorial/ssms-db-not-accessible.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/aad-security-configure-tutorial/ssms-login-prompt.png b/articles/azure-sql/managed-instance/media/aad-security-configure-tutorial/ssms-login-prompt.png deleted file mode 100644 index cbe8d8a803f19..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/aad-security-configure-tutorial/ssms-login-prompt.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/aad-security-configure-tutorial/ssms-test-table-query.png b/articles/azure-sql/managed-instance/media/aad-security-configure-tutorial/ssms-test-table-query.png deleted file mode 100644 index b10a9d72d73fc..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/aad-security-configure-tutorial/ssms-test-table-query.png and /dev/null 
differ diff --git a/articles/azure-sql/managed-instance/media/aad-security-configure-tutorial/ssms-test-table.png b/articles/azure-sql/managed-instance/media/aad-security-configure-tutorial/ssms-test-table.png deleted file mode 100644 index a9a81d38fb0d9..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/aad-security-configure-tutorial/ssms-test-table.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/alerts-create/mi-add-alerts-action-group-annotated.png b/articles/azure-sql/managed-instance/media/alerts-create/mi-add-alerts-action-group-annotated.png deleted file mode 100644 index 73b5b06627aaf..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/alerts-create/mi-add-alerts-action-group-annotated.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/alerts-create/mi-alerting-menu-annotated.png b/articles/azure-sql/managed-instance/media/alerts-create/mi-alerting-menu-annotated.png deleted file mode 100644 index 15851d37dfcb8..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/alerts-create/mi-alerting-menu-annotated.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/alerts-create/mi-configure-signal-logic-annotated.png b/articles/azure-sql/managed-instance/media/alerts-create/mi-configure-signal-logic-annotated.png deleted file mode 100644 index 4734e445415c7..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/alerts-create/mi-configure-signal-logic-annotated.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/alerts-create/mi-create-alert-action-group-smaller-annotated.png b/articles/azure-sql/managed-instance/media/alerts-create/mi-create-alert-action-group-smaller-annotated.png deleted file mode 100644 index a0a8a2c68b4a6..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/alerts-create/mi-create-alert-action-group-smaller-annotated.png and /dev/null 
differ diff --git a/articles/azure-sql/managed-instance/media/alerts-create/mi-create-metrics-alert-smaller-annotated.png b/articles/azure-sql/managed-instance/media/alerts-create/mi-create-metrics-alert-smaller-annotated.png deleted file mode 100644 index cee07bef996da..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/alerts-create/mi-create-metrics-alert-smaller-annotated.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/alerts-create/mi-email-alert-example-smaller-annotated.png b/articles/azure-sql/managed-instance/media/alerts-create/mi-email-alert-example-smaller-annotated.png deleted file mode 100644 index 25ce4a68eb329..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/alerts-create/mi-email-alert-example-smaller-annotated.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/alerts-create/mi-manage-alert-rules-smaller-annotated.png b/articles/azure-sql/managed-instance/media/alerts-create/mi-manage-alert-rules-smaller-annotated.png deleted file mode 100644 index f340371543a3f..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/alerts-create/mi-manage-alert-rules-smaller-annotated.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/alerts-create/mi-manage-alerts-browse-smaller-annotated.png b/articles/azure-sql/managed-instance/media/alerts-create/mi-manage-alerts-browse-smaller-annotated.png deleted file mode 100644 index 45477dd6d3f7b..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/alerts-create/mi-manage-alerts-browse-smaller-annotated.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/alerts-create/mi-rule-details-complete-smaller-annotated.png b/articles/azure-sql/managed-instance/media/alerts-create/mi-rule-details-complete-smaller-annotated.png deleted file mode 100644 index c4d3791bb772a..0000000000000 Binary files 
a/articles/azure-sql/managed-instance/media/alerts-create/mi-rule-details-complete-smaller-annotated.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/alerts-create/mi-select-action-group-smaller-annotated.png b/articles/azure-sql/managed-instance/media/alerts-create/mi-select-action-group-smaller-annotated.png deleted file mode 100644 index 77cc46df11369..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/alerts-create/mi-select-action-group-smaller-annotated.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auditing-configure/10_mi_ssms_new_audit.png b/articles/azure-sql/managed-instance/media/auditing-configure/10_mi_ssms_new_audit.png deleted file mode 100644 index c6e56f7c15b6b..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auditing-configure/10_mi_ssms_new_audit.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auditing-configure/11_mi_ssms_audit_browse.png b/articles/azure-sql/managed-instance/media/auditing-configure/11_mi_ssms_audit_browse.png deleted file mode 100644 index 4a37a043d3b38..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auditing-configure/11_mi_ssms_audit_browse.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auditing-configure/12_mi_ssms_sign_in_to_azure.png b/articles/azure-sql/managed-instance/media/auditing-configure/12_mi_ssms_sign_in_to_azure.png deleted file mode 100644 index b2a31edd78f74..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auditing-configure/12_mi_ssms_sign_in_to_azure.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auditing-configure/13_mi_ssms_select_subscription_account_container.png b/articles/azure-sql/managed-instance/media/auditing-configure/13_mi_ssms_select_subscription_account_container.png deleted file mode 100644 index cf6f63ba3b38a..0000000000000 Binary files 
a/articles/azure-sql/managed-instance/media/auditing-configure/13_mi_ssms_select_subscription_account_container.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auditing-configure/1_blobs_widget.png b/articles/azure-sql/managed-instance/media/auditing-configure/1_blobs_widget.png deleted file mode 100644 index e903b9c24303a..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auditing-configure/1_blobs_widget.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auditing-configure/2_create_container_button.png b/articles/azure-sql/managed-instance/media/auditing-configure/2_create_container_button.png deleted file mode 100644 index e873e1df4c8dc..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auditing-configure/2_create_container_button.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auditing-configure/3_create_container_config.png b/articles/azure-sql/managed-instance/media/auditing-configure/3_create_container_config.png deleted file mode 100644 index b245569110eaa..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auditing-configure/3_create_container_config.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auditing-configure/4_container_properties_button.png b/articles/azure-sql/managed-instance/media/auditing-configure/4_container_properties_button.png deleted file mode 100644 index 48c554400aea4..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auditing-configure/4_container_properties_button.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auditing-configure/5_container_copy_name.png b/articles/azure-sql/managed-instance/media/auditing-configure/5_container_copy_name.png deleted file mode 100644 index 18355ee724553..0000000000000 Binary files 
a/articles/azure-sql/managed-instance/media/auditing-configure/5_container_copy_name.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auditing-configure/6_storage_settings_menu.png b/articles/azure-sql/managed-instance/media/auditing-configure/6_storage_settings_menu.png deleted file mode 100644 index bd8b28dae9b26..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auditing-configure/6_storage_settings_menu.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auditing-configure/7_sas_configure.png b/articles/azure-sql/managed-instance/media/auditing-configure/7_sas_configure.png deleted file mode 100644 index 92874e1530918..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auditing-configure/7_sas_configure.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auditing-configure/8_sas_copy.png b/articles/azure-sql/managed-instance/media/auditing-configure/8_sas_copy.png deleted file mode 100644 index e8e9096da9461..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auditing-configure/8_sas_copy.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auditing-configure/9_mi_configure_diagnostics.png b/articles/azure-sql/managed-instance/media/auditing-configure/9_mi_configure_diagnostics.png deleted file mode 100644 index 3dfd9cc6062b0..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auditing-configure/9_mi_configure_diagnostics.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auditing-configure/support-operations.png b/articles/azure-sql/managed-instance/media/auditing-configure/support-operations.png deleted file mode 100644 index 57a6603888fb7..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auditing-configure/support-operations.png and /dev/null differ diff --git 
a/articles/azure-sql/managed-instance/media/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance/create-instance-configure-identities.png b/articles/azure-sql/managed-instance/media/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance/create-instance-configure-identities.png deleted file mode 100644 index fea034f2d7f58..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance/create-instance-configure-identities.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance/managed-instance-create-basic.png b/articles/azure-sql/managed-instance/media/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance/managed-instance-create-basic.png deleted file mode 100644 index 790e235c2a5a3..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance/managed-instance-create-basic.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance/managed-instance-user-assigned-managed-identity-configuration.png b/articles/azure-sql/managed-instance/media/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance/managed-instance-user-assigned-managed-identity-configuration.png deleted file mode 100644 index faeb65e8ffa29..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance/managed-instance-user-assigned-managed-identity-configuration.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/add-failover-group.png 
b/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/add-failover-group.png deleted file mode 100644 index 902d9aa71c443..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/add-failover-group.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/add-subnet-gateway-primary-vnet.png b/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/add-subnet-gateway-primary-vnet.png deleted file mode 100644 index ebf1fb6e37993..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/add-subnet-gateway-primary-vnet.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/create-failover-group.png b/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/create-failover-group.png deleted file mode 100644 index b4d9e90b1d09d..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/create-failover-group.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/create-gateway-connection.png b/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/create-gateway-connection.png deleted file mode 100644 index 31a269ca2a7dc..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/create-gateway-connection.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/create-virtual-network-gateway.png b/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/create-virtual-network-gateway.png deleted file mode 100644 index 29c70e539645b..0000000000000 Binary files 
a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/create-virtual-network-gateway.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/failover-mi-failover-group.png b/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/failover-mi-failover-group.png deleted file mode 100644 index 9fef926f9a820..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/failover-mi-failover-group.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/find-failover-group-connection-string.png b/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/find-failover-group-connection-string.png deleted file mode 100644 index 18e6d7941bce4..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/find-failover-group-connection-string.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/mi-switched-after-failover.png b/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/mi-switched-after-failover.png deleted file mode 100644 index e20a49b275cf2..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/mi-switched-after-failover.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/select-failover-group.png b/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/select-failover-group.png deleted file mode 100644 index d396265eecb02..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/select-failover-group.png and /dev/null differ diff --git 
a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/settings-for-primary-gateway.png b/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/settings-for-primary-gateway.png deleted file mode 100644 index cd4c070924d55..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/settings-for-primary-gateway.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/settings-for-secondary-gateway.png b/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/settings-for-secondary-gateway.png deleted file mode 100644 index c8a238bc44e45..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auto-failover-group-configure-sql-mi/settings-for-secondary-gateway.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/auto-failover-group-sql-mi/auto-failover-group-mi.png b/articles/azure-sql/managed-instance/media/auto-failover-group-sql-mi/auto-failover-group-mi.png deleted file mode 100644 index 66d13a36b0642..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/auto-failover-group-sql-mi/auto-failover-group-mi.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/azure-sql-managed-instance-rebrand/reservations.png b/articles/azure-sql/managed-instance/media/azure-sql-managed-instance-rebrand/reservations.png deleted file mode 100644 index dee0ba6182480..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/azure-sql-managed-instance-rebrand/reservations.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/backup-activity-monitor/output-with-differential.png b/articles/azure-sql/managed-instance/media/backup-activity-monitor/output-with-differential.png deleted file mode 100644 index ef0282a437451..0000000000000 Binary files 
a/articles/azure-sql/managed-instance/media/backup-activity-monitor/output-with-differential.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/backup-activity-monitor/output-with-full.png b/articles/azure-sql/managed-instance/media/backup-activity-monitor/output-with-full.png deleted file mode 100644 index d03aea127bd13..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/backup-activity-monitor/output-with-full.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/backup-activity-monitor/present-xevents-output.png b/articles/azure-sql/managed-instance/media/backup-activity-monitor/present-xevents-output.png deleted file mode 100644 index de2f6f7e7f45b..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/backup-activity-monitor/present-xevents-output.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/connect-application-instance/application-deployment-topologies.png b/articles/azure-sql/managed-instance/media/connect-application-instance/application-deployment-topologies.png deleted file mode 100644 index 0c9b28431acb5..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/connect-application-instance/application-deployment-topologies.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/connect-application-instance/ingress-egress-numbers.png b/articles/azure-sql/managed-instance/media/connect-application-instance/ingress-egress-numbers.png deleted file mode 100644 index 77a067d324cf5..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/connect-application-instance/ingress-egress-numbers.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/connect-application-instance/integrated-app-peering.png b/articles/azure-sql/managed-instance/media/connect-application-instance/integrated-app-peering.png deleted file mode 100644 index 3bd67c89f7f69..0000000000000 
Binary files a/articles/azure-sql/managed-instance/media/connect-application-instance/integrated-app-peering.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/connect-application-instance/route-txt.png b/articles/azure-sql/managed-instance/media/connect-application-instance/route-txt.png deleted file mode 100644 index 4f2ef36aad0e6..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/connect-application-instance/route-txt.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/connect-application-instance/vnet-peering.png b/articles/azure-sql/managed-instance/media/connect-application-instance/vnet-peering.png deleted file mode 100644 index 2a9f9f086a162..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/connect-application-instance/vnet-peering.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/create-client-sql-vm.png b/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/create-client-sql-vm.png deleted file mode 100644 index 51121b03965b7..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/create-client-sql-vm.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/new-subnet.png b/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/new-subnet.png deleted file mode 100644 index e1cd6a9f1c18d..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/new-subnet.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/rdp.png b/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/rdp.png deleted file mode 100644 index f9ac29bc196aa..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/rdp.png and /dev/null differ 
diff --git a/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/resources.png b/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/resources.png deleted file mode 100644 index 3329d6220436b..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/resources.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/ssms-connect.png b/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/ssms-connect.png deleted file mode 100644 index ad8070a494528..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/ssms-connect.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/subnets.png b/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/subnets.png deleted file mode 100644 index c578649ef6adf..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/subnets.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/vm.png b/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/vm.png deleted file mode 100644 index a4afb3cc158da..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/connect-vm-instance-configure/vm.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/connection-types-overview/proxy.png b/articles/azure-sql/managed-instance/media/connection-types-overview/proxy.png deleted file mode 100644 index 7f7c25b78cc15..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/connection-types-overview/proxy.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/connection-types-overview/redirect.png b/articles/azure-sql/managed-instance/media/connection-types-overview/redirect.png deleted file mode 
100644 index 6ed6ff9bb9b0a..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/connection-types-overview/redirect.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/connectivity-architecture-overview/connectivityarch001.png b/articles/azure-sql/managed-instance/media/connectivity-architecture-overview/connectivityarch001.png deleted file mode 100644 index 3914bffee3549..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/connectivity-architecture-overview/connectivityarch001.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/connectivity-architecture-overview/connectivityarch002.png b/articles/azure-sql/managed-instance/media/connectivity-architecture-overview/connectivityarch002.png deleted file mode 100644 index b6c3fc372d0b8..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/connectivity-architecture-overview/connectivityarch002.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/connectivity-architecture-overview/connectivityarch003.png b/articles/azure-sql/managed-instance/media/connectivity-architecture-overview/connectivityarch003.png deleted file mode 100644 index fe8461896b1cf..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/connectivity-architecture-overview/connectivityarch003.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/add-failover-group.png b/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/add-failover-group.png deleted file mode 100644 index 902d9aa71c443..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/add-failover-group.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/add-subnet-gateway-primary-vnet.png 
b/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/add-subnet-gateway-primary-vnet.png deleted file mode 100644 index ebf1fb6e37993..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/add-subnet-gateway-primary-vnet.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/create-failover-group.png b/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/create-failover-group.png deleted file mode 100644 index b4d9e90b1d09d..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/create-failover-group.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/create-gateway-connection.png b/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/create-gateway-connection.png deleted file mode 100644 index 06d5f40f7133e..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/create-gateway-connection.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/create-virtual-network-gateway.png b/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/create-virtual-network-gateway.png deleted file mode 100644 index 29c70e539645b..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/create-virtual-network-gateway.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/failover-mi-failover-group.png b/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/failover-mi-failover-group.png deleted file mode 100644 index 9fef926f9a820..0000000000000 Binary files 
a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/failover-mi-failover-group.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/mi-switched-after-failover.png b/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/mi-switched-after-failover.png deleted file mode 100644 index e20a49b275cf2..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/mi-switched-after-failover.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/networking-settings-for-secondary-mi.png b/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/networking-settings-for-secondary-mi.png deleted file mode 100644 index 9f0cbd2bcc6e4..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/networking-settings-for-secondary-mi.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/primary-sql-mi-values.png b/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/primary-sql-mi-values.png deleted file mode 100644 index e10b8dfedc419..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/primary-sql-mi-values.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/secondary-mi-failover.png b/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/secondary-mi-failover.png deleted file mode 100644 index 37b58d962bab3..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/secondary-mi-failover.png and /dev/null differ diff --git 
a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/secondary-virtual-network.png b/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/secondary-virtual-network.png deleted file mode 100644 index d895f05f9abd1..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/secondary-virtual-network.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/select-managed-instance.png b/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/select-managed-instance.png deleted file mode 100644 index 7566a539af0aa..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/select-managed-instance.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/settings-for-primary-gateway.png b/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/settings-for-primary-gateway.png deleted file mode 100644 index cd4c070924d55..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/settings-for-primary-gateway.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/settings-for-secondary-gateway.png b/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/settings-for-secondary-gateway.png deleted file mode 100644 index c8a238bc44e45..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/settings-for-secondary-gateway.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/verify-primary-subnet-range.png b/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/verify-primary-subnet-range.png 
deleted file mode 100644 index 0f4a0ed1d25b0..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/failover-group-add-instance-tutorial/verify-primary-subnet-range.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/frequently-asked-questions-faq/tail-log-backup.png b/articles/azure-sql/managed-instance/media/frequently-asked-questions-faq/tail-log-backup.png deleted file mode 100644 index 77fb37d36c1c7..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/frequently-asked-questions-faq/tail-log-backup.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-create-deployment-in-progress.png b/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-create-deployment-in-progress.png deleted file mode 100644 index 021cd5f2b515d..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-create-deployment-in-progress.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-create-tab-additional-settings.png b/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-create-tab-additional-settings.png deleted file mode 100644 index 37bbb825b7480..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-create-tab-additional-settings.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-create-tab-basics.png b/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-create-tab-basics.png deleted file mode 100644 index e2eea3d2dd128..0000000000000 Binary files 
a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-create-tab-basics.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-create-tab-configure-performance.png b/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-create-tab-configure-performance.png deleted file mode 100644 index 0981239de62c7..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-create-tab-configure-performance.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-create-tab-networking.png b/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-create-tab-networking.png deleted file mode 100644 index 8324988fe258f..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-create-tab-networking.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-create-tab-review-create.png b/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-create-tab-review-create.png deleted file mode 100644 index 1a57f459bb456..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-create-tab-review-create.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-host-name.png b/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-host-name.png deleted file mode 100644 index 57b4d3529aee5..0000000000000 Binary files 
a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-host-name.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-resources.png b/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-resources.png deleted file mode 100644 index c6f09bc219578..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-resources.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-route-table-user-defined-route.png b/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-route-table-user-defined-route.png deleted file mode 100644 index 038a202dc8727..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-route-table-user-defined-route.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-security-rules.png b/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-security-rules.png deleted file mode 100644 index 31d37b5abcdd7..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/instance-create-quickstart/azure-sql-managed-instance-security-rules.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/instance-create-quickstart/create-azure-sql-managed-instance.png b/articles/azure-sql/managed-instance/media/instance-create-quickstart/create-azure-sql-managed-instance.png deleted file mode 100644 index ef13ec74bce6e..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/instance-create-quickstart/create-azure-sql-managed-instance.png and /dev/null differ diff --git 
a/articles/azure-sql/managed-instance/media/instance-pools-overview/instance-pools1.png b/articles/azure-sql/managed-instance/media/instance-pools-overview/instance-pools1.png deleted file mode 100644 index fac3bb2d87167..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/instance-pools-overview/instance-pools1.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/instance-pools-overview/instance-pools2.png b/articles/azure-sql/managed-instance/media/instance-pools-overview/instance-pools2.png deleted file mode 100644 index f902e054fba39..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/instance-pools-overview/instance-pools2.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/instance-pools-overview/support-request.png b/articles/azure-sql/managed-instance/media/instance-pools-overview/support-request.png deleted file mode 100644 index 7012762463a4e..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/instance-pools-overview/support-request.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/log-replay-service-migrate/log-replay-service-conceptual.png b/articles/azure-sql/managed-instance/media/log-replay-service-migrate/log-replay-service-conceptual.png deleted file mode 100644 index 1d8c68c13ba55..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/log-replay-service-migrate/log-replay-service-conceptual.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/log-replay-service-migrate/lrs-generated-uri-token.png b/articles/azure-sql/managed-instance/media/log-replay-service-migrate/lrs-generated-uri-token.png deleted file mode 100644 index 03b12a5c0badf..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/log-replay-service-migrate/lrs-generated-uri-token.png and /dev/null differ diff --git 
a/articles/azure-sql/managed-instance/media/log-replay-service-migrate/lrs-sas-token-01.png b/articles/azure-sql/managed-instance/media/log-replay-service-migrate/lrs-sas-token-01.png deleted file mode 100644 index 852689b333493..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/log-replay-service-migrate/lrs-sas-token-01.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/log-replay-service-migrate/lrs-sas-token-02.png b/articles/azure-sql/managed-instance/media/log-replay-service-migrate/lrs-sas-token-02.png deleted file mode 100644 index 038e0d9a01d50..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/log-replay-service-migrate/lrs-sas-token-02.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/log-replay-service-migrate/lrs-token-structure.png b/articles/azure-sql/managed-instance/media/log-replay-service-migrate/lrs-token-structure.png deleted file mode 100644 index 71af8a0d4451c..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/log-replay-service-migrate/lrs-token-structure.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/log-replay-service-migrate/lrs-token-uri-copy-part-01.png b/articles/azure-sql/managed-instance/media/log-replay-service-migrate/lrs-token-uri-copy-part-01.png deleted file mode 100644 index 324b5db039e06..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/log-replay-service-migrate/lrs-token-uri-copy-part-01.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/log-replay-service-migrate/lrs-token-uri-copy-part-02.png b/articles/azure-sql/managed-instance/media/log-replay-service-migrate/lrs-token-uri-copy-part-02.png deleted file mode 100644 index 5b6b86d52aaac..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/log-replay-service-migrate/lrs-token-uri-copy-part-02.png and /dev/null differ diff --git 
a/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/ltr-available-backups-restore.png b/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/ltr-available-backups-restore.png deleted file mode 100644 index b06ada139e2a3..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/ltr-available-backups-restore.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/ltr-available-backups-select-database.png b/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/ltr-available-backups-select-database.png deleted file mode 100644 index afeb71e5b9466..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/ltr-available-backups-select-database.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/ltr-available-backups.png b/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/ltr-available-backups.png deleted file mode 100644 index 7f040367a4d70..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/ltr-available-backups.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/ltr-configure-ltr.png b/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/ltr-configure-ltr.png deleted file mode 100644 index eda82e1ae9cea..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/ltr-configure-ltr.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/ltr-configure-policies.png b/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/ltr-configure-policies.png deleted file mode 
100644 index 21fbaa4690661..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/ltr-configure-policies.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/ltr-restore.png b/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/ltr-restore.png deleted file mode 100644 index bb2ba6ec85204..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/ltr-restore.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/restore-job-progress-long-term.png b/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/restore-job-progress-long-term.png deleted file mode 100644 index b59c62e218912..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/long-term-backup-retention-configure/restore-job-progress-long-term.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-best-practices/database-log-file-size.png b/articles/azure-sql/managed-instance/media/managed-instance-link-best-practices/database-log-file-size.png deleted file mode 100644 index 63eec7c6e9475..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-best-practices/database-log-file-size.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-feature-overview/mi-link-ag-dag.png b/articles/azure-sql/managed-instance/media/managed-instance-link-feature-overview/mi-link-ag-dag.png deleted file mode 100644 index 0525491bde1eb..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-feature-overview/mi-link-ag-dag.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-feature-overview/mi-link-main-scenario.png 
b/articles/azure-sql/managed-instance/media/managed-instance-link-feature-overview/mi-link-main-scenario.png deleted file mode 100644 index 0963be09e9c72..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-feature-overview/mi-link-main-scenario.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/always-on-availability-groups-properties.png b/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/always-on-availability-groups-properties.png deleted file mode 100644 index 3aa1fdf88f474..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/always-on-availability-groups-properties.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/powershell-output-tnc-command.png b/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/powershell-output-tnc-command.png deleted file mode 100644 index 96f5bbad1df35..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/powershell-output-tnc-command.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/sql-server-configuration-manager-sql-server-properties.png b/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/sql-server-configuration-manager-sql-server-properties.png deleted file mode 100644 index 64b0cd7a0478f..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/sql-server-configuration-manager-sql-server-properties.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/sql-server-configuration-manager-sql-server-restart.png 
b/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/sql-server-configuration-manager-sql-server-restart.png deleted file mode 100644 index abca52e42f439..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/sql-server-configuration-manager-sql-server-restart.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/ssms-database-context-menu-managed-instance-link.png b/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/ssms-database-context-menu-managed-instance-link.png deleted file mode 100644 index 4843492a552d2..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/ssms-database-context-menu-managed-instance-link.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/ssms-output-tnchelper.png b/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/ssms-output-tnchelper.png deleted file mode 100644 index 0514701218ca6..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/ssms-output-tnchelper.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/ssms-results-expected-outcome.png b/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/ssms-results-expected-outcome.png deleted file mode 100644 index 191ee35943e78..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/ssms-results-expected-outcome.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/startup-parameters-properties.png b/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/startup-parameters-properties.png deleted file mode 100644 index 
4111e9693336b..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-preparation/startup-parameters-properties.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-cleanup-optional.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-cleanup-optional.png deleted file mode 100644 index b2c67850d8774..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-cleanup-optional.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-executing-actions.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-executing-actions.png deleted file mode 100644 index 440a26822c5a4..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-executing-actions.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-failover-type.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-failover-type.png deleted file mode 100644 index 9915b3a3d486a..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-failover-type.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-introduction.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-introduction.png deleted file mode 100644 index 2708646b54e20..0000000000000 Binary 
files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-introduction.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-login-to-azure.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-login-to-azure.png deleted file mode 100644 index 18fee111ca192..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-login-to-azure.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-results.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-results.png deleted file mode 100644 index b265ecc2e7443..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-results.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-ssms-database-context-failover-database.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-ssms-database-context-failover-database.png deleted file mode 100644 index cff59a3d8bbe6..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-ssms-database-context-failover-database.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-ssms-managed-instance-database.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-ssms-managed-instance-database.png deleted file 
mode 100644 index 0786f68d86921..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-ssms-managed-instance-database.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-ssms-sql-server-database.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-ssms-sql-server-database.png deleted file mode 100644 index d2b3d60b41b89..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-ssms-sql-server-database.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-summary.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-summary.png deleted file mode 100644 index 45caf72d5daf9..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-failover-database/link-failover-summary.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-distributed-ag-options.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-distributed-ag-options.png deleted file mode 100644 index 9fca8ff287d0b..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-distributed-ag-options.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-executing-actions.png 
b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-executing-actions.png deleted file mode 100644 index 450fe89b7b30c..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-executing-actions.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-introduction.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-introduction.png deleted file mode 100644 index 5902389a6eab1..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-introduction.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-login-to-azure-populated.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-login-to-azure-populated.png deleted file mode 100644 index 533832b245eb7..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-login-to-azure-populated.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-login-to-azure.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-login-to-azure.png deleted file mode 100644 index 7b6c7721220cf..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-login-to-azure.png and /dev/null differ diff --git 
a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-results.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-results.png deleted file mode 100644 index b9b640e7c0693..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-results.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-select-databases.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-select-databases.png deleted file mode 100644 index 187140560ec68..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-select-databases.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-sql-server-requirements.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-sql-server-requirements.png deleted file mode 100644 index 94b84f9800632..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-sql-server-requirements.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-ssms-database-context-replicate-database.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-ssms-database-context-replicate-database.png deleted file mode 100644 index 1bd90fb94a20c..0000000000000 Binary files 
a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-ssms-database-context-replicate-database.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-ssms-managed-instance-database.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-ssms-managed-instance-database.png deleted file mode 100644 index acd165f8df051..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-ssms-managed-instance-database.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-ssms-sql-server-database.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-ssms-sql-server-database.png deleted file mode 100644 index fd2bc79e94763..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-ssms-sql-server-database.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-summary.png b/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-summary.png deleted file mode 100644 index 1276293a2e32e..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/managed-instance-link-use-ssms-to-replicate-database/link-replicate-summary.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/management-endpoint-verify-built-in-firewall/03_verify_firewall.png b/articles/azure-sql/managed-instance/media/management-endpoint-verify-built-in-firewall/03_verify_firewall.png deleted file mode 100644 index 
73cbf17a341c4..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/management-endpoint-verify-built-in-firewall/03_verify_firewall.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/management-operations-cancel/cancel-operation.png b/articles/azure-sql/managed-instance/media/management-operations-cancel/cancel-operation.png deleted file mode 100644 index b740f12327a9a..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/management-operations-cancel/cancel-operation.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/management-operations-cancel/canceling-operation-result.png b/articles/azure-sql/managed-instance/media/management-operations-cancel/canceling-operation-result.png deleted file mode 100644 index 1fe1648329df8..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/management-operations-cancel/canceling-operation-result.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/management-operations-cancel/open-ongoing-operation.png b/articles/azure-sql/managed-instance/media/management-operations-cancel/open-ongoing-operation.png deleted file mode 100644 index 6a2b06129a426..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/management-operations-cancel/open-ongoing-operation.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/management-operations-monitor/monitoring-create-operation.png b/articles/azure-sql/managed-instance/media/management-operations-monitor/monitoring-create-operation.png deleted file mode 100644 index 3a2b27dda20a3..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/management-operations-monitor/monitoring-create-operation.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/management-operations-monitor/monitoring-operation-details.png 
b/articles/azure-sql/managed-instance/media/management-operations-monitor/monitoring-operation-details.png deleted file mode 100644 index b156d8859de45..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/management-operations-monitor/monitoring-operation-details.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/migrate-to-instance-from-sql-server/managed-instance-sizing.png b/articles/azure-sql/managed-instance/media/migrate-to-instance-from-sql-server/managed-instance-sizing.png deleted file mode 100644 index df6cb150be211..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/migrate-to-instance-from-sql-server/managed-instance-sizing.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/migrate-to-instance-from-sql-server/migration-flow.png b/articles/azure-sql/managed-instance/media/migrate-to-instance-from-sql-server/migration-flow.png deleted file mode 100644 index 3d3b7fe601e52..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/migrate-to-instance-from-sql-server/migration-flow.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/migrate-to-instance-from-sql-server/migration-process.png b/articles/azure-sql/managed-instance/media/migrate-to-instance-from-sql-server/migration-process.png deleted file mode 100644 index 82aedd7e1de95..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/migrate-to-instance-from-sql-server/migration-process.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/point-in-time-restore/delete-database-from-mi.png b/articles/azure-sql/managed-instance/media/point-in-time-restore/delete-database-from-mi.png deleted file mode 100644 index b080695f434c3..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/point-in-time-restore/delete-database-from-mi.png and /dev/null differ diff --git 
a/articles/azure-sql/managed-instance/media/point-in-time-restore/restore-database-to-mi.png b/articles/azure-sql/managed-instance/media/point-in-time-restore/restore-database-to-mi.png deleted file mode 100644 index 56ab3b6612257..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/point-in-time-restore/restore-database-to-mi.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/point-in-time-restore/restore-deleted-sql-managed-instance-annotated.png b/articles/azure-sql/managed-instance/media/point-in-time-restore/restore-deleted-sql-managed-instance-annotated.png deleted file mode 100644 index 51be5ffc13dba..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/point-in-time-restore/restore-deleted-sql-managed-instance-annotated.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/point-to-site-p2s-configure/download-vpn-client.png b/articles/azure-sql/managed-instance/media/point-to-site-p2s-configure/download-vpn-client.png deleted file mode 100644 index 7a8d89d65ab25..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/point-to-site-p2s-configure/download-vpn-client.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/point-to-site-p2s-configure/ssms-connect.png b/articles/azure-sql/managed-instance/media/point-to-site-p2s-configure/ssms-connect.png deleted file mode 100644 index ad8070a494528..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/point-to-site-p2s-configure/ssms-connect.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/point-to-site-p2s-configure/vpn-client-defender.png b/articles/azure-sql/managed-instance/media/point-to-site-p2s-configure/vpn-client-defender.png deleted file mode 100644 index ee4bd9395da9a..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/point-to-site-p2s-configure/vpn-client-defender.png and /dev/null differ diff --git 
a/articles/azure-sql/managed-instance/media/point-to-site-p2s-configure/vpn-connection-succeeded.png b/articles/azure-sql/managed-instance/media/point-to-site-p2s-configure/vpn-connection-succeeded.png deleted file mode 100644 index 30773a93301f3..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/point-to-site-p2s-configure/vpn-connection-succeeded.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/point-to-site-p2s-configure/vpn-connection.png b/articles/azure-sql/managed-instance/media/point-to-site-p2s-configure/vpn-connection.png deleted file mode 100644 index d687d5f367f06..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/point-to-site-p2s-configure/vpn-connection.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/point-to-site-p2s-configure/vpn-connection2.png b/articles/azure-sql/managed-instance/media/point-to-site-p2s-configure/vpn-connection2.png deleted file mode 100644 index 56f0d92e10c22..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/point-to-site-p2s-configure/vpn-connection2.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/public-endpoint-configure/mi-nsg-rules.png b/articles/azure-sql/managed-instance/media/public-endpoint-configure/mi-nsg-rules.png deleted file mode 100644 index 481a3a5078890..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/public-endpoint-configure/mi-nsg-rules.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/public-endpoint-configure/mi-overview.png b/articles/azure-sql/managed-instance/media/public-endpoint-configure/mi-overview.png deleted file mode 100644 index f4178c4f9e380..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/public-endpoint-configure/mi-overview.png and /dev/null differ diff --git 
a/articles/azure-sql/managed-instance/media/public-endpoint-configure/mi-public-endpoint-conn-string.png b/articles/azure-sql/managed-instance/media/public-endpoint-configure/mi-public-endpoint-conn-string.png deleted file mode 100644 index dd91f0f1e5de4..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/public-endpoint-configure/mi-public-endpoint-conn-string.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/public-endpoint-configure/mi-vnet-config.png b/articles/azure-sql/managed-instance/media/public-endpoint-configure/mi-vnet-config.png deleted file mode 100644 index 2d5ea33668de9..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/public-endpoint-configure/mi-vnet-config.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/public-endpoint-configure/mi-vnet-subnet.png b/articles/azure-sql/managed-instance/media/public-endpoint-configure/mi-vnet-subnet.png deleted file mode 100644 index 6837f95a006ee..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/public-endpoint-configure/mi-vnet-subnet.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/public-endpoint-overview/managed-instance-vnet.png b/articles/azure-sql/managed-instance/media/public-endpoint-overview/managed-instance-vnet.png deleted file mode 100644 index 9a2e4673240c3..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/public-endpoint-overview/managed-instance-vnet.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/replication-between-two-instances-configure-tutorial/sqlmi-sqlmi-repl.png b/articles/azure-sql/managed-instance/media/replication-between-two-instances-configure-tutorial/sqlmi-sqlmi-repl.png deleted file mode 100644 index 4305a69f25d66..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/replication-between-two-instances-configure-tutorial/sqlmi-sqlmi-repl.png and /dev/null 
differ diff --git a/articles/azure-sql/managed-instance/media/replication-transactional-overview/01-single-instance-asdbmi-pubdist.png b/articles/azure-sql/managed-instance/media/replication-transactional-overview/01-single-instance-asdbmi-pubdist.png deleted file mode 100644 index 4be58a84dcb5c..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/replication-transactional-overview/01-single-instance-asdbmi-pubdist.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/replication-transactional-overview/02-separate-instances-asdbmi-pubdist.png b/articles/azure-sql/managed-instance/media/replication-transactional-overview/02-separate-instances-asdbmi-pubdist.png deleted file mode 100644 index 5a17e0e3fcebe..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/replication-transactional-overview/02-separate-instances-asdbmi-pubdist.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/replication-transactional-overview/03-azure-sql-db-subscriber.png b/articles/azure-sql/managed-instance/media/replication-transactional-overview/03-azure-sql-db-subscriber.png deleted file mode 100644 index d3485e7663244..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/replication-transactional-overview/03-azure-sql-db-subscriber.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/replication-transactional-overview/replication-to-sql-database.png b/articles/azure-sql/managed-instance/media/replication-transactional-overview/replication-to-sql-database.png deleted file mode 100644 index 1fbd4e70486bd..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/replication-transactional-overview/replication-to-sql-database.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/configure-a-record.png 
b/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/configure-a-record.png deleted file mode 100644 index 07569aadcfa8b..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/configure-a-record.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/configure-vnet-link.png b/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/configure-vnet-link.png deleted file mode 100644 index 9d2ff3724e8cd..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/configure-vnet-link.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/create-private-dns-zone.png b/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/create-private-dns-zone.png deleted file mode 100644 index 66a510dc958ff..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/create-private-dns-zone.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/snapshot-agent-security.png b/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/snapshot-agent-security.png deleted file mode 100644 index 839d70a96df4b..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/snapshot-agent-security.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/sqlmi-to-sql-replication.png 
b/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/sqlmi-to-sql-replication.png deleted file mode 100644 index 835f35bb5712c..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/sqlmi-to-sql-replication.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/test-connectivity-to-mi.png b/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/test-connectivity-to-mi.png deleted file mode 100644 index e9fed38ba0f92..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/test-connectivity-to-mi.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/use-same-vnet-for-distributor.png b/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/use-same-vnet-for-distributor.png deleted file mode 100644 index 857d4220d8651..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/replication-two-instances-and-sql-server-configure-tutorial/use-same-vnet-for-distributor.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/credential.png b/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/credential.png deleted file mode 100644 index be246ddf81486..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/credential.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/file-list.png b/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/file-list.png deleted file mode 100644 index 
92bb421ee6415..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/file-list.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/new-restore-wizard.png b/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/new-restore-wizard.png deleted file mode 100644 index 80063a0e2b2f3..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/new-restore-wizard.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-backup-file-location.png b/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-backup-file-location.png deleted file mode 100644 index 20bb2af16f76a..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-backup-file-location.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-backup-file-selection.png b/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-backup-file-selection.png deleted file mode 100644 index cef5295fb88c9..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-backup-file-selection.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-connect-subscription-sign-in.png b/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-connect-subscription-sign-in.png deleted file mode 100644 index 6c80519124766..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-connect-subscription-sign-in.png and /dev/null differ diff --git 
a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-finish-restore.png b/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-finish-restore.png deleted file mode 100644 index 4233e1c1ccb53..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-finish-restore.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-generate-shared-access-signature.png b/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-generate-shared-access-signature.png deleted file mode 100644 index 8c145c68b6988..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-generate-shared-access-signature.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-restored-database.png b/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-restored-database.png deleted file mode 100644 index 7c04c4f5fad9e..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-restored-database.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-running-restore.png b/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-running-restore.png deleted file mode 100644 index f5a4a383a46c5..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-running-restore.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-select-container.png 
b/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-select-container.png deleted file mode 100644 index 8ced1b98907d4..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-select-container.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-select-device.png b/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-select-device.png deleted file mode 100644 index 4c34221e841e3..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-select-device.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-select-storage-account.png b/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-select-storage-account.png deleted file mode 100644 index fb9fa78776885..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-select-storage-account.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-select-subscription.png b/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-select-subscription.png deleted file mode 100644 index e8c95495a4650..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-select-subscription.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-sign-in-session.png b/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-sign-in-session.png deleted file mode 100644 index dd81001478213..0000000000000 Binary 
files a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-sign-in-session.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-start-restore.png b/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-start-restore.png deleted file mode 100644 index e268a854778c6..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-start-restore.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-start.png b/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-start.png deleted file mode 100644 index eb25135eeb8fe..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore-wizard-start.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore.png b/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore.png deleted file mode 100644 index 42b22ae3645b6..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/restore-sample-database-quickstart/restore.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/server-trust-group-overview/configure-sql-trust-group.png b/articles/azure-sql/managed-instance/media/server-trust-group-overview/configure-sql-trust-group.png deleted file mode 100644 index 163d5f0c96491..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/server-trust-group-overview/configure-sql-trust-group.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/server-trust-group-overview/confirm-delete-sql-trust-group-2.png 
b/articles/azure-sql/managed-instance/media/server-trust-group-overview/confirm-delete-sql-trust-group-2.png deleted file mode 100644 index 4bb22300020f1..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/server-trust-group-overview/confirm-delete-sql-trust-group-2.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/server-trust-group-overview/delete-sql-trust-group.png b/articles/azure-sql/managed-instance/media/server-trust-group-overview/delete-sql-trust-group.png deleted file mode 100644 index 62008363828bc..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/server-trust-group-overview/delete-sql-trust-group.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/server-trust-group-overview/new-sql-trust-group-button.png b/articles/azure-sql/managed-instance/media/server-trust-group-overview/new-sql-trust-group-button.png deleted file mode 100644 index c9205d15467e3..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/server-trust-group-overview/new-sql-trust-group-button.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/server-trust-group-overview/new-sql-trust-group.png b/articles/azure-sql/managed-instance/media/server-trust-group-overview/new-sql-trust-group.png deleted file mode 100644 index 469684a48e218..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/server-trust-group-overview/new-sql-trust-group.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/server-trust-group-overview/security-sql-trust-groups.png b/articles/azure-sql/managed-instance/media/server-trust-group-overview/security-sql-trust-groups.png deleted file mode 100644 index ed71731c1c79f..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/server-trust-group-overview/security-sql-trust-groups.png and /dev/null differ diff --git 
a/articles/azure-sql/managed-instance/media/server-trust-group-overview/select-delete-sql-trust-group.png b/articles/azure-sql/managed-instance/media/server-trust-group-overview/select-delete-sql-trust-group.png deleted file mode 100644 index d1995ad128e9c..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/server-trust-group-overview/select-delete-sql-trust-group.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/server-trust-group-overview/server-trust-group-create-blade.png b/articles/azure-sql/managed-instance/media/server-trust-group-overview/server-trust-group-create-blade.png deleted file mode 100644 index cda2f835dfbb1..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/server-trust-group-overview/server-trust-group-create-blade.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/server-trust-group-overview/server-trust-group-create-new-group.png b/articles/azure-sql/managed-instance/media/server-trust-group-overview/server-trust-group-create-new-group.png deleted file mode 100644 index 390f7f7a94618..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/server-trust-group-overview/server-trust-group-create-new-group.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/server-trust-group-overview/server-trust-group-manage-delete-confirm.png b/articles/azure-sql/managed-instance/media/server-trust-group-overview/server-trust-group-manage-delete-confirm.png deleted file mode 100644 index e27d1d5a3aa2c..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/server-trust-group-overview/server-trust-group-manage-delete-confirm.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/server-trust-group-overview/server-trust-group-manage-delete.png b/articles/azure-sql/managed-instance/media/server-trust-group-overview/server-trust-group-manage-delete.png deleted file mode 100644 index 
63e22028fdc65..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/server-trust-group-overview/server-trust-group-manage-delete.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/server-trust-group-overview/server-trust-group-manage-select.png b/articles/azure-sql/managed-instance/media/server-trust-group-overview/server-trust-group-manage-select.png deleted file mode 100644 index 5a02536985b36..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/server-trust-group-overview/server-trust-group-manage-select.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/server-trust-group-overview/sql-trust-groups.png b/articles/azure-sql/managed-instance/media/server-trust-group-overview/sql-trust-groups.png deleted file mode 100644 index 58f8a8f1c7981..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/server-trust-group-overview/sql-trust-groups.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/service-endpoint-policies-configure/add-a-resource.png b/articles/azure-sql/managed-instance/media/service-endpoint-policies-configure/add-a-resource.png deleted file mode 100644 index 79d216b1a80d3..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/service-endpoint-policies-configure/add-a-resource.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/service-endpoint-policies-configure/add-an-alias.png b/articles/azure-sql/managed-instance/media/service-endpoint-policies-configure/add-an-alias.png deleted file mode 100644 index 0137577b44931..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/service-endpoint-policies-configure/add-an-alias.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/service-endpoint-policies-configure/associate-service-endpoint-policy.png 
b/articles/azure-sql/managed-instance/media/service-endpoint-policies-configure/associate-service-endpoint-policy.png deleted file mode 100644 index 8258b4ac97c05..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/service-endpoint-policies-configure/associate-service-endpoint-policy.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/sql-managed-instance-paas-overview/application-deployment-topologies.png b/articles/azure-sql/managed-instance/media/sql-managed-instance-paas-overview/application-deployment-topologies.png deleted file mode 100644 index 38d0f8a1b906c..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/sql-managed-instance-paas-overview/application-deployment-topologies.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/sql-managed-instance-paas-overview/key-features.png b/articles/azure-sql/managed-instance/media/sql-managed-instance-paas-overview/key-features.png deleted file mode 100644 index f4c48e21656d7..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/sql-managed-instance-paas-overview/key-features.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/sql-managed-instance-paas-overview/migration.png b/articles/azure-sql/managed-instance/media/sql-managed-instance-paas-overview/migration.png deleted file mode 100644 index e2857d5d0c704..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/sql-managed-instance-paas-overview/migration.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/tde-certificate-migrate/backup-onprem-certificate.png b/articles/azure-sql/managed-instance/media/tde-certificate-migrate/backup-onprem-certificate.png deleted file mode 100644 index 6d0aca04b7027..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/tde-certificate-migrate/backup-onprem-certificate.png and /dev/null differ diff --git 
a/articles/azure-sql/managed-instance/media/tde-certificate-migrate/onprem-certificate-list.png b/articles/azure-sql/managed-instance/media/tde-certificate-migrate/onprem-certificate-list.png deleted file mode 100644 index bd9c0847954c2..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/tde-certificate-migrate/onprem-certificate-list.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/threat-detection-configure/threat-detection.png b/articles/azure-sql/managed-instance/media/threat-detection-configure/threat-detection.png deleted file mode 100644 index c381f9fe28f37..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/threat-detection-configure/threat-detection.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/timezones-overview/01-setting_timezone-during-instance-creation.png b/articles/azure-sql/managed-instance/media/timezones-overview/01-setting_timezone-during-instance-creation.png deleted file mode 100644 index eec3057228c3a..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/timezones-overview/01-setting_timezone-during-instance-creation.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/transact-sql-tsql-differences-sql-server/migration.png b/articles/azure-sql/managed-instance/media/transact-sql-tsql-differences-sql-server/migration.png deleted file mode 100644 index 5f912c3f02e6f..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/transact-sql-tsql-differences-sql-server/migration.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/virtual-cluster-delete/virtual-clusters-delete.png b/articles/azure-sql/managed-instance/media/virtual-cluster-delete/virtual-clusters-delete.png deleted file mode 100644 index 2b025993dd155..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/virtual-cluster-delete/virtual-clusters-delete.png and /dev/null differ 
diff --git a/articles/azure-sql/managed-instance/media/virtual-cluster-delete/virtual-clusters-search.png b/articles/azure-sql/managed-instance/media/virtual-cluster-delete/virtual-clusters-search.png deleted file mode 100644 index 87936d69921e9..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/virtual-cluster-delete/virtual-clusters-search.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/virtual-network-subnet-create-arm-template/create-mi-network-arm.png b/articles/azure-sql/managed-instance/media/virtual-network-subnet-create-arm-template/create-mi-network-arm.png deleted file mode 100644 index fdc1add9de684..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/virtual-network-subnet-create-arm-template/create-mi-network-arm.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/vnet-subnet-move-instance/how-to-select-subnet.png b/articles/azure-sql/managed-instance/media/vnet-subnet-move-instance/how-to-select-subnet.png deleted file mode 100644 index 0a7ae5b29f189..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/vnet-subnet-move-instance/how-to-select-subnet.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/vnet-subnet-move-instance/monitor-subnet-move-operation.png b/articles/azure-sql/managed-instance/media/vnet-subnet-move-instance/monitor-subnet-move-operation.png deleted file mode 100644 index d7eeaf94d9b8d..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/vnet-subnet-move-instance/monitor-subnet-move-operation.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/vnet-subnet-move-instance/subnet-grouping-per-state.png b/articles/azure-sql/managed-instance/media/vnet-subnet-move-instance/subnet-grouping-per-state.png deleted file mode 100644 index 0985097409e6e..0000000000000 Binary files 
a/articles/azure-sql/managed-instance/media/vnet-subnet-move-instance/subnet-grouping-per-state.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/winauth-azuread/auth-kerberos.svg b/articles/azure-sql/managed-instance/media/winauth-azuread/auth-kerberos.svg deleted file mode 100644 index e8445ce2f505f..0000000000000 --- a/articles/azure-sql/managed-instance/media/winauth-azuread/auth-kerberos.svg +++ /dev/null @@ -1,371 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/articles/azure-sql/managed-instance/media/winauth-azuread/azure-portal-azuread-app-registrations.png b/articles/azure-sql/managed-instance/media/winauth-azuread/azure-portal-azuread-app-registrations.png deleted file mode 100644 index bd093dc3b627c..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/winauth-azuread/azure-portal-azuread-app-registrations.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/winauth-azuread/azure-portal-configure-permissions-admin-consent.png b/articles/azure-sql/managed-instance/media/winauth-azuread/azure-portal-configure-permissions-admin-consent.png deleted file mode 100644 index 97e8be147348f..0000000000000 Binary files 
a/articles/azure-sql/managed-instance/media/winauth-azuread/azure-portal-configure-permissions-admin-consent.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/winauth-azuread/azure-portal-managed-instance-identity-enable-system-assigned-service-principal.png b/articles/azure-sql/managed-instance/media/winauth-azuread/azure-portal-managed-instance-identity-enable-system-assigned-service-principal.png deleted file mode 100644 index f11d8afb2e3c4..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/winauth-azuread/azure-portal-managed-instance-identity-enable-system-assigned-service-principal.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/winauth-azuread/configure-policy-kdc-proxy-server-settings-detail.png b/articles/azure-sql/managed-instance/media/winauth-azuread/configure-policy-kdc-proxy-server-settings-detail.png deleted file mode 100644 index 75352f2c6ae74..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/winauth-azuread/configure-policy-kdc-proxy-server-settings-detail.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/winauth-azuread/configure-policy-kdc-proxy.png b/articles/azure-sql/managed-instance/media/winauth-azuread/configure-policy-kdc-proxy.png deleted file mode 100644 index 317d4c6972167..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/winauth-azuread/configure-policy-kdc-proxy.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/winauth-azuread/decision-authentication.svg b/articles/azure-sql/managed-instance/media/winauth-azuread/decision-authentication.svg deleted file mode 100644 index 0bb1da2e7db1c..0000000000000 --- a/articles/azure-sql/managed-instance/media/winauth-azuread/decision-authentication.svg +++ /dev/null @@ -1,506 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/articles/azure-sql/managed-instance/media/winauth-azuread/policy-allow-retrieving-cloud-kerberos-ticket-during-logon.png b/articles/azure-sql/managed-instance/media/winauth-azuread/policy-allow-retrieving-cloud-kerberos-ticket-during-logon.png deleted file mode 100644 index f0fe6b7ba2f81..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/winauth-azuread/policy-allow-retrieving-cloud-kerberos-ticket-during-logon.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/winauth-azuread/policy-enable-cloud-kerberos-ticket-during-logon-setting.png b/articles/azure-sql/managed-instance/media/winauth-azuread/policy-enable-cloud-kerberos-ticket-during-logon-setting.png deleted file mode 100644 index be1af7f173135..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/winauth-azuread/policy-enable-cloud-kerberos-ticket-during-logon-setting.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/media/winauth-azuread/winauth-connect-to-managed-instance.png 
b/articles/azure-sql/managed-instance/media/winauth-azuread/winauth-connect-to-managed-instance.png deleted file mode 100644 index fb4bb2cf46cd8..0000000000000 Binary files a/articles/azure-sql/managed-instance/media/winauth-azuread/winauth-connect-to-managed-instance.png and /dev/null differ diff --git a/articles/azure-sql/managed-instance/migrate-sql-server-users-to-instance-transact-sql-tsql-tutorial.md b/articles/azure-sql/managed-instance/migrate-sql-server-users-to-instance-transact-sql-tsql-tutorial.md deleted file mode 100644 index 2269892c74336..0000000000000 --- a/articles/azure-sql/managed-instance/migrate-sql-server-users-to-instance-transact-sql-tsql-tutorial.md +++ /dev/null @@ -1,339 +0,0 @@ ---- -title: Migrate SQL Server Windows users and groups to SQL Managed Instance using T-SQL -description: Learn about how to migrate Windows users and groups in a SQL Server instance to Azure SQL Managed Instance -services: sql-database -ms.service: sql-managed-instance -ms.subservice: security -ms.custom: seo-lt-2019, sqldbrb=1 -ms.topic: tutorial -author: GitHubMirek -ms.author: mireks -ms.reviewer: vanto -ms.date: 05/10/2021 ---- - -# Tutorial: Migrate Windows users and groups in a SQL Server instance to Azure SQL Managed Instance using T-SQL DDL syntax -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article takes you through the process of migrating your on-premises Windows users and groups in your SQL Server to Azure SQL Managed Instance using T-SQL syntax. 
- -In this tutorial, you learn how to: - -> [!div class="checklist"] -> -> - Create logins for SQL Server -> - Create a test database for migration -> - Create logins, users, and roles -> - Backup and restore your database to SQL Managed Instance (MI) -> - Manually migrate users to MI using ALTER USER syntax -> - Testing authentication with the new mapped users - -## Prerequisites - -To complete this tutorial, the following prerequisites apply: - -- The Windows domain is federated with Azure Active Directory (Azure AD). -- Access to Active Directory to create users/groups. -- An existing SQL Server in your on-premises environment. -- An existing SQL Managed Instance. See [Quickstart: Create a SQL Managed Instance](instance-create-quickstart.md). - - A `sysadmin` in the SQL Managed Instance must be used to create Azure AD logins. -- [Create an Azure AD admin for SQL Managed Instance](../database/authentication-aad-configure.md#provision-azure-ad-admin-sql-managed-instance). -- You can connect to your SQL Managed Instance within your network. See the following articles for additional information: - - [Connect your application to Azure SQL Managed Instance](connect-application-instance.md) - - [Quickstart: Configure a point-to-site connection to an Azure SQL Managed Instance from on-premises](point-to-site-p2s-configure.md) - - [Configure public endpoint in Azure SQL Managed Instance](public-endpoint-configure.md) - -## T-SQL DDL syntax - -Below are the T-SQL DDL syntax used to support the migration of Windows users and groups from a SQL Server instance to SQL Managed Instance with Azure AD authentication. - -```sql --- For individual Windows users with logins -ALTER USER [domainName\userName] WITH LOGIN = [loginName@domainName.com]; - ---For individual groups with logins -ALTER USER [domainName\groupName] WITH LOGIN=[groupName] -``` - -## Arguments - -_domainName_
    -Specifies the domain name of the user. - -_userName_
    -Specifies the name of the user identified inside the database. - -_= loginName\@domainName.com_
    -Remaps a user to the Azure AD login - -_groupName_
    -Specifies the name of the group identified inside the database. - -## Part 1: Create logins in SQL Server for Windows users and groups - -> [!IMPORTANT] -> The following syntax creates a user and a group login in your SQL Server. You'll need to make sure that the user and group exist inside your Active Directory (AD) before executing the below syntax.

    -> Users: testUser1, testGroupUser
    -> Group: migration - testGroupUser needs to belong to the migration group in AD - -The example below creates a login in SQL Server for an account named _testUser1_ under the domain _aadsqlmi_. - -```sql --- Sign into SQL Server as a sysadmin or a user that can create logins and databases - -use master; -go - --- Create Windows login -create login [aadsqlmi\testUser1] from windows; -go; - -/** Create a Windows group login which contains one user [aadsqlmi\testGroupUser]. -testGroupUser will need to be added to the migration group in Active Directory -**/ -create login [aadsqlmi\migration] from windows; -go; - - --- Check logins were created -select * from sys.server_principals; -go; -``` - -Create a database for this test. - -```sql --- Create a database called [migration] -create database migration -go -``` - -## Part 2: Create Windows users and groups, then add roles and permissions - -Use the following syntax to create the test user. - -```sql -use migration; -go - --- Create Windows user [aadsqlmi\testUser1] with login -create user [aadsqlmi\testUser1] from login [aadsqlmi\testUser1]; -go -``` - -Check the user permissions: - -```sql --- Check the user in the Metadata -select * from sys.database_principals; -go - --- Display the permissions – should only have CONNECT permissions -select user_name(grantee_principal_id), * from sys.database_permissions; -go -``` - -Create a role and assign your test user to this role: - -```sql --- Create a role with some permissions and assign the user to the role -create role UserMigrationRole; -go - -grant CONNECT, SELECT, View DATABASE STATE, VIEW DEFINITION to UserMigrationRole; -go - -alter role UserMigrationRole add member [aadsqlmi\testUser1]; -go -``` - -Use the following query to display user names assigned to a specific role: - -```sql --- Display user name assigned to a specific role -SELECT DP1.name AS DatabaseRoleName, - isnull (DP2.name, 'No members') AS DatabaseUserName - FROM sys.database_role_members AS DRM 
- RIGHT OUTER JOIN sys.database_principals AS DP1 - ON DRM.role_principal_id = DP1.principal_id - LEFT OUTER JOIN sys.database_principals AS DP2 - ON DRM.member_principal_id = DP2.principal_id -WHERE DP1.type = 'R' -ORDER BY DP1.name; -``` - -Use the following syntax to create a group. Then add the group to the role `db_owner`. - -```sql --- Create Windows group -create user [aadsqlmi\migration] from login [aadsqlmi\migration]; -go - --- ADD 'db_owner' role to this group -sp_addrolemember 'db_owner', 'aadsqlmi\migration'; -go - ---Check the db_owner role for 'aadsqlmi\migration' group -select is_rolemember('db_owner', 'aadsqlmi\migration') -go --- Output ( 1 means YES) -``` - -Create a test table and add some data using the following syntax: - -```sql --- Create a table and add data -create table test ( a int, b int); -go - -insert into test values (1,10) -go - --- Check the table values -select * from test; -go -``` - -## Part 3: Backup and restore the individual user database to SQL Managed Instance - -Create a backup of the migration database using the article [Copy Databases with Backup and Restore](/sql/relational-databases/databases/copy-databases-with-backup-and-restore), or use the following syntax: - -```sql -use master; -go -backup database migration to disk = 'C:\Migration\migration.bak'; -go -``` - -Follow our [Quickstart: Restore a database to a SQL Managed Instance](restore-sample-database-quickstart.md). - -## Part 4: Migrate users to SQL Managed Instance - -Execute the ALTER USER command to complete the migration process on SQL Managed Instance. - -1. Sign into your SQL Managed Instance using the Azure AD admin account for SQL Managed Instance. Then create your Azure AD login in the SQL Managed Instance using the following syntax. For more information, see [Tutorial: SQL Managed Instance security in Azure SQL Database using Azure AD server principals (logins)](aad-security-configure-tutorial.md). 
- - ```sql - use master - go - - -- Create login for AAD user [testUser1@aadsqlmi.net] - create login [testUser1@aadsqlmi.net] from external provider - go - - -- Create login for the Azure AD group [migration]. This group contains one user [testGroupUser@aadsqlmi.net] - create login [migration] from external provider - go - - --Check the two new logins - select * from sys.server_principals - go - ``` - -1. Check your migration for the correct database, table, and principals. - - ```sql - -- Switch to the database migration that is already restored for MI - use migration; - go - - --Check if the restored table test exist and contain a row - select * from test; - go - - -- Check that the SQL on-premises Windows user/group exists - select * from sys.database_principals; - go - -- the old user aadsqlmi\testUser1 should be there - -- the old group aadsqlmi\migration should be there - ``` - -1. Use the ALTER USER syntax to map the on-premises user to the Azure AD login. - - ```sql - /** Execute the ALTER USER command to alter the Windows user [aadsqlmi\testUser1] - to map to the Azure AD user testUser1@aadsqlmi.net - **/ - alter user [aadsqlmi\testUser1] with login = [testUser1@aadsqlmi.net]; - go - - -- Check the principal - select * from sys.database_principals; - go - -- New user testUser1@aadsqlmi.net should be there instead - --Check new user permissions - should only have CONNECT permissions - select user_name(grantee_principal_id), * from sys.database_permissions; - go - - -- Check a specific role - -- Display Db user name assigned to a specific role - SELECT DP1.name AS DatabaseRoleName, - isnull (DP2.name, 'No members') AS DatabaseUserName - FROM sys.database_role_members AS DRM - RIGHT OUTER JOIN sys.database_principals AS DP1 - ON DRM.role_principal_id = DP1.principal_id - LEFT OUTER JOIN sys.database_principals AS DP2 - ON DRM.member_principal_id = DP2.principal_id - WHERE DP1.type = 'R' - ORDER BY DP1.name; - ``` - -1. 
Use the ALTER USER syntax to map the on-premises group to the Azure AD login. - - ```sql - /** Execute ALTER USER command to alter the Windows group [aadsqlmi\migration] - to the Azure AD group login [migration] - **/ - alter user [aadsqlmi\migration] with login = [migration]; - -- old group migration is changed to Azure AD migration group - go - - -- Check the principal - select * from sys.database_principals; - go - - --Check the group permission - should only have CONNECT permissions - select user_name(grantee_principal_id), * from sys.database_permissions; - go - - --Check the db_owner role for 'aadsqlmi\migration' user - select is_rolemember('db_owner', 'migration') - go - -- Output 1 means 'YES' - ``` - -## Part 5: Testing Azure AD user or group authentication - -Test authenticating to SQL Managed Instance using the user previously mapped to the Azure AD login using the ALTER USER syntax. - -1. Log into the federated VM using your Azure SQL Managed Instance subscription as `aadsqlmi\testUser1` -1. Using SQL Server Management Studio (SSMS), sign into your SQL Managed Instance using **Active Directory Integrated** authentication, connecting -to the database `migration`. - 1. You can also sign in using the testUser1@aadsqlmi.net credentials with the SSMS option **Active Directory – Universal with MFA support**. However, in this case, you can't use the Single Sign On mechanism and you must type a password. You won't need to use a federated VM to log in to your SQL Managed Instance. -1. As part of the role member **SELECT**, you can select from the `test` table - - ```sql - Select * from test -- and see one row (1,10) - ``` - -Test authenticating to a SQL Managed Instance using a member of a Windows group `migration`. The user `aadsqlmi\testGroupUser` should have been added to the group `migration` before the migration. - -1. Log into the federated VM using your Azure SQL Managed Instance subscription as `aadsqlmi\testGroupUser` -1. 
Using SSMS with **Active Directory Integrated** authentication, connect to the Azure SQL Managed Instance server and the database `migration` - 1. You can also sign in using the testGroupUser@aadsqlmi.net credentials with the SSMS option **Active Directory – Universal with MFA support**. However, in this case, you can't use the Single Sign On mechanism and you must type a password. You won't need to use a federated VM to log into your SQL Managed Instance. -1. As part of the `db_owner` role, you can create a new table. - - ```sql - -- Create table named 'new' with a default schema - Create table dbo.new ( a int, b int) - ``` - -> [!NOTE] -> Due to a known design issue for Azure SQL Database, a create a table statement executed as a member of a group will fail with the following error:

    -> `Msg 2760, Level 16, State 1, Line 4 -The specified schema name "testGroupUser@aadsqlmi.net" either does not exist or you do not have permission to use it.`

    -> The current workaround is to create a table with an existing schema in the case above - -## Next steps - -[Tutorial: Migrate SQL Server to Azure SQL Managed Instance offline using DMS](../../dms/tutorial-sql-server-to-managed-instance.md?toc=/azure/sql-database/toc.json) diff --git a/articles/azure-sql/managed-instance/minimal-tls-version-configure.md b/articles/azure-sql/managed-instance/minimal-tls-version-configure.md deleted file mode 100644 index 3911d2542b55a..0000000000000 --- a/articles/azure-sql/managed-instance/minimal-tls-version-configure.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: Configure minimal TLS version - managed instance -description: "Learn how to configure minimal TLS version for managed instance" -services: sql-database -ms.service: sql-managed-instance -ms.subservice: security -ms.custom: devx-track-azurepowershell, devx-track-azurecli -ms.topic: how-to -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: -ms.date: 05/25/2020 -ms.devlang: azurecli ---- -# Configure minimal TLS version in Azure SQL Managed Instance -The Minimal [Transport Layer Security (TLS)](https://support.microsoft.com/help/3135244/tls-1-2-support-for-microsoft-sql-server) Version setting allows customers to control the version of TLS used by their Azure SQL Managed Instance. - -At present we support TLS 1.0, 1.1 and 1.2. Setting a Minimal TLS Version ensures that subsequent, newer TLS versions are supported. For example, e.g., choosing a TLS version greater than 1.1. means only connections with TLS 1.1 and 1.2 are accepted and TLS 1.0 is rejected. After testing to confirm your applications supports it, we recommend setting minimal TLS version to 1.2 since it includes fixes for vulnerabilities found in previous versions and is the highest version of TLS supported in Azure SQL Managed Instance. - -For customers with applications that rely on older versions of TLS, we recommend setting the Minimal TLS Version per the requirements of your applications. 
For customers that rely on applications to connect using an unencrypted connection, we recommend not setting any Minimal TLS Version. - -For more information, see [TLS considerations for SQL Database connectivity](../database/connect-query-content-reference-guide.md#tls-considerations-for-database-connectivity). - -After setting the Minimal TLS Version, login attempts from clients that are using a TLS version lower than the Minimal TLS Version of the server will fail with following error: - -```output -Error 47072 -Login failed with invalid TLS version -``` - -## Set minimal TLS version via PowerShell - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported by Azure SQL Database, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRm modules are substantially identical. The following script requires the [Azure PowerShell module](/powershell/azure/install-az-ps). - -The following PowerShell script shows how to `Get` and `Set` the **Minimal TLS Version** property at the instance level: - -```powershell -#Get the Minimal TLS Version property -(Get-AzSqlInstance -Name sql-instance-name -ResourceGroupName resource-group).MinimalTlsVersion - -# Update Minimal TLS Version Property -Set-AzSqlInstance -Name sql-instance-name -ResourceGroupName resource-group -MinimalTlsVersion "1.2" -``` - -## Set Minimal TLS Version via Azure CLI - -> [!IMPORTANT] -> All scripts in this section requires [Azure CLI](/cli/azure/install-azure-cli). 
- -### Azure CLI in a bash shell - -The following CLI script shows how to change the **Minimal TLS Version** setting in a bash shell: - -```azurecli-interactive -# Get current setting for Minimal TLS Version -az sql mi show -n sql-instance-name -g resource-group --query "minimalTlsVersion" - -# Update setting for Minimal TLS Version -az sql mi update -n sql-instance-name -g resource-group --set minimalTlsVersion="1.2" -``` diff --git a/articles/azure-sql/managed-instance/point-in-time-restore.md b/articles/azure-sql/managed-instance/point-in-time-restore.md deleted file mode 100644 index b071b7f5fa6d4..0000000000000 --- a/articles/azure-sql/managed-instance/point-in-time-restore.md +++ /dev/null @@ -1,248 +0,0 @@ ---- -title: Point-in-time restore (PITR) -titleSuffix: Azure SQL Managed Instance -description: Restore a database on Azure SQL Managed Instance to a previous point in time. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: backup-restore -ms.custom: devx-track-azurepowershell -ms.devlang: -ms.topic: how-to -author: MilanMSFT -ms.author: mlazic -ms.reviewer: mathoma, nvraparl -ms.date: 08/25/2019 ---- -# Restore a database in Azure SQL Managed Instance to a previous point in time -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Use point-in-time restore (PITR) to create a database as a copy of another database from some time in the past. This article describes how to do a point-in-time restore of a database in Azure SQL Managed Instance. - -Point-in-time restore is useful in recovery scenarios, such as incidents caused by errors, incorrectly loaded data, or deletion of crucial data. You can also use it simply for testing or auditing. Backup files are kept for 7 to 35 days, depending on your database settings. - -Point-in-time restore can restore a database: - -- from an existing database. -- from a deleted database. -- to the same SQL Managed Instance, or to another SQL Managed Instance. 
- -## Limitations - -Point-in-time restore to SQL Managed Instance has the following limitations: - -- When you're restoring from one instance of SQL Managed Instance to another, both instances must be in the same subscription and region. Cross-region and cross-subscription restore aren't currently supported. -- Point-in-time restore of a whole SQL Managed Instance is not possible. This article explains only what's possible: point-in-time restore of a database that's hosted on SQL Managed Instance. - -> [!WARNING] -> Be aware of the storage size of your SQL Managed Instance. Depending on size of the data to be restored, you might run out of instance storage. If there isn't enough space for the restored data, use a different approach. - -The following table shows point-in-time restore scenarios for SQL Managed Instance: - -| |Restore existing DB to the same instance of SQL Managed Instance| Restore existing DB to another SQL Managed Instance|Restore dropped DB to same SQL Managed Instance|Restore dropped DB to another SQL Managed Instance| -|:----------|:----------|:----------|:----------|:----------| -|**Azure portal**| Yes|Yes|Yes|Yes| -|**Azure CLI**|Yes |Yes |No|No| -|**PowerShell**| Yes|Yes |Yes|Yes| - -## Restore an existing database - -Restore an existing database to the same SQL Managed Instance using the Azure portal, PowerShell, or the Azure CLI. To restore a database to another SQL Managed Instance, use PowerShell or the Azure CLI so you can specify the properties for the target SQL Managed Instance and resource group. If you don't specify these parameters, the database will be restored to the existing SQL Managed Instance by default. The Azure portal doesn't currently support restoring to another SQL Managed Instance. - -# [Portal](#tab/azure-portal) - -1. Sign in to the [Azure portal](https://portal.azure.com). -2. Go to your SQL Managed Instance and select the database that you want to restore. -3. 
Select **Restore** on the database page: - - ![Restore a database by using the Azure portal](./media/point-in-time-restore/restore-database-to-mi.png) - -4. On the **Restore** page, select the point for the date and time that you want to restore the database to. -5. Select **Confirm** to restore your database. This action starts the restore process, which creates a new database and populates it with data from the original database at the specified point in time. For more information about the recovery process, see [Recovery time](../database/recovery-using-backups.md#recovery-time). - -# [PowerShell](#tab/azure-powershell) - -If you don't already have Azure PowerShell installed, see [Install the Azure PowerShell module](/powershell/azure/install-az-ps). - -To restore the database by using PowerShell, specify your values for the parameters in the following command. Then, run the command: - -```powershell-interactive -$subscriptionId = "" -$resourceGroupName = "" -$managedInstanceName = "" -$databaseName = "" -$pointInTime = "2018-06-27T08:51:39.3882806Z" -$targetDatabase = "" - -Get-AzSubscription -SubscriptionId $subscriptionId -Select-AzSubscription -SubscriptionId $subscriptionId - -Restore-AzSqlInstanceDatabase -FromPointInTimeBackup ` - -ResourceGroupName $resourceGroupName ` - -InstanceName $managedInstanceName ` - -Name $databaseName ` - -PointInTime $pointInTime ` - -TargetInstanceDatabaseName $targetDatabase ` -``` - -To restore the database to another SQL Managed Instance, also specify the names of the target resource group and target SQL Managed Instance: - -```powershell-interactive -$targetResourceGroupName = "" -$targetInstanceName = "" - -Restore-AzSqlInstanceDatabase -FromPointInTimeBackup ` - -ResourceGroupName $resourceGroupName ` - -InstanceName $managedInstanceName ` - -Name $databaseName ` - -PointInTime $pointInTime ` - -TargetInstanceDatabaseName $targetDatabase ` - -TargetResourceGroupName $targetResourceGroupName ` - -TargetInstanceName 
$targetInstanceName -``` - -For details, see [Restore-AzSqlInstanceDatabase](/powershell/module/az.sql/restore-azsqlinstancedatabase). - -# [Azure CLI](#tab/azure-cli) - -If you don't already have the Azure CLI installed, see [Install the Azure CLI](/cli/azure/install-azure-cli). - -To restore the database by using the Azure CLI, specify your values for the parameters in the following command. Then, run the command: - -```azurecli-interactive -az sql midb restore -g mygroupname --mi myinstancename | --n mymanageddbname --dest-name targetmidbname --time "2018-05-20T05:34:22" -``` - -To restore the database to another SQL Managed Instance, also specify the names of the target resource group and SQL Managed Instance: - -```azurecli-interactive -az sql midb restore -g mygroupname --mi myinstancename -n mymanageddbname | - --dest-name targetmidbname --time "2018-05-20T05:34:22" | - --dest-resource-group mytargetinstancegroupname | - --dest-mi mytargetinstancename -``` - -For a detailed explanation of the available parameters, see the [CLI documentation for restoring a database in a SQL Managed Instance](/cli/azure/sql/midb#az-sql-midb-restore). - ---- - -## Restore a deleted database - -Restoring a deleted database can be done by using PowerShell or Azure portal. To restore a deleted database to the same instance, use either the Azure portal or PowerShell. To restore a deleted database to another instance, use PowerShell. - -### Portal - - -To recover a managed database using the Azure portal, open the SQL Managed Instance overview page, and select **Deleted databases**. Choose a deleted database that you want to restore, and type the name for the new database that will be created with data restored from the backup. 
- - ![Screenshot of restore deleted Azure SQL instance database](./media/point-in-time-restore/restore-deleted-sql-managed-instance-annotated.png) - -### PowerShell - -To restore a database to the same instance, update the parameter values and then run the following PowerShell command: - -```powershell-interactive -$subscriptionId = "" -Get-AzSubscription -SubscriptionId $subscriptionId -Select-AzSubscription -SubscriptionId $subscriptionId - -$resourceGroupName = "" -$managedInstanceName = "" -$deletedDatabaseName = "" -$targetDatabaseName = "" - -$deletedDatabase = Get-AzSqlDeletedInstanceDatabaseBackup -ResourceGroupName $resourceGroupName ` --InstanceName $managedInstanceName -DatabaseName $deletedDatabaseName - -Restore-AzSqlinstanceDatabase -FromPointInTimeBackup -Name $deletedDatabase.Name ` - -InstanceName $deletedDatabase.ManagedInstanceName ` - -ResourceGroupName $deletedDatabase.ResourceGroupName ` - -DeletionDate $deletedDatabase.DeletionDate ` - -PointInTime UTCDateTime ` - -TargetInstanceDatabaseName $targetDatabaseName -``` - -To restore the database to another SQL Managed Instance, also specify the names of the target resource group and target SQL Managed Instance: - -```powershell-interactive -$targetResourceGroupName = "" -$targetInstanceName = "" - -Restore-AzSqlinstanceDatabase -FromPointInTimeBackup -Name $deletedDatabase.Name ` - -InstanceName $deletedDatabase.ManagedInstanceName ` - -ResourceGroupName $deletedDatabase.ResourceGroupName ` - -DeletionDate $deletedDatabase.DeletionDate ` - -PointInTime UTCDateTime ` - -TargetInstanceDatabaseName $targetDatabaseName ` - -TargetResourceGroupName $targetResourceGroupName ` - -TargetInstanceName $targetInstanceName -``` - -## Overwrite an existing database - -To overwrite an existing database, you must: - -1. Drop the existing database that you want to overwrite. -2. Rename the point-in-time-restored database to the name of the database that you dropped. 
- -### Drop the original database - -You can drop the database by using the Azure portal, PowerShell, or the Azure CLI. - -You can also drop the database by connecting to the SQL Managed Instance directly, starting SQL Server Management Studio (SSMS), and then running the following Transact-SQL (T-SQL) command: - -```sql -DROP DATABASE WorldWideImporters; -``` - -Use one of the following methods to connect to your database in the SQL Managed Instance: - -- [SSMS/Azure Data Studio via an Azure virtual machine](./connect-vm-instance-configure.md) -- [Point-to-site](./point-to-site-p2s-configure.md) -- [Public endpoint](./public-endpoint-configure.md) - -# [Portal](#tab/azure-portal) - -In the Azure portal, select the database from the SQL Managed Instance, and then select **Delete**. - - ![Delete a database by using the Azure portal](./media/point-in-time-restore/delete-database-from-mi.png) - -# [PowerShell](#tab/azure-powershell) - -Use the following PowerShell command to drop an existing database from a SQL Managed Instance: - -```powershell -$resourceGroupName = "" -$managedInstanceName = "" -$databaseName = "" - -Remove-AzSqlInstanceDatabase -Name $databaseName -InstanceName $managedInstanceName -ResourceGroupName $resourceGroupName -``` - -# [Azure CLI](#tab/azure-cli) - -Use the following Azure CLI command to drop an existing database from a SQL Managed Instance: - -```azurecli-interactive -az sql midb delete -g mygroupname --mi myinstancename -n mymanageddbname -``` - ---- - -### Alter the new database name to match the original database name - -Connect directly to the SQL Managed Instance and start SQL Server Management Studio. Then, run the following Transact-SQL (T-SQL) query. The query will change the name of the restored database to that of the dropped database that you intend to overwrite. 
- -```sql -ALTER DATABASE WorldWideImportersPITR MODIFY NAME = WorldWideImporters; -``` - -Use one of the following methods to connect to your database in SQL Managed Instance: - -- [Azure virtual machine](./connect-vm-instance-configure.md) -- [Point-to-site](./point-to-site-p2s-configure.md) -- [Public endpoint](./public-endpoint-configure.md) - -## Next steps - -Learn about [automated backups](../database/automated-backups-overview.md). diff --git a/articles/azure-sql/managed-instance/point-to-site-p2s-configure.md b/articles/azure-sql/managed-instance/point-to-site-p2s-configure.md deleted file mode 100644 index 67b1856737b59..0000000000000 --- a/articles/azure-sql/managed-instance/point-to-site-p2s-configure.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Configure point-to-site connectivity using SSMS -titleSuffix: Azure SQL Managed Instance -description: Connect to Azure SQL Managed Instance using SQL Server Management Studio (SSMS) using a point-to-site connection from an on-premises client computer. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.custom: mode-other -ms.devlang: -ms.topic: quickstart -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: mathoma, bonova, jovanpop -ms.date: 03/13/2019 ---- -# Quickstart: Configure a point-to-site connection to Azure SQL Managed Instance from on-premises -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This quickstart demonstrates how to connect to Azure SQL Managed Instance using [SQL Server Management Studio](/sql/ssms/sql-server-management-studio-ssms) (SSMS) from an on-premises client computer over a point-to-site connection. For information about point-to-site connections, see [About Point-to-Site VPN](../../vpn-gateway/point-to-site-about.md). - -## Prerequisites - -This quickstart: - -- Uses the resources created in [Create a managed instance](instance-create-quickstart.md) as its starting point. 
-- Requires PowerShell 5.1 and Azure PowerShell 1.4.0 or later on your on-premises client computer. If necessary, see the instructions for [installing the Azure PowerShell module](/powershell/azure/install-az-ps#install-the-azure-powershell-module). -- Requires the newest version of [SQL Server Management Studio](/sql/ssms/sql-server-management-studio-ssms) on your on-premises client computer. - -## Attach a VPN gateway to a virtual network - -1. Open PowerShell on your on-premises client computer. - -2. Copy this PowerShell script. This script attaches a VPN gateway to the SQL Managed Instance virtual network that you created in the [Create a managed instance](instance-create-quickstart.md) quickstart. This script uses the Azure PowerShell Az Module and does the following for either Windows or Linux-based hosts: - - - Creates and installs certificates on a client machine - - Calculates the future VPN gateway subnet IP range - - Creates the gateway subnet - - Deploys the Azure Resource Manager template that attaches the VPN gateway to the VPN subnet - - ```powershell - $scriptUrlBase = 'https://raw.githubusercontent.com/Microsoft/sql-server-samples/master/samples/manage/azure-sql-db-managed-instance/attach-vpn-gateway' - - $parameters = @{ - subscriptionId = '' - resourceGroupName = '' - virtualNetworkName = '' - certificateNamePrefix = '' - } - - Invoke-Command -ScriptBlock ([Scriptblock]::Create((iwr ($scriptUrlBase+'/attachVPNGateway.ps1?t='+ [DateTime]::Now.Ticks)).Content)) -ArgumentList $parameters, $scriptUrlBase - ``` - -3. Paste the script in your PowerShell window and provide the required parameters. The values for ``, ``, and `` should match the ones that you used for the [Create a managed instance](instance-create-quickstart.md) quickstart. The value for `` can be a string of your choice. - -4. Execute the PowerShell script. - -> [!IMPORTANT] -> Do not continue until the PowerShell script completes. - -## Create a VPN connection - -1. 
Sign in to the [Azure portal](https://portal.azure.com/). -2. Open the resource group in which you created the virtual network gateway, and then open the virtual network gateway resource. -3. Select **Point-to-site configuration** and then select **Download VPN client**. - - ![Download VPN client](./media/point-to-site-p2s-configure/download-vpn-client.png) -4. On your on-premises client computer, extract the files from the zip file and then open the folder with the extracted files. -5. Open the **WindowsAmd64** folder and open the **VpnClientSetupAmd64.exe** file. -6. If you receive a **Windows protected your PC** message, click **More info** and then click **Run anyway**. - - ![Install VPN client](./media/point-to-site-p2s-configure/vpn-client-defender.png) -7. In the User Account Control dialog box, click **Yes** to continue. -8. In the dialog box referencing your virtual network, select **Yes** to install the VPN client for your virtual network. - -## Connect to the VPN connection - -1. Go to **VPN** in **Network & Internet** on your on-premises client computer and select your SQL Managed Instance virtual network to establish a connection to this VNet. In the following image, the VNet is named **MyNewVNet**. - - ![VPN connection](./media/point-to-site-p2s-configure/vpn-connection.png) -2. Select **Connect**. -3. In the dialog box, select **Connect**. - - ![Screenshot that highlights the Connect button.](./media/point-to-site-p2s-configure/vpn-connection2.png) -4. When you're prompted that Connection Manager needs elevated privileges to update your route table, choose **Continue**. -5. Select **Yes** in the User Account Control dialog box to continue. - - You've established a VPN connection to your SQL Managed Instance VNet. - - ![Screenshot that highlights the Connected message when you've established your connection.](./media/point-to-site-p2s-configure/vpn-connection-succeeded.png) - -## Connect with SSMS - -1. 
On the on-premises client computer, open SQL Server Management Studio. -2. In the **Connect to Server** dialog box, enter the fully qualified **host name** for your managed instance in the **Server name** box. -3. Select **SQL Server Authentication**, provide your username and password, and then select **Connect**. - - ![SSMS connect](./media/point-to-site-p2s-configure/ssms-connect.png) - -After you connect, you can view your system and user databases in the Databases node. You can also view various objects in the Security, Server Objects, Replication, Management, SQL Server Agent, and XEvent Profiler nodes. - -## Next steps - -- For a quickstart showing how to connect from an Azure virtual machine, see [Configure a point-to-site connection](point-to-site-p2s-configure.md). -- For an overview of the connection options for applications, see [Connect your applications to SQL Managed Instance](connect-application-instance.md). -- To restore an existing SQL Server database from on-premises to a managed instance, you can use [Azure Database Migration Service for migration](../../dms/tutorial-sql-server-to-managed-instance.md) or the [T-SQL RESTORE command](restore-sample-database-quickstart.md) to restore from a database backup file. 
diff --git a/articles/azure-sql/managed-instance/public-endpoint-configure.md b/articles/azure-sql/managed-instance/public-endpoint-configure.md deleted file mode 100644 index c6667510cfb52..0000000000000 --- a/articles/azure-sql/managed-instance/public-endpoint-configure.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: Configure public endpoint - Azure SQL Managed Instance -description: "Learn how to configure a public endpoint for Azure SQL Managed Instance" -services: sql-database -ms.service: sql-managed-instance -ms.subservice: security -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.topic: how-to -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: vanto, sstein -ms.date: 02/08/2021 ---- -# Configure public endpoint in Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Public endpoint for a [managed instance](./sql-managed-instance-paas-overview.md) enables data access to your managed instance from outside the [virtual network](../../virtual-network/virtual-networks-overview.md). You are able to access your managed instance from multi-tenant Azure services like Power BI, Azure App Service, or an on-premises network. By using the public endpoint on a managed instance, you do not need to use a VPN, which can help avoid VPN throughput issues. - -In this article, you'll learn how to: - -> [!div class="checklist"] -> -> - Enable public endpoint for your managed instance in the Azure portal -> - Enable public endpoint for your managed instance using PowerShell -> - Configure your managed instance network security group to allow traffic to the managed instance public endpoint -> - Obtain the managed instance public endpoint connection string - -## Permissions - -Due to the sensitivity of data that is in a managed instance, the configuration to enable managed instance public endpoint requires a two-step process. 
This security measure adheres to separation of duties (SoD): - -- Enabling public endpoint on a managed instance needs to be done by the managed instance admin. The managed instance admin can be found on **Overview** page of your managed instance resource. -- Allowing traffic using a network security group that needs to be done by a network admin. For more information, see [network security group permissions](../../virtual-network/manage-network-security-group.md#permissions). - -## Enabling public endpoint for a managed instance in the Azure portal - -1. Launch the Azure portal at -1. Open the resource group with the managed instance, and select the **SQL managed instance** that you want to configure public endpoint on. -1. On the **Security** settings, select the **Virtual network** tab. -1. In the Virtual network configuration page, select **Enable** and then the **Save** icon to update the configuration. - -![Screenshot shows a Virtual network page of SQL managed instance with the Public endpoint enabled.](./media/public-endpoint-configure/mi-vnet-config.png) - -## Enabling public endpoint for a managed instance using PowerShell - -### Enable public endpoint - -Run the following PowerShell commands. Replace **subscription-id** with your subscription ID. Also replace **rg-name** with the resource group for your managed instance, and replace **mi-name** with the name of your managed instance. 
- -```powershell -Install-Module -Name Az - -Import-Module Az.Accounts -Import-Module Az.Sql - -Connect-AzAccount - -# Use your subscription ID in place of subscription-id below - -Select-AzSubscription -SubscriptionId {subscription-id} - -# Replace rg-name with the resource group for your managed instance, and replace mi-name with the name of your managed instance - -$mi = Get-AzSqlInstance -ResourceGroupName {rg-name} -Name {mi-name} - -$mi = $mi | Set-AzSqlInstance -PublicDataEndpointEnabled $true -force -``` - -### Disable public endpoint - -To disable the public endpoint using PowerShell, you would execute the following command (and also do not forget to close the NSG for the inbound port 3342 if you have it configured): - -```powershell -Set-AzSqlInstance -PublicDataEndpointEnabled $false -force -``` - -## Allow public endpoint traffic on the network security group - -1. If you have the configuration page of the managed instance still open, navigate to the **Overview** tab. Otherwise, go back to your **SQL managed instance** resource. Select the **Virtual network/subnet** link, which will take you to the Virtual network configuration page. - - ![Screenshot shows the Virtual network configuration page where you can find your Virtual network/subnet value.](./media/public-endpoint-configure/mi-overview.png) - -1. Select the **Subnets** tab on the left configuration pane of your Virtual network, and make note of the **SECURITY GROUP** for your managed instance. - - ![Screenshot shows the Subnet tab, where you can get the SECURITY GROUP for your managed instance.](./media/public-endpoint-configure/mi-vnet-subnet.png) - -1. Go back to your resource group that contains your managed instance. You should see the **Network security group** name noted above. Select the name to go into the network security group configuration page. - -1. 
Select the **Inbound security rules** tab, and **Add** a rule that has higher priority than the **deny_all_inbound** rule with the following settings:

    - - |Setting |Suggested value |Description | - |---------|---------|---------| - |**Source** |Any IP address or Service tag |
    • For Azure services like Power BI, select the Azure Cloud Service Tag
    • For your computer or Azure virtual machine, use NAT IP address
    | - |**Source port ranges** |* |Leave this to * (any) as source ports are usually dynamically allocated and as such, unpredictable | - |**Destination** |Any |Leaving destination as Any to allow traffic into the managed instance subnet | - |**Destination port ranges** |3342 |Scope destination port to 3342, which is the managed instance public TDS endpoint | - |**Protocol** |TCP |SQL Managed Instance uses TCP protocol for TDS | - |**Action** |Allow |Allow inbound traffic to managed instance through the public endpoint | - |**Priority** |1300 |Make sure this rule is higher priority than the **deny_all_inbound** rule | - - ![Screenshot shows the Inbound security rules with your new public_endpoint_inbound rule above the deny_all_inbound rule.](./media/public-endpoint-configure/mi-nsg-rules.png) - - > [!NOTE] - > Port 3342 is used for public endpoint connections to managed instance, and cannot be changed at this point. - -## Obtaining the managed instance public endpoint connection string - -1. Navigate to the managed instance configuration page that has been enabled for public endpoint. Select the **Connection strings** tab under the **Settings** configuration. -1. Note that the public endpoint host name comes in the format .**public**..database.windows.net and that the port used for the connection is 3342. Here's an example of a server value of the connection string denoting the public endpoint port that can be used in SQL Server Management Studio or Azure Data Studio connections: `.public..database.windows.net,3342` - - ![Screenshot shows the connection strings for your public and private endpoints.](./media/public-endpoint-configure/mi-public-endpoint-conn-string.png) - -## Next steps - -Learn about using [Azure SQL Managed Instance securely with public endpoint](public-endpoint-overview.md). 
diff --git a/articles/azure-sql/managed-instance/public-endpoint-overview.md b/articles/azure-sql/managed-instance/public-endpoint-overview.md deleted file mode 100644 index 25c520e6b28d9..0000000000000 --- a/articles/azure-sql/managed-instance/public-endpoint-overview.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Secure Azure SQL Managed Instance public endpoints -description: "Securely use public endpoints in Azure SQL Managed Instance" -services: sql-database -ms.service: sql-managed-instance -ms.subservice: security -ms.custom: sqldbrb=1 -ms.topic: conceptual -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: vanto, sstein -ms.date: 05/08/2019 ---- -# Use Azure SQL Managed Instance securely with public endpoints -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Azure SQL Managed Instance can provide user connectivity over [public endpoints](public-endpoint-configure.md). This article explains how to make this configuration more secure. - -## Scenarios - -Azure SQL Managed Instance provides a private endpoint to allow connectivity from inside its virtual network. The default option is to provide maximum isolation. However, there are scenarios where you need to provide a public endpoint connection: - -- The managed instance must integrate with multi-tenant-only platform-as-a-service (PaaS) offerings. -- You need higher throughput of data exchange than is possible when you're using a VPN. -- Company policies prohibit PaaS inside corporate networks. - -## Deploy a managed instance for public endpoint access - -Although not mandatory, the common deployment model for a managed instance with public endpoint access is to create the instance in a dedicated isolated virtual network. In this configuration, the virtual network is used only for virtual cluster isolation. It doesn't matter if the managed instance's IP address space overlaps with a corporate network's IP address space. 
- -## Secure data in motion - -SQL Managed Instance data traffic is always encrypted if the client driver supports encryption. Data sent between the managed instance and other Azure virtual machines or Azure services never leaves Azure's backbone. If there's a connection between the managed instance and an on-premises network, we recommend you use Azure ExpressRoute. ExpressRoute helps you avoid moving data over the public internet. For managed instance private connectivity, only private peering can be used. - -## Lock down inbound and outbound connectivity - -The following diagram shows the recommended security configurations: - -![Security configurations for locking down inbound and outbound connectivity](./media/public-endpoint-overview/managed-instance-vnet.png) - -A managed instance has a public endpoint address that is dedicated to a customer. This endpoint shares the IP with the [management endpoint](management-endpoint-find-ip-address.md) but uses a different port. In the client-side outbound firewall and in the network security group rules, set this public endpoint IP address to limit outbound connectivity. - -To ensure traffic to the managed instance is coming from trusted sources, we recommend connecting from sources with well-known IP addresses. Use a network security group to limit access to the managed instance public endpoint on port 3342. - -When clients need to initiate a connection from an on-premises network, make sure the originating address is translated to a well-known set of IP addresses. If you can't do so (for example, a mobile workforce being a typical scenario), we recommend you use [point-to-site VPN connections and a private endpoint](point-to-site-p2s-configure.md). - -If connections are started from Azure, we recommend that traffic come from a well-known assigned [virtual IP address](/previous-versions/azure/virtual-network/virtual-networks-reserved-public-ip) (for example, a virtual machine). 
To make managing virtual IP (VIP) addresses easier, you might want to use [public IP address prefixes](../../virtual-network/ip-services/public-ip-address-prefix.md). - -## Next steps - -- Learn how to configure public endpoint for manage instances: [Configure public endpoint](public-endpoint-configure.md) \ No newline at end of file diff --git a/articles/azure-sql/managed-instance/quickstart-content-reference-guide.md b/articles/azure-sql/managed-instance/quickstart-content-reference-guide.md deleted file mode 100644 index 42b3c95274006..0000000000000 --- a/articles/azure-sql/managed-instance/quickstart-content-reference-guide.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: Getting started content reference -titleSuffix: Azure SQL Managed Instance -description: "A reference for content that helps you get started with Azure SQL Managed Instance. " -services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.custom: sqldbrb=1, mode-other -ms.devlang: -ms.topic: quickstart -author: davidtrigano -ms.author: datrigan -ms.reviewer: vanto, mathoma -ms.date: 07/11/2019 ---- -# Getting started with Azure SQL Managed Instance - -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -[Azure SQL Managed Instance](sql-managed-instance-paas-overview.md) creates a database with near 100% compatibility with the latest SQL Server (Enterprise Edition) database engine, providing a native [virtual network (VNet)](../../virtual-network/virtual-networks-overview.md) implementation that addresses common security concerns, and a [business model](https://azure.microsoft.com/pricing/details/sql-database/) favorable for existing SQL Server customers. - -In this article, you will find references to content that teach you how to quickly configure and create a SQL Managed Instance and migrate your databases. 
- -## Quickstart overview - -The following quickstarts enable you to quickly create a SQL Managed Instance, configure a virtual machine or point to site VPN connection for client application, and restore a database to your new SQL Managed Instance using a `.bak` file. - -### Configure environment - -As a first step, you would need to create your first SQL Managed Instance with the network environment where it will be placed, and enable connection from the computer or virtual machine where you are executing queries to SQL Managed Instance. You can use the following guides: - -- [Create a SQL Managed Instance using the Azure portal](instance-create-quickstart.md). In the Azure portal, you configure the necessary parameters (username/password, number of cores, and max storage amount), and automatically create the Azure network environment without the need to know about networking details and infrastructure requirements. You just make sure that you have a [subscription type](resource-limits.md#supported-subscription-types) that is currently allowed to create a SQL Managed Instance. If you have your own network that you want to use or you want to customize the network, see [configure an existing virtual network for Azure SQL Managed Instance](vnet-existing-add-subnet.md) or [create a virtual network for Azure SQL Managed Instance](virtual-network-subnet-create-arm-template.md). -- A SQL Managed Instance is created in its own VNet with no public endpoint. For client application access, you can either **create a VM in the same VNet (different subnet)** or **create a point-to-site VPN connection to the VNet from your client computer** using one of these quickstarts: - - Enable [public endpoint](public-endpoint-configure.md) on your SQL Managed Instance in order to access your data directly from your environment. 
- - Create [Azure Virtual Machine in the SQL Managed Instance VNet](connect-vm-instance-configure.md) for client application connectivity, including SQL Server Management Studio. - - Set up [point-to-site VPN connection to your SQL Managed Instance](point-to-site-p2s-configure.md) from your client computer on which you have SQL Server Management Studio and other client connectivity applications. This is other of two options for connectivity to your SQL Managed Instance and to its VNet. - - > [!NOTE] - > - You can also use express route or site-to-site connection from your local network, but these approaches are out of the scope of these quickstarts. - > - If you change retention period from 0 (unlimited retention) to any other value, please note that retention will only apply to logs written after retention value was changed (logs written during the period when retention was set to unlimited are preserved, even after retention is enabled). - -As an alternative to manual creation of SQL Managed Instance, you can use [PowerShell](scripts/create-configure-managed-instance-powershell.md), [PowerShell with Resource Manager template](./create-template-quickstart.md), or [Azure CLI](/cli/azure/sql/mi#az-sql-mi-create) to script and automate this process. - -### Migrate your databases - -After you create a SQL Managed Instance and configure access, you can start migrating your SQL Server databases. Migration can fail if you have some unsupported features in the source database that you want to migrate. To avoid failures and check compatibility, you can use [Data Migration Assistant (DMA)](https://www.microsoft.com/download/details.aspx?id=53595) to analyze your databases on SQL Server and find any issue that could block migration to a SQL Managed Instance, such as existence of [FileStream](/sql/relational-databases/blob/filestream-sql-server) or multiple log files. If you resolve these issues, your databases are ready to migrate to SQL Managed Instance. 
[Database Experimentation Assistant](/sql/dea/database-experimentation-assistant-overview) is another useful tool that can record your workload on SQL Server and replay it on a SQL Managed Instance so you can determine are there going to be any performance issues if you migrate to a SQL Managed Instance. - -Once you are sure that you can migrate your database to a SQL Managed Instance, you can use the native SQL Server restore capabilities to restore a database into a SQL Managed Instance from a `.bak` file. You can use this method to migrate databases from SQL Server database engine installed on-premises or Azure Virtual Machines. For a quickstart, see [Restore from backup to a SQL Managed Instance](restore-sample-database-quickstart.md). In this quickstart, you restore from a `.bak` file stored in Azure Blob storage using the `RESTORE` Transact-SQL command. - -> [!TIP] -> To use the `BACKUP` Transact-SQL command to create a backup of your database in Azure Blob storage, see [SQL Server backup to URL](/sql/relational-databases/backup-restore/sql-server-backup-to-url). - -These quickstarts enable you to quickly create, configure, and restore database backup to a SQL Managed Instance. In some scenarios, you would need to customize or automate deployment of SQL Managed Instance and the required networking environment. These scenarios will be described below. - -## Customize network environment - -Although the VNet/subnet can be automatically configured when the instance is [created using the Azure portal](instance-create-quickstart.md), it might be good to create it before you start creating instances in SQL Managed Instance because you can configure the parameters of VNet and subnet. The easiest way to create and configure the network environment is to use the [Azure Resource deployment](virtual-network-subnet-create-arm-template.md) template that creates and configures your network and subnet where the instance will be placed. 
You just need to press the Azure Resource Manager deploy button and populate the form with parameters. - -As an alternative, you can also use this [PowerShell script](https://www.powershellmagazine.com/2018/07/23/configuring-azure-environment-to-set-up-azure-sql-database-managed-instance-preview/) to automate creation of the network. - -If you already have a VNet and subnet where you would like to deploy your SQL Managed Instance, you need to make sure that your VNet and subnet satisfy the [networking requirements](connectivity-architecture-overview.md#network-requirements). Use this [PowerShell script to verify that your subnet is properly configured](vnet-existing-add-subnet.md). This script validates your network and reports any issues, telling you what should be changed and then offers to make the necessary changes in your VNet/subnet. Run this script if you don't want to configure your VNet/subnet manually. You can also run it after any major reconfiguration of your network infrastructure. If you want to create and configure your own network, read [connectivity architecture](connectivity-architecture-overview.md) and this [ultimate guide for creating and configuring a SQL Managed Instance environment](https://medium.com/azure-sqldb-managed-instance/the-ultimate-guide-for-creating-and-configuring-azure-sql-managed-instance-environment-91ff58c0be01). - -## Migrate to a SQL Managed Instance - -The previously-mentioned quickstarts enable you to quickly set up a SQL Managed Instance and move your databases using the native `RESTORE` capability. This is a good starting point if you want to complete quick proof-of concepts and verify that your solution can work on Managed Instance. 
- -However, in order to migrate your production database or even dev/test databases that you want to use for some performance test, you would need to consider using some additional techniques, such as: - -- Performance testing - You should measure baseline performance metrics on your source SQL Server instance and compare them with the performance metrics on the destination SQL Managed Instance where you have migrated the database. Learn more about the [best practices for performance comparison](https://techcommunity.microsoft.com/t5/Azure-SQL-Database/The-best-practices-for-performance-comparison-between-Azure-SQL/ba-p/683210). -- Online migration - With the native `RESTORE` described in this article, you have to wait for the databases to be restored (and copied to Azure Blob storage if not already stored there). This causes some downtime of your application especially for larger databases. To move your production database, use the [Data Migration service (DMS)](../../dms/tutorial-sql-server-to-managed-instance.md?toc=%2fazure%2fsql-database%2ftoc.json) to migrate your database with the minimal downtime. DMS accomplishes this by incrementally pushing the changes made in your source database to the SQL Managed Instance database being restored. This way, you can quickly switch your application from source to target database with minimal downtime. - -Learn more about the [recommended migration process](../migration-guides/managed-instance/sql-server-to-managed-instance-guide.md). - -## Next steps - -- Find a [high-level list of supported features in SQL Managed Instance here](../database/features-comparison.md) and [details and known issues here](transact-sql-tsql-differences-sql-server.md). -- Learn about [technical characteristics of SQL Managed Instance](resource-limits.md#service-tier-characteristics). -- Find more advanced how-to's in [how to use a SQL Managed Instance](how-to-content-reference-guide.md). 
-- [Identify the right Azure SQL Managed Instance SKU for your on-premises database](/sql/dma/dma-sku-recommend-sql-db/). diff --git a/articles/azure-sql/managed-instance/replication-between-two-instances-configure-tutorial.md b/articles/azure-sql/managed-instance/replication-between-two-instances-configure-tutorial.md deleted file mode 100644 index fa150a236439b..0000000000000 --- a/articles/azure-sql/managed-instance/replication-between-two-instances-configure-tutorial.md +++ /dev/null @@ -1,327 +0,0 @@ ---- -title: Configure replication between managed instances -titleSuffix: Azure SQL Managed Instance -description: This tutorial teaches you to configure transactional replication between an Azure SQL Managed Instance publisher/distributor and a SQL Managed Instance subscriber. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: replication -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: tutorial -author: ferno-ms -ms.author: ferno -ms.reviewer: mathoma -ms.date: 04/28/2020 ---- -# Tutorial: Configure replication between two managed instances - -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Transactional replication allows you to replicate data from one database to another hosted on either SQL Server or [Azure SQL Managed Instance](sql-managed-instance-paas-overview.md). SQL Managed Instance can be a publisher, distributor or subscriber in the replication topology. See [transactional replication configurations](replication-transactional-overview.md#common-configurations) for available configurations. - -Transactional replication is currently in public preview for SQL Managed Instance. - -In this tutorial, you learn how to: - -> [!div class="checklist"] -> -> - Configure a managed instance as a replication publisher and distributor. -> - Configure a managed instance as a replication subscriber. 
- -![Replicate between two managed instances](./media/replication-between-two-instances-configure-tutorial/sqlmi-sqlmi-repl.png) - -This tutorial is intended for an experienced audience and assumes that the user is familiar with deploying and connecting to both managed instances and SQL Server VMs within Azure. - - -> [!NOTE] -> - This article describes the use of [transactional replication](/sql/relational-databases/replication/transactional/transactional-replication) in Azure SQL Managed Instance. It is unrelated to [failover groups](../database/auto-failover-group-overview.md), an Azure SQL Managed Instance feature that allows you to create complete readable replicas of individual instances. There are additional considerations when configuring [transactional replication with failover groups](replication-transactional-overview.md#with-failover-groups). - - - -## Requirements - -Configuring SQL Managed Instance to function as a publisher and/or a distributor requires: - -- That the publisher managed instance is on the same virtual network as the distributor and the subscriber, or [VPN gateways](../../vpn-gateway/vpn-gateway-howto-vnet-vnet-resource-manager-portal.md) have been configured between the virtual networks of all three entities. -- Connectivity uses SQL Authentication between replication participants. -- An Azure storage account share for the replication working directory. -- Port 445 (TCP outbound) is open in the security rules of NSG for the managed instances to access the Azure file share. If you encounter the error `failed to connect to azure storage with os error 53`, you will need to add an outbound rule to the NSG of the appropriate SQL Managed Instance subnet. - -## 1 - Create a resource group - -Use the [Azure portal](https://portal.azure.com) to create a resource group with the name `SQLMI-Repl`. 
- -## 2 - Create managed instances - -Use the [Azure portal](https://portal.azure.com) to create two [SQL Managed Instances](instance-create-quickstart.md) on the same virtual network and subnet. For example, name the two managed instances: - -- `sql-mi-pub` (along with some characters for randomization) -- `sql-mi-sub` (along with some characters for randomization) - -You will also need to [configure an Azure VM to connect](connect-vm-instance-configure.md) to your managed instances. - -## 3 - Create an Azure storage account - -[Create an Azure storage account](../../storage/common/storage-account-create.md#create-a-storage-account) for the working directory, and then create a [file share](../../storage/files/storage-how-to-create-file-share.md) within the storage account. - -Copy the file share path in the format of: -`\\storage-account-name.file.core.windows.net\file-share-name` - -Example: `\\replstorage.file.core.windows.net\replshare` - -Copy the storage access keys in the format of: -`DefaultEndpointsProtocol=https;AccountName=;AccountKey=****;EndpointSuffix=core.windows.net` - -Example: -`DefaultEndpointsProtocol=https;AccountName=replstorage;AccountKey=dYT5hHZVu9aTgIteGfpYE64cfis0mpKTmmc8+EP53GxuRg6TCwe5eTYWrQM4AmQSG5lb3OBskhg==;EndpointSuffix=core.windows.net` - -For more information, see [Manage storage account access keys](../../storage/common/storage-account-keys-manage.md). 
- -## 4 - Create a publisher database - -Connect to your `sql-mi-pub` managed instance using SQL Server Management Studio and run the following Transact-SQL (T-SQL) code to create your publisher database: - -```sql -USE [master] -GO - -CREATE DATABASE [ReplTran_PUB] -GO - -USE [ReplTran_PUB] -GO -CREATE TABLE ReplTest ( - ID INT NOT NULL PRIMARY KEY, - c1 VARCHAR(100) NOT NULL, - dt1 DATETIME NOT NULL DEFAULT getdate() -) -GO - - -USE [ReplTran_PUB] -GO - -INSERT INTO ReplTest (ID, c1) VALUES (6, 'pub') -INSERT INTO ReplTest (ID, c1) VALUES (2, 'pub') -INSERT INTO ReplTest (ID, c1) VALUES (3, 'pub') -INSERT INTO ReplTest (ID, c1) VALUES (4, 'pub') -INSERT INTO ReplTest (ID, c1) VALUES (5, 'pub') -GO -SELECT * FROM ReplTest -GO -``` - -## 5 - Create a subscriber database - -Connect to your `sql-mi-sub` managed instance using SQL Server Management Studio and run the following T-SQL code to create your empty subscriber database: - -```sql -USE [master] -GO - -CREATE DATABASE [ReplTran_SUB] -GO - -USE [ReplTran_SUB] -GO -CREATE TABLE ReplTest ( - ID INT NOT NULL PRIMARY KEY, - c1 VARCHAR(100) NOT NULL, - dt1 DATETIME NOT NULL DEFAULT getdate() -) -GO -``` - -## 6 - Configure distribution - -Connect to your `sql-mi-pub` managed instance using SQL Server Management Studio and run the following T-SQL code to configure your distribution database. - -```sql -USE [master] -GO - -EXEC sp_adddistributor @distributor = @@ServerName; -EXEC sp_adddistributiondb @database = N'distribution'; -GO -``` - -## 7 - Configure publisher to use distributor - -On your publisher SQL Managed Instance `sql-mi-pub`, change the query execution to [SQLCMD](/sql/ssms/scripting/edit-sqlcmd-scripts-with-query-editor) mode and run the following code to register the new distributor with your publisher. 
- -```sql -:setvar username loginUsedToAccessSourceManagedInstance -:setvar password passwordUsedToAccessSourceManagedInstance -:setvar file_storage "\\storage-account-name.file.core.windows.net\file-share-name" --- example: file_storage "\\replstorage.file.core.windows.net\replshare" -:setvar file_storage_key "DefaultEndpointsProtocol=https;AccountName=;AccountKey=****;EndpointSuffix=core.windows.net" --- example: file_storage_key "DefaultEndpointsProtocol=https;AccountName=replstorage;AccountKey=dYT5hHZVu9aTgIteGfpYE64cfis0mpKTmmc8+EP53GxuRg6TCwe5eTYWrQM4AmQSG5lb3OBskhg==;EndpointSuffix=core.windows.net" - -USE [master] -EXEC sp_adddistpublisher - @publisher = @@ServerName, - @distribution_db = N'distribution', - @security_mode = 0, - @login = N'$(username)', - @password = N'$(password)', - @working_directory = N'$(file_storage)', - @storage_connection_string = N'$(file_storage_key)'; -- Remove this parameter for on-premises publishers -``` - - > [!NOTE] - > Be sure to use only backslashes (`\`) for the file_storage parameter. Using a forward slash (`/`) can cause an error when connecting to the file share. - -This script configures a local publisher on the managed instance, adds a linked server, and creates a set of jobs for the SQL Server agent. - -## 8 - Create publication and subscriber - -Using [SQLCMD](/sql/ssms/scripting/edit-sqlcmd-scripts-with-query-editor) mode, run the following T-SQL script to enable replication for your database, and configure replication between your publisher, distributor, and subscriber. 
- -```sql --- Set variables -:setvar username sourceLogin -:setvar password sourcePassword -:setvar source_db ReplTran_PUB -:setvar publication_name PublishData -:setvar object ReplTest -:setvar schema dbo -:setvar target_server "sql-mi-sub.wdec33262scj9dr27.database.windows.net" -:setvar target_username targetLogin -:setvar target_password targetPassword -:setvar target_db ReplTran_SUB - --- Enable replication for your source database -USE [$(source_db)] -EXEC sp_replicationdboption - @dbname = N'$(source_db)', - @optname = N'publish', - @value = N'true'; - --- Create your publication -EXEC sp_addpublication - @publication = N'$(publication_name)', - @status = N'active'; - - --- Configure your log reader agent -EXEC sp_changelogreader_agent - @publisher_security_mode = 0, - @publisher_login = N'$(username)', - @publisher_password = N'$(password)', - @job_login = N'$(username)', - @job_password = N'$(password)'; - --- Add the publication snapshot -EXEC sp_addpublication_snapshot - @publication = N'$(publication_name)', - @frequency_type = 1, - @publisher_security_mode = 0, - @publisher_login = N'$(username)', - @publisher_password = N'$(password)', - @job_login = N'$(username)', - @job_password = N'$(password)'; - --- Add the ReplTest table to the publication -EXEC sp_addarticle - @publication = N'$(publication_name)', - @type = N'logbased', - @article = N'$(object)', - @source_object = N'$(object)', - @source_owner = N'$(schema)'; - --- Add the subscriber -EXEC sp_addsubscription - @publication = N'$(publication_name)', - @subscriber = N'$(target_server)', - @destination_db = N'$(target_db)', - @subscription_type = N'Push'; - --- Create the push subscription agent -EXEC sp_addpushsubscription_agent - @publication = N'$(publication_name)', - @subscriber = N'$(target_server)', - @subscriber_db = N'$(target_db)', - @subscriber_security_mode = 0, - @subscriber_login = N'$(target_username)', - @subscriber_password = N'$(target_password)', - @job_login = N'$(username)', 
- @job_password = N'$(password)'; - --- Initialize the snapshot -EXEC sp_startpublication_snapshot - @publication = N'$(publication_name)'; -``` - -## 9 - Modify agent parameters - -Azure SQL Managed Instance is currently experiencing some backend issues with connectivity with the replication agents. While this issue is being addressed, the workaround is to increase the login timeout value for the replication agents. - -Run the following T-SQL command on the publisher to increase the login timeout: - -```sql --- Increase login timeout to 150s -update msdb..sysjobsteps set command = command + N' -LoginTimeout 150' -where subsystem in ('Distribution','LogReader','Snapshot') and command not like '%-LoginTimeout %' -``` - -Should you need to set the login timeout back to the default value of 30 seconds, run the following T-SQL command: - -```sql --- Set the login timeout back to the default of 30s -update msdb..sysjobsteps set command = replace(command, N' -LoginTimeout 150', N' -LoginTimeout 30') -where subsystem in ('Distribution','LogReader','Snapshot') and command like '% -LoginTimeout 150%' -``` - -Restart all three agents to apply these changes. - -## 10 - Test replication - -Once replication has been configured, you can test it by inserting new items on the publisher and watching the changes propagate to the subscriber. - -Run the following T-SQL snippet to view the rows on the subscriber: - -```sql -select * from dbo.ReplTest -``` - -Run the following T-SQL snippet to insert additional rows on the publisher, and then check the rows again on the subscriber. 
- -```sql -INSERT INTO ReplTest (ID, c1) VALUES (15, 'pub') -``` - -## Clean up resources - -To drop the publication, run the following T-SQL command: - -```sql --- Drops the publication -USE [ReplTran_PUB] -EXEC sp_droppublication @publication = N'PublishData' -GO -``` - -To remove the replication option from the database, run the following T-SQL command: - -```sql --- Disables publishing of the database -USE [ReplTran_PUB] -EXEC sp_removedbreplication -GO -``` - -To disable publishing and distribution, run the following T-SQL command: - -```sql --- Drops the distributor -USE [master] -EXEC sp_dropdistributor @no_checks = 1 -GO -``` - -You can clean up your Azure resources by [deleting the SQL Managed Instance resources from the resource group](../../azure-resource-manager/management/manage-resources-portal.md#delete-resources) and then deleting the resource group `SQLMI-Repl`. - -## Next steps - -You can also learn more information about [transactional replication with Azure SQL Managed Instance](replication-transactional-overview.md) or learn to -configure replication between a [SQL Managed Instance publisher/distributor and a SQL on Azure VM subscriber](replication-two-instances-and-sql-server-configure-tutorial.md). \ No newline at end of file diff --git a/articles/azure-sql/managed-instance/replication-transactional-overview.md b/articles/azure-sql/managed-instance/replication-transactional-overview.md deleted file mode 100644 index 5ec337e3fc982..0000000000000 --- a/articles/azure-sql/managed-instance/replication-transactional-overview.md +++ /dev/null @@ -1,203 +0,0 @@ ---- -title: Transactional replication -titleSuffix: Azure SQL Managed Instance -description: Learn about using SQL Server transactional replication with Azure SQL Managed Instance (Preview). 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: data-movement -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: MashaMSFT -ms.author: mathoma -ms.reviewer: mathoma -ms.date: 05/10/2020 ---- -# Transactional replication with Azure SQL Managed Instance (Preview) -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Transactional replication is a feature of Azure SQL Managed Instance and SQL Server that enables you to replicate data from a table in Azure SQL Managed Instance or a SQL Server instance to tables placed on remote databases. This feature allows you to synchronize multiple tables in different databases. - -Transactional replication is currently in public preview for SQL Managed Instance. - -## Overview - -You can use transactional replication to push changes made in an Azure SQL Managed Instance to: - -- A SQL Server database - on-premises or on Azure VM -- A database in Azure SQL Database -- An instance database in Azure SQL Managed Instance - - > [!NOTE] - > To use all the features of Azure SQL Managed Instance, you must be using the latest versions of [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms) and [SQL Server Data Tools (SSDT)](/sql/ssdt/download-sql-server-data-tools-ssdt). - -### Components - -The key components in transactional replication are the **Publisher**, **Distributor**, and **Subscriber**, as shown in the following picture: - -![replication with SQL Database](./media/replication-transactional-overview/replication-to-sql-database.png) - -| Role | Azure SQL Database | Azure SQL Managed Instance | -| :----| :------------- | :--------------- | -| **Publisher** | No | Yes | -| **Distributor** | No | Yes| -| **Pull subscriber** | No | Yes| -| **Push Subscriber**| Yes | Yes| - - -The **Publisher** publishes changes made on some tables (articles) by sending the updates to the Distributor. 
The publisher can be an Azure SQL Managed Instance or a SQL Server instance. - -The **Distributor** collects changes in the articles from a Publisher and distributes them to the Subscribers. The Distributor can be either an Azure SQL Managed Instance or a SQL Server instance (any version as long as it is equal to or higher than the Publisher version). - -The **Subscriber** receives changes made on the Publisher. A SQL Server instance and Azure SQL Managed Instance can both be push and pull subscribers, though a pull subscription is not supported when the distributor is an Azure SQL Managed Instance and the subscriber is not. A database in Azure SQL Database can only be a push subscriber. - -Azure SQL Managed Instance can be a Subscriber to the following versions of SQL Server: - -- SQL Server 2016 and later -- SQL Server 2014 [RTM CU10 (12.0.4427.24)](https://support.microsoft.com/help/3094220/cumulative-update-10-for-sql-server-2014) or [SP1 CU3 (12.0.2556.4)](https://support.microsoft.com/help/3094221/cumulative-update-3-for-sql-server-2014-service-pack-1) -- SQL Server 2012 [SP2 CU8 (11.0.5634.1)](https://support.microsoft.com/help/3082561/cumulative-update-8-for-sql-server-2012-sp2) or [SP3 (11.0.6020.0)](https://www.microsoft.com/download/details.aspx?id=49996) or [SP4 (11.0.7001.0)](https://www.microsoft.com/download/details.aspx?id=56040) - - > [!NOTE] - > - > - For other versions of SQL Server that do not support publishing to objects in Azure, it is possible to utilize the [republishing data](/sql/relational-databases/replication/republish-data) method to move data to newer versions of SQL Server. - > - Attempting to configure replication using an older version can result in error number MSSQL_REPL20084 (The process could not connect to Subscriber.) and MSSQL_REPL40532 (Cannot open server \ requested by the login. The login failed.) 
- -### Types of replication - -There are different [types of replication](/sql/relational-databases/replication/types-of-replication): - -| Replication | Azure SQL Database | Azure SQL Managed Instance | -| :----| :------------- | :--------------- | -| [**Standard Transactional**](/sql/relational-databases/replication/transactional/transactional-replication) | Yes (only as subscriber) | Yes | -| [**Snapshot**](/sql/relational-databases/replication/snapshot-replication) | Yes (only as subscriber) | Yes| -| [**Merge replication**](/sql/relational-databases/replication/merge/merge-replication) | No | No| -| [**Peer-to-peer**](/sql/relational-databases/replication/transactional/peer-to-peer-transactional-replication) | No | No| -| [**Bidirectional**](/sql/relational-databases/replication/transactional/bidirectional-transactional-replication) | No | Yes| -| [**Updatable subscriptions**](/sql/relational-databases/replication/transactional/updatable-subscriptions-for-transactional-replication) | No | No| - - -### Supportability Matrix - - The transactional replication supportability matrix for Azure SQL Managed Instance is the same as the one for SQL Server. - -| **Publisher** | **Distributor** | **Subscriber** | -| :------------ | :-------------- | :------------- | -| SQL Server 2019 | SQL Server 2019 | SQL Server 2019
    SQL Server 2017
    SQL Server 2016
    | -| SQL Server 2017 | SQL Server 2019
    SQL Server 2017 | SQL Server 2019
    SQL Server 2017
    SQL Server 2016
    SQL Server 2014 | -| SQL Server 2016 | SQL Server 2019
    SQL Server 2017
    SQL Server 2016 | SQL Server 2019
    SQL Server 2017
    SQL Server 2016
    SQL Server 2014
    SQL Server 2012 | -| SQL Server 2014 | SQL Server 2019
    SQL Server 2017
    SQL Server 2016
    SQL Server 2014
    | SQL Server 2017
    SQL Server 2016
    SQL Server 2014
    SQL Server 2012
    SQL Server 2008 R2
    SQL Server 2008 | -| SQL Server 2012 | SQL Server 2019
    SQL Server 2017
    SQL Server 2016
    SQL Server 2014
    SQL Server 2012
    | SQL Server 2016
    SQL Server 2014
    SQL Server 2012
    SQL Server 2008 R2
    SQL Server 2008 | -| SQL Server 2008 R2
    SQL Server 2008 | SQL Server 2019
    SQL Server 2017
    SQL Server 2016
    SQL Server 2014
    SQL Server 2012
    SQL Server 2008 R2
    SQL Server 2008 | SQL Server 2014
    SQL Server 2012
    SQL Server 2008 R2
    SQL Server 2008
    | - - -## When to use - -Transactional replication is useful in the following scenarios: - -- Publish changes made in one or more tables in a database and distribute them to one or many databases in a SQL Server instance or Azure SQL Database that subscribed for the changes. -- Keep several distributed databases in synchronized state. -- Migrate databases from one SQL Server instance or Azure SQL Managed Instance to another database by continuously publishing the changes. - -### Compare Data Sync with Transactional Replication - -| Category | Data Sync | Transactional Replication | -|---|---|---| -| Advantages | - Active-active support
    - Bi-directional between on-premises and Azure SQL Database | - Lower latency
    - Transactional consistency
    - Reuse existing topology after migration | -| Disadvantages | - No transactional consistency
    - Higher performance impact | - Can’t publish from Azure SQL Database
    - High maintenance cost | - -## Common configurations - -In general, the publisher and the distributor must be either in the cloud or on-premises. The following configurations are supported: - -### Publisher with local Distributor on SQL Managed Instance - -![Single instance as Publisher and Distributor](./media/replication-transactional-overview/01-single-instance-asdbmi-pubdist.png) - -Publisher and distributor are configured within a single SQL Managed Instance and distributing changes to another SQL Managed Instance, SQL Database, or SQL Server instance. - -### Publisher with remote distributor on SQL Managed Instance - -In this configuration, one managed instance publishes changes to a distributor placed on another SQL Managed Instance that can serve many source SQL Managed Instances and distribute changes to one or many targets on Azure SQL Database, Azure SQL Managed Instance, or SQL Server. - -![Separate instances for Publisher and Distributor](./media/replication-transactional-overview/02-separate-instances-asdbmi-pubdist.png) - -Publisher and distributor are configured on two managed instances. There are some constraints with this configuration: - -- Both managed instances are on the same vNet. -- Both managed instances are in the same location. - -### On-premises Publisher/Distributor with remote subscriber - -![Azure SQL Database as subscriber](./media/replication-transactional-overview/03-azure-sql-db-subscriber.png) - -In this configuration, a database in Azure SQL Database or Azure SQL Managed Instance is a subscriber. This configuration supports migration from on-premises to Azure. If a subscriber is a database in Azure SQL Database, it must be in push mode. - -## Requirements - -- Use SQL Authentication for connectivity between replication participants. -- Use an Azure Storage Account share for the working directory used by replication. -- Open TCP outbound port 445 in the subnet security rules to access the Azure file share. 
-- Open TCP outbound port 1433 when the SQL Managed Instance is the Publisher/Distributor, and the Subscriber is not. You may also need to change the SQL Managed Instance NSG outbound security rule for `allow_linkedserver_outbound` for the port 1433 **Destination Service tag** from `virtualnetwork` to `internet`. -- Place both the publisher and distributor in the cloud, or both on-premises. -- Configure VPN peering between the virtual networks of replication participants if the virtual networks are different. - -> [!NOTE] -> You may encounter error 53 when connecting to an Azure Storage File if the outbound network security group (NSG) port 445 is blocked when the distributor is an Azure SQL Managed Instance database and the subscriber is on-premises. [Update the vNet NSG](../../storage/files/storage-troubleshoot-windows-file-connection-problems.md) to resolve this issue. - -## With failover groups - -If a **publisher** or **distributor** SQL Managed Instance is in a [failover group](../database/auto-failover-group-overview.md), the SQL Managed Instance administrator must clean up all publications on the old primary and reconfigure them on the new primary after a failover occurs. The following activities are needed in this scenario: - -1. Stop all replication jobs running on the database, if there are any. -1. Drop subscription metadata from publisher by running the following script on publisher database: - - ```sql - EXEC sp_dropsubscription @publication='', @article='all',@subscriber='' - ``` - -1. Drop subscription metadata from the subscriber. Run the following script on the subscription database on subscriber SQL Managed Instance: - - ```sql - EXEC sp_subscription_cleanup - @publisher = N'', - @publisher_db = N'', - @publication = N''; - ``` - -1. Forcefully drop all replication objects from publisher by running the following script in the published database: - - ```sql - EXEC sp_removedbreplication - ``` - -1. 
Forcefully drop old distributor from original primary SQL Managed Instance (if failing back over to an old primary that used to have a distributor). Run the following script on the master database in old distributor SQL Managed Instance: - - ```sql - EXEC sp_dropdistributor 1,1 - ``` - -If a **subscriber** SQL Managed Instance is in a failover group, the publication should be configured to connect to the failover group listener endpoint for the subscriber managed instance. In the event of a failover, subsequent action by the managed instance administrator depends on the type of failover that occurred: - -- For a failover with no data loss, replication will continue working after failover. -- For a failover with data loss, replication will work as well. It will replicate the lost changes again. -- For a failover with data loss, but the data loss is outside of the distribution database retention period, the SQL Managed Instance administrator will need to reinitialize the subscription database. - -## Next steps - -For more information about configuring transactional replication, see the following tutorials: - -- [Configure replication between a SQL Managed Instance publisher and subscriber](../managed-instance/replication-between-two-instances-configure-tutorial.md) - [Configure replication between a SQL Managed Instance publisher, SQL Managed Instance distributor, and SQL Server subscriber](../managed-instance/replication-two-instances-and-sql-server-configure-tutorial.md) - [Create a publication](/sql/relational-databases/replication/publish/create-a-publication). - [Create a push subscription](/sql/relational-databases/replication/create-a-push-subscription) by using the server name as the subscriber (for example, `N'azuresqldbdns.database.windows.net'`) and the name of the database in Azure SQL Database as the destination database (for example, **Adventureworks**). 
- -## See also - -- [Replication with a SQL Managed Instance and a failover group](transact-sql-tsql-differences-sql-server.md#replication) -- [Replication to SQL Database](../database/replication-to-sql-database.md) -- [Replication to managed instance](../managed-instance/replication-between-two-instances-configure-tutorial.md) -- [Create a Publication](/sql/relational-databases/replication/publish/create-a-publication) -- [Create a Push Subscription](/sql/relational-databases/replication/create-a-push-subscription/) -- [Types of Replication](/sql/relational-databases/replication/types-of-replication) -- [Monitoring (Replication)](/sql/relational-databases/replication/monitor/monitoring-replication) -- [Initialize a Subscription](/sql/relational-databases/replication/initialize-a-subscription) diff --git a/articles/azure-sql/managed-instance/replication-two-instances-and-sql-server-configure-tutorial.md b/articles/azure-sql/managed-instance/replication-two-instances-and-sql-server-configure-tutorial.md deleted file mode 100644 index a81643314c85a..0000000000000 --- a/articles/azure-sql/managed-instance/replication-two-instances-and-sql-server-configure-tutorial.md +++ /dev/null @@ -1,422 +0,0 @@ ---- -title: "Configure transactional replication between Azure SQL Managed Instance and SQL Server" -description: "A tutorial that configures replication between a publisher managed instance, a distributor managed instance, and a SQL Server subscriber on an Azure VM, along with necessary networking components such as private DNS zone and VNet peering." 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: security -ms.custom: sqldbrb=1 -ms.topic: tutorial -author: MashaMSFT -ms.author: mathoma -ms.reviewer: -ms.date: 11/21/2019 ---- -# Tutorial: Configure transactional replication between Azure SQL Managed Instance and SQL Server -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Transactional replication allows you to replicate data from one database to another hosted on either SQL Server or [Azure SQL Managed Instance](sql-managed-instance-paas-overview.md). SQL Managed Instance can be a publisher, distributor or subscriber in the replication topology. See [transactional replication configurations](replication-transactional-overview.md#common-configurations) for available configurations. - -Transactional replication is currently in public preview for SQL Managed Instance. - -In this tutorial, you learn how to: - -> [!div class="checklist"] -> -> - Configure a managed instance as a replication publisher. -> - Configure a managed instance as a replication distributor. -> - Configure SQL Server as a subscriber. - -![Replication between a managed instance publisher, managed instance distributor, and SQL Server subscriber](./media/replication-two-instances-and-sql-server-configure-tutorial/sqlmi-to-sql-replication.png) - -This tutorial is intended for an experienced audience and assumes that the user is familiar with deploying and connecting to both managed instances and SQL Server VMs within Azure. - - -> [!NOTE] -> This article describes the use of [transactional replication](/sql/relational-databases/replication/transactional/transactional-replication) in Azure SQL Managed Instance. It is unrelated to [failover groups](../database/auto-failover-group-overview.md), an Azure SQL Managed Instance feature that allows you to create complete readable replicas of individual instances. 
There are additional considerations when configuring [transactional replication with failover groups](replication-transactional-overview.md#with-failover-groups). - -## Prerequisites - -To complete the tutorial, make sure you have the following prerequisites: - -- An [Azure subscription](https://azure.microsoft.com/free/). -- Experience with deploying two managed instances within the same virtual network. -- A SQL Server subscriber, either on-premises or on an Azure VM. This tutorial uses an Azure VM. -- [SQL Server Management Studio (SSMS) 18.0 or greater](/sql/ssms/download-sql-server-management-studio-ssms). -- The latest version of [Azure PowerShell](/powershell/azure/install-az-ps). -- Ports 445 and 1433 allow SQL traffic on both the Azure firewall and the Windows firewall. - -## Create the resource group - -Use the following PowerShell code snippet to create a new resource group: - -```powershell-interactive -# set variables -$ResourceGroupName = "SQLMI-Repl" -$Location = "East US 2" - -# Create a new resource group -New-AzResourceGroup -Name $ResourceGroupName -Location $Location -``` - -## Create two managed instances - -Create two managed instances within this new resource group using the [Azure portal](https://portal.azure.com). - -- The name of the publisher managed instance should be `sql-mi-publisher` (along with a few characters for randomization), and the name of the virtual network should be `vnet-sql-mi-publisher`. -- The name of the distributor managed instance should be `sql-mi-distributor` (along with a few characters for randomization), and it should be _in the same virtual network as the publisher managed instance_. - - ![Use the publisher VNet for the distributor](./media/replication-two-instances-and-sql-server-configure-tutorial/use-same-vnet-for-distributor.png) - -For more information about creating a managed instance, see [Create a managed instance in the portal](instance-create-quickstart.md). 
- - > [!NOTE] - > For the sake of simplicity, and because it is the most common configuration, this tutorial suggests placing the distributor managed instance within the same virtual network as the publisher. However, it's possible to create the distributor in a separate virtual network. To do so, you will need to configure VNet peering between the virtual networks of the publisher and distributor, and then configure VNet peering between the virtual networks of the distributor and subscriber. - -## Create a SQL Server VM - -Create a SQL Server virtual machine using the [Azure portal](https://portal.azure.com). The SQL Server virtual machine should have the following characteristics: - -- Name: `sql-vm-sub` -- Image: SQL Server 2016 or greater -- Resource group: the same as the managed instance -- Virtual network: `sql-vm-sub-vnet` - -For more information about deploying a SQL Server VM to Azure, see [Quickstart: Create a SQL Server VM](../virtual-machines/windows/sql-vm-create-portal-quickstart.md). - -## Configure VNet peering - -Configure VNet peering to enable communication between the virtual network of the two managed instances, and the virtual network of SQL Server. 
To do so, use this PowerShell code snippet: - -```powershell-interactive -# Set variables -$SubscriptionId = '' -$resourceGroup = 'SQLMI-Repl' -$pubvNet = 'sql-mi-publisher-vnet' -$subvNet = 'sql-vm-sub-vnet' -$pubsubName = 'Pub-to-Sub-Peer' -$subpubName = 'Sub-to-Pub-Peer' - -$virtualNetwork1 = Get-AzVirtualNetwork ` - -ResourceGroupName $resourceGroup ` - -Name $pubvNet - - $virtualNetwork2 = Get-AzVirtualNetwork ` - -ResourceGroupName $resourceGroup ` - -Name $subvNet - -# Configure VNet peering from publisher to subscriber -Add-AzVirtualNetworkPeering ` - -Name $pubsubName ` - -VirtualNetwork $virtualNetwork1 ` - -RemoteVirtualNetworkId $virtualNetwork2.Id - -# Configure VNet peering from subscriber to publisher -Add-AzVirtualNetworkPeering ` - -Name $subpubName ` - -VirtualNetwork $virtualNetwork2 ` - -RemoteVirtualNetworkId $virtualNetwork1.Id - -# Check status of peering on the publisher VNet; should say connected -Get-AzVirtualNetworkPeering ` - -ResourceGroupName $resourceGroup ` - -VirtualNetworkName $pubvNet ` - | Select PeeringState - -# Check status of peering on the subscriber VNet; should say connected -Get-AzVirtualNetworkPeering ` - -ResourceGroupName $resourceGroup ` - -VirtualNetworkName $subvNet ` - | Select PeeringState - -``` - -Once VNet peering is established, test connectivity by launching SQL Server Management Studio (SSMS) on SQL Server and connecting to both managed instances. For more information on connecting to a managed instance using SSMS, see [Use SSMS to connect to SQL Managed Instance](point-to-site-p2s-configure.md#connect-with-ssms). - -![Test connectivity to the managed instances](./media/replication-two-instances-and-sql-server-configure-tutorial/test-connectivity-to-mi.png) - -## Create a private DNS zone - -A private DNS zone allows DNS routing between the managed instances and SQL Server. - -### Create a private DNS zone - -1. Sign into the [Azure portal](https://portal.azure.com). -1. 
Select **Create a resource** to create a new Azure resource. -1. Search for `private dns zone` on Azure Marketplace. -1. Choose the **Private DNS zone** resource published by Microsoft and then select **Create** to create the DNS zone. -1. Choose the subscription and resource group from the drop-down. -1. Provide an arbitrary name for your DNS zone, such as `repldns.com`. - - ![Create private DNS zone](./media/replication-two-instances-and-sql-server-configure-tutorial/create-private-dns-zone.png) - -1. Select **Review + create**. Review the parameters for your private DNS zone and then select **Create** to create your resource. - -### Create an A record - -1. Go to your new **Private DNS zone** and select **Overview**. -1. Select **+ Record set** to create a new A record. -1. Provide the name of your SQL Server VM as well as the private internal IP address. - - ![Configure an A record](./media/replication-two-instances-and-sql-server-configure-tutorial/configure-a-record.png) - -1. Select **OK** to create the A record. - -### Link the virtual network - -1. Go to your new **Private DNS zone** and select **Virtual network links**. -1. Select **+ Add**. -1. Provide a name for the link, such as `Pub-link`. -1. Select your subscription from the drop-down and then select the virtual network for your publisher managed instance. -1. Check the box next to **Enable auto registration**. - - ![Create VNet link](./media/replication-two-instances-and-sql-server-configure-tutorial/configure-vnet-link.png) - -1. Select **OK** to link your virtual network. -1. Repeat these steps to add a link for the subscriber virtual network, with a name such as `Sub-link`. - -## Create an Azure storage account - -[Create an Azure storage account](../../storage/common/storage-account-create.md#create-a-storage-account) for the working directory, and then create a [file share](../../storage/files/storage-how-to-create-file-share.md) within the storage account. 
- -Copy the file share path in the format of: -`\\storage-account-name.file.core.windows.net\file-share-name` - -Example: `\\replstorage.file.core.windows.net\replshare` - -Copy the storage access key connection string in the format of: -`DefaultEndpointsProtocol=https;AccountName=;AccountKey=****;EndpointSuffix=core.windows.net` - -Example: `DefaultEndpointsProtocol=https;AccountName=replstorage;AccountKey=dYT5hHZVu9aTgIteGfpYE64cfis0mpKTmmc8+EP53GxuRg6TCwe5eTYWrQM4AmQSG5lb3OBskhg==;EndpointSuffix=core.windows.net` - -For more information, see [Manage storage account access keys](../../storage/common/storage-account-keys-manage.md). - -## Create a database - -Create a new database on the publisher managed instance. To do so, follow these steps: - -1. Launch SQL Server Management Studio on SQL Server. -1. Connect to the `sql-mi-publisher` managed instance. -1. Open a **New Query** window and execute the following T-SQL query to create the database. - -```sql --- Create the databases -USE [master] -GO - --- Drop database if it exists -IF EXISTS (SELECT * FROM sys.sysdatabases WHERE name = 'ReplTutorial') -BEGIN -    DROP DATABASE ReplTutorial -END -GO - --- Create new database -CREATE DATABASE [ReplTutorial] -GO - --- Create table -USE [ReplTutorial] -GO -CREATE TABLE ReplTest ( - ID INT NOT NULL PRIMARY KEY, - c1 VARCHAR(100) NOT NULL, - dt1 DATETIME NOT NULL DEFAULT getdate() -) -GO - --- Populate table with data -USE [ReplTutorial] -GO - -INSERT INTO ReplTest (ID, c1) VALUES (6, 'pub') -INSERT INTO ReplTest (ID, c1) VALUES (2, 'pub') -INSERT INTO ReplTest (ID, c1) VALUES (3, 'pub') -INSERT INTO ReplTest (ID, c1) VALUES (4, 'pub') -INSERT INTO ReplTest (ID, c1) VALUES (5, 'pub') -GO -SELECT * FROM ReplTest -GO -``` - -## Configure distribution - -Once connectivity is established and you have a sample database, you can configure distribution on your `sql-mi-distributor` managed instance. To do so, follow these steps: - -1. 
Launch SQL Server Management Studio on SQL Server. -1. Connect to the `sql-mi-distributor` managed instance. -1. Open a **New Query** window and run the following Transact-SQL code to configure distribution on the distributor managed instance: - - ```sql - EXEC sp_adddistributor @distributor = 'sql-mi-distributor.b6bf57.database.windows.net', @password = '' - - EXEC sp_adddistributiondb @database = N'distribution' - - EXEC sp_adddistpublisher @publisher = 'sql-mi-publisher.b6bf57.database.windows.net', -- primary publisher - @distribution_db = N'distribution', - @security_mode = 0, - @login = N'azureuser', - @password = N'', - @working_directory = N'\\replstorage.file.core.windows.net\replshare', - @storage_connection_string = N'' - -- example: @storage_connection_string = N'DefaultEndpointsProtocol=https;AccountName=replstorage;AccountKey=dYT5hHZVu9aTgIteGfpYE64cfis0mpKTmmc8+EP53GxuRg6TCwe5eTYWrQM4AmQSG5lb3OBskhg==;EndpointSuffix=core.windows.net' - - ``` - - > [!NOTE] - > Be sure to use only backslashes (`\`) for the @working_directory parameter. Using a forward slash (`/`) can cause an error when connecting to the file share. - -1. Connect to the `sql-mi-publisher` managed instance. -1. Open a **New Query** window and run the following Transact-SQL code to register the distributor at the publisher: - - ```sql - Use MASTER - EXEC sys.sp_adddistributor @distributor = 'sql-mi-distributor.b6bf57.database.windows.net', @password = '' - ``` - -## Create the publication - -Once distribution has been configured, you can now create the publication. To do so, follow these steps: - -1. Launch SQL Server Management Studio on SQL Server. -1. Connect to the `sql-mi-publisher` managed instance. -1. In **Object Explorer**, expand the **Replication** node and right-click the **Local Publication** folder. Select **New Publication...**. -1. Select **Next** to move past the welcome page. -1. 
On the **Publication Database** page, select the `ReplTutorial` database you created previously. Select **Next**. -1. On the **Publication type** page, select **Transactional publication**. Select **Next**. -1. On the **Articles** page, check the box next to **Tables**. Select **Next**. -1. On the **Filter Table Rows** page, select **Next** without adding any filters. -1. On the **Snapshot Agent** page, check the box next to **Create snapshot immediately and keep the snapshot available to initialize subscriptions**. Select **Next**. -1. On the **Agent Security** page, select **Security Settings...**. Provide SQL Server login credentials to use for the Snapshot Agent, and to connect to the publisher. Select **OK** to close the **Snapshot Agent Security** page. Select **Next**. - - ![Configure Snapshot Agent security](./media/replication-two-instances-and-sql-server-configure-tutorial/snapshot-agent-security.png) - -1. On the **Wizard Actions** page, choose to **Create the publication** and (optionally) choose to **Generate a script file with steps to create the publication** if you want to save this script for later. -1. On the **Complete the Wizard** page, name your publication `ReplTest` and select **Next** to create your publication. -1. Once your publication has been created, refresh the **Replication** node in **Object Explorer** and expand **Local Publications** to see your new publication. - -## Create the subscription - -Once the publication has been created, you can create the subscription. To do so, follow these steps: - -1. Launch SQL Server Management Studio on SQL Server. -1. Connect to the `sql-mi-publisher` managed instance. -1. Open a **New Query** window and run the following Transact-SQL code to add the subscription and distribution agent. Use the DNS as part of the subscriber name. 
- -```sql -use [ReplTutorial] -exec sp_addsubscription -@publication = N'ReplTest', -@subscriber = N'sql-vm-sub.repldns.com', -- include the DNS configured in the private DNS zone -@destination_db = N'ReplSub', -@subscription_type = N'Push', -@sync_type = N'automatic', -@article = N'all', -@update_mode = N'read only', -@subscriber_type = 0 - -exec sp_addpushsubscription_agent -@publication = N'ReplTest', -@subscriber = N'sql-vm-sub.repldns.com', -- include the DNS configured in the private DNS zone -@subscriber_db = N'ReplSub', -@job_login = N'azureuser', -@job_password = '', -@subscriber_security_mode = 0, -@subscriber_login = N'azureuser', -@subscriber_password = '', -@dts_package_location = N'Distributor' -GO -``` - -## Test replication - -Once replication has been configured, you can test it by inserting new items on the publisher and watching the changes propagate to the subscriber. - -Run the following T-SQL snippet to view the rows on the subscriber: - -```sql -Use ReplSub -select * from dbo.ReplTest -``` - -Run the following T-SQL snippet to insert additional rows on the publisher, and then check the rows again on the subscriber. - -```sql -Use ReplTutorial -INSERT INTO ReplTest (ID, c1) VALUES (15, 'pub') -``` - -## Clean up resources - -1. Navigate to your resource group in the [Azure portal](https://portal.azure.com). -1. Select the managed instance(s) and then select **Delete**. Type `yes` in the text box to confirm you want to delete the resource and then select **Delete**. This process may take some time to complete in the background, and until it's done, you will not be able to delete the *virtual cluster* or any other dependent resources. Monitor the delete in the **Activity** tab to confirm your managed instance has been deleted. -1. Once the managed instance is deleted, delete the *virtual cluster* by selecting it in your resource group, and then choosing **Delete**. 
Type `yes` in the text box to confirm you want to delete the resource and then select **Delete**. -1. Delete any remaining resources. Type `yes` in the text box to confirm you want to delete the resource and then select **Delete**. -1. Delete the resource group by selecting **Delete resource group**, typing in the name of the resource group, `myResourceGroup`, and then selecting **Delete**. - -## Known errors - -### Windows logins are not supported - -`Exception Message: Windows logins are not supported in this version of SQL Server.` - -The agent was configured with a Windows login and needs to use a SQL Server login instead. Use the **Agent Security** page of the **Publication properties** to change the login credentials to a SQL Server login. - -### Failed to connect to Azure Storage - -`Connecting to Azure Files Storage '\\replstorage.file.core.windows.net\replshare' Failed to connect to Azure Storage '' with OS error: 53.` - -2019-11-19 02:21:05.07 Obtained Azure Storage Connection String for replstorage -2019-11-19 02:21:05.07 Connecting to Azure Files Storage '\\replstorage.file.core.windows.net\replshare' -2019-11-19 02:21:31.21 Failed to connect to Azure Storage '' with OS error: 53. - -This is likely because port 445 is closed in either the Azure firewall, the Windows firewall, or both. - -`Connecting to Azure Files Storage '\\replstorage.file.core.windows.net\replshare' Failed to connect to Azure Storage '' with OS error: 55.` - -Using a forward slash instead of backslash in the file path for the file share can cause this error. - - - This is okay: `\\replstorage.file.core.windows.net\replshare` - - This can cause an OS 55 error: `'\\replstorage.file.core.windows.net/replshare'` - -### Could not connect to Subscriber - -`The process could not connect to Subscriber 'SQL-VM-SUB` -`Could not open a connection to SQL Server [53].` -`A network-related or instance-specific error has occurred while establishing a connection to SQL Server. 
Server is not found or not accessible. Check if instance name is correct and if SQL Server is configured to allow remote connections.` - -Possible solutions: - -- Ensure port 1433 is open. -- Ensure TCP/IP is enabled on the subscriber. -- Confirm the DNS name was used when creating the subscriber. -- Verify that your virtual networks are correctly linked in the private DNS zone. -- Verify your A record is configured correctly. -- Verify your VNet peering is configured correctly. - -### No publications to which you can subscribe - -When you're adding a new subscription using the **New Subscription** wizard, on the **Publication** page, you may find that there are no databases and publications listed as available options, and you might see the following error message: - -`There are no publications to which you can subscribe, either because this server has no publications or because you do not have sufficient privileges to access the publications.` - -While it's possible that this error message is accurate, and there really aren't publications available on the publisher you connected to, or you're lacking sufficient permissions, this error could also be caused by an older version of SQL Server Management Studio. Try upgrading to SQL Server Management Studio 18.0 or greater to rule this out as a root cause. - -## Next steps - -### Enable security features - -See the [What is Azure SQL Managed Instance?](sql-managed-instance-paas-overview.md#advanced-security-and-compliance) article for a comprehensive list of ways to secure your database. 
The following security features are discussed: - -- [SQL Managed Instance auditing](auditing-configure.md) -- [Always Encrypted](/sql/relational-databases/security/encryption/always-encrypted-database-engine) -- [Threat detection](threat-detection-configure.md) -- [Dynamic data masking](/sql/relational-databases/security/dynamic-data-masking) -- [Row-level security](/sql/relational-databases/security/row-level-security) -- [Transparent data encryption (TDE)](/sql/relational-databases/security/encryption/transparent-data-encryption-azure-sql) - -### SQL Managed Instance capabilities - -For a complete overview of managed instance capabilities, see: - -> [!div class="nextstepaction"] -> [SQL Managed Instance capabilities](sql-managed-instance-paas-overview.md) \ No newline at end of file diff --git a/articles/azure-sql/managed-instance/resource-limits.md b/articles/azure-sql/managed-instance/resource-limits.md deleted file mode 100644 index 11adc0d18b5ba..0000000000000 --- a/articles/azure-sql/managed-instance/resource-limits.md +++ /dev/null @@ -1,294 +0,0 @@ ---- -title: Resource limits -titleSuffix: Azure SQL Managed Instance -description: This article provides an overview of the resource limits for Azure SQL Managed Instance. 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: service-overview -ms.custom: references_regions, ignite-fall-2021 -ms.devlang: -ms.topic: reference -author: vladai78 -ms.author: vladiv -ms.reviewer: mathoma, vladiv, sachinp, wiassaf -ms.date: 04/06/2022 ---- -# Overview of Azure SQL Managed Instance resource limits -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -> [!div class="op_single_selector"] -> * [Azure SQL Database](../database/resource-limits-logical-server.md) -> * [Azure SQL Managed Instance](resource-limits.md) - -This article provides an overview of the technical characteristics and resource limits for Azure SQL Managed Instance, and provides information about how to request an increase to these limits. - -> [!NOTE] -> For differences in supported features and T-SQL statements see [Feature differences](../database/features-comparison.md) and [T-SQL statement support](transact-sql-tsql-differences-sql-server.md). For general differences between service tiers for Azure SQL Database and SQL Managed Instance review [General Purpose](../database/service-tier-general-purpose.md) and [Business Critical](../database/service-tier-business-critical.md) service tiers. - -## Hardware configuration characteristics - -SQL Managed Instance has characteristics and resource limits that depend on the underlying infrastructure and architecture. SQL Managed Instance can be deployed on multiple hardware configurations. - -> [!NOTE] -> The Gen5 hardware has been renamed to the **standard-series (Gen5)**. We are introducing two new hardware configurations in limited preview: **premium-series** and **memory optimized premium-series**. - -For information on previously available hardware, see [Previously available hardware](#previously-available-hardware) later in this article. 
- -Hardware configurations have different characteristics, as described in the following table: - -| | **Standard-series (Gen5)** | **Premium-series (preview)** | **Memory optimized premium-series (preview)** | -|:-- |:-- |:-- |:-- | -| **CPU** | Intel® E5-2673 v4 (Broadwell) 2.3 GHz, Intel® SP-8160 (Skylake), and Intel® 8272CL (Cascade Lake) 2.5 GHz processors | Intel® 8370C (Ice Lake) 2.8 GHz processors | Intel® 8370C (Ice Lake) 2.8 GHz processors | -| **Number of vCores**
    vCore=1 LP (hyper-thread) | 4-80 vCores | 4-80 vCores | 4-64 vCores | -| **Max memory (memory/vCore ratio)** | 5.1 GB per vCore
    Add more vCores to get more memory. | 7 GB per vCore | 13.6 GB per vCore | -| **Max In-Memory OLTP memory** | Instance limit: 0.8 - 1.65 GB per vCore | Instance limit: 1.1 - 2.3 GB per vCore | Instance limit: 2.2 - 4.5 GB per vCore | -| **Max instance reserved storage**\* | **General Purpose:** up to 16 TB
    **Business Critical:** up to 4 TB | **General Purpose:** up to 16 TB
    **Business Critical:** up to 5.5 TB | **General Purpose:** up to 16 TB
    **Business Critical:** up to 16 TB | - -\* Dependent on [the number of vCores](#service-tier-characteristics). - ->[!NOTE] -> If your workload requires storage sizes greater than the available resource limits for Azure SQL Managed Instance, consider the Azure SQL Database [Hyperscale service tier](../database/service-tier-hyperscale.md). - -### Regional support for premium-series hardware (preview) - -Support for the premium-series hardware (public preview) is currently available only in these specific regions:
    - -| Region | **Premium-series** | **Memory optimized premium-series** | -|:--- |:--- |:--- | -| Australia Central | Yes | | -| Australia East | Yes | | -| Canada Central | Yes | | -| Canada East | Yes | | -| Central US | Yes | Yes | -| East US | Yes | Yes | -| Germany West Central | Yes | Yes | -| Japan East | Yes | | -| Japan West | Yes | | -| Korea Central | Yes | | -| North Central US | Yes | Yes | -| North Europe | Yes | Yes | -| Norway East | Yes | | -| South Africa West | Yes | | -| South Central US | Yes | Yes | -| Southeast Asia | Yes | | -| Sweden Central | | Yes | -| Switzerland North | Yes | | -| Switzerland West | Yes | | -| UAE North | Yes | | -| UK South | Yes | Yes | -| UK West | Yes | | -| West Central US | Yes | | -| West Europe | Yes | Yes | -| West US | Yes | | -| West US 2 | Yes | Yes | -| West US 3 | Yes | Yes | - -### In-memory OLTP available space - -The amount of In-memory OLTP space in [Business Critical](../database/service-tier-business-critical.md) service tier depends on the number of vCores and hardware configuration. The following table lists the limits of memory that can be used for In-memory OLTP objects. - -| **vCores** | **Standard-series (Gen5)** | **Premium-series** | **Memory optimized premium-series** | -|:--- |:--- |:--- |:--- | -| 4 vCores | 3.14 GB | 4.39 GB | 8.79 GB | -| 8 vCores | 6.28 GB | 8.79 GB | 22.06 GB | -| 16 vCores | 15.77 GB | 22.06 GB | 57.58 GB | -| 24 vCores | 25.25 GB | 35.34 GB | 93.09 GB | -| 32 vCores | 37.94 GB | 53.09 GB | 128.61 GB | -| 40 vCores | 52.23 GB | 73.09 GB | 164.13 GB | -| 64 vCores | 99.9 GB | 139.82 GB | 288.61 GB | -| 80 vCores | 131.68 GB| 184.30 GB | N/A | - -## Service tier characteristics - -SQL Managed Instance has two service tiers: [General Purpose](../database/service-tier-general-purpose.md) and [Business Critical](../database/service-tier-business-critical.md). 
- -> [!Important] -> The Business Critical service tier provides an additional built-in copy of the SQL Managed Instance (secondary replica) that can be used for read-only workload. If you can separate read-write queries and read-only/analytic/reporting queries, you are getting twice the vCores and memory for the same price. The secondary replica might lag a few seconds behind the primary instance, so it is designed to offload reporting/analytic workloads that don't need exact current state of data. In the table below, **read-only queries** are the queries that are executed on secondary replica. - -| **Feature** | **General Purpose** | **Business Critical** | -| --- | --- | --- | -| Number of vCores\* | 4, 8, 16, 24, 32, 40, 64, 80 | **Standard-series (Gen5)**: 4, 8, 16, 24, 32, 40, 64, 80
    **Premium-series**: 4, 8, 16, 24, 32, 40, 64, 80
    **Memory optimized premium-series**: 4, 8, 16, 24, 32, 40, 64
    \*Same number of vCores is dedicated for read-only queries. | -| Max memory | **Standard-series (Gen5)**: 20.4 GB - 408 GB (5.1 GB/vCore)
    **Premium-series**: 28 GB - 560 GB (7 GB/vCore)
    **Memory optimized premium-series**: 54.4 GB - 870.4 GB (13.6 GB/vCore) | **Standard-series (Gen5)**: 20.4 GB - 408 GB (5.1 GB/vCore) on each replica
    **Premium-series**: 28 GB - 560 GB (7 GB/vCore) on each replica
    **Memory optimized premium-series**: 54.4 GB - 870.4 GB (13.6 GB/vCore) on each replica | -| Max instance storage size (reserved) | - 2 TB for 4 vCores
    - 8 TB for 8 vCores
    - 16 TB for other sizes
    | **Standard-series (Gen5)**:
    - 1 TB for 4, 8, 16 vCores
    - 2 TB for 24 vCores
    - 4 TB for 32, 40, 64, 80 vCores
    **Premium-series**:
    - 1 TB for 4, 8 vCores
    - 2 TB for 16, 24 vCores
    - 4 TB for 32 vCores
    - 5.5 TB for 40, 64, 80 vCores
    **Memory optimized premium-series**:
    - 1 TB for 4, 8 vCores
    - 2 TB for 16, 24 vCores
    - 4 TB for 32 vCores
    - 5.5 TB for 40 vCores
    - 16 TB for 64 vCores
    | -| Max database size | Up to currently available instance size (depending on the number of vCores). | Up to currently available instance size (depending on the number of vCores). | -| Max tempDB size | Limited to 24 GB/vCore (96 - 1,920 GB) and currently available instance storage size.
    Add more vCores to get more TempDB space.
    Log file size is limited to 120 GB.| Up to currently available instance storage size. | -| Max number of databases per instance | 100 user databases, unless the instance storage size limit has been reached. | 100 user databases, unless the instance storage size limit has been reached. | -| Max number of database files per instance | Up to 280, unless the instance storage size or [Azure Premium Disk storage allocation space](doc-changes-updates-known-issues.md#exceeding-storage-space-with-small-database-files) limit has been reached. | 32,767 files per database, unless the instance storage size limit has been reached. | -| Max data file size | Maximum size of each data file is 8 TB. Use at least two data files for databases larger than 8 TB. | Up to currently available instance size (depending on the number of vCores). | -| Max log file size | Limited to 2 TB and currently available instance storage size. | Limited to 2 TB and currently available instance storage size. | -| Data/Log IOPS (approximate) | 500 - 7500 per file
    \*[Increase file size to get more IOPS](#file-io-characteristics-in-general-purpose-tier)| 16 K - 320 K (4000 IOPS/vCore)
    Add more vCores to get better IO performance. | -| Log write throughput limit (per instance) | 3 MB/s per vCore
    Max 120 MB/s per instance
    22 - 65 MB/s per DB (depending on log file size)
    \*[Increase the file size to get better IO performance](#file-io-characteristics-in-general-purpose-tier) | 4 MB/s per vCore
    Max 96 MB/s | -| Data throughput (approximate) | 100 - 250 MB/s per file
    \*[Increase the file size to get better IO performance](#file-io-characteristics-in-general-purpose-tier) | Not limited. | -| Storage IO latency (approximate) | 5-10 ms | 1-2 ms | -| In-memory OLTP | Not supported | Available, [size depends on number of vCore](#in-memory-oltp-available-space) | -| Max sessions | 30000 | 30000 | -| Max concurrent workers | 105 * number of vCores + 800 | 105 * number of vCores + 800 | -| [Read-only replicas](../database/read-scale-out.md) | 0 | 1 (included in price) | -| Compute isolation | Not supported as General Purpose instances may share physical hardware with other instances| **Standard-series (Gen5)**:
    Supported for 40, 64, 80 vCores
    **Premium-series**: Supported for 64, 80 vCores
    **Memory optimized premium-series**: Supported for 64 vCores | - - -A few additional considerations: - -- **Currently available instance storage size** is the difference between reserved instance size and the used storage space. -- Both data and log file size in the user and system databases are included in the instance storage size that is compared with the max storage size limit. Use the [sys.master_files](/sql/relational-databases/system-catalog-views/sys-master-files-transact-sql) system view to determine the total used space by databases. Error logs are not persisted and not included in the size. Backups are not included in storage size. -- Throughput and IOPS in the General Purpose tier also depend on the [file size](#file-io-characteristics-in-general-purpose-tier) that is not explicitly limited by the SQL Managed Instance. -- You can create another readable replica in a different Azure region using [auto-failover groups](auto-failover-group-configure-sql-mi.md). -- Max instance IOPS depend on the file layout and distribution of workload. As an example, if you create 7 x 1 TB files with max 5 K IOPS each and seven small files (smaller than 128 GB) with 500 IOPS each, you can get 38500 IOPS per instance (7x5000+7x500) if your workload can use all files. Note that some IOPS are also used for auto-backups. - -Find more information about the [resource limits in SQL Managed Instance pools in this article](instance-pools-overview.md#resource-limitations). - -### Data and log storage - -The following factors affect the amount of storage used for data and log files, and apply to General Purpose and Business Critical tiers. - -- Each compute size supports a maximum data size, with a default of 16 GB. For more information on resource limits in Azure SQL Managed Instance, see [resource limits](resource-limits.md). -- When you configure maximum data size, an additional 30 percent of storage is automatically added for log files. 
-- You can select any maximum data size between 1 GB and the supported storage size maximum, in 1 GB increments. -- In the General Purpose service tier, `tempdb` uses local SSD storage, and this storage cost is included in the vCore price. -- In the Business Critical service tier, `tempdb` shares local SSD storage with data and log files, and `tempdb` storage cost is included in the vCore price. -- The maximum storage size for a SQL Managed Instance must be specified in multiples of 32 GB. - -> [!IMPORTANT] -> In the General Purpose and Business Critical tiers, you are charged for the maximum storage size configured for a managed instance. - -To monitor total consumed instance storage size for SQL Managed Instance, use the *storage_space_used_mb* [metric](../../azure-monitor/essentials/metrics-supported.md#microsoftsqlmanagedinstances). To monitor the current allocated and used storage size of individual data and log files in a database using T-SQL, use the [sys.database_files](/sql/relational-databases/system-catalog-views/sys-database-files-transact-sql) view and the [FILEPROPERTY(... , 'SpaceUsed')](/sql/t-sql/functions/fileproperty-transact-sql) function. - -> [!TIP] -> Under some circumstances, you may need to shrink a database to reclaim unused space. For more information, see [Manage file space in Azure SQL Database](../database/file-space-manage.md). - -### Backups and storage - -Storage for database backups is allocated to support the [point-in-time restore (PITR)](../database/recovery-using-backups.md) and [long-term retention (LTR)](../database/long-term-retention-overview.md) capabilities of SQL Managed Instance. This storage is separate from data and log file storage, and is billed separately. - -- **PITR**: In General Purpose and Business Critical tiers, individual database backups are copied to [read-access geo-redundant (RA-GRS) storage](../../storage/common/geo-redundant-design.md) automatically. 
The storage size increases dynamically as new backups are created. The storage is used by full, differential, and transaction log backups. The storage consumption depends on the rate of change of the database and the retention period configured for backups. You can configure a separate retention period for each database between 0 to 35 days for SQL Managed Instance. A backup storage amount equal to the configured maximum data size is provided at no extra charge. -- **LTR**: You also have the option to configure long-term retention of full backups for up to 10 years. If you set up an LTR policy, these backups are stored in RA-GRS storage automatically, but you can control how often the backups are copied. To meet different compliance requirements, you can select different retention periods for weekly, monthly, and/or yearly backups. The configuration you choose determines how much storage will be used for LTR backups. For more information, see [Long-term backup retention](../database/long-term-retention-overview.md). - -### File IO characteristics in General Purpose tier - -In the General Purpose service tier, every database file gets dedicated IOPS and throughput that depend on the file size. Larger files get more IOPS and throughput. IO characteristics of database files are shown in the following table: - -| **File size** | **>=0 and <=128 GiB** | **>128 and <= 512 GiB** | **>0.5 and <=1 TiB** | **>1 and <=2 TiB** | **>2 and <=4 TiB** | **>4 and <=8 TiB** | -|:--|:--|:--|:--|:--|:--|:--| -| IOPS per file | 500 | 2300 | 5000 | 7500 | 7500 | 12,500 | -| Throughput per file | 100 MiB/s | 150 MiB/s | 200 MiB/s | 250 MiB/s| 250 MiB/s | 250 MiB/s | - -If you notice high IO latency on some database file or you see that IOPS/throughput is reaching the limit, you might improve performance by [increasing the file size](https://techcommunity.microsoft.com/t5/Azure-SQL-Database/Increase-data-file-size-to-improve-HammerDB-workload-performance/ba-p/823337). 
- -There is also an instance-level limit on the max log write throughput (see above for values, e.g., 22 MB/s), so you may not be able to reach the max file throughput on the log file because you are hitting the instance throughput limit. - -## Supported regions - -SQL Managed Instance can be created only in [supported regions](https://azure.microsoft.com/global-infrastructure/services/?products=sql-database&regions=all). To create a SQL Managed Instance in a region that is currently not supported, you can [send a support request via the Azure portal](../database/quota-increase-request.md). - -## Supported subscription types - -SQL Managed Instance currently supports deployment only on the following types of subscriptions: - -- [Enterprise Agreement (EA)](https://azure.microsoft.com/pricing/enterprise-agreement/) -- [Pay-as-you-go](https://azure.microsoft.com/offers/ms-azr-0003p/) -- [Cloud Service Provider (CSP)](/partner-center/csp-documents-and-learning-resources) -- [Enterprise Dev/Test](https://azure.microsoft.com/offers/ms-azr-0148p/) -- [Pay-As-You-Go Dev/Test](https://azure.microsoft.com/offers/ms-azr-0023p/) -- [Subscriptions with monthly Azure credit for Visual Studio subscribers](https://azure.microsoft.com/pricing/member-offers/credit-for-visual-studio-subscribers/) - -## Regional resource limitations - -> [!Note] -> For the latest information on region availability for subscriptions, first check [select a region](../capacity-errors-troubleshoot.md). - -Supported subscription types can contain a limited number of resources per region. SQL Managed Instance has two default limits per Azure region (that can be increased on-demand by creating a special [support request in the Azure portal](../database/quota-increase-request.md)), depending on the subscription type: - -- **Subnet limit**: The maximum number of subnets where instances of SQL Managed Instance are deployed in a single region. 
-- **vCore unit limit**: The maximum number of vCore units that can be deployed across all instances in a single region. One GP vCore uses one vCore unit and one BC vCore takes four vCore units. The total number of instances is not limited as long as it is within the vCore unit limit. - -> [!Note] -> These limits are default settings and not technical limitations. The limits can be increased on-demand by creating a special [support request in the Azure portal](../database/quota-increase-request.md) if you need more instances in the current region. As an alternative, you can create new instances of SQL Managed Instance in another Azure region without sending support requests. - -The following table shows the **default regional limits** for supported subscription types (default limits can be extended using support request described below): - -|Subscription type| Max number of SQL Managed Instance subnets | Max number of vCore units* | -| :---| :--- | :--- | -|CSP |16 (30 in some regions**)|960 (1440 in some regions**)| -|EA|16 (30 in some regions**)|960 (1440 in some regions**)| -|Enterprise Dev/Test|6|320| -|Pay-as-you-go|6|320| -|Pay-as-you-go Dev/Test|6|320| -|Azure Pass|3|64| -|BizSpark|3|64| -|BizSpark Plus|3|64| -|Microsoft Azure Sponsorship|3|64| -|Microsoft Partner Network|3|64| -|Visual Studio Enterprise (MPN)|3|64| -|Visual Studio Enterprise|3|32| -|Visual Studio Enterprise (BizSpark)|3|32| -|Visual Studio Professional|3|32| -|MSDN Platforms|3|32| - -\* In planning deployments, please take into consideration that Business Critical (BC) service tier requires four (4) times more vCore capacity than General Purpose (GP) service tier. For example: 1 GP vCore = 1 vCore unit and 1 BC vCore = 4 vCore. To simplify your consumption analysis against the default limits, summarize the vCore units across all subnets in the region where SQL Managed Instance is deployed and compare the results with the instance unit limits for your subscription type. 
The **max number of vCore units** limit applies to each subscription in a region. There is no limit for individual subnets, except that the sum of all vCores deployed across multiple subnets must be lower than or equal to the **max number of vCore units**. - -\*\* Larger subnet and vCore limits are available in the following regions: Australia East, East US, East US 2, North Europe, South Central US, Southeast Asia, UK South, West Europe, West US 2. - -> [!IMPORTANT] -> If your vCore and subnet limits are 0, the default regional limit for your subscription type is not set. You can also use a quota increase request to get subscription access in a specific region by following the same procedure - providing the required vCore and subnet values. - -## Request a quota increase - -If you need more instances in your current regions, send a support request to extend the quota using the Azure portal. For more information, see [Request quota increases for Azure SQL Database](../database/quota-increase-request.md). - -## Previously available hardware - -This section includes details on previously available hardware. Consider [moving your instance of SQL Managed Instance to the standard-series (Gen5)](../database/service-tiers-vcore.md) hardware to experience a wider range of vCore and storage scalability, accelerated networking, best IO performance, and minimal latency. - -> [!IMPORTANT] -> Gen4 hardware is being retired and is not available for new deployments. - -### Hardware characteristics - -| | **Gen4** | -| --- | --- | -| **Hardware** | Intel® E5-2673 v3 (Haswell) 2.4 GHz processors, attached SSD vCore = 1 PP (physical core) | -| **Number of vCores** | 8, 16, 24 vCores | -| **Max memory (memory/core ratio)** | 7 GB per vCore
    Add more vCores to get more memory. | -| **Max In-Memory OLTP memory** | Instance limit: 1-1.5 GB per vCore | -| **Max instance reserved storage** | General Purpose: 8 TB
    Business Critical: 1 TB | - -### In-memory OLTP available space - -The amount of In-memory OLTP space in [Business Critical](../database/service-tier-business-critical.md) service tier depends on the number of vCores and hardware configuration. The following table lists limits of memory that can be used for In-memory OLTP objects. - -| In-memory OLTP space | **Gen4** | -| --- | --- | -| 8 vCores | 8 GB | -| 16 vCores | 20 GB | -| 24 vCores | 36 GB | - - -### Service tier characteristics - -| **Feature** | **General Purpose** | **Business Critical** | -| --- | --- | --- | -| Number of vCores\* | Gen4: 8, 16, 24 | Gen4: 8, 16, 24
    \*Same number of vCores is dedicated for read-only queries. | -| Max memory | Gen4: 56 GB - 168 GB (7GB/vCore)
    Add more vCores to get more memory. | Gen4: 56 GB - 168 GB (7GB/vCore)
    + additional 20.4 GB - 408 GB (5.1GB/vCore) for read-only queries.
    Add more vCores to get more memory. | -| Max instance storage size (reserved) | Gen4: 8 TB | Gen4: 1 TB | -| Max database size | Gen4: Up to currently available instance size (max 2 TB - 8 TB depending on the number of vCores). | Gen4: Up to currently available instance size (max 1 TB - 4 TB depending on the number of vCores). | -| Max tempDB size | Gen4: Limited to 24 GB/vCore (96 - 1,920 GB) and currently available instance storage size.
    Add more vCores to get more TempDB space.
    Log file size is limited to 120 GB.| Gen4: Up to currently available instance storage size. | -| Max number of databases per instance | Gen4: 100 user databases, unless the instance storage size limit has been reached. | Gen4: 100 user databases, unless the instance storage size limit has been reached. | -| Max number of database files per instance | Gen4: Up to 280, unless the instance storage size or [Azure Premium Disk storage allocation space](../database/doc-changes-updates-release-notes.md#exceeding-storage-space-with-small-database-files) limit has been reached. | Gen4: 32,767 files per database, unless the instance storage size limit has been reached. | -| Max data file size | Gen4: Limited to currently available instance storage size (max 2 TB - 8 TB) and [Azure Premium Disk storage allocation space](../database/doc-changes-updates-release-notes.md#exceeding-storage-space-with-small-database-files). Use at least two data files for databases larger than 8 TB. | Gen4: Limited to currently available instance storage size (up to 1 TB - 4 TB). | -| Max log file size | Gen4: Limited to 2 TB and currently available instance storage size. | Gen4: Limited to 2 TB and currently available instance storage size. | -| Data/Log IOPS (approximate) | Gen4: Up to 30-40 K IOPS per instance*, 500 - 7500 per file
    \*[Increase file size to get more IOPS](#file-io-characteristics-in-general-purpose-tier)| Gen4: 16 K - 320 K (4000 IOPS/vCore)
    Add more vCores to get better IO performance. | -| Log write throughput limit (per instance) | Gen4: 3 MB/s per vCore
    Max 120 MB/s per instance
    22 - 65 MB/s per DB
    \*[Increase the file size to get better IO performance](#file-io-characteristics-in-general-purpose-tier) | Gen4: 4 MB/s per vCore
    Max 96 MB/s | -| Data throughput (approximate) | Gen4: 100 - 250 MB/s per file
    \*[Increase the file size to get better IO performance](#file-io-characteristics-in-general-purpose-tier) | Gen4: Not limited. | -| Storage IO latency (approximate) | Gen4: 5-10 ms | Gen4: 1-2 ms | -| In-memory OLTP | Gen4: Not supported | Gen4: Available, [size depends on number of vCore](#in-memory-oltp-available-space) | -| Max sessions | Gen4: 30000 | Gen4: 30000 | -| Max concurrent workers | Gen4: 210 * number of vCores + 800 | Gen4: 210 * vCore count + 800 | -| [Read-only replicas](../database/read-scale-out.md) | Gen4: 0 | Gen4: 1 (included in price) | -| Compute isolation | Gen4: not supported | Gen4: not supported | - - -## Next steps - -- For more information about SQL Managed Instance, see [What is a SQL Managed Instance?](sql-managed-instance-paas-overview.md). -- For pricing information, see [SQL Managed Instance pricing](https://azure.microsoft.com/pricing/details/sql-database/managed/). -- To learn how to create your first SQL Managed Instance, see [the quickstart guide](instance-create-quickstart.md). diff --git a/articles/azure-sql/managed-instance/restore-sample-database-quickstart.md b/articles/azure-sql/managed-instance/restore-sample-database-quickstart.md deleted file mode 100644 index 03db7dfdca005..0000000000000 --- a/articles/azure-sql/managed-instance/restore-sample-database-quickstart.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -title: "Quickstart: Restore a backup (SSMS)" -titleSuffix: Azure SQL Managed Instance -description: In this quickstart, learn to restore a database backup to Azure SQL Managed Instance using SQL Server Management Studio (SSMS). 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: backup-restore -ms.custom: mode-other -ms.devlang: -ms.topic: quickstart -author: MilanMSFT -ms.author: mlazic -ms.reviewer: mathoma, nvraparl -ms.date: 09/13/2021 ---- -# Quickstart: Restore a database to Azure SQL Managed Instance with SSMS -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -In this quickstart, you'll use SQL Server Management Studio (SSMS) to restore a database (the Wide World Importers - Standard backup file) from Azure Blob storage to [Azure SQL Managed Instance](sql-managed-instance-paas-overview.md). - -> [!VIDEO https://www.youtube.com/embed/RxWYojo_Y3Q] - -> [!NOTE] -> For more information on migration using Azure Database Migration Service, see [Tutorial: Migrate SQL Server to an Azure Managed Instance using Database Migration Service](../../dms/tutorial-sql-server-to-managed-instance.md). -> For more information on various migration methods, see [SQL Server to Azure SQL Managed Instance Guide](../migration-guides/managed-instance/sql-server-to-managed-instance-guide.md). - -## Prerequisites - -This quickstart: - -- Uses resources from the [Create a managed instance](instance-create-quickstart.md) quickstart. -- Requires the latest version of [SSMS](/sql/ssms/sql-server-management-studio-ssms) installed. -- Requires using SSMS to connect to SQL Managed Instance. See these quickstarts on how to connect: - - [Enable a public endpoint](public-endpoint-configure.md) on SQL Managed Instance - this is the recommended approach for this tutorial. - - [Connect to SQL Managed Instance from an Azure VM](connect-vm-instance-configure.md). - - [Configure a point-to-site connection to SQL Managed Instance from on-premises](point-to-site-p2s-configure.md). 
- -> [!NOTE] -> For more information on backing up and restoring a SQL Server database using Azure Blob storage and a [Shared Access Signature (SAS) key](../../storage/common/storage-sas-overview.md), see [SQL Server Backup to URL](/sql/relational-databases/backup-restore/sql-server-backup-to-url). - -## Restore from a backup file using the restore wizard - -In SSMS, follow these steps to restore the Wide World Importers database to SQL Managed Instance by using the restore wizard. The database backup file is stored in a pre-configured Azure Blob Storage account. - -1. Open SSMS and connect to your managed instance. -2. In **Object Explorer**, right-click the databases of your managed instance and select **Restore Database** to open the restore wizard. - - :::image type="content" source="./media/restore-sample-database-quickstart/restore-wizard-start.png" alt-text="Screenshot that shows opening the restore wizard."::: - -3. In the new restore wizard, select the ellipsis (**...**) to select the source of the backup file to use. - - :::image type="content" source="./media/restore-sample-database-quickstart/new-restore-wizard.png" alt-text="Screenshot that shows opening a new restore wizard window."::: - -4. In **Select backup devices**, select **Add**. In **Backup media type**, **URL** is the only option because it is the only source type supported. Select **OK**. - - :::image type="content" source="./media/restore-sample-database-quickstart/restore-wizard-select-device.png" alt-text="Screenshot that shows selecting the device."::: - -5. In **Select a Backup File Location**, you can choose from three options to provide information about where the backup files are located: - - Select a pre-registered storage container from the dropdown. - - Enter a new storage container and a shared access signature. (A new SQL credential will be registered for you.) - - Select **Add** to browse more storage containers from your Azure subscription. 
- - :::image type="content" source="./media/restore-sample-database-quickstart/restore-wizard-backup-file-location.png" alt-text="Screenshot that shows selecting the backup file location."::: - - Complete the next steps if you select the **Add** button. If you use a different method to provide the backup file location, go to step 12. -6. In **Connect to a Microsoft Subscription**, select **Sign in** to sign in to your Azure subscription: - - :::image type="content" source="./media/restore-sample-database-quickstart/restore-wizard-connect-subscription-sign-in.png" alt-text="Screenshot that shows Azure subscription sign-in."::: - -7. Sign in to your Microsoft Account to initiate the session in Azure: - - :::image type="content" source="./media/restore-sample-database-quickstart/restore-wizard-sign-in-session.png" alt-text="Screenshot that shows signing in to the Azure session."::: - -8. Select the subscription where the storage account with the backup files is located: - - :::image type="content" source="./media/restore-sample-database-quickstart/restore-wizard-select-subscription.png" alt-text="Screenshot that shows selecting the subscription."::: - -9. Select the storage account where the backup files are located: - - :::image type="content" source="./media/restore-sample-database-quickstart/restore-wizard-select-storage-account.png" alt-text="Screenshot that shows the storage account."::: - -10. Select the blob container where the backup files are located: - - :::image type="content" source="./media/restore-sample-database-quickstart/restore-wizard-select-container.png" alt-text="Select Blob container"::: - -11. Specify the expiration date of the shared access policy and select **Create Credential**. A shared access signature with the correct permissions is created. Select **OK**. 
- - :::image type="content" source="./media/restore-sample-database-quickstart/restore-wizard-generate-shared-access-signature.png" alt-text="Screenshot that shows generating the shared access signature."::: - -12. In the left pane, expand the folder structure to show the folder where the backup files are located. Select all the backup files that are related to the backup set to be restored, and then select **OK**: - - :::image type="content" source="./media/restore-sample-database-quickstart/restore-wizard-backup-file-selection.png" alt-text="Screenshot that shows the backup file selection."::: - - SSMS validates the backup set. The process takes up to a few seconds depending on the size of the backup set. - -13. If the backup is validated, specify the destination database name or leave the database name of the backup set, and then select **OK**: - - :::image type="content" source="./media/restore-sample-database-quickstart/restore-wizard-start-restore.png" alt-text="Screenshot that shows starting the restore."::: - - The restore starts. The duration depends on the size of the backup set. - - :::image type="content" source="./media/restore-sample-database-quickstart/restore-wizard-running-restore.png" alt-text="Screenshot that shows running the restore."::: - -14. When the restore finishes, a dialog shows that it was successful. Select **OK**. - - :::image type="content" source="./media/restore-sample-database-quickstart/restore-wizard-finish-restore.png" alt-text="Screenshot that shows the finished restore."::: - -15. Check the restored database in Object Explorer: - - :::image type="content" source="./media/restore-sample-database-quickstart/restore-wizard-restored-database.png" alt-text="Screenshot that shows the restored database."::: - - -## Restore from a backup file using T-SQL - -In SQL Server Management Studio, follow these steps to restore the Wide World Importers database to SQL Managed Instance. 
The database backup file is stored in a pre-configured Azure Blob storage account. - -1. Open SSMS and connect to your managed instance. -2. In **Object Explorer**, right-click your managed instance and select **New Query** to open a new query window. -3. Run the following SQL script, which uses a pre-configured storage account and SAS key to [create a credential](/sql/t-sql/statements/create-credential-transact-sql) in your managed instance. - - > [!IMPORTANT] - > `CREDENTIAL` must match the container path, begin with `https`, and can't contain a trailing forward slash. `IDENTITY` must be `SHARED ACCESS SIGNATURE`. `SECRET` must be the Shared Access Signature token and can't contain a leading `?`. - - ```sql - CREATE CREDENTIAL [https://mitutorials.blob.core.windows.net/databases] - WITH IDENTITY = 'SHARED ACCESS SIGNATURE' - , SECRET = 'sv=2017-11-09&ss=bfqt&srt=sco&sp=rwdlacup&se=2028-09-06T02:52:55Z&st=2018-09-04T18:52:55Z&spr=https&sig=WOTiM%2FS4GVF%2FEEs9DGQR9Im0W%2BwndxW2CQ7%2B5fHd7Is%3D' - ``` - - :::image type="content" source="./media/restore-sample-database-quickstart/credential.png" alt-text="create credential"::: - -4. To check your credential, run the following script, which uses a [container](https://azure.microsoft.com/services/container-instances/) URL to get a backup file list. - - ```sql - RESTORE FILELISTONLY FROM URL = - 'https://mitutorials.blob.core.windows.net/databases/WideWorldImporters-Standard.bak' - ``` - - :::image type="content" source="./media/restore-sample-database-quickstart/file-list.png" alt-text="file list"::: - -5. Run the following script to restore the Wide World Importers database. - - ```sql - RESTORE DATABASE [Wide World Importers] FROM URL = - 'https://mitutorials.blob.core.windows.net/databases/WideWorldImporters-Standard.bak' - ``` - - :::image type="content" source="./media/restore-sample-database-quickstart/restore.png" alt-text="Screenshot shows the script running in Object Explorer with a success message."::: - -6. 
Run the following script to track the status of your restore. - - ```sql - SELECT session_id as SPID, command, a.text AS Query, start_time, percent_complete - , dateadd(second,estimated_completion_time/1000, getdate()) as estimated_completion_time - FROM sys.dm_exec_requests r - CROSS APPLY sys.dm_exec_sql_text(r.sql_handle) a - WHERE r.command in ('BACKUP DATABASE','RESTORE DATABASE') - ``` - -7. When the restore completes, view the database in Object Explorer. You can verify that database restore is completed using the [sys.dm_operation_status](/sql/relational-databases/system-dynamic-management-views/sys-dm-operation-status-azure-sql-database) view. - -> [!NOTE] -> A database restore operation is asynchronous and retryable. You might get an error in SQL Server Management Studio if the connection breaks or a time-out expires. Azure SQL Managed Instance will keep trying to restore database in the background, and you can track the progress of the restore using the [sys.dm_exec_requests](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-requests-transact-sql) and [sys.dm_operation_status](/sql/relational-databases/system-dynamic-management-views/sys-dm-operation-status-azure-sql-database) views. -> In some phases of the restore process, you will see a unique identifier instead of the actual database name in the system views. Learn about `RESTORE` statement behavior differences [here](./transact-sql-tsql-differences-sql-server.md#restore-statement). - -## Next steps - -- If, at step 5, a database restore is terminated with the message ID 22003, create a new backup file containing backup checksums and perform the restore again. See [Enable or disable backup checksums during backup or restore](/sql/relational-databases/backup-restore/enable-or-disable-backup-checksums-during-backup-or-restore-sql-server). 
-- For troubleshooting a backup to a URL, see [SQL Server Backup to URL best practices and troubleshooting](/sql/relational-databases/backup-restore/sql-server-backup-to-url-best-practices-and-troubleshooting). -- For an overview of app connection options, see [Connect your applications to SQL Managed Instance](connect-application-instance.md). -- To query using your favorite tools or languages, see [Quickstarts: Azure SQL Database connect and query](../database/connect-query-content-reference-guide.md). diff --git a/articles/azure-sql/managed-instance/scripts/add-to-failover-group-powershell.md b/articles/azure-sql/managed-instance/scripts/add-to-failover-group-powershell.md deleted file mode 100644 index a5230bbd2388d..0000000000000 --- a/articles/azure-sql/managed-instance/scripts/add-to-failover-group-powershell.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: "PowerShell: Add a managed instance to an auto-failover group" -titleSuffix: Azure SQL Managed Instance -description: Azure PowerShell example script to create a managed instance, add it to an auto-failover group, and test failover. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: high-availability -ms.custom: sqldbrb=1, devx-track-azurepowershell -ms.devlang: PowerShell -ms.topic: sample -author: emlisa -ms.author: emlisa -ms.reviewer: mathoma -ms.date: 07/16/2019 ---- -# Use PowerShell to add a managed instance to a failover group - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqlmi.md)] - -This PowerShell script example creates two managed instances, adds them to a failover group, and then tests failover from the primary managed instance to the secondary managed instance. 
- -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use PowerShell locally, this tutorial requires Azure PowerShell 1.4.0 or later. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. - -## Sample scripts - -[!code-powershell-interactive[main](../../../../powershell_scripts/sql-database/failover-groups/add-managed-instance-to-failover-group-az-ps.ps1 "Add managed instance to a failover group")] - -## Clean up deployment - -Use the following command to remove the resource group and all resources associated with it. You will need to remove the resource group twice. Removing the resource group the first time will remove the managed instance and virtual clusters but will then fail with the error message `Remove-AzResourceGroup : Long running operation failed with status 'Conflict'`. Run the Remove-AzResourceGroup command a second time to remove any residual resources as well as the resource group. - -```powershell -Remove-AzResourceGroup -ResourceGroupName $resourceGroupName -``` - -## Script explanation - -This script uses the following commands. Each command in the table links to command specific documentation. - -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates an Azure resource group. | -| [New-AzVirtualNetwork](/powershell/module/az.network/new-azvirtualnetwork) | Creates a virtual network. | -| [Add-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/add-azvirtualnetworksubnetconfig) | Adds a subnet configuration to a virtual network. 
| -| [Get-AzVirtualNetwork](/powershell/module/az.network/get-azvirtualnetwork) | Gets a virtual network in a resource group. | -| [Get-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/get-azvirtualnetworksubnetconfig) | Gets a subnet in a virtual network. | -| [New-AzNetworkSecurityGroup](/powershell/module/az.network/new-aznetworksecuritygroup) | Creates a network security group. | -| [New-AzRouteTable](/powershell/module/az.network/new-azroutetable) | Creates a route table. | -| [Set-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/set-azvirtualnetworksubnetconfig) | Updates a subnet configuration for a virtual network. | -| [Set-AzVirtualNetwork](/powershell/module/az.network/set-azvirtualnetwork) | Updates a virtual network. | -| [Get-AzNetworkSecurityGroup](/powershell/module/az.network/get-aznetworksecuritygroup) | Gets a network security group. | -| [Add-AzNetworkSecurityRuleConfig](/powershell/module/az.network/add-aznetworksecurityruleconfig)| Adds a network security rule configuration to a network security group. | -| [Set-AzNetworkSecurityGroup](/powershell/module/az.network/set-aznetworksecuritygroup) | Updates a network security group. | -| [Add-AzRouteConfig](/powershell/module/az.network/add-azrouteconfig) | Adds a route to a route table. | -| [Set-AzRouteTable](/powershell/module/az.network/set-azroutetable) | Updates a route table. | -| [New-AzSqlInstance](/powershell/module/az.sql/new-azsqlinstance) | Creates a managed instance. | -| [Get-AzSqlInstance](/powershell/module/az.sql/get-azsqlinstance)| Returns information about Azure SQL Managed Instance. | -| [New-AzPublicIpAddress](/powershell/module/az.network/new-azpublicipaddress) | Creates a public IP address. 
| -| [New-AzVirtualNetworkGatewayIpConfig](/powershell/module/az.network/new-azvirtualnetworkgatewayipconfig) | Creates an IP Configuration for a Virtual Network Gateway | -| [New-AzVirtualNetworkGateway](/powershell/module/az.network/new-azvirtualnetworkgateway) | Creates a Virtual Network Gateway | -| [New-AzVirtualNetworkGatewayConnection](/powershell/module/az.network/new-azvirtualnetworkgatewayconnection) | Creates a connection between the two virtual network gateways. | -| [New-AzSqlDatabaseInstanceFailoverGroup](/powershell/module/az.sql/new-azsqldatabaseinstancefailovergroup)| Creates a new Azure SQL Managed Instance failover group. | -| [Get-AzSqlDatabaseInstanceFailoverGroup](/powershell/module/az.sql/get-azsqldatabaseinstancefailovergroup) | Gets or lists SQL Managed Instance failover groups.| -| [Switch-AzSqlDatabaseInstanceFailoverGroup](/powershell/module/az.sql/switch-azsqldatabaseinstancefailovergroup) | Executes a failover of a SQL Managed Instance failover group. | -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Removes a resource group. | - -## Next steps - -For more information on Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional PowerShell script samples for SQL Managed Instance can be found in [Azure SQL Managed Instance PowerShell scripts](../../database/powershell-script-content-guide.md). 
diff --git a/articles/azure-sql/managed-instance/scripts/create-configure-managed-instance-cli.md b/articles/azure-sql/managed-instance/scripts/create-configure-managed-instance-cli.md deleted file mode 100644 index 7c91bdafe9c23..0000000000000 --- a/articles/azure-sql/managed-instance/scripts/create-configure-managed-instance-cli.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: "Azure CLI example: Create a managed instance" -description: Use this Azure CLI example script to create a managed instance in Azure SQL Managed Instance -services: sql-managed-instance -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.custom: devx-track-azurecli -ms.devlang: azurecli -ms.topic: sample -author: urosmil -ms.author: urmilano -ms.reviewer: mathoma -ms.date: 01/26/2022 ---- - -# Create an Azure SQL Managed Instance using the Azure CLI - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqlmi.md)] - -This Azure CLI script example creates an Azure SQL Managed Instance in a dedicated subnet within a new virtual network. It also configures a route table and a network security group for the virtual network. Once the script has been successfully run, the managed instance can be accessed from within the virtual network or from an on-premises environment. See [Configure Azure VM to connect to an Azure SQL Managed Instance](/azure/azure-sql/managed-instance/connect-vm-instance-configure) and [Configure a point-to-site connection to an Azure SQL Managed Instance from on-premises](/azure/azure-sql/managed-instance/point-to-site-p2s-configure). - -> [!IMPORTANT] -> For limitations, see [supported regions](/azure/azure-sql/managed-instance/resource-limits#supported-regions) and [supported subscription types](/azure/azure-sql/managed-instance/resource-limits#supported-subscription-types). 
- -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../../includes/azure-cli-prepare-your-environment.md)] - -## Sample script - -[!INCLUDE [cli-run-local-sign-in.md](../../../../includes/cli-run-local-sign-in.md)] - -### Run the script - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/managed-instance/create-managed-instance.sh" id="FullScript"::: - -## Clean up resources - -[!INCLUDE [cli-clean-up-resources.md](../../../../includes/cli-clean-up-resources.md)] - -```azurecli -az group delete --name $resourceGroup -``` - -## Sample reference - -This script uses the following commands. Each command in the table links to command specific documentation. - -| Command | Description | -|---|---| -| [az network vnet](/cli/azure/network/vnet) | Virtual network commands. | -| [az network vnet subnet](/cli/azure/network/vnet/subnet) | Virtual network subnet commands. | -| [az network route-table](/cli/azure/network/route-table) | Network route table commands. | -| [az sql mi](/cli/azure/sql/mi) | SQL Managed Instance commands. | - -## Next steps - -For more information on Azure CLI, see [Azure CLI documentation](/cli/azure). - -Additional SQL Database CLI script samples can be found in the [Azure SQL Database documentation](/azure/azure-sql/database/az-cli-script-samples-content-guide). 
diff --git a/articles/azure-sql/managed-instance/scripts/create-configure-managed-instance-powershell.md b/articles/azure-sql/managed-instance/scripts/create-configure-managed-instance-powershell.md deleted file mode 100644 index 70b1f6f881d16..0000000000000 --- a/articles/azure-sql/managed-instance/scripts/create-configure-managed-instance-powershell.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: "PowerShell: Create a managed instance" -titleSuffix: Azure SQL Managed Instance -description: This article provides an Azure PowerShell example script to create a managed instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.custom: devx-track-azurepowershell -ms.devlang: PowerShell -ms.topic: sample -author: urosmil -ms.author: urmilano -ms.reviewer: mathoma -ms.date: 03/25/2019 ---- -# Use PowerShell to create a managed instance - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqlmi.md)] - -This PowerShell script example creates a managed instance in a dedicated subnet within a new virtual network. It also configures a route table and a network security group for the virtual network. Once the script has been successfully run, the managed instance can be accessed from within the virtual network or from an on-premises environment. See [Configure Azure VM to connect to Azure SQL Database Managed Instance](../connect-vm-instance-configure.md) and [Configure a point-to-site connection to Azure SQL Managed Instance from on-premises](../point-to-site-p2s-configure.md). - -> [!IMPORTANT] -> For limitations, see [supported regions](../resource-limits.md#supported-regions) and [supported subscription types](../resource-limits.md#supported-subscription-types). - -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use PowerShell locally, this tutorial requires Azure PowerShell 1.4.0 or later. 
If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. - -## Sample script - -[!code-powershell-interactive[main](../../../../powershell_scripts/sql-database/managed-instance/create-and-configure-managed-instance.ps1 "Create managed instance")] - -## Clean up deployment - -Use the following command to remove the resource group and all resources associated with it. - -```powershell -Remove-AzResourceGroup -ResourceGroupName $resourcegroupname -``` - -## Script explanation - -This script uses some of the following commands. For more information about used and other commands in the table below, click on the links to command specific documentation. - -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) | Creates a resource group in which all resources are stored. -| [New-AzVirtualNetwork](/powershell/module/az.network/new-azvirtualnetwork) | Creates a virtual network. | -| [Add-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/Add-AzVirtualNetworkSubnetConfig) | Adds a subnet configuration to a virtual network. | -| [Get-AzVirtualNetwork](/powershell/module/az.network/Get-AzVirtualNetwork) | Gets a virtual network in a resource group. | -| [Set-AzVirtualNetwork](/powershell/module/az.network/Set-AzVirtualNetwork) | Sets the goal state for a virtual network. | -| [Get-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/Get-AzVirtualNetworkSubnetConfig) | Gets a subnet in a virtual network. | -| [Set-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/Set-AzVirtualNetworkSubnetConfig) | Configures the goal state for a subnet configuration in a virtual network. | -| [New-AzRouteTable](/powershell/module/az.network/New-AzRouteTable) | Creates a route table. 
| -| [Get-AzRouteTable](/powershell/module/az.network/Get-AzRouteTable) | Gets route tables. | -| [Set-AzRouteTable](/powershell/module/az.network/Set-AzRouteTable) | Sets the goal state for a route table. | -| [New-AzSqlInstance](/powershell/module/az.sql/New-AzSqlInstance) | Creates a managed instance. | -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Deletes a resource group, including all nested resources. | - - -## Next steps - -For more information on Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional PowerShell script samples for Azure SQL Managed Instance can be found in [Azure SQL Managed Instance PowerShell scripts](../../database/powershell-script-content-guide.md). diff --git a/articles/azure-sql/managed-instance/scripts/restore-geo-backup-cli.md b/articles/azure-sql/managed-instance/scripts/restore-geo-backup-cli.md deleted file mode 100644 index 20c1e026ee450..0000000000000 --- a/articles/azure-sql/managed-instance/scripts/restore-geo-backup-cli.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "Azure CLI example: Restore geo-backup - Azure SQL Database" -description: Use this Azure CLI example script to restore an Azure SQL Managed Instance Database from a geo-redundant backup. -services: sql-database -ms.service: sql-database -ms.subservice: backup-restore -ms.custom: -ms.devlang: azurecli -ms.topic: sample -author: SudhirRaparla -ms.author: nvraparl -ms.reviewer: mathoma -ms.date: 02/11/2022 ---- - -# Restore a Managed Instance database to another geo-region using the Azure CLI - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqlmi.md)] - -This Azure CLI script example restores an Azure SQL Managed Instance database from a remote geo-region (geo-restore) to a point in time. 
- -This sample requires an existing pair of managed instances, see [Use Azure CLI to create an Azure SQL Managed Instance](create-configure-managed-instance-cli.md) to create a pair of managed instances in different regions. - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../../includes/azure-cli-prepare-your-environment.md)] - -## Sample script - -[!INCLUDE [cli-run-local-sign-in.md](../../../../includes/cli-run-local-sign-in.md)] - -### Run the script - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/sql-managed-instance-restore-geo-backup/restore-geo-backup-cli.sh" id="FullScript"::: - -## Clean up resources - -[!INCLUDE [cli-clean-up-resources.md](../../../../includes/cli-clean-up-resources.md)] - -```azurecli -az group delete --name $resourceGroup -``` - -## Sample reference - -This script uses the following commands. Each command in the table links to command specific documentation. - -| Script | Description | -|---|---| -| [az sql midb](/cli/azure/sql/midb) | Managed Instance Database commands. | - -## Next steps - -For more information on Azure CLI, see [Azure CLI documentation](/cli/azure). - -Additional SQL Database CLI script samples can be found in the [Azure SQL Database documentation](/azure/azure-sql/database/az-cli-script-samples-content-guide). diff --git a/articles/azure-sql/managed-instance/scripts/restore-geo-backup.md b/articles/azure-sql/managed-instance/scripts/restore-geo-backup.md deleted file mode 100644 index 3b82d9d0d1cca..0000000000000 --- a/articles/azure-sql/managed-instance/scripts/restore-geo-backup.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: "PowerShell: Restore geo-backup for Azure SQL Managed Instance" -description: Azure PowerShell example script to restore an Azure SQL Managed Instance database from a geo-redundant backup. 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: backup-restore -ms.custom: devx-track-azurepowershell -ms.devlang: PowerShell -ms.topic: sample -author: MilanMSFT -ms.author: mlazic -ms.reviewer: mathoma, nvraparl -ms.date: 07/03/2019 ---- -# Use PowerShell to restore an Azure SQL Managed Instance database to another geo-region - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqlmi.md)] - -This PowerShell script example restores an Azure SQL Managed Instance database from a remote geo-region (geo-restore). - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -If you choose to install and use PowerShell locally, this tutorial requires Azure PowerShell 1.4.0 or later. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps). If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. 
- -## Sample script - -```azurepowershell-interactive -# Connect-AzAccount -# The SubscriptionId in which to create these objects -$SubscriptionId = '' -# Set the information for your managed instance -$SourceResourceGroupName = "myResourceGroup-$(Get-Random)" -$SourceInstanceName = "myManagedInstance-$(Get-Random)" -$SourceDatabaseName = "myInstanceDatabase-$(Get-Random)" - -# Set the information for your destination managed instance -$TargetResourceGroupName = "myTargetResourceGroup-$(Get-Random)" -$TargetInstanceName = "myTargetManagedInstance-$(Get-Random)" -$TargetDatabaseName = "myTargetInstanceDatabase-$(Get-Random)" - -Connect-AzAccount -Set-AzContext -SubscriptionId $SubscriptionId - -$backup = Get-AzSqlInstanceDatabaseGeoBackup ` --ResourceGroupName $SourceResourceGroupName ` --InstanceName $SourceInstanceName ` --Name $SourceDatabaseName - -$backup | Restore-AzSqlInstanceDatabase -FromGeoBackup ` --TargetInstanceDatabaseName $TargetDatabaseName ` --TargetInstanceName $TargetInstanceName ` --TargetResourceGroupName $TargetResourceGroupName - -``` - -## Clean up deployment - -Use the following command to remove the resource group and all resources associated with it. - -```azurepowershell-interactive -Remove-AzResourceGroup -ResourceGroupName $TargetResourceGroupName -``` - -## Script explanation - -This script uses the following commands. Each command in the table links to command specific documentation. - -| Command | Notes | -|---|---| -| [New-AzResourceGroup](/powershell/module/az.resources/New-AzResourceGroup) | Creates a resource group in which all resources are stored. | -| [Get-AzSqlInstanceDatabaseGeoBackup](/powershell/module/az.sql/Get-AzSqlInstanceDatabaseGeoBackup) | Gets one or more geo-backups from a database within an Azure SQL Managed Instance. | -| [Restore-AzSqlInstanceDatabase](/powershell/module/az.sql/Restore-AzSqlInstanceDatabase) | Creates a database on SQL Managed Instance from geo-backup. 
| -| [Remove-AzResourceGroup](/powershell/module/az.resources/remove-azresourcegroup) | Deletes a resource group, including all nested resources. | - -## Next steps - -For more information about PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional PowerShell script samples for Azure SQL Database can be found in [Azure SQL Database PowerShell scripts](../../database/powershell-script-content-guide.md). diff --git a/articles/azure-sql/managed-instance/scripts/transparent-data-encryption-byok-powershell.md b/articles/azure-sql/managed-instance/scripts/transparent-data-encryption-byok-powershell.md deleted file mode 100644 index 327dca620aeb8..0000000000000 --- a/articles/azure-sql/managed-instance/scripts/transparent-data-encryption-byok-powershell.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: "PowerShell: Enable bring-your-own-key (BYOK) TDE" -titleSuffix: Azure SQL Managed Instance -description: "Learn how to configure Azure SQL Managed Instance to start using bring-your-own-key (BYOK) Transparent Data Encryption (TDE) for encryption-at-rest using PowerShell." -services: sql-database -ms.service: sql-managed-instance -ms.subservice: security -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: conceptual -author: MladjoA -ms.author: mlandzic -ms.reviewer: vanto, sstein -ms.date: 08/25/2020 ---- - -# Transparent Data Encryption in SQL Managed Instance using your own key from Azure Key Vault - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqlmi.md)] - -This PowerShell script example configures Transparent Data Encryption (TDE) with a customer-managed key for Azure SQL Managed Instance, using a key from Azure Key Vault. This is often referred to as a bring-your-own-key (BYOK) scenario for TDE. To learn more, see [Azure SQL Transparent Data Encryption with customer-managed key](../../database/transparent-data-encryption-byok-overview.md). - -## Prerequisites - -- An existing managed instance. 
See [Use PowerShell to create a managed instance](create-configure-managed-instance-powershell.md). - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] -[!INCLUDE [updated-for-az](../../../../includes/updated-for-az.md)] -[!INCLUDE [cloud-shell-try-it.md](../../../../includes/cloud-shell-try-it.md)] - -Using PowerShell locally or using Azure Cloud Shell requires Azure PowerShell 2.3.2 or a later version. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-az-ps), or run the below sample script to install the module for the current user: - -`Install-Module -Name Az -AllowClobber -Scope CurrentUser` - -If you are running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure. - -## Sample scripts - -[!code-powershell-interactive[main](../../../../powershell_scripts/sql-database/transparent-data-encryption/setup-tde-byok-sqlmi.ps1 "Set up BYOK TDE for SQL Managed Instance")] - -## Next steps - -For more information on Azure PowerShell, see [Azure PowerShell documentation](/powershell/azure/). - -Additional PowerShell script samples for SQL Managed Instance can be found in [Azure SQL Managed Instance PowerShell scripts](../../database/powershell-script-content-guide.md). diff --git a/articles/azure-sql/managed-instance/scripts/transparent-data-encryption-byok-sql-managed-instance-cli.md b/articles/azure-sql/managed-instance/scripts/transparent-data-encryption-byok-sql-managed-instance-cli.md deleted file mode 100644 index 48cfe1604b34f..0000000000000 --- a/articles/azure-sql/managed-instance/scripts/transparent-data-encryption-byok-sql-managed-instance-cli.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: "Azure CLI example: Enable BYOK TDE - Azure SQL Managed Instance" -description: "Learn how to configure an Azure SQL Managed Instance to start using BYOK Transparent Data Encryption (TDE) for encryption-at-rest using PowerShell." 
-services: sql-database -ms.service: sql-database -ms.subservice: security -ms.custom: -ms.devlang: azurecli -ms.topic: conceptual -author: MladjoA -ms.author: mlandzic -ms.reviewer: vanto -ms.date: 01/26/2022 ---- - -# Manage Transparent Data Encryption in a Managed Instance using your own key from Azure Key Vault using the Azure CLI - -[!INCLUDE[appliesto-sqldb](../../includes/appliesto-sqlmi.md)] - -This Azure CLI script example configures Transparent Data Encryption (TDE) with customer-managed key for Azure SQL Managed Instance, using a key from Azure Key Vault. This is often referred to as a Bring Your Own Key scenario for TDE. To learn more about the TDE with customer-managed key, see [TDE Bring Your Own Key to Azure SQL](/azure/azure-sql/database/transparent-data-encryption-byok-overview). - -This sample requires an existing Managed Instance, see [Use Azure CLI to create an Azure SQL Managed Instance](create-configure-managed-instance-cli.md). - -[!INCLUDE [quickstarts-free-trial-note](../../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../../includes/azure-cli-prepare-your-environment.md)] - -## Sample script - -[!INCLUDE [cli-run-local-sign-in.md](../../../../includes/cli-run-local-sign-in.md)] - -### Run the script - -:::code language="azurecli" source="~/azure_cli_scripts/sql-database/transparent-data-encryption/setup-tde-byok-sqlmi.sh" id="FullScript"::: - -## Clean up resources - -[!INCLUDE [cli-clean-up-resources.md](../../../../includes/cli-clean-up-resources.md)] - -```azurecli -az group delete --name $resourceGroup -``` - -## Sample reference - -This script uses the following commands. Each command in the table links to command specific documentation. - -| Command | Description | -|---|---| -| [az sql db](/cli/azure/sql/db) | Database commands. | -| [az sql failover-group](/cli/azure/sql/failover-group) | Failover group commands. 
| - -## Next steps - -For more information on Azure CLI, see [Azure CLI documentation](/cli/azure). - -Additional SQL Database CLI script samples can be found in the [Azure SQL Database documentation](/azure/azure-sql/database/az-cli-script-samples-content-guide). diff --git a/articles/azure-sql/managed-instance/server-trust-group-overview.md b/articles/azure-sql/managed-instance/server-trust-group-overview.md deleted file mode 100644 index 65b6382e4beb3..0000000000000 --- a/articles/azure-sql/managed-instance/server-trust-group-overview.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -title: Server trust group -titleSuffix: Azure SQL Managed Instance -description: Learn how to manage trust between instances by using a server trust group in Azure SQL Managed Instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: service-overview -ms.devlang: -ms.topic: conceptual -author: sasapopo -ms.author: sasapopo -ms.reviewer: mathoma -ms.date: 11/02/2021 -ms.custom: ignite-fall-2021 ---- -# Set up trust between instances with server trust group (Azure SQL Managed Instance) -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Server trust group (also known as SQL trust group) is a concept used for managing trust between instances in Azure SQL Managed Instance. By creating a group, a certificate-based trust is established between its members. This trust can be used for different cross-instance scenarios. Removing servers from the group or deleting the group removes the trust between the servers. To create or delete a server trust group, the user needs to have write permissions on the managed instance. -[Server trust group](/azure/templates/microsoft.sql/allversions) is an Azure Resource Manager object which has been labeled as **SQL trust group** in Azure portal. - - -## Set up group - -Server trust group can be setup via [Azure PowerShell](/powershell/module/az.sql/new-azsqlservertrustgroup) or [Azure CLI](/cli/azure/sql/stg). 
- -To create a server trust group by using the Azure portal, follow these steps: - -1. Go to the [Azure portal](https://portal.azure.com/). - -2. Navigate to Azure SQL Managed Instance that you plan to add to a server trust group. - -3. On the **Security** settings, select the **SQL trust groups** tab. - - :::image type="content" source="./media/server-trust-group-overview/sql-trust-groups.png" alt-text="SQL trust groups"::: - -4. On the **SQL trust groups** configuration page, select the **New Group** icon. - - :::image type="content" source="./media/server-trust-group-overview/new-sql-trust-group-button.png" alt-text="New Group"::: - -5. On the **SQL trust group** create blade set the **Group name**. It needs to be unique in the group's subscription, resource group and region. **Trust scope** defines the type of cross-instance scenario that is enabled with the server trust group. Trust scope is fixed - all available functionalities are preselected and this cannot be changed. Select **Subscription** and **Resource group** to choose the managed instances that will be members of the group. - - :::image type="content" source="./media/server-trust-group-overview/new-sql-trust-group.png" alt-text="SQL trust group create blade"::: - -6. After all required fields are populated, select **Save**. - -## Edit group - -To edit a server trust group, follow these steps: - -1. Go to Azure portal. -1. Navigate to a managed instance that belongs to the trust group. -1. On the **Security** settings select the **SQL trust groups** tab. -1. Select the trust group you want to edit. -1. Click **Configure group**. - - :::image type="content" source="./media/server-trust-group-overview/configure-sql-trust-group.png" alt-text="Configure SQL trust group"::: - -1. Add or remove managed instances from the group. -1. Click **Save** to confirm choice or **Cancel** to abandon changes. - -## Delete group - -To delete a server trust group, follow these steps: - -1. Go to the Azure portal. -1. 
Navigate to a managed instance that belongs to the SQL trust group. -1. On the **Security** settings select the **SQL trust groups** tab. -1. Select the trust group you want to delete. - - :::image type="content" source="./media/server-trust-group-overview/select-delete-sql-trust-group.png" alt-text="Select SQL trust group"::: - -1. Select **Delete group**. - - :::image type="content" source="./media/server-trust-group-overview/delete-sql-trust-group.png" alt-text="Delete SQL trust group"::: - -1. Type in the SQL trust group name to confirm deletion and select **Delete**. - - :::image type="content" source="./media/server-trust-group-overview/confirm-delete-sql-trust-group-2.png" alt-text="Confirm SQL trust group deletion"::: - -> [!NOTE] -> Deleting the SQL trust group might not immediately remove the trust between the two managed instances. Trust removal can be enforced by invoking a [failover](/powershell/module/az.sql/Invoke-AzSqlInstanceFailover) of managed instances. Check the [Known issues](../managed-instance/doc-changes-updates-known-issues.md) for the latest updates on this. - -## Limitations - -Following limitations apply to Server Trust Groups: - - * Group can contain only instances of Azure SQL Managed Instance. - * Trust scope cannot be changed when a group is created or modified. - * The name of the server trust group must be unique for its subscription, resource group and region. - -## Next steps - -* For more information about distributed transactions in Azure SQL Managed Instance, see [Distributed transactions](../database/elastic-transactions-overview.md). -* For release updates and known issues state, see [What's new?](doc-changes-updates-release-notes-whats-new.md). -* If you have feature requests, add them to the [Managed Instance forum](https://feedback.azure.com/d365community/forum/a99f7006-3425-ec11-b6e6-000d3a4f0f84). 
diff --git a/articles/azure-sql/managed-instance/service-endpoint-policies-configure.md b/articles/azure-sql/managed-instance/service-endpoint-policies-configure.md deleted file mode 100644 index 8d2fee183c845..0000000000000 --- a/articles/azure-sql/managed-instance/service-endpoint-policies-configure.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -title: Configure service endpoint policies -description: Configure Azure Storage service endpoint policies to protect Azure SQL Managed Instance against exfiltration to unauthorized Azure Storage accounts. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: security -ms.custom: references_regions, ignite-fall-2021 -ms.devlang: -ms.topic: how-to -author: zoran-rilak-msft -ms.author: zoranrilak -ms.reviewer: mathoma -ms.date: 11/02/2021 ---- - - -# Configure service endpoint policies (Preview) for Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Virtual Network (VNet) Azure Storage [service endpoint policies](../../virtual-network/virtual-network-service-endpoint-policies-overview.md) allow you to filter egress virtual network traffic to Azure Storage, restricting data transfers to specific storage accounts. - -The ability to configure your endpoint policies and associate them with your SQL Managed Instance is currently in preview. - -## Key benefits - -Configuring Virtual network Azure Storage service endpoint policies for your Azure SQL Managed Instance provides the following benefits: - -- __Improved security for your Azure SQL Managed Instance traffic to Azure Storage__: Endpoint policies establish a security control that prevents erroneous or malicious exfiltration of business-critical data. Traffic can be limited to only those storage accounts that are compliant with your data governance requirements. 
- -- __Granular control over which storage accounts can be accessed__: Service endpoint policies can permit traffic to storage accounts at a subscription, resource group, and individual storage account level. Administrators can use service endpoint policies to enforce adherence to the organization's data security architecture in Azure. - -- __System traffic remains unaffected__: Service endpoint policies never obstruct access to storage that is required for Azure SQL Managed Instance to function. This includes the storage of backups, data files, transaction log files, and other assets. - -> [!IMPORTANT] -> Service endpoint policies only control traffic that originates from the SQL Managed Instance subnet and terminates in Azure storage. The policies do not affect, for example, exporting the database to an on-prem BACPAC file, Azure Data Factory integration, the collection of diagnostic information via Azure Diagnostic Settings, or other mechanisms of data extraction that do not directly target Azure Storage. - -## Limitations - -Enabling service endpoint policies for your Azure SQL Managed Instance has the following limitations: - -- While in preview, this feature is available in all Azure regions where SQL Managed Instance is supported except for **China East 2**, **China North 2**, **Central US EUAP**, **East US 2 EUAP**, **US Gov Arizona**, **US Gov Texas**, **US Gov Virginia**, and **West Central US**. -- The feature is available only to virtual networks deployed through the Azure Resource Manager deployment model. -- The feature is available only in subnets that have [service endpoints](../../virtual-network/virtual-network-service-endpoints-overview.md) for Azure Storage enabled. -- Enabling service endpoints for Azure Storage also extends to include paired regions where you deploy the virtual network to support Read-Access Geo-Redundant storage (RA-GRS) and Geo-Redundant storage (GRS) traffic. 
-- Assigning a service endpoint policy to a service endpoint upgrades the endpoint from regional to global scope. In other words, all traffic to Azure Storage will go through the service endpoint regardless of the region in which the storage account resides. - -## Prepare storage inventory - -Before you begin configuring service endpoint policies on a subnet, compose a list of storage accounts that the managed instance should have access to in that subnet. - -The following is a list of workflows that may contact Azure Storage: - -- [Auditing](auditing-configure.md) to Azure Storage. -- Performing a [copy-only backup](/sql/relational-databases/backup-restore/copy-only-backups-sql-server) to Azure Storage. -- [Restoring](restore-sample-database-quickstart.md) a database from Azure Storage. -- Importing data with [BULK INSERT or OPENROWSET(BULK ...)](/sql/relational-databases/import-export/import-bulk-data-by-using-bulk-insert-or-openrowset-bulk-sql-server). -- Logging [extended events](../database/xevent-db-diff-from-svr.md) to an Event File target on Azure Storage. -- [Azure DMS offline migration](../../dms/tutorial-sql-server-to-managed-instance.md) to Azure SQL Managed Instance. -- [Log Replay Service migration](log-replay-service-migrate.md) to Azure SQL Managed Instance. -- Synchronizing tables using [transactional replication](replication-transactional-overview.md). - -Note the account name, resource group, and subscription for any storage account that participates in these, or any other, workflows that access storage. - - -## Configure policies - -You'll first need to create your service endpoint policy, and then associate the policy with the SQL Managed Instance subnet. Modify the workflow in this section to suit your business needs. - - -> [!NOTE] -> - SQL Managed Instance subnets require policies to contain the /Services/Azure/ManagedInstance service alias (See step 5). 
-> - Managed instances deployed to a subnet that already contains service endpoint policies will be automatically upgraded the /Services/Azure/ManagedInstance service alias. - -### Create a service endpoint policy - -To create a service endpoint policy, follow these steps: - -1. Sign into the [Azure portal](https://portal.azure.com). -1. Select **+ Create a resource**. -1. In the search pane, enter _service endpoint policy_, select **Service endpoint policy**, and then select **Create**. - - ![Create service endpoint policy](../../virtual-network/media/virtual-network-service-endpoint-policies-portal/create-sep-resource.png) - -1. Fill in the following values on the **Basics** page: - - - Subscription: Select the subscription for your policy from the drop-down. - - Resource group: Select the resource group where your managed instance is located, or select **Create new** and fill in the name for a new resource group. - - Name: Provide a name for your policy, such as **mySEP**. - - Location: Select the region of the virtual network hosting the managed instance. - - ![Create service endpoint policy basics](../../virtual-network/media/virtual-network-service-endpoint-policies-portal/create-sep-basics.png) - -1. In **Policy definitions**, select **Add an alias** and enter the following information on the **Add an alias** pane: - - Service Alias: Select /Services/Azure/ManagedInstance. - - Select **Add** to finish adding the service alias. - - ![Add an alias to a service endpoint policy](./media/service-endpoint-policies-configure/add-an-alias.png) - -1. In Policy definitions, select **+ Add** under **Resources** and enter or select the following information in the **Add a resource** pane: - - Service: Select **Microsoft.Storage**. - - Scope: Select **All accounts in subscription**. - - Subscription: Select a subscription containing the storage account(s) to permit. Refer to your [inventory of Azure storage accounts](#prepare-storage-inventory) created earlier. 
- - Select **Add** to finish adding the resource. - - Repeat this step to add any additional subscriptions. - - ![Add a resource to a service endpoint policy](./media/service-endpoint-policies-configure/add-a-resource.png) - -1. Optional: you may configure tags on the service endpoint policy under **Tags**. -1. Select **Review + Create**. Validate the information and select **Create**. To make further edits, select **Previous**. - - > [!TIP] - > First, configure policies to allow access to entire subscriptions. Validate the configuration by ensuring that all workflows operate normally. Then, optionally, reconfigure policies to allow individual storage accounts, or accounts in a resource group. To do so, select **Single account** or **All accounts in resource group** in the _Scope:_ field instead and fill in the other fields accordingly. - -### Associate policy with subnet - -After your service endpoint policy is created, associate the policy with your SQL Managed Instance subnet. - -To associate your policy, follow these steps: - -1. In the _All services_ box in the Azure portal, search for _virtual networks_. Select **Virtual networks**. -1. Locate and select the virtual network hosting your managed instance. -1. Select **Subnets** and choose the subnet dedicated to your managed instance. Enter the following information in the subnet pane: - - Services: Select **Microsoft.Storage**. If this field is empty, you need to configure the service endpoint for Azure Storage on this subnet. - - Service endpoint policies: Select any service endpoint policies you want to apply to the SQL Managed Instance subnet. - - ![Associate a service endpoint policy with a subnet](./media/service-endpoint-policies-configure/associate-service-endpoint-policy.png) - -1. Select **Save** to finish configuring the virtual network. 
- -> [!WARNING] -> If the policies on this subnet do not have the `/Services/Azure/ManagedInstance` alias, you may see the following error: -> ` Failed to save subnet 'subnet'. Error: 'Found conflicts with NetworkIntentPolicy.` -> `Details: Service endpoint policies on subnet are missing definitions` -> To resolve this, update all the policies on the subnet to include the `/Services/Azure/ManagedInstance` alias. - -## Next steps - -- Learn more on [securing your Azure Storage accounts](../../storage/common/storage-network-security.md). -- Read about [SQL Managed Instance's security capabilities](../database/security-overview.md). -- Explore the [connectivity architecture](connectivity-architecture-overview.md) of SQL Managed Instance. diff --git a/articles/azure-sql/managed-instance/service-tiers-managed-instance-vcore.md b/articles/azure-sql/managed-instance/service-tiers-managed-instance-vcore.md deleted file mode 100644 index f954b251895db..0000000000000 --- a/articles/azure-sql/managed-instance/service-tiers-managed-instance-vcore.md +++ /dev/null @@ -1,133 +0,0 @@ ---- -title: vCore purchasing model -description: The vCore purchasing model lets you independently scale compute and storage resources, match on-premises performance, and optimize price for Azure SQL Managed Instance. 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: performance -ms.topic: conceptual -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: sashan, moslake -ms.date: 04/06/2022 -ms.custom: ignite-fall-2021 ---- -# vCore purchasing model - Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -> [!div class="op_single_selector"] -> * [Azure SQL Database](../database/service-tiers-sql-database-vcore.md) -> * [Azure SQL Managed Instance](service-tiers-managed-instance-vcore.md) - -This article reviews the [vCore purchasing model](../database/service-tiers-vcore.md) for [Azure SQL Managed Instance](sql-managed-instance-paas-overview.md). - -## Overview - -[!INCLUDE [vcore-overview](../includes/vcore-overview.md)] - -The virtual core (vCore) purchasing model used by Azure SQL Managed Instance provides the following benefits: - -- Control over hardware configuration to better match the compute and memory requirements of the workload. -- Pricing discounts for [Azure Hybrid Benefit (AHB)](../azure-hybrid-benefit.md) and [Reserved Instance (RI)](../database/reserved-capacity-overview.md). -- Greater transparency in the hardware details that power compute, helping facilitate planning for migrations from on-premises deployments. -- Higher scaling granularity with multiple compute sizes available. - -## Service tiers - -Service tier options in the vCore purchasing model include General Purpose and Business Critical. The service tier generally defines the storage architecture, space and I/O limits, and business continuity options related to availability and disaster recovery. - -For more details, review [resource limits](resource-limits.md). - -|**Category**|**General Purpose**|**Business Critical**| -|---|---|---| -|**Best for**|Most business workloads. Offers budget-oriented, balanced, and scalable compute and storage options. 
|Offers business applications the highest resilience to failures by using several isolated replicas, and provides the highest I/O performance.| -|**Availability**|1 replica, no read-scale replicas|4 replicas total, 1 [read-scale replica](../database/read-scale-out.md),
    2 high availability replicas (HA)| -|**Read-only replicas**| 0 built-in
    0 - 4 using [geo-replication](../database/active-geo-replication-overview.md) | 1 built-in, included in price
    0 - 4 using [geo-replication](../database/active-geo-replication-overview.md) | -|**Pricing/billing**| [vCore, reserved storage, and backup storage](https://azure.microsoft.com/pricing/details/sql-database/managed/) is charged.
    IOPS is not charged| [vCore, reserved storage, and backup storage](https://azure.microsoft.com/pricing/details/sql-database/managed/) is charged.
    IOPS is not charged. -|**Discount models**| [Reserved instances](../database/reserved-capacity-overview.md)
    [Azure Hybrid Benefit](../azure-hybrid-benefit.md) (not available on dev/test subscriptions)
    [Enterprise](https://azure.microsoft.com/offers/ms-azr-0148p/) and [Pay-As-You-Go](https://azure.microsoft.com/offers/ms-azr-0023p/) Dev/Test subscriptions|[Reserved instances](../database/reserved-capacity-overview.md)
    [Azure Hybrid Benefit](../azure-hybrid-benefit.md) (not available on dev/test subscriptions)
    [Enterprise](https://azure.microsoft.com/offers/ms-azr-0148p/) and [Pay-As-You-Go](https://azure.microsoft.com/offers/ms-azr-0023p/) Dev/Test subscriptions| - - -> [!NOTE] -> For more information on the Service Level Agreement (SLA), see [SLA for Azure SQL Managed Instance](https://azure.microsoft.com/support/legal/sla/azure-sql-sql-managed-instance/). - -### Choosing a service tier - -For information on selecting a service tier for your particular workload, see the following articles: - -- [When to choose the General Purpose service tier](../database/service-tier-general-purpose.md#when-to-choose-this-service-tier) -- [When to choose the Business Critical service tier](../database/service-tier-business-critical.md#when-to-choose-this-service-tier) - -## Compute - -SQL Managed Instance compute provides a specific amount of compute resources that are continuously provisioned independent of workload activity, and bills for the amount of compute provisioned at a fixed price per hour. - -## Hardware configurations - -Hardware configuration options in the vCore model include standard-series (Gen5), premium-series, and memory optimized premium-series. Hardware configuration generally defines the compute and memory limits and other characteristics that impact workload performance. - -For more information on the hardware configuration specifics and limitations, see [Hardware configuration characteristics](resource-limits.md#hardware-configuration-characteristics). - -In the [sys.dm_user_db_resource_governance](/sql/relational-databases/system-dynamic-management-views/sys-dm-user-db-resource-governor-azure-sql-database) dynamic management view, hardware generation for instances using Intel® SP-8160 (Skylake) processors appears as Gen6, while hardware generation for instances using Intel® 8272CL (Cascade Lake) appears as Gen7. The Intel® 8370C (Ice Lake) CPUs used by premium-series and memory optimized premium-series hardware generations appear as Gen8. 
Resource limits for all standard-series (Gen5) instances are the same regardless of processor type (Broadwell, Skylake, or Cascade Lake). - -### Selecting a hardware configuration - -You can select hardware configuration at the time of instance creation, or you can change hardware of an existing instance. - -**To select hardware configuration when creating a SQL Managed Instance** - -For detailed information, see [Create a SQL Managed Instance](../managed-instance/instance-create-quickstart.md). - -On the **Basics** tab, select the **Configure database** link in the **Compute + storage** section, and then select desired hardware: - -:::image type="content" source="../database/media/service-tiers-vcore/configure-managed-instance.png" alt-text="configure SQL Managed Instance" loc-scope="azure-portal"::: - -**To change hardware of an existing SQL Managed Instance** - -#### [The Azure portal](#tab/azure-portal) - -From the SQL Managed Instance page, select **Pricing tier** link placed under the Settings section - -:::image type="content" source="../database/media/service-tiers-vcore/change-managed-instance-hardware.png" alt-text="change SQL Managed Instance hardware" loc-scope="azure-portal"::: - -On the Pricing tier page, you will be able to change hardware as described in the previous steps. - -#### [PowerShell](#tab/azure-powershell) - -Use the following PowerShell script: - -```powershell-interactive -Set-AzSqlInstance -Name "managedinstance1" -ResourceGroupName "ResourceGroup01" -ComputeGeneration Gen5 -``` - -For more details, check [Set-AzSqlInstance](/powershell/module/az.sql/set-azsqlinstance) command. - -#### [The Azure CLI](#tab/azure-cli) - -Use the following CLI command: - -```azurecli-interactive -az sql mi update -g mygroup -n myinstance --family Gen5 -``` - -For more details, check [az sql mi update](/cli/azure/sql/mi#az-sql-mi-update) command. 
- ---- - -### Hardware availability - -#### Gen4 - -Gen4 hardware is [being retired](https://azure.microsoft.com/updates/gen-4-hardware-on-azure-sql-database-approaching-end-of-life-in-2020/) and is no longer available for new deployments. All new instances must be deployed on other hardware configurations. - -#### Standard-series (Gen5) and premium-series - -Standard-series (Gen5) hardware is available in all public regions worldwide. - -Premium-series and memory optimized premium-series hardware is in preview, and has limited regional availability. For more details, see [Azure SQL Managed Instance resource limits](../managed-instance/resource-limits.md#hardware-configuration-characteristics). - -## Next steps - -- To get started, see [Creating a SQL Managed Instance using the Azure portal](instance-create-quickstart.md) -- For pricing details, see - - [Azure SQL Managed Instance single instance pricing page](https://azure.microsoft.com/pricing/details/azure-sql-managed-instance/single/) - - [Azure SQL Managed Instance pools pricing page](https://azure.microsoft.com/pricing/details/azure-sql-managed-instance/pools/) -- For details about the specific compute and storage sizes available in the General Purpose and Business Critical service tiers, see [vCore-based resource limits for Azure SQL Managed Instance](resource-limits.md). diff --git a/articles/azure-sql/managed-instance/sql-managed-instance-paas-overview.md b/articles/azure-sql/managed-instance/sql-managed-instance-paas-overview.md deleted file mode 100644 index b046f63c0fbd0..0000000000000 --- a/articles/azure-sql/managed-instance/sql-managed-instance-paas-overview.md +++ /dev/null @@ -1,252 +0,0 @@ ---- -title: What is Azure SQL Managed Instance? 
-description: Learn about how Azure SQL Managed Instance provides near 100% compatibility with the latest SQL Server (Enterprise Edition) database engine -services: sql-database -ms.service: sql-managed-instance -ms.subservice: service-overview -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: overview -author: niko-neugebauer -ms.author: nneugebauer -ms.reviewer: mathoma, vanto -ms.date: 04/06/2022 ---- - -# What is Azure SQL Managed Instance? -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Azure SQL Managed Instance is the intelligent, scalable cloud database service that combines the broadest SQL Server database engine compatibility with all the benefits of a fully managed and evergreen platform as a service. SQL Managed Instance has near 100% compatibility with the latest SQL Server (Enterprise Edition) database engine, providing a native [virtual network (VNet)](../../virtual-network/virtual-networks-overview.md) implementation that addresses common security concerns, and a [business model](https://azure.microsoft.com/pricing/details/sql-database/) favorable for existing SQL Server customers. SQL Managed Instance allows existing SQL Server customers to lift and shift their on-premises applications to the cloud with minimal application and database changes. At the same time, SQL Managed Instance preserves all PaaS capabilities (automatic patching and version updates, [automated backups](../database/automated-backups-overview.md), [high availability](../database/high-availability-sla.md)) that drastically reduce management overhead and TCO. 
- -If you're new to Azure SQL Managed Instance, check out the *Azure SQL Managed Instance* video from our in-depth [Azure SQL video series](/shows/Azure-SQL-for-Beginners/?WT.mc_id=azuresql4beg_azuresql-ch9-niner): -> [!VIDEO https://docs.microsoft.com/shows/Azure-SQL-for-Beginners/Azure-SQL-Managed-Instance-Overview-6-of-61/player] - -> [!IMPORTANT] -> For a list of regions where SQL Managed Instance is currently available, see [Supported regions](resource-limits.md#supported-regions). - -The following diagram outlines key features of SQL Managed Instance: - -![Key features](./media/sql-managed-instance-paas-overview/key-features.png) - -Azure SQL Managed Instance is designed for customers looking to migrate a large number of apps from an on-premises or IaaS, self-built, or ISV provided environment to a fully managed PaaS cloud environment, with as low a migration effort as possible. Using the fully automated [Azure Data Migration Service](../../dms/tutorial-sql-server-to-managed-instance.md#create-an-azure-database-migration-service-instance), customers can lift and shift their existing SQL Server instance to SQL Managed Instance, which offers compatibility with SQL Server and complete isolation of customer instances with native VNet support. For more information on migration options and tools, see [Migration overview: SQL Server to Azure SQL Managed Instance](../migration-guides/managed-instance/sql-server-to-managed-instance-overview.md).
    With Software Assurance, you can exchange your existing licenses for discounted rates on SQL Managed Instance using the [Azure Hybrid Benefit for SQL Server](https://azure.microsoft.com/pricing/hybrid-benefit/). SQL Managed Instance is the best migration destination in the cloud for SQL Server instances that require high security and a rich programmability surface. - - - - -## Key features and capabilities - -SQL Managed Instance combines the best features that are available both in Azure SQL Database and the SQL Server database engine. - -> [!IMPORTANT] -> SQL Managed Instance runs with all of the features of the most recent version of SQL Server, including online operations, automatic plan corrections, and other enterprise performance enhancements. A comparison of the features available is explained in [Feature comparison: Azure SQL Managed Instance versus SQL Server](../database/features-comparison.md). - -| **PaaS benefits** | **Business continuity** | -| --- | --- | -|No hardware purchasing and management
    No management overhead for managing underlying infrastructure
    Quick provisioning and service scaling
    Automated patching and version upgrade
    Integration with other PaaS data services |99.99% uptime SLA
    Built-in [high availability](../database/high-availability-sla.md)
    Data protected with [automated backups](../database/automated-backups-overview.md)
    Customer configurable backup retention period
    User-initiated [backups](/sql/t-sql/statements/backup-transact-sql?preserve-view=true&view=azuresqldb-mi-current)
    [Point-in-time database restore](../database/recovery-using-backups.md#point-in-time-restore) capability | -|**Security and compliance** | **Management**| -|Isolated environment ([VNet integration](connectivity-architecture-overview.md), single tenant service, dedicated compute and storage)
    [Transparent data encryption (TDE)](/sql/relational-databases/security/encryption/transparent-data-encryption-azure-sql)
    [Azure Active Directory (Azure AD) authentication](../database/authentication-aad-overview.md), single sign-on support
    [Azure AD server principals (logins)](/sql/t-sql/statements/create-login-transact-sql?view=azuresqldb-mi-current&preserve-view=true)
    [What is Windows Authentication for Azure AD principals (Preview)](winauth-azuread-overview.md)
    Adheres to the same compliance standards as Azure SQL Database
    [SQL auditing](auditing-configure.md)
    [Advanced Threat Protection](threat-detection-configure.md) |Azure Resource Manager API for automating service provisioning and scaling
    Azure portal functionality for manual service provisioning and scaling
    Data Migration Service - -> [!IMPORTANT] -> Azure SQL Managed Instance has been certified against a number of compliance standards. For more information, see the [Microsoft Azure Compliance Offerings](https://servicetrust.microsoft.com/ViewPage/MSComplianceGuideV3?command=Download&downloadType=Document&downloadId=44bbae63-bf4d-4e3b-9d3d-c96fb25ec363&tab=7027ead0-3d6b-11e9-b9e1-290b1eb4cdeb&docTab=7027ead0-3d6b-11e9-b9e1-290b1eb4cdeb_FAQ_and_White_Papers), where you can find the most current list of SQL Managed Instance compliance certifications, listed under **SQL Database**. - -The key features of SQL Managed Instance are shown in the following table: - -|Feature | Description| -|---|---| -| SQL Server version/build | SQL Server database engine (latest stable) | -| Managed automated backups | Yes | -| Built-in instance and database monitoring and metrics | Yes | -| Automatic software patching | Yes | -| The latest database engine features | Yes | -| Number of data files (ROWS) per the database | Multiple | -| Number of log files (LOG) per database | 1 | -| VNet - Azure Resource Manager deployment | Yes | -| VNet - Classic deployment model | No | -| Portal support | Yes| -| Built-in Integration Service (SSIS) | No - SSIS is a part of [Azure Data Factory PaaS](../../data-factory/tutorial-deploy-ssis-packages-azure.md) | -| Built-in Analysis Service (SSAS) | No - SSAS is separate [PaaS](../../analysis-services/analysis-services-overview.md) | -| Built-in Reporting Service (SSRS) | No - use [Power BI paginated reports](/power-bi/paginated-reports/paginated-reports-report-builder-power-bi) instead or host SSRS on an Azure VM. While SQL Managed Instance cannot run SSRS as a service, it can host [SSRS catalog databases](/sql/reporting-services/install-windows/ssrs-report-server-create-a-report-server-database#database-server-version-requirements) for a reporting server installed on Azure Virtual Machine, using SQL Server authentication. 
| - - -## vCore-based purchasing model - -The [vCore-based purchasing model](../database/service-tiers-vcore.md) for SQL Managed Instance gives you flexibility, control, transparency, and a straightforward way to translate on-premises workload requirements to the cloud. This model allows you to change compute, memory, and storage based upon your workload needs. The vCore model is also eligible for up to 55 percent savings with the [Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/) for SQL Server. - -In the vCore model, you can choose hardware configurations as follows: - -- **Standard Series (Gen5)** logical CPUs are based on Intel® E5-2673 v4 (Broadwell) 2.3 GHz, Intel® SP-8160 (Skylake), and Intel® 8272CL (Cascade Lake) 2.5 GHz processors, with **5.1 GB of RAM per CPU vCore**, fast NVMe SSD, hyper-threaded logical core, and compute sizes between 4 and 80 cores. -- **Premium Series** logical CPUs are based on Intel® 8370C (Ice Lake) 2.8 GHz processors, with **7 GB of RAM per CPU vCore**, fast NVMe SSD, hyper-threaded logical core, and compute sizes between 4 and 80 cores. -- **Premium Series Memory-Optimized** logical CPUs are based on Intel® 8370C (Ice Lake) 2.8 GHz processors, with **13.6 GB of RAM per CPU vCore**, fast NVMe SSD, hyper-threaded logical core, and compute sizes between 4 and 64 cores. - -Find more information about the difference between hardware configurations in [SQL Managed Instance resource limits](resource-limits.md#hardware-configuration-characteristics). - -## Service tiers - -SQL Managed Instance is available in two service tiers: - -- **General purpose**: Designed for applications with typical performance and I/O latency requirements. -- **Business critical**: Designed for applications with low I/O latency requirements and minimal impact of underlying maintenance operations on the workload. - -Both service tiers guarantee 99.99% availability and enable you to independently select storage size and compute capacity. 
For more information on the high availability architecture of Azure SQL Managed Instance, see [High availability and Azure SQL Managed Instance](../database/high-availability-sla.md). - -### General Purpose service tier - -The following list describes key characteristics of the General Purpose service tier: - -- Designed for the majority of business applications with typical performance requirements -- High-performance Azure Blob storage (16 TB) -- Built-in [high availability](../database/high-availability-sla.md#basic-standard-and-general-purpose-service-tier-locally-redundant-availability) based on reliable Azure Blob storage and [Azure Service Fabric](../../service-fabric/service-fabric-overview.md) - -For more information, see [Storage layer in the General Purpose tier](https://medium.com/azure-sqldb-managed-instance/file-layout-in-general-purpose-azure-sql-managed-instance-cf21fff9c76c) and [Storage performance best practices and considerations for SQL Managed Instance (General Purpose)](/archive/blogs/sqlcat/storage-performance-best-practices-and-considerations-for-azure-sql-db-managed-instance-general-purpose). - -Find more information about the difference between service tiers in [SQL Managed Instance resource limits](resource-limits.md#service-tier-characteristics). - -### Business Critical service tier - -The Business Critical service tier is built for applications with high I/O requirements. It offers the highest resilience to failures using several isolated replicas. 
- -The following list outlines the key characteristics of the Business Critical service tier: - -- Designed for business applications with highest performance and HA requirements -- Comes with super-fast local SSD storage (up to 4 TB on Standard Series (Gen5), up to 5.5 TB on Premium Series and up to 16 TB on Premium Series Memory-Optimized) -- Built-in [high availability](../database/high-availability-sla.md#premium-and-business-critical-service-tier-locally-redundant-availability) based on [Always On availability groups](/sql/database-engine/availability-groups/windows/always-on-availability-groups-sql-server) and [Azure Service Fabric](../../service-fabric/service-fabric-overview.md) -- Built-in additional [read-only database replica](../database/read-scale-out.md) that can be used for reporting and other read-only workloads -- [In-Memory OLTP](../in-memory-oltp-overview.md) that can be used for workload with high-performance requirements - -Find more information about the differences between service tiers in [SQL Managed Instance resource limits](resource-limits.md#service-tier-characteristics). - -## Management operations - -Azure SQL Managed Instance provides management operations that you can use to automatically deploy new managed instances, update instance properties, and delete instances when no longer needed. Detailed explanation of management operations can be found on [managed instance management operations overview](management-operations-overview.md) page. - -## Advanced security and compliance - -SQL Managed Instance comes with advanced security features provided by the Azure platform and the SQL Server database engine. - -### Security isolation - -SQL Managed Instance provides additional security isolation from other tenants on the Azure platform. Security isolation includes: - -- [Native virtual network implementation](connectivity-architecture-overview.md) and connectivity to your on-premises environment using Azure ExpressRoute or VPN Gateway. 
-- In a default deployment, the SQL endpoint is exposed only through a private IP address, allowing safe connectivity from private Azure or hybrid networks. -- Single-tenant with dedicated underlying infrastructure (compute, storage). - -The following diagram outlines various connectivity options for your applications: - -![High availability](./media/sql-managed-instance-paas-overview/application-deployment-topologies.png) - -To learn more details about VNet integration and networking policy enforcement at the subnet level, see [VNet architecture for managed instances](connectivity-architecture-overview.md) and [Connect your application to a managed instance](connect-application-instance.md). - -> [!IMPORTANT] -> Place multiple managed instances in the same subnet, wherever that is allowed by your security requirements, as that will bring you additional benefits. Co-locating instances in the same subnet will significantly simplify networking infrastructure maintenance and reduce instance provisioning time, since a long provisioning duration is associated with the cost of deploying the first managed instance in a subnet. - -### Security features - -Azure SQL Managed Instance provides a set of advanced security features that can be used to protect your data. - -- [SQL Managed Instance auditing](auditing-configure.md) tracks database events and writes them to an audit log file placed in your Azure storage account. Auditing can help you maintain regulatory compliance, understand database activity, and gain insight into discrepancies and anomalies that could indicate business concerns or suspected security violations. -- Data encryption in motion - SQL Managed Instance secures your data by providing encryption for data in motion using Transport Layer Security. 
In addition to Transport Layer Security, SQL Managed Instance offers protection of sensitive data in flight, at rest, and during query processing with [Always Encrypted](/sql/relational-databases/security/encryption/always-encrypted-database-engine). Always Encrypted offers data security against breaches involving the theft of critical data. For example, with Always Encrypted, credit card numbers are stored encrypted in the database always, even during query processing, allowing decryption at the point of use by authorized staff or applications that need to process that data. -- [Advanced Threat Protection](threat-detection-configure.md) complements [auditing](auditing-configure.md) by providing an additional layer of security intelligence built into the service that detects unusual and potentially harmful attempts to access or exploit databases. You are alerted about suspicious activities, potential vulnerabilities, and SQL injection attacks, as well as anomalous database access patterns. Advanced Threat Protection alerts can be viewed from [Microsoft Defender for Cloud](https://azure.microsoft.com/services/security-center/). They provide details of suspicious activity and recommend action on how to investigate and mitigate the threat. -- [Dynamic data masking](/sql/relational-databases/security/dynamic-data-masking) limits sensitive data exposure by masking it to non-privileged users. Dynamic data masking helps prevent unauthorized access to sensitive data by enabling you to designate how much of the sensitive data to reveal with minimal impact on the application layer. It's a policy-based security feature that hides the sensitive data in the result set of a query over designated database fields, while the data in the database is not changed. 
-- [Row-level security](/sql/relational-databases/security/row-level-security) (RLS) enables you to control access to rows in a database table based on the characteristics of the user executing a query (such as by group membership or execution context). RLS simplifies the design and coding of security in your application. RLS enables you to implement restrictions on data row access. For example, ensuring that workers can access only the data rows that are pertinent to their department, or restricting data access to only the relevant data. -- [Transparent data encryption (TDE)](/sql/relational-databases/security/encryption/transparent-data-encryption-azure-sql) encrypts SQL Managed Instance data files, known as encrypting data at rest. TDE performs real-time I/O encryption and decryption of the data and log files. The encryption uses a database encryption key (DEK), which is stored in the database boot record for availability during recovery. You can protect all your databases in a managed instance with transparent data encryption. TDE is proven encryption-at-rest technology in SQL Server that is required by many compliance standards to protect against theft of storage media. - -Migration of an encrypted database to SQL Managed Instance is supported via Azure Database Migration Service or native restore. If you plan to migrate an encrypted database using native restore, migration of the existing TDE certificate from the SQL Server instance to SQL Managed Instance is a required step. For more information about migration options, see [SQL Server to Azure SQL Managed Instance Guide](../migration-guides/managed-instance/sql-server-to-managed-instance-guide.md). - -## Azure Active Directory integration - -SQL Managed Instance supports traditional SQL Server database engine logins and logins integrated with Azure AD. 
Azure AD server principals (logins) (**public preview**) are an Azure cloud version of on-premises database logins that you are using in your on-premises environment. Azure AD server principals (logins) enable you to specify users and groups from your Azure AD tenant as true instance-scoped principals, capable of performing any instance-level operation, including cross-database queries within the same managed instance. - -A new syntax is introduced to create Azure AD server principals (logins), **FROM EXTERNAL PROVIDER**. For more information on the syntax, see [CREATE LOGIN](/sql/t-sql/statements/create-login-transact-sql?view=azuresqldb-mi-current&preserve-view=true), and review the [Provision an Azure Active Directory administrator for SQL Managed Instance](../database/authentication-aad-configure.md#provision-azure-ad-admin-sql-managed-instance) article. - -### Azure Active Directory integration and multi-factor authentication - -SQL Managed Instance enables you to centrally manage identities of database users and other Microsoft services with [Azure Active Directory integration](../database/authentication-aad-overview.md). This capability simplifies permission management and enhances security. Azure Active Directory supports [multi-factor authentication](../database/authentication-mfa-ssms-configure.md) to increase data and application security while supporting a single sign-on process. - -### Authentication - -SQL Managed Instance authentication refers to how users prove their identity when connecting to the database. SQL Managed Instance supports three types of authentication: - -- **SQL Authentication**: - - This authentication method uses a username and password. -- **Azure Active Directory Authentication**: - - This authentication method uses identities managed by Azure Active Directory and is supported for managed and integrated domains. 
Use Active Directory authentication (integrated security) [whenever possible](/sql/relational-databases/security/choose-an-authentication-mode). - -- **Windows Authentication for Azure AD Principals (Preview)**: - - [Kerberos authentication for Azure AD Principals](../managed-instance/winauth-azuread-overview.md) (Preview) enables Windows Authentication for Azure SQL Managed Instance. Windows Authentication for managed instances empowers customers to move existing services to the cloud while maintaining a seamless user experience and provides the basis for infrastructure modernization. - -### Authorization - -Authorization refers to what a user can do within a database in Azure SQL Managed Instance, and is controlled by your user account's database role memberships and object-level permissions. SQL Managed Instance has the same authorization capabilities as SQL Server 2017. - -## Database migration - -SQL Managed Instance targets user scenarios with mass database migration from on-premises or IaaS database implementations. SQL Managed Instance supports several database migration options that are discussed in the migration guides. See [Migration overview: SQL Server to Azure SQL Managed Instance](../migration-guides/managed-instance/sql-server-to-managed-instance-overview.md) for more information. - -### Backup and restore - -The migration approach leverages SQL backups to Azure Blob storage. Backups stored in an Azure storage blob can be directly restored into a managed instance using the [T-SQL RESTORE command](/sql/t-sql/statements/restore-statements-transact-sql?preserve-view=true&view=azuresqldb-mi-current). - -- For a quickstart showing how to restore the Wide World Importers - Standard database backup file, see [Restore a backup file to a managed instance](restore-sample-database-quickstart.md). This quickstart shows that you have to upload a backup file to Azure Blob storage and secure it using a shared access signature (SAS) key. 
-- For information about restore from URL, see [Native RESTORE from URL](../migration-guides/managed-instance/sql-server-to-managed-instance-guide.md#backup-and-restore). - -> [!IMPORTANT] -> Backups from a managed instance can only be restored to another managed instance. They cannot be restored to a SQL Server instance or to Azure SQL Database. - -### Database Migration Service - -Azure Database Migration Service is a fully managed service designed to enable seamless migrations from multiple database sources to Azure data platforms with minimal downtime. This service streamlines the tasks required to move existing third-party and SQL Server databases to Azure SQL Database, Azure SQL Managed Instance, and SQL Server on Azure VM. See [How to migrate your on-premises database to SQL Managed Instance using Database Migration Service](../../dms/tutorial-sql-server-to-managed-instance.md). - -## SQL features supported - -SQL Managed Instance aims to deliver close to 100% surface area compatibility with the latest SQL Server version through a staged release plan. For a features and comparison list, see [SQL Managed Instance feature comparison](../database/features-comparison.md), and for a list of T-SQL differences in SQL Managed Instance versus SQL Server, see [SQL Managed Instance T-SQL differences from SQL Server](transact-sql-tsql-differences-sql-server.md). - -SQL Managed Instance supports backward compatibility to SQL Server 2008 databases. Direct migration from SQL Server 2005 database servers is supported, and the compatibility level for migrated SQL Server 2005 databases is updated to SQL Server 2008. 
- -The following diagram outlines surface area compatibility in SQL Managed Instance: - -![surface area compatibility](./media/sql-managed-instance-paas-overview/migration.png) - -### Key differences between SQL Server on-premises and SQL Managed Instance - -SQL Managed Instance benefits from being always-up-to-date in the cloud, which means that some features in SQL Server may be obsolete, be retired, or have alternatives. There are specific cases when tools need to recognize that a particular feature works in a slightly different way or that the service is running in an environment you do not fully control. - -Some key differences: - -- High availability is built in and pre-configured using technology similar to [Always On availability groups](/sql/database-engine/availability-groups/windows/always-on-availability-groups-sql-server). -- There are only automated backups and point-in-time restore. Customers can initiate `copy-only` backups that do not interfere with the automatic backup chain. -- Specifying full physical paths is unsupported, so all corresponding scenarios have to be supported differently: RESTORE DB does not support WITH MOVE, CREATE DB doesn't allow physical paths, BULK INSERT works with Azure blobs only, etc. -- SQL Managed Instance supports [Azure AD authentication](../database/authentication-aad-overview.md) and [Windows Authentication for Azure Active Directory principals (Preview)](winauth-azuread-overview.md). -- SQL Managed Instance automatically manages XTP filegroups and files for databases containing In-Memory OLTP objects. -- SQL Managed Instance supports SQL Server Integration Services (SSIS) and can host an SSIS catalog (SSISDB) that stores SSIS packages, but they are executed on a managed Azure-SSIS Integration Runtime (IR) in Azure Data Factory. See [Create Azure-SSIS IR in Data Factory](../../data-factory/create-azure-ssis-integration-runtime.md). 
To compare the SSIS features, see [Compare SQL Database to SQL Managed Instance](../../data-factory/create-azure-ssis-integration-runtime.md#comparison-of-sql-database-and-sql-managed-instance). -- SQL Managed Instance supports connectivity only through the TCP protocol. It does not support connectivity through named pipes. - -### Administration features - -SQL Managed Instance enables system administrators to spend less time on administrative tasks because the service either performs them for you or greatly simplifies those tasks. For example, [OS/RDBMS installation and patching](../database/high-availability-sla.md), [dynamic instance resizing and configuration](../database/single-database-scale.md), [backups](../database/automated-backups-overview.md), [database replication](replication-between-two-instances-configure-tutorial.md) (including system databases), [high availability configuration](../database/high-availability-sla.md), and configuration of health and [performance monitoring](../../azure-monitor/insights/azure-sql.md) data streams. - -For more information, see [a list of supported and unsupported SQL Managed Instance features](../database/features-comparison.md), and [T-SQL differences between SQL Managed Instance and SQL Server](transact-sql-tsql-differences-sql-server.md). - -### Programmatically identify a managed instance - -The following table shows several properties, accessible through Transact-SQL, that you can use to detect that your application is working with SQL Managed Instance and retrieve important properties. - -|Property|Value|Comment| -|---|---|---| -|`@@VERSION`|Microsoft SQL Azure (RTM) - 12.0.2000.8 2018-03-07 Copyright (C) 2018 Microsoft Corporation.|This value is same as in SQL Database. This **does not** indicate SQL engine version 12 (SQL Server 2014). SQL Managed Instance always runs the latest stable SQL engine version, which is equal to or higher than latest available RTM version of SQL Server. 
| -|`SERVERPROPERTY ('Edition')`|SQL Azure|This value is same as in SQL Database.| -|`SERVERPROPERTY('EngineEdition')`|8|This value uniquely identifies a managed instance.| -|`@@SERVERNAME`, `SERVERPROPERTY ('ServerName')`|Full instance DNS name in the following format:``.``.database.windows.net, where `` is name provided by the customer, while `` is autogenerated part of the name guaranteeing global DNS name uniqueness ("wcus17662feb9ce98", for example)|Example: my-managed-instance.wcus17662feb9ce98.database.windows.net| - -## Next steps - -- To learn how to create your first managed instance, see [Quickstart guide](instance-create-quickstart.md). -- For a features and comparison list, see [SQL common features](../database/features-comparison.md). -- For more information about VNet configuration, see [SQL Managed Instance VNet configuration](connectivity-architecture-overview.md). -- For a quickstart that creates a managed instance and restores a database from a backup file, see [Create a managed instance](instance-create-quickstart.md). -- For a tutorial about using Azure Database Migration Service for migration, see [SQL Managed Instance migration using Database Migration Service](../../dms/tutorial-sql-server-to-managed-instance.md). -- For advanced monitoring of SQL Managed Instance database performance with built-in troubleshooting intelligence, see [Monitor Azure SQL Managed Instance using Azure SQL Analytics](../../azure-monitor/insights/azure-sql.md). -- For pricing information, see [SQL Database pricing](https://azure.microsoft.com/pricing/details/sql-database/managed/). 
diff --git a/articles/azure-sql/managed-instance/subnet-service-aided-configuration-enable.md b/articles/azure-sql/managed-instance/subnet-service-aided-configuration-enable.md deleted file mode 100644 index 2baf9f250198d..0000000000000 --- a/articles/azure-sql/managed-instance/subnet-service-aided-configuration-enable.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: Enabling service-aided subnet configuration for Azure SQL Managed Instance -description: Enabling service-aided subnet configuration for Azure SQL Managed Instance -services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.custom: devx-track-azurepowershell -ms.devlang: -ms.topic: how-to -author: srdan-bozovic-msft -ms.author: srbozovi -ms.date: 03/25/2022 ---- -# Enabling service-aided subnet configuration for Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Service-aided subnet configuration provides automated network configuration management for subnets hosting managed instances. With service-aided subnet configuration user stays in full control of access to data (TDS traffic flows) while managed instance takes responsibility to ensure uninterrupted flow of management traffic in order to fulfill SLA. - -Automatically configured network security groups and route table rules are visible to customer and annotated with prefix _Microsoft.Sql-managedInstances_UseOnly__. - -Service-aided configuration is enabled automatically once you turn on [subnet-delegation](../../virtual-network/subnet-delegation-overview.md) for `Microsoft.Sql/managedInstances` resource provider. - -> [!IMPORTANT] -> Once subnet-delegation is turned on you could not turn it off until the very last virtual cluster is removed from the subnet. For more details on virtual cluster lifetime see the following [article](virtual-cluster-delete.md). 
- -> [!NOTE] -> As service-aided subnet configuration is essential feature for maintaining SLA, starting May 1st 2020, it won't be possible to deploy managed instances in subnets that are not delegated to managed instance resource provider. On July 1st 2020 all subnets containing managed instances will be automatically delegated to managed instance resource provider. - -## Enabling subnet-delegation for new deployments -To deploy managed instance in to empty subnet you need to delegate it to `Microsoft.Sql/managedInstances` resource provider as described in following [article](../../virtual-network/manage-subnet-delegation.md). _Please note that referenced article uses `Microsoft.DBforPostgreSQL/serversv2` resource provider for example. You'll need to use `Microsoft.Sql/managedInstances` resource provider instead._ - -## Enabling subnet-delegation for existing deployments - -In order to enable subnet-delegation for your existing managed instance deployment you need to find out virtual network subnet where it is placed. - -To learn this you can check `Virtual network/subnet` at the `Overview` portal blade of your managed instance. - -As an alternative, you could run the following PowerShell commands to learn this. Replace **subscription-id** with your subscription ID. Also replace **rg-name** with the resource group for your managed instance, and replace **mi-name** with the name of your managed instance. 
- -```powershell -Install-Module -Name Az - -Import-Module Az.Accounts -Import-Module Az.Sql - -Connect-AzAccount - -# Use your subscription ID in place of subscription-id below - -Select-AzSubscription -SubscriptionId {subscription-id} - -# Replace rg-name with the resource group for your managed instance, and replace mi-name with the name of your managed instance - -$mi = Get-AzSqlInstance -ResourceGroupName {rg-name} -Name {mi-name} - -$mi.SubnetId -``` - -Once you find managed instance subnet you need to delegate it to `Microsoft.Sql/managedInstances` resource provider as described in following [article](../../virtual-network/manage-subnet-delegation.md). _Please note that referenced article uses `Microsoft.DBforPostgreSQL/serversv2` resource provider for example. You'll need to use `Microsoft.Sql/managedInstances` resource provider instead._ - - -> [!IMPORTANT] -> Enabling service-aided configuration doesn't cause failover or interruption in connectivity for managed instances that are already in the subnet. diff --git a/articles/azure-sql/managed-instance/synchronize-vnet-dns-servers-setting-on-virtual-cluster.md b/articles/azure-sql/managed-instance/synchronize-vnet-dns-servers-setting-on-virtual-cluster.md deleted file mode 100644 index f82252e87947d..0000000000000 --- a/articles/azure-sql/managed-instance/synchronize-vnet-dns-servers-setting-on-virtual-cluster.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Synchronize virtual network DNS servers setting on SQL Managed Instance virtual cluster -description: Learn how synchronize virtual network DNS servers setting on SQL Managed Instance virtual cluster. 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -author: srdan-bozovic-msft -ms.author: srbozovi -ms.topic: how-to -ms.date: 01/17/2021 -ms.custom: devx-track-azurepowershell ---- - -# Synchronize virtual network DNS servers setting on SQL Managed Instance virtual cluster -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article explains when and how to synchronize virtual network DNS servers setting on SQL Managed Instance virtual cluster. - -## When to synchronize the DNS setting - -There are a few scenarios (for example, db mail, linked servers to other SQL Server instances in your cloud or hybrid environment) that require private host names to be resolved from SQL Managed Instance. In this case, you need to configure a custom DNS inside Azure. See [Configure a custom DNS for Azure SQL Managed Instance](custom-dns-configure.md) for details. - -If this change is implemented after [virtual cluster](connectivity-architecture-overview.md#virtual-cluster-connectivity-architecture) hosting Managed Instance is created you'll need to synchronize DNS servers setting on the virtual cluster with the virtual network configuration. - -> [!IMPORTANT] -> Synchronizing DNS servers setting will affect all of the Managed Instances hosted in the virtual cluster. - -## How to synchronize the DNS setting - -### Azure RBAC permissions required - -User synchronizing DNS server configuration will need to have one of the following Azure roles: - -- Subscription contributor role, or -- Custom role with the following permission: - - `Microsoft.Sql/virtualClusters/updateManagedInstanceDnsServers/action` - -### Use Azure PowerShell - -Get virtual network where DNS servers setting has been updated. 
- -```PowerShell -$ResourceGroup = 'enter resource group of virtual network' -$VirtualNetworkName = 'enter virtual network name' -$virtualNetwork = Get-AzVirtualNetwork -ResourceGroup $ResourceGroup -Name $VirtualNetworkName -``` -Use PowerShell command [Invoke-AzResourceAction](/powershell/module/az.resources/invoke-azresourceaction) to synchronize DNS servers configuration for all the virtual clusters in the subnet. - -```PowerShell -Get-AzSqlVirtualCluster ` - | where SubnetId -match $virtualNetwork.Id ` - | select Id ` - | Invoke-AzResourceAction -Action updateManagedInstanceDnsServers -Force -``` -### Use the Azure CLI - -Get virtual network where DNS servers setting has been updated. - -```Azure CLI -resourceGroup="auto-failover-group" -virtualNetworkName="vnet-fog-eastus" -virtualNetwork=$(az network vnet show -g $resourceGroup -n $virtualNetworkName --query "id" -otsv) -``` - -Use Azure CLI command [az resource invoke-action](/cli/azure/resource#az-resource-invoke-action) to synchronize DNS servers configuration for all the virtual clusters in the subnet. - -```Azure CLI -az sql virtual-cluster list --query "[? contains(subnetId,'$virtualNetwork')].id" -o tsv \ - | az resource invoke-action --action updateManagedInstanceDnsServers --ids @- -``` -## Next steps - -- Learn more about configuring a custom DNS [Configure a custom DNS for Azure SQL Managed Instance](custom-dns-configure.md). -- For an overview, see [What is Azure SQL Managed Instance?](sql-managed-instance-paas-overview.md). 
diff --git a/articles/azure-sql/managed-instance/tde-certificate-migrate.md b/articles/azure-sql/managed-instance/tde-certificate-migrate.md deleted file mode 100644 index 12b6546649aae..0000000000000 --- a/articles/azure-sql/managed-instance/tde-certificate-migrate.md +++ /dev/null @@ -1,191 +0,0 @@ ---- -title: Migrate TDE certificate - managed instance -description: Migrate a certificate protecting the database encryption key of a database with Transparent Data Encryption to Azure SQL Managed Instance -services: sql-database -ms.service: sql-managed-instance -ms.subservice: security -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: MladjoA -ms.author: mlandzic -ms.reviewer: mathoma, jovanpop -ms.date: 06/01/2021 ---- - -# Migrate a certificate of a TDE-protected database to Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -When you're migrating a database protected by [Transparent Data Encryption (TDE)](/sql/relational-databases/security/encryption/transparent-data-encryption) to Azure SQL Managed Instance using the native restore option, the corresponding certificate from the SQL Server instance needs to be migrated before database restore. This article walks you through the process of manual migration of the certificate to Azure SQL Managed Instance: - -> [!div class="checklist"] -> -> * Export the certificate to a Personal Information Exchange (.pfx) file -> * Extract the certificate from a file to a base-64 string -> * Upload it using a PowerShell cmdlet - -For an alternative option using a fully managed service for seamless migration of both a TDE-protected database and a corresponding certificate, see [How to migrate your on-premises database to Azure SQL Managed Instance using Azure Database Migration Service](../../dms/tutorial-sql-server-to-managed-instance.md). - -> [!IMPORTANT] -> A migrated certificate is used for restore of the TDE-protected database only. 
Soon after restore is done, the migrated certificate gets replaced by a different protector, either a service-managed certificate or an asymmetric key from the key vault, depending on the type of the TDE you set on the instance. - -## Prerequisites - -To complete the steps in this article, you need the following prerequisites: - -* [Pvk2Pfx](/windows-hardware/drivers/devtest/pvk2pfx) command-line tool installed on the on-premises server or other computer with access to the certificate exported as a file. The Pvk2Pfx tool is part of the [Enterprise Windows Driver Kit](/windows-hardware/drivers/download-the-wdk), a self-contained command-line environment. -* [Windows PowerShell](/powershell/scripting/install/installing-windows-powershell) version 5.0 or higher installed. - -# [PowerShell](#tab/azure-powershell) - -Make sure you have the following: - -* Azure PowerShell module [installed and updated](/powershell/azure/install-az-ps). -* [Az.Sql module](https://www.powershellgallery.com/packages/Az.Sql). - -[!INCLUDE [updated-for-az](../../../includes/updated-for-az.md)] - -> [!IMPORTANT] -> The PowerShell Azure Resource Manager module is still supported by Azure SQL Managed Instance, but all future development is for the Az.Sql module. For these cmdlets, see [AzureRM.Sql](/powershell/module/AzureRM.Sql/). The arguments for the commands in the Az module and in the AzureRM modules are substantially identical. - -Run the following commands in PowerShell to install/update the module: - -```azurepowershell -Install-Module -Name Az.Sql -Update-Module -Name Az.Sql -``` - -# [Azure CLI](#tab/azure-cli) - -If you need to install or upgrade, see [Install the Azure CLI](/cli/azure/install-azure-cli). - -* * * - -## Export the TDE certificate to a .pfx file - -The certificate can be exported directly from the source SQL Server instance, or from the certificate store if it's being kept there. 
- -### Export the certificate from the source SQL Server instance - -Use the following steps to export the certificate with SQL Server Management Studio and convert it into .pfx format. The generic names *TDE_Cert* and *full_path* are being used for certificate and file names and paths through the steps. They should be replaced with the actual names. - -1. In SSMS, open a new query window and connect to the source SQL Server instance. - -1. Use the following script to list TDE-protected databases and get the name of the certificate protecting encryption of the database to be migrated: - - ```sql - USE master - GO - SELECT db.name as [database_name], cer.name as [certificate_name] - FROM sys.dm_database_encryption_keys dek - LEFT JOIN sys.certificates cer - ON dek.encryptor_thumbprint = cer.thumbprint - INNER JOIN sys.databases db - ON dek.database_id = db.database_id - WHERE dek.encryption_state = 3 - ``` - - ![List of TDE certificates](./media/tde-certificate-migrate/onprem-certificate-list.png) - -1. Execute the following script to export the certificate to a pair of files (.cer and .pvk), keeping the public and private key information: - - ```sql - USE master - GO - BACKUP CERTIFICATE TDE_Cert - TO FILE = 'c:\full_path\TDE_Cert.cer' - WITH PRIVATE KEY ( - FILE = 'c:\full_path\TDE_Cert.pvk', - ENCRYPTION BY PASSWORD = '' - ) - ``` - - ![Backup TDE certificate](./media/tde-certificate-migrate/backup-onprem-certificate.png) - -1. Use the PowerShell console to copy certificate information from a pair of newly created files to a .pfx file, using the Pvk2Pfx tool: - - ```cmd - .\pvk2pfx -pvk c:/full_path/TDE_Cert.pvk -pi "" -spc c:/full_path/TDE_Cert.cer -pfx c:/full_path/TDE_Cert.pfx - ``` - -### Export the certificate from a certificate store - -If the certificate is kept in the SQL Server local machine certificate store, it can be exported using the following steps: - -1. 
Open the PowerShell console and execute the following command to open the Certificates snap-in of Microsoft Management Console: - - ```cmd - certlm - ``` - -2. In the Certificates MMC snap-in, expand the path Personal > Certificates to see the list of certificates. - -3. Right-click the certificate and click **Export**. - -4. Follow the wizard to export the certificate and private key to a .pfx format. - -## Upload the certificate to Azure SQL Managed Instance using an Azure PowerShell cmdlet - -# [PowerShell](#tab/azure-powershell) - -1. Start with preparation steps in PowerShell: - - ```azurepowershell - # import the module into the PowerShell session - Import-Module Az - # connect to Azure with an interactive dialog for sign-in - Connect-AzAccount - # list subscriptions available and copy id of the subscription target the managed instance belongs to - Get-AzSubscription - # set subscription for the session - Select-AzSubscription - ``` - -2. Once all preparation steps are done, run the following commands to upload base-64 encoded certificate to the target managed instance: - - ```azurepowershell - # If you are using PowerShell 6.0 or higher, run this command: - $fileContentBytes = Get-Content 'C:/full_path/TDE_Cert.pfx' -AsByteStream - # If you are using PowerShell 5.x, uncomment and run this command instead of the one above: - # $fileContentBytes = Get-Content 'C:/full_path/TDE_Cert.pfx' -Encoding Byte - $base64EncodedCert = [System.Convert]::ToBase64String($fileContentBytes) - $securePrivateBlob = $base64EncodedCert | ConvertTo-SecureString -AsPlainText -Force - $password = "" - $securePassword = $password | ConvertTo-SecureString -AsPlainText -Force - Add-AzSqlManagedInstanceTransparentDataEncryptionCertificate -ResourceGroupName "" ` - -ManagedInstanceName "" -PrivateBlob $securePrivateBlob -Password $securePassword - ``` - -# [Azure CLI](#tab/azure-cli) - -You need to first [set up an Azure key vault](../../key-vault/general/manage-with-cli2.md) with your 
*.pfx* file. - -1. Start with preparation steps in PowerShell: - - ```azurecli - # connect to Azure with an interactive dialog for sign-in - az login - - # list subscriptions available and copy id of the subscription target the managed instance belongs to - az account list - - # set subscription for the session - az account set --subscription - ``` - -1. Once all preparation steps are done, run the following commands to upload the base-64 encoded certificate to the target managed instance: - - ```azurecli - az sql mi tde-key set --server-key-type AzureKeyVault --kid "" ` - --managed-instance "" --resource-group "" - ``` - -* * * - -The certificate is now available to the specified managed instance, and the backup of the corresponding TDE-protected database can be restored successfully. - -## Next steps - -In this article, you learned how to migrate a certificate protecting the encryption key of a database with Transparent Data Encryption, from the on-premises or IaaS SQL Server instance to Azure SQL Managed Instance. - -See [Restore a database backup to a Azure SQL Managed Instance](restore-sample-database-quickstart.md) to learn how to restore a database backup to Azure SQL Managed Instance. diff --git a/articles/azure-sql/managed-instance/threat-detection-configure.md b/articles/azure-sql/managed-instance/threat-detection-configure.md deleted file mode 100644 index 1c3e1a877e862..0000000000000 --- a/articles/azure-sql/managed-instance/threat-detection-configure.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Configure Advanced Threat Protection -titleSuffix: Azure SQL Managed Instance -description: Advanced Threat Protection detects anomalous database activities indicating potential security threats to the database in Azure SQL Managed Instance. 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: security -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: rmatchoro -ms.author: ronmat -ms.reviewer: vanto -ms.date: 12/01/2020 ---- -# Configure Advanced Threat Protection in Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -[Advanced Threat Protection](../database/threat-detection-overview.md) for an [Azure SQL Managed Instance](sql-managed-instance-paas-overview.md) detects anomalous activities indicating unusual and potentially harmful attempts to access or exploit databases. Advanced Threat Protection can identify **Potential SQL injection**, **Access from unusual location or data center**, **Access from unfamiliar principal or potentially harmful application**, and **Brute force SQL credentials** - see more details in [Advanced Threat Protection alerts](../database/threat-detection-overview.md#alerts). - -You can receive notifications about the detected threats via [email notifications](../database/threat-detection-overview.md#explore-detection-of-a-suspicious-event) or [Azure portal](../database/threat-detection-overview.md#explore-alerts-in-the-azure-portal) - -[Advanced Threat Protection](../database/threat-detection-overview.md) is part of the [Microsoft Defender for SQL](../database/azure-defender-for-sql.md) offering, which is a unified package for advanced SQL security capabilities. Advanced Threat Protection can be accessed and managed via the central Microsoft Defender for SQL portal. - -## Azure portal - -1. Sign in to the [Azure portal](https://portal.azure.com). -2. Navigate to the configuration page of the instance of SQL Managed Instance you want to protect. Under **Security**, select **Defender for SQL**. -3. In the Microsoft Defender for SQL configuration page: - - Turn **ON** Microsoft Defender for SQL. 
- - Configure the **Send alerts to** email address to receive security alerts upon detection of anomalous database activities. - - Select the **Azure storage account** where anomalous threat audit records are saved. - - Select the **Advanced Threat Protection types** that you would like configured. Learn more about [Advanced Threat Protection alerts](../database/threat-detection-overview.md). -4. Click **Save** to save the new or updated Microsoft Defender for SQL policy. - - :::image type="content" source="../database/media/azure-defender-for-sql/set-up-advanced-threat-protection-mi.png" alt-text="set up advanced threat protection"::: - -## Next steps - -- Learn more about [Advanced Threat Protection](../database/threat-detection-overview.md). -- Learn about managed instances, see [What is an Azure SQL Managed Instance](sql-managed-instance-paas-overview.md). -- Learn more about [Advanced Threat Protection for Azure SQL Database](../database/threat-detection-configure.md). -- Learn more about [SQL Managed Instance auditing](./auditing-configure.md). -- Learn more about [Microsoft Defender for Cloud](../../security-center/security-center-introduction.md). 
diff --git a/articles/azure-sql/managed-instance/timezones-overview.md b/articles/azure-sql/managed-instance/timezones-overview.md deleted file mode 100644 index 0a49a3110c6bc..0000000000000 --- a/articles/azure-sql/managed-instance/timezones-overview.md +++ /dev/null @@ -1,243 +0,0 @@ ---- -title: Azure SQL Managed Instance time zones -description: Learn about the time zone specifics of Azure SQL Managed Instance -services: sql-database -ms.service: sql-managed-instance -ms.subservice: service-overview -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: reference -author: MladjoA -ms.author: mlandzic -ms.reviewer: mathoma -ms.date: 10/12/2020 ---- -# Time zones in Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Coordinated Universal Time (UTC) is the recommended time zone for the data tier of cloud solutions. Azure SQL Managed Instance also offers a choice of time zones to meet the needs of existing applications that store date and time values and call date and time functions with an implicit context of a specific time zone. - -T-SQL functions like [GETDATE()](/sql/t-sql/functions/getdate-transact-sql) or CLR code observe the time zone set on the instance level. SQL Server Agent jobs also follow schedules according to the time zone of the instance. - - > [!NOTE] - > Azure SQL Database does not support time zone settings; it always follows UTC. Use [AT TIME ZONE](/sql/t-sql/queries/at-time-zone-transact-sql) in SQL Database if you need to interpret date and time information in a non-UTC time zone. - -## Supported time zones - -A set of supported time zones is inherited from the underlying operating system of the managed instance. It's regularly updated to get new time zone definitions and reflect changes to the existing ones. - -[Daylight saving time/time zone changes policy](/troubleshoot/windows-client/system-management-components/daylight-saving-time-help-support) guarantees historical accuracy from 2010 forward. 
- -A list with names of the supported time zones is exposed through the [sys.time_zone_info](/sql/relational-databases/system-catalog-views/sys-time-zone-info-transact-sql) system view. - -## Set a time zone - -A time zone of a managed instance can be set during instance creation only. The default time zone is UTC. - - >[!NOTE] - > The time zone of an existing managed instance can't be changed. - -### Set the time zone through the Azure portal - -When you enter parameters for a new instance, select a time zone from the list of supported time zones. - -![Setting a time zone during instance creation](./media/timezones-overview/01-setting_timezone-during-instance-creation.png) - -### Azure Resource Manager template - -Specify the timezoneId property in your [Resource Manager template](./create-template-quickstart.md) to set the time zone during instance creation. - -```json -"properties": { - "administratorLogin": "[parameters('user')]", - "administratorLoginPassword": "[parameters('pwd')]", - "subnetId": "[parameters('subnetId')]", - "storageSizeInGB": 256, - "vCores": 8, - "licenseType": "LicenseIncluded", - "hardwareFamily": "Gen5", - "collation": "Serbian_Cyrillic_100_CS_AS", - "timezoneId": "Central European Standard Time" - }, - -``` - -A list of supported values for the timezoneId property is at the end of this article. - -If not specified, the time zone is set to UTC. - -## Check the time zone of an instance - -The [CURRENT_TIMEZONE](/sql/t-sql/functions/current-timezone-transact-sql) function returns a display name of the time zone of the instance. - -## Cross-feature considerations - -### Restore and import - -You can restore a backup file or import data to a managed instance from an instance or a server with different time zone settings. Make sure to do so with caution. Analyze the application behavior and the results of the queries and reports, just like when you transfer data between two SQL Server instances with different time zone settings. 
- -### Point-in-time restore - -When you perform a point-in-time restore, the time to restore to is interpreted as UTC time. This way any ambiguities due to daylight saving time and its potential changes are avoided. - -### Auto-failover groups - -Using the same time zone across a primary and secondary instance in a failover group isn't enforced, but we strongly recommend it. - - >[!WARNING] - > We strongly recommend that you use the same time zone for the primary and secondary instance in a failover group. Because of certain rare use cases keeping the same time zone across primary and secondary instances isn't enforced. It's important to understand that in the case of manual or automatic failover, the secondary instance will retain its original time zone. - -## Limitations - -- The time zone of the existing managed instance can't be changed. As a workaround, create a new managed instance with the proper time zone and then either perform a manual backup and restore, or what we recommend, perform a [cross-instance point-in-time restore](./point-in-time-restore.md?tabs=azure-portal#restore-an-existing-database). -- External processes launched from the SQL Server Agent jobs don't observe the time zone of the instance. 
- -## List of supported time zones - -| **Time zone ID** | **Time zone display name** | -| --- | --- | -| Dateline Standard Time | (UTC-12:00) International Date Line West | -| UTC-11 | (UTC-11:00) Coordinated Universal Time-11 | -| Aleutian Standard Time | (UTC-10:00) Aleutian Islands | -| Hawaiian Standard Time | (UTC-10:00) Hawaii | -| Marquesas Standard Time | (UTC-09:30) Marquesas Islands | -| Alaskan Standard Time | (UTC-09:00) Alaska | -| UTC-09 | (UTC-09:00) Coordinated Universal Time-09 | -| Pacific Standard Time (Mexico) | (UTC-08:00) Baja California | -| UTC-08 | (UTC-08:00) Coordinated Universal Time-08 | -| Pacific Standard Time | (UTC-08:00) Pacific Time (US & Canada) | -| US Mountain Standard Time | (UTC-07:00) Arizona | -| Mountain Standard Time (Mexico) | (UTC-07:00) Chihuahua, La Paz, Mazatlan | -| Mountain Standard Time | (UTC-07:00) Mountain Time (US & Canada) | -| Central America Standard Time | (UTC-06:00) Central America | -| Central Standard Time | (UTC-06:00) Central Time (US & Canada) | -| Easter Island Standard Time | (UTC-06:00) Easter Island | -| Central Standard Time (Mexico) | (UTC-06:00) Guadalajara, Mexico City, Monterrey | -| Canada Central Standard Time | (UTC-06:00) Saskatchewan | -| SA Pacific Standard Time | (UTC-05:00) Bogota, Lima, Quito, Rio Branco | -| Eastern Standard Time (Mexico) | (UTC-05:00) Chetumal | -| Eastern Standard Time | (UTC-05:00) Eastern Time (US & Canada) | -| Haiti Standard Time | (UTC-05:00) Haiti | -| Cuba Standard Time | (UTC-05:00) Havana | -| US Eastern Standard Time | (UTC-05:00) Indiana (East) | -| Turks And Caicos Standard Time | (UTC-05:00) Turks and Caicos | -| Paraguay Standard Time | (UTC-04:00) Asuncion | -| Atlantic Standard Time | (UTC-04:00) Atlantic Time (Canada) | -| Venezuela Standard Time | (UTC-04:00) Caracas | -| Central Brazilian Standard Time | (UTC-04:00) Cuiaba | -| SA Western Standard Time | (UTC-04:00) Georgetown, La Paz, Manaus, San Juan | -| Pacific SA Standard Time | 
(UTC-04:00) Santiago | -| Newfoundland Standard Time | (UTC-03:30) Newfoundland | -| Tocantins Standard Time | (UTC-03:00) Araguaina | -| E. South America Standard Time | (UTC-03:00) Brasilia | -| SA Eastern Standard Time | (UTC-03:00) Cayenne, Fortaleza | -| Argentina Standard Time | (UTC-03:00) City of Buenos Aires | -| Greenland Standard Time | (UTC-03:00) Greenland | -| Montevideo Standard Time | (UTC-03:00) Montevideo | -| Magallanes Standard Time | (UTC-03:00) Punta Arenas | -| Saint Pierre Standard Time | (UTC-03:00) Saint Pierre and Miquelon | -| Bahia Standard Time | (UTC-03:00) Salvador | -| UTC-02 | (UTC-02:00) Coordinated Universal Time-02 | -| Mid-Atlantic Standard Time | (UTC-02:00) Mid-Atlantic - Old | -| Azores Standard Time | (UTC-01:00) Azores | -| Cape Verde Standard Time | (UTC-01:00) Cabo Verde Is. | -| UTC | (UTC) Coordinated Universal Time | -| GMT Standard Time | (UTC+00:00) Dublin, Edinburgh, Lisbon, London | -| Greenwich Standard Time | (UTC+00:00) Monrovia, Reykjavik | -| W. Europe Standard Time | (UTC+01:00) Amsterdam, Berlin, Bern, Rome, Stockholm, Vienna | -| Central Europe Standard Time | (UTC+01:00) Belgrade, Bratislava, Budapest, Ljubljana, Prague | -| Romance Standard Time | (UTC+01:00) Brussels, Copenhagen, Madrid, Paris | -| Morocco Standard Time | (UTC+01:00) Casablanca | -| Sao Tome Standard Time | (UTC+01:00) Sao Tome | -| Central European Standard Time | (UTC+01:00) Sarajevo, Skopje, Warsaw, Zagreb | -| W. Central Africa Standard Time | (UTC+01:00) West Central Africa | -| Jordan Standard Time | (UTC+02:00) Amman | -| GTB Standard Time | (UTC+02:00) Athens, Bucharest | -| Middle East Standard Time | (UTC+02:00) Beirut | -| Egypt Standard Time | (UTC+02:00) Cairo | -| E. 
Europe Standard Time | (UTC+02:00) Chisinau | -| Syria Standard Time | (UTC+02:00) Damascus | -| West Bank Standard Time | (UTC+02:00) Gaza, Hebron | -| South Africa Standard Time | (UTC+02:00) Harare, Pretoria | -| FLE Standard Time | (UTC+02:00) Helsinki, Kyiv, Riga, Sofia, Tallinn, Vilnius | -| Israel Standard Time | (UTC+02:00) Jerusalem | -| Kaliningrad Standard Time | (UTC+02:00) Kaliningrad | -| Sudan Standard Time | (UTC+02:00) Khartoum | -| Libya Standard Time | (UTC+02:00) Tripoli | -| Namibia Standard Time | (UTC+02:00) Windhoek | -| Arabic Standard Time | (UTC+03:00) Baghdad | -| Turkey Standard Time | (UTC+03:00) Istanbul | -| Arab Standard Time | (UTC+03:00) Kuwait, Riyadh | -| Belarus Standard Time | (UTC+03:00) Minsk | -| Russian Standard Time | (UTC+03:00) Moscow, St. Petersburg | -| E. Africa Standard Time | (UTC+03:00) Nairobi | -| Iran Standard Time | (UTC+03:30) Tehran | -| Arabian Standard Time | (UTC+04:00) Abu Dhabi, Muscat | -| Astrakhan Standard Time | (UTC+04:00) Astrakhan, Ulyanovsk | -| Azerbaijan Standard Time | (UTC+04:00) Baku | -| Russia Time Zone 3 | (UTC+04:00) Izhevsk, Samara | -| Mauritius Standard Time | (UTC+04:00) Port Louis | -| Saratov Standard Time | (UTC+04:00) Saratov | -| Georgian Standard Time | (UTC+04:00) Tbilisi | -| Volgograd Standard Time | (UTC+04:00) Volgograd | -| Caucasus Standard Time | (UTC+04:00) Yerevan | -| Afghanistan Standard Time | (UTC+04:30) Kabul | -| West Asia Standard Time | (UTC+05:00) Ashgabat, Tashkent | -| Ekaterinburg Standard Time | (UTC+05:00) Ekaterinburg | -| Pakistan Standard Time | (UTC+05:00) Islamabad, Karachi | -| India Standard Time | (UTC+05:30) Chennai, Kolkata, Mumbai, New Delhi | -| Sri Lanka Standard Time | (UTC+05:30) Sri Jayawardenepura | -| Nepal Standard Time | (UTC+05:45) Kathmandu | -| Central Asia Standard Time | (UTC+06:00) Astana | -| Bangladesh Standard Time | (UTC+06:00) Dhaka | -| Omsk Standard Time | (UTC+06:00) Omsk | -| Myanmar Standard Time | (UTC+06:30) Yangon 
(Rangoon) | -| SE Asia Standard Time | (UTC+07:00) Bangkok, Hanoi, Jakarta | -| Altai Standard Time | (UTC+07:00) Barnaul, Gorno-Altaysk | -| W. Mongolia Standard Time | (UTC+07:00) Hovd | -| North Asia Standard Time | (UTC+07:00) Krasnoyarsk | -| N. Central Asia Standard Time | (UTC+07:00) Novosibirsk | -| Tomsk Standard Time | (UTC+07:00) Tomsk | -| China Standard Time | (UTC+08:00) Beijing, Chongqing, Hong Kong, Urumqi | -| North Asia East Standard Time | (UTC+08:00) Irkutsk | -| Singapore Standard Time | (UTC+08:00) Kuala Lumpur, Singapore | -| W. Australia Standard Time | (UTC+08:00) Perth | -| Taipei Standard Time | (UTC+08:00) Taipei | -| Ulaanbaatar Standard Time | (UTC+08:00) Ulaanbaatar | -| Aus Central W. Standard Time | (UTC+08:45) Eucla | -| Transbaikal Standard Time | (UTC+09:00) Chita | -| Tokyo Standard Time | (UTC+09:00) Osaka, Sapporo, Tokyo | -| North Korea Standard Time | (UTC+09:00) Pyongyang | -| Korea Standard Time | (UTC+09:00) Seoul | -| Yakutsk Standard Time | (UTC+09:00) Yakutsk | -| Cen. Australia Standard Time | (UTC+09:30) Adelaide | -| AUS Central Standard Time | (UTC+09:30) Darwin | -| E. 
Australia Standard Time | (UTC+10:00) Brisbane | -| AUS Eastern Standard Time | (UTC+10:00) Canberra, Melbourne, Sydney | -| West Pacific Standard Time | (UTC+10:00) Guam, Port Moresby | -| Tasmania Standard Time | (UTC+10:00) Hobart | -| Vladivostok Standard Time | (UTC+10:00) Vladivostok | -| Lord Howe Standard Time | (UTC+10:30) Lord Howe Island | -| Bougainville Standard Time | (UTC+11:00) Bougainville Island | -| Russia Time Zone 10 | (UTC+11:00) Chokurdakh | -| Magadan Standard Time | (UTC+11:00) Magadan | -| Norfolk Standard Time | (UTC+11:00) Norfolk Island | -| Sakhalin Standard Time | (UTC+11:00) Sakhalin | -| Central Pacific Standard Time | (UTC+11:00) Solomon Is., New Caledonia | -| Russia Time Zone 11 | (UTC+12:00) Anadyr, Petropavlovsk-Kamchatsky | -| New Zealand Standard Time | (UTC+12:00) Auckland, Wellington | -| UTC+12 | (UTC+12:00) Coordinated Universal Time+12 | -| Fiji Standard Time | (UTC+12:00) Fiji | -| Kamchatka Standard Time | (UTC+12:00) Petropavlovsk-Kamchatsky - Old | -| Chatham Islands Standard Time | (UTC+12:45) Chatham Islands | -| UTC+13 | (UTC+13:00) Coordinated Universal Time+13 | -| Tonga Standard Time | (UTC+13:00) Nuku'alofa | -| Samoa Standard Time | (UTC+13:00) Samoa | -| Line Islands Standard Time | (UTC+14:00) Kiritimati Island | - -## See also - -- [CURRENT_TIMEZONE (Transact-SQL)](/sql/t-sql/functions/current-timezone-transact-sql) -- [CURRENT_TIMEZONE_ID (Transact-SQL)](/sql/t-sql/functions/current-timezone-id-transact-sql) -- [AT TIME ZONE (Transact-SQL)](/sql/t-sql/queries/at-time-zone-transact-sql) -- [sys.time_zone_info (Transact-SQL)](/sql/relational-databases/system-catalog-views/sys-time-zone-info-transact-sql) \ No newline at end of file diff --git a/articles/azure-sql/managed-instance/transact-sql-tsql-differences-sql-server.md b/articles/azure-sql/managed-instance/transact-sql-tsql-differences-sql-server.md deleted file mode 100644 index 54e22b15f9068..0000000000000 --- 
a/articles/azure-sql/managed-instance/transact-sql-tsql-differences-sql-server.md +++ /dev/null @@ -1,559 +0,0 @@ ---- -title: T-SQL differences between SQL Server & Azure SQL Managed Instance -description: This article discusses the Transact-SQL (T-SQL) differences between an Azure SQL Managed Instance and SQL Server. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: service-overview -ms.devlang: -ms.topic: reference -author: danimir -ms.author: danil -ms.reviewer: mathoma, bonova, danil -ms.date: 04/19/2022 -ms.custom: seoapril2019, sqldbrb=1 ---- - -# T-SQL differences between SQL Server & Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article summarizes and explains the differences in syntax and behavior between Azure SQL Managed Instance and SQL Server. - - -SQL Managed Instance provides high compatibility with the SQL Server database engine, and most features are supported in a SQL Managed Instance. - -![Easy migration from SQL Server](./media/transact-sql-tsql-differences-sql-server/migration.png) - -There are some PaaS limitations that are introduced in SQL Managed Instance and some behavior changes compared to SQL Server. The differences are divided into the following categories: - -- [Availability](#availability) includes the differences in [Always On Availability Groups](#always-on-availability-groups) and [backups](#backup). -- [Security](#security) includes the differences in [auditing](#auditing), [certificates](#certificates), [credentials](#credential), [cryptographic providers](#cryptographic-providers), [logins and users](#logins-and-users), and the [service key and service master key](#service-key-and-service-master-key). 
-- [Configuration](#configuration) includes the differences in [buffer pool extension](#buffer-pool-extension), [collation](#collation), [compatibility levels](#compatibility-levels), [database mirroring](#database-mirroring), [database options](#database-options), [SQL Server Agent](#sql-server-agent), and [table options](#tables). -- [Functionalities](#functionalities) include [BULK INSERT/OPENROWSET](#bulk-insert--openrowset), [CLR](#clr), [DBCC](#dbcc), [distributed transactions](#distributed-transactions), [extended events](#extended-events), [external libraries](#external-libraries), [filestream and FileTable](#filestream-and-filetable), [full-text Semantic Search](#full-text-semantic-search), [linked servers](#linked-servers), [PolyBase](#polybase), [Replication](#replication), [RESTORE](#restore-statement), [Service Broker](#service-broker), [stored procedures, functions, and triggers](#stored-procedures-functions-and-triggers). -- [Environment settings](#Environment) such as VNets and subnet configurations. - -Most of these features are architectural constraints and represent service features. - -Temporary known issues that are discovered in SQL Managed Instance and will be resolved in the future are described in [What's new?](doc-changes-updates-release-notes-whats-new.md). - -## Availability - -### Always On Availability Groups - -[High availability](../database/high-availability-sla.md) is built into SQL Managed Instance and can't be controlled by users. 
The following statements aren't supported: - -- [CREATE ENDPOINT … FOR DATABASE_MIRRORING](/sql/t-sql/statements/create-endpoint-transact-sql) -- [CREATE AVAILABILITY GROUP](/sql/t-sql/statements/create-availability-group-transact-sql) -- [ALTER AVAILABILITY GROUP](/sql/t-sql/statements/alter-availability-group-transact-sql) -- [DROP AVAILABILITY GROUP](/sql/t-sql/statements/drop-availability-group-transact-sql) -- The [SET HADR](/sql/t-sql/statements/alter-database-transact-sql-set-hadr) clause of the [ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql) statement - -### Backup - -Azure SQL Managed Instance has automatic backups, so users can create full database `COPY_ONLY` backups. Differential, log, and file snapshot backups aren't supported. - -- With a SQL Managed Instance, you can back up an instance database only to an Azure Blob storage account: - - Only `BACKUP TO URL` is supported. - - `FILE`, `TAPE`, and backup devices aren't supported. -- Most of the general `WITH` options are supported. - - `COPY_ONLY` is mandatory. - - `FILE_SNAPSHOT` isn't supported. - - Tape options: `REWIND`, `NOREWIND`, `UNLOAD`, and `NOUNLOAD` aren't supported. - - Log-specific options: `NORECOVERY`, `STANDBY`, and `NO_TRUNCATE` aren't supported. - -Limitations: - -- With a SQL Managed Instance, you can back up an instance database to a backup with up to 32 stripes, which is enough for databases up to 4 TB if backup compression is used. -- You can't execute `BACKUP DATABASE ... WITH COPY_ONLY` on a database that's encrypted with service-managed Transparent Data Encryption (TDE). Service-managed TDE forces backups to be encrypted with an internal TDE key. The key can't be exported, so you can't restore the backup. Use automatic backups and point-in-time restore, or use [customer-managed (BYOK) TDE](../database/transparent-data-encryption-tde-overview.md#customer-managed-transparent-data-encryption---bring-your-own-key) instead. 
You also can disable encryption on the database. -- Native backups taken on a SQL Managed Instance cannot be restored to a SQL Server. This is because SQL Managed Instance has a higher internal database version compared to any version of SQL Server. -- To back up or restore a database to/from an Azure storage, it is necessary to create a shared access signature (SAS), a URI that grants you restricted access rights to Azure Storage resources [Learn more on this](restore-sample-database-quickstart.md#restore-from-a-backup-file-using-t-sql). Using Access keys for these scenarios is not supported. -- The maximum backup stripe size by using the `BACKUP` command in SQL Managed Instance is 195 GB, which is the maximum blob size. Increase the number of stripes in the backup command to reduce individual stripe size and stay within this limit. - - > [!TIP] - > To work around this limitation, when you back up a database from either SQL Server in an on-premises environment or in a virtual machine, you can: - > - > - Back up to `DISK` instead of backing up to `URL`. - > - Upload the backup files to Blob storage. - > - Restore into SQL Managed Instance. - > - > The `Restore` command in SQL Managed Instance supports bigger blob sizes in the backup files because a different blob type is used for storage of the uploaded backup files. - -For information about backups using T-SQL, see [BACKUP](/sql/t-sql/statements/backup-transact-sql). - -## Security - -### Auditing - -The key differences between auditing in Microsoft Azure SQL and in SQL Server are: - -- With SQL Managed Instance, auditing works at the server level. The `.xel` log files are stored in Azure Blob storage. -- With Azure SQL Database, auditing works at the database level. The `.xel` log files are stored in Azure Blob storage. -- With SQL Server, on-premises or in virtual machines, auditing works at the server level. Events are stored on file system or Windows event logs. 
- -XEvent auditing in SQL Managed Instance supports Azure Blob storage targets. File and Windows logs aren't supported. - -The key differences in the `CREATE AUDIT` syntax for auditing to Azure Blob storage are: - -- A new syntax `TO URL` is provided that you can use to specify the URL of the Azure Blob storage container where the `.xel` files are placed. -- The syntax `TO FILE` isn't supported because SQL Managed Instance can't access Windows file shares. - -For more information, see: - -- [CREATE SERVER AUDIT](/sql/t-sql/statements/create-server-audit-transact-sql) -- [ALTER SERVER AUDIT](/sql/t-sql/statements/alter-server-audit-transact-sql) -- [Auditing](/sql/relational-databases/security/auditing/sql-server-audit-database-engine) - -### Certificates - -SQL Managed Instance can't access file shares and Windows folders, so the following constraints apply: - -- The `CREATE FROM`/`BACKUP TO` file isn't supported for certificates. -- The `CREATE`/`BACKUP` certificate from `FILE`/`ASSEMBLY` isn't supported. Private key files can't be used. - -See [CREATE CERTIFICATE](/sql/t-sql/statements/create-certificate-transact-sql) and [BACKUP CERTIFICATE](/sql/t-sql/statements/backup-certificate-transact-sql). - -**Workaround**: Instead of creating backup of certificate and restoring the backup, [get the certificate binary content and private key, store it as .sql file, and create from binary](/sql/t-sql/functions/certencoded-transact-sql#b-copying-a-certificate-to-another-database): - -```sql -CREATE CERTIFICATE - FROM BINARY = asn_encoded_certificate -WITH PRIVATE KEY () -``` - -### Credential - -Only Azure Key Vault and `SHARED ACCESS SIGNATURE` identities are supported. Windows users aren't supported. - -See [CREATE CREDENTIAL](/sql/t-sql/statements/create-credential-transact-sql) and [ALTER CREDENTIAL](/sql/t-sql/statements/alter-credential-transact-sql). 
- -### Cryptographic providers - -SQL Managed Instance can't access files, so cryptographic providers can't be created: - -- `CREATE CRYPTOGRAPHIC PROVIDER` isn't supported. See [CREATE CRYPTOGRAPHIC PROVIDER](/sql/t-sql/statements/create-cryptographic-provider-transact-sql). -- `ALTER CRYPTOGRAPHIC PROVIDER` isn't supported. See [ALTER CRYPTOGRAPHIC PROVIDER](/sql/t-sql/statements/alter-cryptographic-provider-transact-sql). - -### Logins and users - -- SQL logins created by using `FROM CERTIFICATE`, `FROM ASYMMETRIC KEY`, and `FROM SID` are supported. See [CREATE LOGIN](/sql/t-sql/statements/create-login-transact-sql). -- Azure Active Directory (Azure AD) server principals (logins) created with the [CREATE LOGIN](/sql/t-sql/statements/create-login-transact-sql?view=azuresqldb-mi-current&preserve-view=true) syntax or the [CREATE USER FROM LOGIN [Azure AD Login]](/sql/t-sql/statements/create-user-transact-sql?view=azuresqldb-mi-current&preserve-view=true) syntax are supported. These logins are created at the server level. - - SQL Managed Instance supports Azure AD database principals with the syntax `CREATE USER [AADUser/AAD group] FROM EXTERNAL PROVIDER`. This feature is also known as Azure AD contained database users. - -- Windows logins created with the `CREATE LOGIN ... FROM WINDOWS` syntax aren't supported. Use Azure Active Directory logins and users. -- The Azure AD admin for the instance has [unrestricted admin privileges](../database/logins-create-manage.md). -- Non-administrator Azure AD database-level users can be created by using the `CREATE USER ... FROM EXTERNAL PROVIDER` syntax. See [CREATE USER ... FROM EXTERNAL PROVIDER](../database/authentication-aad-configure.md#create-contained-users-mapped-to-azure-ad-identities). -- Azure AD server principals (logins) support SQL features within one SQL Managed Instance only. 
Features that require cross-instance interaction, no matter whether they're within the same Azure AD tenant or different tenants, aren't supported for Azure AD users. Examples of such features are: - - - SQL transactional replication. - - Link server. - -- Setting an Azure AD login mapped to an Azure AD group as the database owner isn't supported. A member of the Azure AD group can be a database owner, even if the login hasn't been created in the database. -- Impersonation of Azure AD server-level principals by using other Azure AD principals is supported, such as the [EXECUTE AS](/sql/t-sql/statements/execute-as-transact-sql) clause. EXECUTE AS limitations are: - - - EXECUTE AS USER isn't supported for Azure AD users when the name differs from the login name. An example is when the user is created through the syntax `CREATE USER [myAadUser] FROM LOGIN [john@contoso.com]` and impersonation is attempted through `EXEC AS USER = myAadUser`. When you create a **USER** from an Azure AD server principal (login), specify the user_name as the same login_name from **LOGIN**. - - Only the SQL Server-level principals (logins) that are part of the `sysadmin` role can execute the following operations that target Azure AD principals: - - - EXECUTE AS USER - - EXECUTE AS LOGIN - - - To impersonate a user with EXECUTE AS statement the user needs to be mapped directly to Azure AD server principal (login). Users that are members of Azure AD groups mapped into Azure AD server principals cannot effectively be impersonated with EXECUTE AS statement, even though the caller has the impersonate permissions on the specified user name. - -- Database export/import using bacpac files are supported for Azure AD users in SQL Managed Instance using either [SSMS V18.4 or later](/sql/ssms/download-sql-server-management-studio-ssms), or [SQLPackage.exe](/sql/tools/sqlpackage-download). 
- - The following configurations are supported using database bacpac file: - - Export/import a database between different manage instances within the same Azure AD domain. - - Export a database from SQL Managed Instance and import to SQL Database within the same Azure AD domain. - - Export a database from SQL Database and import to SQL Managed Instance within the same Azure AD domain. - - Export a database from SQL Managed Instance and import to SQL Server (version 2012 or later). - - In this configuration, all Azure AD users are created as SQL Server database principals (users) without logins. The type of users is listed as `SQL` and is visible as `SQL_USER` in `sys.database_principals`). Their permissions and roles remain in the SQL Server database metadata and can be used for impersonation. However, they cannot be used to access and sign in to the SQL Server using their credentials. - -- Only the server-level principal login, which is created by the SQL Managed Instance provisioning process, members of the server roles, such as `securityadmin` or `sysadmin`, or other logins with ALTER ANY LOGIN permission at the server level can create Azure AD server principals (logins) in the master database for SQL Managed Instance. -- If the login is a SQL principal, only logins that are part of the `sysadmin` role can use the create command to create logins for an Azure AD account. -- The Azure AD login must be a member of an Azure AD within the same directory that's used for Azure SQL Managed Instance. -- Azure AD server principals (logins) are visible in Object Explorer starting with SQL Server Management Studio 18.0 preview 5. -- A server principal with *sysadmin* access level is automatically created for the Azure AD admin account once it's enabled on an instance. -- During authentication, the following sequence is applied to resolve the authenticating principal: - - 1. 
If the Azure AD account exists as directly mapped to the Azure AD server principal (login), which is present in `sys.server_principals` as type "E," grant access and apply permissions of the Azure AD server principal (login). - 1. If the Azure AD account is a member of an Azure AD group that's mapped to the Azure AD server principal (login), which is present in `sys.server_principals` as type "X," grant access and apply permissions of the Azure AD group login. - 1. If the Azure AD account exists as directly mapped to an Azure AD user in a database, which is present in `sys.database_principals` as type "E," grant access and apply permissions of the Azure AD database user. - 1. If the Azure AD account is a member of an Azure AD group that's mapped to an Azure AD user in a database, which is present in `sys.database_principals` as type "X," grant access and apply permissions of the Azure AD group user. - -### Service key and service master key - -- [Master key backup](/sql/t-sql/statements/backup-master-key-transact-sql) isn't supported (managed by SQL Database service). -- [Master key restore](/sql/t-sql/statements/restore-master-key-transact-sql) isn't supported (managed by SQL Database service). -- [Service master key backup](/sql/t-sql/statements/backup-service-master-key-transact-sql) isn't supported (managed by SQL Database service). -- [Service master key restore](/sql/t-sql/statements/restore-service-master-key-transact-sql) isn't supported (managed by SQL Database service). - -## Configuration - -### Buffer pool extension - -- [Buffer pool extension](/sql/database-engine/configure-windows/buffer-pool-extension) isn't supported. -- `ALTER SERVER CONFIGURATION SET BUFFER POOL EXTENSION` isn't supported. See [ALTER SERVER CONFIGURATION](/sql/t-sql/statements/alter-server-configuration-transact-sql). - -### Collation - -The default instance collation is `SQL_Latin1_General_CP1_CI_AS` and can be specified as a creation parameter. 
See [Collations](/sql/t-sql/statements/collations). - -### Compatibility levels - -- Supported compatibility levels are 100, 110, 120, 130, 140 and 150. -- Compatibility levels below 100 aren't supported. -- The default compatibility level for new databases is 140. For restored databases, the compatibility level remains unchanged if it was 100 and above. - -See [ALTER DATABASE Compatibility Level](/sql/t-sql/statements/alter-database-transact-sql-compatibility-level). - -### Database mirroring - -Database mirroring isn't supported. - -- `ALTER DATABASE SET PARTNER` and `SET WITNESS` options aren't supported. -- `CREATE ENDPOINT … FOR DATABASE_MIRRORING` isn't supported. - -For more information, see [ALTER DATABASE SET PARTNER and SET WITNESS](/sql/t-sql/statements/alter-database-transact-sql-database-mirroring) and [CREATE ENDPOINT … FOR DATABASE_MIRRORING](/sql/t-sql/statements/create-endpoint-transact-sql). - -### Database options - -- Multiple log files aren't supported. -- In-memory objects aren't supported in the General Purpose service tier. -- There's a limit of 280 files per General Purpose instance, which implies a maximum of 280 files per database. Both data and log files in the General Purpose tier are counted toward this limit. [The Business Critical tier supports 32,767 files per database](./resource-limits.md#service-tier-characteristics). -- The database can't contain filegroups that contain filestream data. Restore fails if .bak contains `FILESTREAM` data. -- Every file is placed in Azure Blob storage. IO and throughput per file depend on the size of each individual file. - -#### CREATE DATABASE statement - -The following limitations apply to `CREATE DATABASE`: - -- Files and filegroups can't be defined. -- The `CONTAINMENT` option isn't supported. -- `WITH` options aren't supported. - > [!TIP] - > As a workaround, use `ALTER DATABASE` after `CREATE DATABASE` to set database options to add files or to set containment. 
- -- The `FOR ATTACH` option isn't supported. -- The `AS SNAPSHOT OF` option isn't supported. - -For more information, see [CREATE DATABASE](/sql/t-sql/statements/create-database-sql-server-transact-sql). - -#### ALTER DATABASE statement - -Some file properties can't be set or changed: - -- A file path can't be specified in the `ALTER DATABASE ADD FILE (FILENAME='path')` T-SQL statement. Remove `FILENAME` from the script because SQL Managed Instance automatically places the files. -- A file name can't be changed by using the `ALTER DATABASE` statement. - -The following options are set by default and can't be changed: - -- `MULTI_USER` -- `ENABLE_BROKER` -- `AUTO_CLOSE OFF` - -The following options can't be modified: - -- `AUTO_CLOSE` -- `AUTOMATIC_TUNING(CREATE_INDEX=ON|OFF)` -- `AUTOMATIC_TUNING(DROP_INDEX=ON|OFF)` -- `DISABLE_BROKER` -- `EMERGENCY` -- `ENABLE_BROKER` -- `FILESTREAM` -- `HADR` -- `NEW_BROKER` -- `OFFLINE` -- `PAGE_VERIFY` -- `PARTNER` -- `READ_ONLY` -- `RECOVERY BULK_LOGGED` -- `RECOVERY_SIMPLE` -- `REMOTE_DATA_ARCHIVE` -- `RESTRICTED_USER` -- `SINGLE_USER` -- `WITNESS` - -Some `ALTER DATABASE` statements (for example, [SET CONTAINMENT](/sql/relational-databases/databases/migrate-to-a-partially-contained-database#converting-a-database-to-partially-contained-using-transact-sql)) might transiently fail, for example during the automated database backup or right after a database is created. In this case `ALTER DATABASE` statement should be retried. For more information on related error messages, see the [Remarks section](/sql/t-sql/statements/alter-database-transact-sql?preserve-view=true&tabs=sqlpool&view=azuresqldb-mi-current#remarks-2). - -For more information, see [ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql-file-and-filegroup-options). - -### SQL Server Agent - -- Enabling and disabling SQL Server Agent is currently not supported in SQL Managed Instance. SQL Agent is always running. 
-- Job schedule trigger based on an idle CPU is not supported. -- SQL Server Agent settings are read only. The procedure `sp_set_agent_properties` isn't supported in SQL Managed Instance. -- Jobs - - T-SQL job steps are supported. - - The following replication jobs are supported: - - Transaction-log reader - - Snapshot - - Distributor - - SSIS job steps are supported. - - Other types of job steps aren't currently supported: - - The merge replication job step isn't supported. - - Queue Reader isn't supported. - - Command shell isn't yet supported. - - SQL Managed Instance can't access external resources, for example, network shares via robocopy. - - SQL Server Analysis Services isn't supported. -- Notifications are partially supported. -- Email notification is supported, although it requires that you configure a Database Mail profile. SQL Server Agent can use only one Database Mail profile, and it must be called `AzureManagedInstance_dbmail_profile`. - - Pager isn't supported. - - NetSend isn't supported. - - Alerts aren't yet supported. - - Proxies aren't supported. -- EventLog isn't supported. -- User must be directly mapped to Azure AD server principal (login) to create, modify, or execute SQL Agent jobs. Users that are not directly mapped, for example, users that belong to an Azure AD group that has the rights to create, modify or execute SQL Agent jobs, will not effectively be able to perform those actions. This is due to SQL Managed Instance impersonation and [EXECUTE AS limitations](#logins-and-users). -- The Multi Server Administration feature for master/target (MSX/TSX) jobs are not supported. - -For information about SQL Server Agent, see [SQL Server Agent](/sql/ssms/agent/sql-server-agent). 
- -### Tables - -The following table types aren't supported: - -- [FILESTREAM](/sql/relational-databases/blob/filestream-sql-server) -- [FILETABLE](/sql/relational-databases/blob/filetables-sql-server) -- [EXTERNAL TABLE](/sql/t-sql/statements/create-external-table-transact-sql) (except Polybase, in preview) -- [MEMORY_OPTIMIZED](/sql/relational-databases/in-memory-oltp/introduction-to-memory-optimized-tables) (not supported only in General Purpose tier) - -For information about how to create and alter tables, see [CREATE TABLE](/sql/t-sql/statements/create-table-transact-sql) and [ALTER TABLE](/sql/t-sql/statements/alter-table-transact-sql). - -## Functionalities - -### Bulk insert / OPENROWSET - -SQL Managed Instance can't access file shares and Windows folders, so the files must be imported from Azure Blob storage: - -- `DATASOURCE` is required in the `BULK INSERT` command while you import files from Azure Blob storage. See [BULK INSERT](/sql/t-sql/statements/bulk-insert-transact-sql). -- `DATASOURCE` is required in the `OPENROWSET` function when you read the content of a file from Azure Blob storage. See [OPENROWSET](/sql/t-sql/functions/openrowset-transact-sql). -- `OPENROWSET` can be used to read data from Azure SQL Database, Azure SQL Managed Instance, or SQL Server instances. Other sources such as Oracle databases or Excel files are not supported. - -### CLR - -A SQL Managed Instance can't access file shares and Windows folders, so the following constraints apply: - -- Only `CREATE ASSEMBLY FROM BINARY` is supported. See [CREATE ASSEMBLY FROM BINARY](/sql/t-sql/statements/create-assembly-transact-sql). -- `CREATE ASSEMBLY FROM FILE` isn't supported. See [CREATE ASSEMBLY FROM FILE](/sql/t-sql/statements/create-assembly-transact-sql). -- `ALTER ASSEMBLY` can't reference files. See [ALTER ASSEMBLY](/sql/t-sql/statements/alter-assembly-transact-sql). - -### Database Mail (db_mail) - - `sp_send_dbmail` cannot send attachments using @file_attachments parameter. 
Local file system and external shares or Azure Blob Storage are not accessible from this procedure. - - See the known issues related to `@query` parameter and authentication. - -### DBCC - -Undocumented DBCC statements that are enabled in SQL Server aren't supported in SQL Managed Instance. - -- Only a limited number of Global Trace flags are supported. Session-level `Trace flags` aren't supported. See [Trace flags](/sql/t-sql/database-console-commands/dbcc-traceon-trace-flags-transact-sql). -- [DBCC TRACEOFF](/sql/t-sql/database-console-commands/dbcc-traceoff-transact-sql) and [DBCC TRACEON](/sql/t-sql/database-console-commands/dbcc-traceon-transact-sql) work with the limited number of global trace-flags. -- [DBCC CHECKDB](/sql/t-sql/database-console-commands/dbcc-checkdb-transact-sql) with options REPAIR_ALLOW_DATA_LOSS, REPAIR_FAST, and REPAIR_REBUILD cannot be used because database cannot be set in `SINGLE_USER` mode - see [ALTER DATABASE differences](#alter-database-statement). Potential database corruption is handled by the Azure support team. Contact Azure support if there is any indication of database corruption. - -### Distributed transactions - -Partial support for [distributed transactions](../database/elastic-transactions-overview.md) is currently in public preview. Distributed transactions are supported under following conditions (all of them must be met): -* all transaction participants are Azure SQL Managed Instances that are part of the [Server trust group](./server-trust-group-overview.md). -* transactions are initiated either from .NET (TransactionScope class) or Transact-SQL. - -Azure SQL Managed Instance currently does not support other scenarios that are regularly supported by MSDTC on-premises or in Azure Virtual Machines. - -### Extended Events - -Some Windows-specific targets for Extended Events (XEvents) aren't supported: - -- The `etw_classic_sync` target isn't supported. Store `.xel` files in Azure Blob storage. 
See [etw_classic_sync target](/sql/relational-databases/extended-events/targets-for-extended-events-in-sql-server#etw_classic_sync_target-target). -- The `event_file` target isn't supported. Store `.xel` files in Azure Blob storage. See [event_file target](/sql/relational-databases/extended-events/targets-for-extended-events-in-sql-server#event_file-target). - -### External libraries - -In-database R and Python external libraries are supported in limited public preview. See [Machine Learning Services in Azure SQL Managed Instance (preview)](machine-learning-services-overview.md). - -### Filestream and FileTable - -- Filestream data isn't supported. -- The database can't contain filegroups with `FILESTREAM` data. -- `FILETABLE` isn't supported. -- Tables can't have `FILESTREAM` types. -- The following functions aren't supported: - - `GetPathLocator()` - - `GET_FILESTREAM_TRANSACTION_CONTEXT()` - - `PathName()` - - `GetFileNamespacePath()` - - `FileTableRootPath()` - -For more information, see [FILESTREAM](/sql/relational-databases/blob/filestream-sql-server) and [FileTables](/sql/relational-databases/blob/filetables-sql-server). - -### Full-text Semantic Search - -[Semantic Search](/sql/relational-databases/search/semantic-search-sql-server) isn't supported. - -### Linked servers - -[Linked servers](/sql/relational-databases/linked-servers/linked-servers-database-engine) in SQL Managed Instance support a limited number of targets: - -- Supported targets are SQL Managed Instance, SQL Database, Azure Synapse SQL [serverless](https://devblogs.microsoft.com/azure-sql/linked-server-to-synapse-sql-to-implement-polybase-like-scenarios-in-managed-instance/) and dedicated pools, and SQL Server instances. -- Distributed writable transactions are possible only among SQL Managed Instances. For more information, see [Distributed Transactions](../database/elastic-transactions-overview.md). However, MS DTC is not supported. 
-- Targets that aren't supported are files, Analysis Services, and other RDBMS. Try to use native CSV import from Azure Blob Storage using `BULK INSERT` or `OPENROWSET` as an alternative for file import, or load files using a [serverless SQL pool in Azure Synapse Analytics](https://devblogs.microsoft.com/azure-sql/linked-server-to-synapse-sql-to-implement-polybase-like-scenarios-in-managed-instance/). - -Operations: - -- [Cross-instance](../database/elastic-transactions-overview.md) write transactions are supported only for SQL Managed Instances. -- `sp_dropserver` is supported for dropping a linked server. See [sp_dropserver](/sql/relational-databases/system-stored-procedures/sp-dropserver-transact-sql). -- The `OPENROWSET` function can be used to execute queries only on SQL Server instances. They can be either managed, on-premises, or in virtual machines. See [OPENROWSET](/sql/t-sql/functions/openrowset-transact-sql). -- The `OPENDATASOURCE` function can be used to execute queries only on SQL Server instances. They can be either managed, on-premises, or in virtual machines. Only the `SQLNCLI`, `SQLNCLI11`, and `SQLOLEDB` values are supported as a provider. An example is `SELECT * FROM OPENDATASOURCE('SQLNCLI', '...').AdventureWorks2012.HumanResources.Employee`. See [OPENDATASOURCE](/sql/t-sql/functions/opendatasource-transact-sql). -- Linked servers cannot be used to read files (Excel, CSV) from the network shares. Try to use [BULK INSERT](/sql/t-sql/statements/bulk-insert-transact-sql#e-importing-data-from-a-csv-file), [OPENROWSET](/sql/t-sql/functions/openrowset-transact-sql#g-accessing-data-from-a-csv-file-with-a-format-file) that reads CSV files from Azure Blob Storage, or a [linked server that references a serverless SQL pool in Synapse Analytics](https://devblogs.microsoft.com/azure-sql/linked-server-to-synapse-sql-to-implement-polybase-like-scenarios-in-managed-instance/). 
Track these requests on the [SQL Managed Instance Feedback item](https://feedback.azure.com/d365community/idea/db80cf6e-3425-ec11-b6e6-000d3a4f0f84).
- -For more information about configuring transactional replication, see the following tutorials: -- [Replication between a SQL MI publisher and SQL MI subscriber](replication-between-two-instances-configure-tutorial.md) -- [Replication between an SQL MI publisher, SQL MI distributor, and SQL Server subscriber](replication-two-instances-and-sql-server-configure-tutorial.md) - -### RESTORE statement - -- Supported syntax: - - `RESTORE DATABASE` - - `RESTORE FILELISTONLY ONLY` - - `RESTORE HEADER ONLY` - - `RESTORE LABELONLY ONLY` - - `RESTORE VERIFYONLY ONLY` -- Unsupported syntax: - - `RESTORE LOG ONLY` - - `RESTORE REWINDONLY ONLY` -- Source: - - `FROM URL` (Azure Blob storage) is the only supported option. - - `FROM DISK`/`TAPE`/backup device isn't supported. - - Backup sets aren't supported. -- `WITH` options aren't supported. Restore attempts including `WITH` like `DIFFERENTIAL`, `STATS`, `REPLACE`, etc., will fail. -- `ASYNC RESTORE`: Restore continues even if the client connection breaks. If your connection is dropped, you can check the `sys.dm_operation_status` view for the status of a restore operation, and for a CREATE and DROP database. See [sys.dm_operation_status](/sql/relational-databases/system-dynamic-management-views/sys-dm-operation-status-azure-sql-database). - -The following database options are set or overridden and can't be changed later: - -- `NEW_BROKER` if the broker isn't enabled in the .bak file. -- `ENABLE_BROKER` if the broker isn't enabled in the .bak file. -- `AUTO_CLOSE=OFF` if a database in the .bak file has `AUTO_CLOSE=ON`. -- `RECOVERY FULL` if a database in the .bak file has `SIMPLE` or `BULK_LOGGED` recovery mode. -- A memory-optimized filegroup is added and called XTP if it wasn't in the source .bak file. -- Any existing memory-optimized filegroup is renamed to XTP. -- `SINGLE_USER` and `RESTRICTED_USER` options are converted to `MULTI_USER`. 
- -Limitations: - -- Backups of the corrupted databases might be restored depending on the type of the corruption, but automated backups will not be taken until the corruption is fixed. Make sure that you run `DBCC CHECKDB` on the source SQL Managed Instance and use backup `WITH CHECKSUM` in order to prevent this issue. -- Restore of `.BAK` file of a database that contains any limitation described in this document (for example, `FILESTREAM` or `FILETABLE` objects) cannot be restored on SQL Managed Instance. -- `.BAK` files that contain multiple backup sets can't be restored. -- `.BAK` files that contain multiple log files can't be restored. -- Backups that contain databases bigger than 8 TB, active in-memory OLTP objects, or number of files that would exceed 280 files per instance can't be restored on a General Purpose instance. -- Backups that contain databases bigger than 4 TB or in-memory OLTP objects with the total size larger than the size described in [resource limits](resource-limits.md) cannot be restored on Business Critical instance. -For information about restore statements, see [RESTORE statements](/sql/t-sql/statements/restore-statements-transact-sql). - - > [!IMPORTANT] - > The same limitations apply to built-in point-in-time restore operation. As an example, General Purpose database greater than 4 TB cannot be restored on Business Critical instance. Business Critical database with In-memory OLTP files or more than 280 files cannot be restored on General Purpose instance. - -### Service broker - -Cross-instance service broker message exchange is supported only between Azure SQL Managed Instances: - -- `CREATE ROUTE`: You can't use `CREATE ROUTE` with `ADDRESS` other than `LOCAL` or DNS name of another SQL Managed Instance. Port is always 4022. -- `ALTER ROUTE`: You can't use `ALTER ROUTE` with `ADDRESS` other than `LOCAL` or DNS name of another SQL Managed Instance. Port is always 4022. 
- -Transport security is supported, dialog security is not: -- `CREATE REMOTE SERVICE BINDING`is not supported. - -Service broker is enabled by default and cannot be disabled. The following ALTER DATABASE options are not supported: -- `ENABLE_BROKER` -- `DISABLE_BROKER` - -### Stored procedures, functions, and triggers - -- `NATIVE_COMPILATION` isn't supported in the General Purpose tier. -- The following [sp_configure](/sql/relational-databases/system-stored-procedures/sp-configure-transact-sql) options aren't supported: - - `allow polybase export` - - `allow updates` - - `filestream_access_level` - - `remote access` - - `remote data archive` - - `remote proc trans` - - `scan for startup procs` -- The following [sp_configure](/sql/relational-databases/system-stored-procedures/sp-configure-transact-sql) options are ignored and have no effect: - - `Ole Automation Procedures` -- `sp_execute_external_scripts` isn't supported. See [sp_execute_external_scripts](/sql/relational-databases/system-stored-procedures/sp-execute-external-script-transact-sql#examples). -- `xp_cmdshell` isn't supported. See [xp_cmdshell](/sql/relational-databases/system-stored-procedures/xp-cmdshell-transact-sql). -- `Extended stored procedures` aren't supported, and this includes `sp_addextendedproc` and `sp_dropextendedproc`. This functionality won't be supported because it's on a deprecation path for SQL Server. For more information, see [Extended Stored Procedures](/sql/relational-databases/extended-stored-procedures-programming/database-engine-extended-stored-procedures-programming). -- `sp_attach_db`, `sp_attach_single_file_db`, and `sp_detach_db` aren't supported. See [sp_attach_db](/sql/relational-databases/system-stored-procedures/sp-attach-db-transact-sql), [sp_attach_single_file_db](/sql/relational-databases/system-stored-procedures/sp-attach-single-file-db-transact-sql), and [sp_detach_db](/sql/relational-databases/system-stored-procedures/sp-detach-db-transact-sql). 
- -### System functions and variables - -The following variables, functions, and views return different results: - -- `SERVERPROPERTY('EngineEdition')` returns the value 8. This property uniquely identifies a SQL Managed Instance. See [SERVERPROPERTY](/sql/t-sql/functions/serverproperty-transact-sql). -- `SERVERPROPERTY('InstanceName')` returns NULL because the concept of instance as it exists for SQL Server doesn't apply to SQL Managed Instance. See [SERVERPROPERTY('InstanceName')](/sql/t-sql/functions/serverproperty-transact-sql). -- `@@SERVERNAME` returns a full DNS "connectable" name, for example, `my-managed-instance.wcus17662feb9ce98.database.windows.net`. See [@@SERVERNAME](/sql/t-sql/functions/servername-transact-sql). -- `SYS.SERVERS` returns a full DNS "connectable" name, such as `myinstance.domain.database.windows.net` for the properties "name" and "data_source." See [SYS.SERVERS](/sql/relational-databases/system-catalog-views/sys-servers-transact-sql). -- `@@SERVICENAME` returns NULL because the concept of service as it exists for SQL Server doesn't apply to SQL Managed Instance. See [@@SERVICENAME](/sql/t-sql/functions/servicename-transact-sql). -- `SUSER_ID` is supported. It returns NULL if the Azure AD login isn't in `sys.syslogins`. See [SUSER_ID](/sql/t-sql/functions/suser-id-transact-sql). -- `SUSER_SID` isn't supported. The wrong data is returned, which is a temporary known issue. See [SUSER_SID](/sql/t-sql/functions/suser-sid-transact-sql). - -## Environment constraints - -### Subnet -- You cannot place any other resources (for example virtual machines) in the subnet where you have deployed your SQL Managed Instance. Deploy these resources using a different subnet. -- Subnet must have sufficient number of available [IP addresses](connectivity-architecture-overview.md#network-requirements). Minimum is to have at least 32 IP addresses in the subnet. 
-- The number of vCores and types of instances that you can deploy in a region have some [constraints and limits](resource-limits.md#regional-resource-limitations). -- There is a [networking configuration](connectivity-architecture-overview.md#network-requirements) that must be applied on the subnet. - -### VNET -- VNet can be deployed using Resource Model - Classic Model for VNet is not supported. -- After a SQL Managed Instance is created, moving the SQL Managed Instance or VNet to another resource group or subscription is not supported. -- For SQL Managed Instances hosted in virtual clusters that are created before September 22, 2020, [global peering](../../virtual-network/virtual-networks-faq.md#what-are-the-constraints-related-to-global-vnet-peering-and-load-balancers) is not supported. You can connect to these resources via ExpressRoute or VNet-to-VNet through VNet Gateways. - -### Failover groups -System databases are not replicated to the secondary instance in a failover group. Therefore, scenarios that depend on objects from the system databases will be impossible on the secondary instance unless the objects are manually created on the secondary. - -### TEMPDB -- The maximum file size of the `tempdb` system database can't be greater than 24 GB per core on a General Purpose tier. The maximum `tempdb` size on a Business Critical tier is limited by the SQL Managed Instance storage size. `Tempdb` log file size is limited to 120 GB on General Purpose tier. Some queries might return an error if they need more than 24 GB per core in `tempdb` or if they produce more than 120 GB of log data. -- `Tempdb` is always split into 12 data files: 1 primary, also called master, data file and 11 non-primary data files. The file structure cannot be changed and new files cannot be added to `tempdb`. 
-- [Memory-optimized `tempdb` metadata](/sql/relational-databases/databases/tempdb-database?view=sql-server-ver15&preserve-view=true#memory-optimized-tempdb-metadata), a new SQL Server 2019 in-memory database feature, is not supported. -- Objects created in the model database cannot be auto-created in `tempdb` after a restart or a failover because `tempdb` does not get its initial object list from the model database. You must create objects in `tempdb` manually after each restart or a failover. - -### MSDB - -The following schemas in the `msdb` system database in SQL Managed Instance must be owned by their respective predefined roles: - -- General roles - - TargetServersRole -- [Fixed database roles](/sql/ssms/agent/sql-server-agent-fixed-database-roles?view=sql-server-ver15&preserve-view=true) - - SQLAgentUserRole - - SQLAgentReaderRole - - SQLAgentOperatorRole -- [DatabaseMail roles](/sql/relational-databases/database-mail/database-mail-configuration-objects?view=sql-server-ver15&preserve-view=true#DBProfile): - - DatabaseMailUserRole -- [Integration services roles](/sql/integration-services/security/integration-services-roles-ssis-service?view=sql-server-ver15&preserve-view=true): - - db_ssisadmin - - db_ssisltduser - - db_ssisoperator - -> [!IMPORTANT] -> Changing the predefined role names, schema names and schema owners by customers will impact the normal operation of the service. Any changes made to these will be reverted back to the predefined values as soon as detected, or at the next service update at the latest to ensure normal service operation. - -### Error logs - -SQL Managed Instance places verbose information in error logs. There are many internal system events that are logged in the error log. Use a custom procedure to read error logs that filters out some irrelevant entries. 
For more information, see [SQL Managed Instance – sp_readmierrorlog](/archive/blogs/sqlcat/azure-sql-db-managed-instance-sp_readmierrorlog) or [SQL Managed Instance extension(preview)](/sql/azure-data-studio/azure-sql-managed-instance-extension#logs) for Azure Data Studio. - -## Next steps - -- For more information about SQL Managed Instance, see [What is SQL Managed Instance?](sql-managed-instance-paas-overview.md) -- For a features and comparison list, see [Azure SQL Managed Instance feature comparison](../database/features-comparison.md). -- For release updates, see [What's new?](doc-changes-updates-release-notes-whats-new.md). -- For issues, workarounds, and resolutions, see [Known issues](doc-changes-updates-known-issues.md). -- For a quickstart that shows you how to create a new SQL Managed Instance, see [Create a SQL Managed Instance](instance-create-quickstart.md). diff --git a/articles/azure-sql/managed-instance/user-initiated-failover.md b/articles/azure-sql/managed-instance/user-initiated-failover.md deleted file mode 100644 index 8770f2aea1054..0000000000000 --- a/articles/azure-sql/managed-instance/user-initiated-failover.md +++ /dev/null @@ -1,159 +0,0 @@ ---- -title: Manually initiate a failover on SQL Managed Instance -description: Learn how to manually failover primary and secondary replicas on Azure SQL Managed Instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: high-availability -ms.custom: seo-lt-2019, sqldbrb=1, devx-track-azurepowershell -ms.devlang: -ms.topic: how-to -author: danimir -ms.author: danil -ms.reviewer: mathoma -ms.date: 02/27/2021 ---- - -# User-initiated manual failover on SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article explains how to manually failover a primary node on SQL Managed Instance General Purpose (GP) and Business Critical (BC) service tiers, and how to manually failover a secondary read-only replica node on the BC service tier only. 
- -> [!NOTE] -> This article is not related with cross-region failovers on [auto-failover groups](../database/auto-failover-group-overview.md). - -## When to use manual failover - -[High availability](../database/high-availability-sla.md) is a fundamental part of SQL Managed Instance platform that works transparently for your database applications. Failovers from primary to secondary nodes in case of node degradation or fault detection, or during regular monthly software updates are an expected occurrence for all applications using SQL Managed Instance in Azure. - -You might consider executing a [manual failover](../database/high-availability-sla.md#testing-application-fault-resiliency) on SQL Managed Instance for some of the following reasons: -- Test application for failover resiliency before deploying to production -- Test end-to-end systems for fault resiliency on automatic failovers -- Test how failover impacts existing database sessions -- Verify if a failover changes end-to-end performance because of changes in the network latency -- In some cases of query performance degradations, manual failover can help mitigate the performance issue. - -> [!NOTE] -> Ensuring that your applications are failover resilient prior to deploying to production will help mitigate the risk of application faults in production and will contribute to application availability for your customers. Learn more about testing your applications for cloud readiness with [Testing App Cloud Readiness for Failover Resiliency with SQL Managed Instance](https://youtu.be/FACWYLgYDL8) video recoding. 
- -## Initiate manual failover on SQL Managed Instance - -### Azure RBAC permissions required - -User initiating a failover will need to have one of the following Azure roles: - -- Subscription Owner role, or -- [Managed Instance Contributor](../../role-based-access-control/built-in-roles.md#sql-managed-instance-contributor) role, or -- Custom role with the following permission: - - `Microsoft.Sql/managedInstances/failover/action` - -### Using PowerShell - -The minimum version of Az.Sql needs to be [v2.9.0](https://www.powershellgallery.com/packages/Az.Sql/2.9.0). Consider using [Azure Cloud Shell](../../cloud-shell/overview.md) from the Azure portal that always has the latest PowerShell version available. - -As a pre-requirement, use the following PowerShell script to install required Azure modules. In addition, select the subscription where Managed Instance you wish to failover is located. - -```powershell -$subscription = 'enter your subscription ID here' -Install-Module -Name Az -Import-Module Az.Accounts -Import-Module Az.Sql - -Connect-AzAccount -Select-AzSubscription -SubscriptionId $subscription -``` - -Use PowerShell command [Invoke-AzSqlInstanceFailover](/powershell/module/az.sql/invoke-azsqlinstancefailover) with the following example to initiate failover of the primary node, applicable to both BC and GP service tier. - -```powershell -$ResourceGroup = 'enter resource group of your MI' -$ManagedInstanceName = 'enter MI name' -Invoke-AzSqlInstanceFailover -ResourceGroupName $ResourceGroup -Name $ManagedInstanceName -``` - -Use the following PS command to failover read secondary node, applicable to BC service tier only. - -```powershell -$ResourceGroup = 'enter resource group of your MI' -$ManagedInstanceName = 'enter MI name' -Invoke-AzSqlInstanceFailover -ResourceGroupName $ResourceGroup -Name $ManagedInstanceName -ReadableSecondary -``` - -### Using CLI - -Ensure to have the latest CLI scripts installed. 
- -Use az sql mi failover CLI command with the following example to initiate failover of the primary node, applicable to both BC and GP service tier. - -```cli -az sql mi failover -g myresourcegroup -n myinstancename -``` - -Use the following CLI command to failover read secondary node, applicable to BC service tier only. - -```cli -az sql mi failover -g myresourcegroup -n myinstancename --replica-type ReadableSecondary -``` - -### Using REST API - -For advanced users who would perhaps need to automate failovers of their SQL Managed Instances for purposes of implementing continuous testing pipeline, or automated performance mitigators, this function can be accomplished through initiating failover through an API call. see [Managed Instances - Failover REST API](/rest/api/sql/managed%20instances%20-%20failover/failover) for details. - -To initiate failover using REST API call, first generate the Auth Token using API client of your choice. The generated authentication token is used as Authorization property in the header of API request and it is mandatory. - -The following code is an example of the API URI to call: - -```HTTP -POST https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/failover?api-version=2019-06-01-preview -``` - -The following properties need to be passed in the API call: - -| **API property** | **Parameter** | -| --- | --- | -| subscriptionId | Subscription ID to which managed instance is deployed | -| resourceGroupName | Resource group that contains managed instance | -| managedInstanceName | Name of managed instance | -| replicaType | (Optional) (Primary or ReadableSecondary). These parameters represent the type of replica to be failed over: primary or readable secondary. If not specified, failover will be initiated on the primary replica by default. 
| -| api-version | Static value and currently needs to be “2019-06-01-preview" | - -API response will be one of the following two: - -- 202 Accepted -- One of the 400 request errors. - -Operation status can be tracked through reviewing API responses in response headers. For more information, see [Status of asynchronous Azure operations](../../azure-resource-manager/management/async-operations.md). - -## Monitor the failover - -To monitor the progress of user initiated failover for your BC instance, execute the following T-SQL query in your favorite client (such is SSMS) on SQL Managed Instance. It will read the system view sys.dm_hadr_fabric_replica_states and report replicas available on the instance. Refresh the same query after initiating the manual failover. - -```T-SQL -SELECT DISTINCT replication_endpoint_url, fabric_replica_role_desc FROM sys.dm_hadr_fabric_replica_states -``` - -Before initiating the failover, your output will indicate the current primary replica on BC service tier containing one primary and three secondaries in the AlwaysOn Availability Group. Upon execution of a failover, running this query again would need to indicate a change of the primary node. - -You will not be able to see the same output with GP service tier as the one above shown for BC. This is because GP service tier is based on a single node only. -You can use alternative T-SQL query showing the time SQL process started on the node for GP service tier instance: - -```T-SQL -SELECT sqlserver_start_time, sqlserver_start_time_ms_ticks FROM sys.dm_os_sys_info -``` - -The short loss of connectivity from your client during the failover, typically lasting under a minute, will be the indication of the failover execution regardless of the service tier. - -> [!NOTE] -> Completion of the failover process (not the actual short unavailability) might take several minutes at a time in case of **high-intensity** workloads. 
This is because the instance engine is taking care of all current transactions on the primary and catch up on the secondary node, prior to being able to failover. - -> [!IMPORTANT] -> Functional limitations of user-initiated manual failover are: -> - There could be one (1) failover initiated on the same Managed Instance every **15 minutes**. -> - For BC instances there must exist quorum of replicas for the failover request to be accepted. -> - For BC instances it is not possible to specify which readable secondary replica to initiate the failover on. -> - Failover will not be allowed until the first full backup for a new database is completed by automated backup systems. -> - Failover will not be allowed if there exists a database restore in progress. - -## Next steps -- Learn more about testing your applications for cloud readiness with [Testing App Cloud Readiness for Failover Resiliency with SQL Managed Instance](https://youtu.be/FACWYLgYDL8) video recoding. -- Learn more about high availability of managed instance [High availability for Azure SQL Managed Instance](../database/high-availability-sla.md). -- For an overview, see [What is Azure SQL Managed Instance?](sql-managed-instance-paas-overview.md). diff --git a/articles/azure-sql/managed-instance/virtual-cluster-delete.md b/articles/azure-sql/managed-instance/virtual-cluster-delete.md deleted file mode 100644 index 75bb79df30732..0000000000000 --- a/articles/azure-sql/managed-instance/virtual-cluster-delete.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: Delete a subnet after deleting a SQL Managed Instance -description: Learn how to delete an Azure virtual network after deleting an Azure SQL Managed Instance. 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.custom: seo-lt-2019, sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: urosmil -ms.author: urmilano -ms.reviewer: mathoma -ms.date: 03/25/2022 ---- - -# Delete a subnet after deleting an Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article provides guidelines on how to manually delete a subnet after deleting the last Azure SQL Managed Instance residing in it. You can [delete a virtual network subnet](../../virtual-network/virtual-network-manage-subnet.md#delete-a-subnet) only if there are no resources in the subnet. - -SQL Managed Instances are deployed into [virtual clusters](connectivity-architecture-overview.md#virtual-cluster-connectivity-architecture). Each virtual cluster is associated with a subnet and **automatically deployed** together with first instance creation. In the same way, a virtual cluster is **automatically removed** together with last instance deletion leaving the subnet empty and ready for removal. - ->[!IMPORTANT] ->There is no need for any manual action on the virtual cluster in order to release the subnet. Once the last virtual cluster is deleted, you can go and delete the subnet. - -There are rare circumstances in which create operation can fail and result with deployed empty virtual cluster. Additionally, as instance creation [can be canceled](management-operations-cancel.md), it is possible for a virtual cluster to be deployed with instances residing inside, in a failed to deploy state. Virtual cluster removal will automatically be initiated in these situations and removed in the background. - -> [!IMPORTANT] -> - There are no charges for keeping an empty virtual cluster or instances that have failed to create. 
-> - Deletion of a virtual cluster is a long-running operation lasting for about 1.5 hours (see [SQL Managed Instance management operations](management-operations-overview.md) for up-to-date virtual cluster delete time). The virtual cluster will still be visible in the portal until this process is completed. -> - Only one delete operation can be run on the virtual cluster. All subsequent customer-initiated delete requests will result with an error as delete operation is already in progress. - -## Delete a virtual cluster from the Azure portal [DEPRECATED] - -> [!IMPORTANT] -> Starting September 1, 2021. all virtual clusters are automatically removed when last instance in the cluster has been deleted. Manual removal of the virtual cluster is not required anymore. - -To delete a virtual cluster by using the Azure portal, search for the virtual cluster resources. - -> [!div class="mx-imgBorder"] -> ![Screenshot of the Azure portal, with search box highlighted](./media/virtual-cluster-delete/virtual-clusters-search.png) - -After you locate the virtual cluster you want to delete, select this resource, and select **Delete**. You're prompted to confirm the virtual cluster deletion. - -> [!div class="mx-imgBorder"] -> ![Screenshot of the Azure portal Virtual clusters dashboard, with the Delete option highlighted](./media/virtual-cluster-delete/virtual-clusters-delete.png) - -Azure portal notifications will show you a confirmation that the request to delete the virtual cluster has been successfully submitted. The deletion operation itself will last for about 1.5 hours, during which the virtual cluster will still be visible in portal. Once the process is completed, the virtual cluster will no longer be visible and the subnet associated with it will be released for reuse. - -> [!TIP] -> If there are no SQL Managed Instances shown in the virtual cluster, and you are unable to delete the virtual cluster, ensure that you do not have an ongoing instance deployment in progress. 
This includes started and canceled deployments that are still in progress. This is because these operations will still use the virtual cluster, locking it from deletion. Review the **Deployments** tab of the resource group where the instance was deployed to see any deployments in progress. In this case, wait for the deployment to complete, then delete the SQL Managed Instance. The virtual cluster will be synchronously deleted as part of the instance removal. - -## Delete a virtual cluster by using the API [DEPRECATED] - -> [!IMPORTANT] -> Starting September 1, 2021. all virtual clusters are automatically removed when last instance in the cluster has been deleted. Manual removal of the virtual cluster is not required anymore. - -To delete a virtual cluster through the API, use the URI parameters specified in the [virtual clusters delete method](/rest/api/sql/virtualclusters/delete). - -## Next steps - -- For an overview, see [What is Azure SQL Managed Instance?](sql-managed-instance-paas-overview.md). -- Learn about [connectivity architecture in SQL Managed Instance](connectivity-architecture-overview.md). -- Learn how to [modify an existing virtual network for SQL Managed Instance](vnet-existing-add-subnet.md). -- For a tutorial that shows how to create a virtual network, create an Azure SQL Managed Instance, and restore a database from a database backup, see [Create an Azure SQL Managed Instance (portal)](instance-create-quickstart.md). -- For DNS issues, see [Configure a custom DNS](custom-dns-configure.md). 
diff --git a/articles/azure-sql/managed-instance/virtual-network-subnet-create-arm-template.md b/articles/azure-sql/managed-instance/virtual-network-subnet-create-arm-template.md deleted file mode 100644 index fd46be7b1fd54..0000000000000 --- a/articles/azure-sql/managed-instance/virtual-network-subnet-create-arm-template.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: Create a virtual network -titleSuffix: Azure SQL Managed Instance -description: This article describes how to create a virtual network configured to support deploying Azure SQL Managed Instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: mathoma, bonova -ms.date: 09/12/2019 ---- -# Create a virtual network for Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -This article explains how to create a valid virtual network and subnet where you can deploy Azure SQL Managed Instance. - -Azure SQL Managed Instance must be deployed within an Azure [virtual network](../../virtual-network/virtual-networks-overview.md). This deployment enables the following scenarios: - -- Secure private IP address -- Connecting to SQL Managed Instance directly from an on-premises network -- Connecting SQL Managed Instance to a linked server or another on-premises data store -- Connecting SQL Managed Instance to Azure resources - -> [!NOTE] -> You should [determine the size of the subnet for SQL Managed Instance](vnet-subnet-determine-size.md) before you deploy the first instance. You can't resize the subnet after you put the resources inside. -> -> If you plan to use an existing virtual network, you need to modify that network configuration to accommodate SQL Managed Instance. For more information, see [Modify an existing virtual network for SQL Managed Instance](vnet-existing-add-subnet.md). 
-> -> After a managed instance is created, moving the managed instance or virtual network to another resource group or subscription is not supported. - -> [!IMPORTANT] -> You can [move the instance to another subnet inside the Vnet](vnet-subnet-move-instance.md). - -## Create a virtual network - -The easiest way to create and configure a virtual network is to use an Azure Resource Manager deployment template. - -1. Sign in to the Azure portal. - -2. Select the **Deploy to Azure** button: - - [![Image showing a button labeled "Deploy to Azure".](../../media/template-deployments/deploy-to-azure.svg)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-quickstart-templates%2Fmaster%2Fquickstarts%2Fmicrosoft.sql%2Fsql-managed-instance-azure-environment%2Fazuredeploy.json) - - This button opens a form that you can use to configure the network environment where you can deploy SQL Managed Instance. - - > [!Note] - > This Azure Resource Manager template will deploy a virtual network with two subnets. One subnet, called **ManagedInstances**, is reserved for SQL Managed Instance and has a preconfigured route table. The other subnet, called **Default**, is used for other resources that should access SQL Managed Instance (for example, Azure Virtual Machines). - -3. Configure the network environment. On the following form, you can configure parameters of your network environment: - - ![Resource Manager template for configuring the Azure network](./media/virtual-network-subnet-create-arm-template/create-mi-network-arm.png) - - You might change the names of the virtual network and subnets, and adjust the IP ranges associated with your networking resources. After you select the **Purchase** button, this form will create and configure your environment. If you don't need two subnets, you can delete the default one. 
- -## Next steps - -- For an overview, see [What is SQL Managed Instance?](sql-managed-instance-paas-overview.md). -- Learn about [connectivity architecture in SQL Managed Instance](connectivity-architecture-overview.md). -- Learn how to [modify an existing virtual network for SQL Managed Instance](vnet-existing-add-subnet.md). -- For a tutorial that shows how to create a virtual network, create a managed instance, and restore a database from a database backup, see [Create a managed instance](instance-create-quickstart.md). -- For DNS issues, see [Configure a custom DNS](custom-dns-configure.md). diff --git a/articles/azure-sql/managed-instance/vnet-existing-add-subnet.md b/articles/azure-sql/managed-instance/vnet-existing-add-subnet.md deleted file mode 100644 index 1d0f1b1cac271..0000000000000 --- a/articles/azure-sql/managed-instance/vnet-existing-add-subnet.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Configure an existing virtual network -titleSuffix: Azure SQL Managed Instance -description: This article describes how to configure an existing virtual network and subnet where you can deploy Azure SQL Managed Instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.custom: sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: mathoma, bonova -ms.date: 03/17/2020 ---- -# Configure an existing virtual network for Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Azure SQL Managed Instance must be deployed within an Azure [virtual network](../../virtual-network/virtual-networks-overview.md) and the subnet dedicated for managed instances only. You can use the existing virtual network and subnet if they're configured according to the [SQL Managed Instance virtual network requirements](connectivity-architecture-overview.md#network-requirements). 
- -If one of the following cases applies to you, you can validate and modify your network by using the script explained in this article: - -- You have a new subnet that's still not configured. -- You're not sure that the subnet is aligned with the [requirements](connectivity-architecture-overview.md#network-requirements). -- You want to check that the subnet still complies with the [network requirements](connectivity-architecture-overview.md#network-requirements) after you made changes. - -> [!Note] -> You can create a managed instance only in virtual networks created through the Azure Resource Manager deployment model. Azure virtual networks created through the classic deployment model are not supported. Calculate subnet size by following the guidelines in the [Determine the size of subnet for SQL Managed Instance](vnet-subnet-determine-size.md) article. You can't resize the subnet after you deploy the resources inside. -> -> After the managed instance is created, you can [move the instance to another subnet inside the Vnet](vnet-subnet-move-instance.md), but moving the instance or VNet to another resource group or subscription is not supported. - -## Validate and modify an existing virtual network - -If you want to create a managed instance inside an existing subnet, we recommend the following PowerShell script to prepare the subnet: - -```powershell -$scriptUrlBase = 'https://raw.githubusercontent.com/Microsoft/sql-server-samples/master/samples/manage/azure-sql-db-managed-instance/delegate-subnet' - -$parameters = @{ - subscriptionId = '' - resourceGroupName = '' - virtualNetworkName = '' - subnetName = '' - } - -Invoke-Command -ScriptBlock ([Scriptblock]::Create((iwr ($scriptUrlBase+'/delegateSubnet.ps1?t='+ [DateTime]::Now.Ticks)).Content)) -ArgumentList $parameters -``` - -The script prepares the subnet in three steps: - -1. Validate: It validates the selected virtual network and subnet for SQL Managed Instance networking requirements. -2. 
Confirm: It shows the user a set of changes that need to be made to prepare the subnet for SQL Managed Instance deployment. It also asks for consent. -3. Prepare: It properly configures the virtual network and subnet. - -## Next steps - -- For an overview, see [What is SQL Managed Instance?](sql-managed-instance-paas-overview.md). -- For a tutorial that shows how to create a virtual network, create a managed instance, and restore a database from a database backup, see [Create a managed instance](instance-create-quickstart.md). -- For DNS issues, see [Configuring a custom DNS](custom-dns-configure.md). diff --git a/articles/azure-sql/managed-instance/vnet-subnet-determine-size.md b/articles/azure-sql/managed-instance/vnet-subnet-determine-size.md deleted file mode 100644 index 15ce7250d6d71..0000000000000 --- a/articles/azure-sql/managed-instance/vnet-subnet-determine-size.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Determine required subnet size & range -titleSuffix: Azure SQL Managed Instance -description: This topic describes how to calculate the size of the subnet where Azure SQL Managed Instance will be deployed. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.custom: seo-lt-2019, sqldbrb=1 -ms.devlang: -ms.topic: how-to -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: mathoma, bonova, srbozovi, wiassaf -ms.date: 04/06/2022 ---- -# Determine required subnet size and range for Azure SQL Managed Instance -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Azure SQL Managed Instance must be deployed within an Azure [virtual network](../../virtual-network/virtual-networks-overview.md). The number of managed instances that can be deployed in the subnet of a virtual network depends on the size of the subnet (subnet range). - -When you create a managed instance, Azure allocates a number of virtual machines that depend on the tier you selected during provisioning. 
Because these virtual machines are associated with your subnet, they require IP addresses. To ensure high availability during regular operations and service maintenance, Azure might allocate more virtual machines. The number of required IP addresses in a subnet then becomes larger than the number of managed instances in that subnet. - -By design, a managed instance needs a minimum of 32 IP addresses in a subnet. As a result, you can use a minimum subnet mask of /27 when defining your subnet IP ranges. We recommend careful planning of subnet size for your managed instance deployments. Consider the following inputs during planning: - -- Number of managed instances, including the following instance parameters: - - Service tier - - Number of vCores - - [Hardware configuration](resource-limits.md#hardware-configuration-characteristics) - - [Maintenance window](../database/maintenance-window.md) -- Plans to scale up/down or change the service tier, hardware configuration, or maintenance window - -> [!IMPORTANT] -> A subnet size of 16 IP addresses (subnet mask /28) allows the deployment of a single managed instance inside it. It should be used only for evaluation or for dev/test scenarios where scaling operations won't be performed. - -## Determine subnet size - -Size your subnet according to your future needs for instance deployment and scaling. The following parameters can help you in forming a calculation: - -- Azure uses five IP addresses in the subnet for its own needs. -- Each virtual cluster allocates an additional number of addresses. -- Each managed instance uses a number of addresses that depend on pricing tier and hardware configuration. -- Each scaling request temporarily allocates an additional number of addresses. - -> [!IMPORTANT] -> It's not possible to change the subnet address range if any resource exists in the subnet. Consider using bigger subnets rather than smaller ones to prevent issues in the future. 
- -GP = general purpose; -BC = business critical; -VC = virtual cluster - -| **Pricing tier** | **Azure usage** | **VC usage** | **Instance usage** | **Total** | -| --- | --- | --- | --- | --- | -| GP | 5 | 6 | 3 | 14 | -| BC | 5 | 6 | 5 | 16 | - -In the preceding table: - -- The **Total** column displays the total number of addresses that are used by a single-deployed instance to the subnet. -- When you add more instances to the subnet, the number of addresses used by the instance increases. The total number of addresses then also increases. -- Addresses represented in the **Azure usage** column are shared across multiple virtual clusters. -- Addresses represented in the **VC usage** column are shared across instances placed in that virtual cluster. - -Also consider the [maintenance window feature](../database/maintenance-window.md) when you're determining the subnet size, especially when multiple instances will be deployed inside the same subnet. Specifying a maintenance window for a managed instance during its creation or afterward means that it must be placed in a virtual cluster with the corresponding maintenance window. If there is no such virtual cluster in the subnet, a new one must be created first to accommodate the instance. - -The same scenario as for the maintenance window applies for changing the [hardware configuration](resource-limits.md#hardware-configuration-characteristics) as a virtual cluster always uses the same hardware. In case of new instance creation or changing the hardware of the existing instance, if there is no such virtual cluster in the subnet, a new one must be created first to accommodate the instance. - -An update operation typically requires [resizing the virtual cluster](management-operations-overview.md). When a new create or update request comes, the SQL Managed Instance service communicates with the compute platform with a request for new nodes that need to be added. 
Based on the compute response, the deployment system either expands the existing virtual cluster or creates a new one. Even if in most cases the operation will be completed within same virtual cluster, a new one might be created on the compute side. - - -## Update scenarios - -During a scaling operation, instances temporarily require additional IP capacity that depends on pricing tier: - -| **Pricing tier** | **Scenario** | **Additional addresses** | -| --- | --- | --- | -| GP | Scaling vCores | 3 | -| GP | Scaling storage | 0 | -| GP | Switching to BC | 5 | -| BC | Scaling vCores | 5 | -| BC | Scaling storage | 5 | -| BC | Switching to GP | 3 | - -## Calculate the number of IP addresses - -We recommend the following formula for calculating the total number of IP addresses. This formula takes into account the potential creation of a new virtual cluster during a later create request or instance update. It also takes into account the maintenance window and hardware requirements of virtual clusters. - -**Formula: 5 + (a * 12) + (b * 16) + (c * 16)** - -- a = number of GP instances -- b = number of BC instances -- c = number of different maintenance window configurations and hardware configurations - -Explanation: -- 5 = number of IP addresses reserved by Azure -- 12 addresses per GP instance = 6 for virtual cluster, 3 for managed instance, 3 more for scaling operation -- 16 addresses per BC instance = 6 for virtual cluster, 5 for managed instance, 5 more for scaling operation -- 16 addresses as a backup = scenario where new virtual cluster is created - -Example: -- You plan to have three general-purpose and two business-critical managed instances deployed in the same subnet. All instances will have same maintenance window configured. That means you need 5 + (3 * 12) + (2 * 16) + (1 * 16) = 89 IP addresses. - - Because IP ranges are defined in powers of 2, your subnet requires a minimum IP range of 128 (2^7) for this deployment. 
You need to reserve the subnet with a subnet mask of /25. - -> [!NOTE] -> Though it's possible to deploy managed instances to a subnet with a number of IP addresses that's less than the output of the subnet formula, always consider using bigger subnets instead. Using a bigger subnet can help avoid future issues stemming from a lack of IP addresses, such as the inability to create additional instances within the subnet or scale existing instances. - -## Next steps - -- For an overview, see [What is Azure SQL Managed Instance?](sql-managed-instance-paas-overview.md). -- Learn more about [connectivity architecture for SQL Managed Instance](connectivity-architecture-overview.md). -- See how to [create a virtual network where you'll deploy SQL Managed Instance](virtual-network-subnet-create-arm-template.md). -- For DNS issues, see [Configure a custom DNS](custom-dns-configure.md). diff --git a/articles/azure-sql/managed-instance/vnet-subnet-move-instance.md b/articles/azure-sql/managed-instance/vnet-subnet-move-instance.md deleted file mode 100644 index c5c8997f4687c..0000000000000 --- a/articles/azure-sql/managed-instance/vnet-subnet-move-instance.md +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: Move managed instance to another subnet -titleSuffix: Azure SQL Managed Instance -description: Learn how to move an Azure SQL Managed Instance to another subnet with only a short downtime during failover - typically up to 10 seconds. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.devlang: -ms.topic: how-to -author: urosmil -ms.author: urmilano -ms.reviewer: mathoma, bonova, srbozovi, wiassaf -ms.date: 09/30/2021 -ms.custom: ignite-fall-2021 ---- -# Move Azure SQL Managed Instance across subnets -[!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)] - -Azure SQL Managed Instance must be deployed inside a dedicated subnet within an Azure [virtual network](../../virtual-network/virtual-networks-overview.md). 
The number of managed instances that can be deployed within the subnet depends on the size of the subnet (subnet range). - -This article teaches you to move your managed instance from one subnet to another, similar to scaling vCores or changing the instance service tier. SQL Managed Instance is available during the move, except during a short downtime caused by a failover at the end of the update - typically lasting up to 10 seconds, even if long-running transactions are interrupted. - -Moving the instance to another subnet triggers the following virtual cluster operations: -- The destination subnet builds out or resizes the virtual cluster. -- The virtual cluster is removed or defragmented in the source subnet. - -Before moving your instance to another subnet, consider familiarizing yourself with the following concepts: -- [Determine required subnet size and range for Azure SQL Managed Instance](vnet-subnet-determine-size.md). -- Choose between moving the instance to a [new subnet](virtual-network-subnet-create-arm-template.md) or [using an existing subnet](vnet-existing-add-subnet.md). -- Use [management operations](management-operations-overview.md) to automatically deploy new managed instances, update instance properties, or delete instances. It's possible to [monitor](management-operations-monitor.md) these management operations. - - - -## Requirements and limitations - -To deploy a managed instance, or move it to another subnet, the destination subnet must have certain [network requirements](connectivity-architecture-overview.md#service-aided-subnet-configuration). - -### Subnet readiness - -Before you move your managed instance, confirm the subnet is marked as **Ready for Managed Instance**. - -In the **Virtual network** UI of the Azure portal, virtual networks that meet the prerequisites for a managed instance are categorized as **Ready for Managed Instance**. 
Virtual networks that have subnets with managed instances already deployed to them display an icon before the virtual network name. Empty subnets that are ready for a managed instance do not have an icon. - -Subnets that are marked as **Other** are empty and can be used for a managed instance, but first you need to fulfill the [network requirements](connectivity-architecture-overview.md#service-aided-subnet-configuration). This includes: - -- delegating to the Microsoft.Sql/managedInstances resource provider -- attaching a route table -- attaching a network security group - -After all requirements are satisfied, the subnet moves from the **Other** to the **Ready for Managed Instance** category and can be used for a managed instance. - -Subnets marked as **Invalid** cannot be used for new or existing managed instances, either because they're already in use (instances used for instance deployments cannot contain other resources), or the subnet has a different DNS zone (a cross-subnet instance move limitation). - -> [!div class="mx-imgBorder"] -> ![Screenshot of the Azure SQL Managed Instance subnet dropdown](./media/vnet-subnet-move-instance/subnet-grouping-per-state.png) - - -Depending on the subnet state and designation, the following adjustments may be made to the destination subnet: - -- **Ready for Managed Instance (contains existing SQL Managed Instance)**: No adjustments are made. These subnets already contain managed instances, and making any change to the subnet could impact existing instances. -- **Ready for Managed Instance (empty)**: The workflow validates all the required rules in the network security group and route table, and adds any rules that are necessary but missing. 1 - -> [!Note] -> 1 Custom rules added to the source subnet configuration are not copied to the destination subnet. Any customization of the source subnet configuration must be replicated manually to the destination subnet. 
One way to achieve this is by using the same route table and network security group for the source and destination subnet. - - -### Destination subnet limitations - -Consider the following limitations when choosing a destination subnet for an existing instance: - -- The destination subnet must be in the same virtual network as the source subnet. -- The DNS zone of the destination subnet must match the DNS zone of the source subnet as changing the DNS zone of a managed instance is not currently supported. -- Instances running on Gen4 hardware must be upgraded to newer hardware since Gen4 is being retired. Upgrading hardware and moving to another subnet can be performed in one operation. - - -## Operation steps - -The following table details the operation steps that occur during the instance move operation: - -|Step name |Step description | -|----|---------| -|Request validation |Validates the submitted parameters. If a misconfiguration is detected, the operation fails with an error. | -|Virtual cluster resizing / creation |Depending on the state of the destination subnet, the virtual cluster is either created or resized. | -|New instance startup |The SQL process starts on the deployed virtual cluster in the destination subnet. | -|Seeding database files / attaching database files |Depending on the service tier, either the database is seeded or the database files are attached. | -|Preparing failover and failover |After data has been seeded or database files reattached, the system prepares for failover. When everything is ready, the system performs a failover **with a short downtime**, usually less than 10 seconds. | -|Old SQL instance cleanup |Removes the old SQL process from the source virtual cluster. | -|Virtual cluster deletion |If it's the last instance within the source subnet, the final step deletes the virtual cluster synchronously. Otherwise, the virtual cluster is asynchronously defragmented. 
| - -A detailed explanation of the operation steps can be found in the [overview of Azure SQL Managed Instance management operations](management-operations-overview.md#management-operations-steps) - -## Move the instance - -A cross-subnet instance move is part of the instance update operation. Existing instance update API, Azure PowerShell, and Azure CLI commands have been enhanced with a subnet ID property. - -In the Azure portal, use the subnet field on the **Networking** blade to move the instance to the destination subnet. When using Azure PowerShell or the Azure CLI, provide a different subnet ID in the update command to move the instance from an existing subnet to the destination subnet. - -For a full reference of instance management commands, see [Management API reference for Azure SQL Managed Instance](api-references-create-manage-instance.md). - -# [Portal](#tab/azure-portal) - -The option to choose the instance subnet is located on the **Networking** blade of the Azure portal. The instance move operation starts when you select a subnet and save your changes. - -The first step of the move operation is to prepare the destination subnet for deployment, which may take several minutes. Once the subnet is ready, the instance move management operation starts and becomes visible in the Azure portal. - - -> [!div class="mx-imgBorder"] -> ![How to select subnet on SQL Managed Instance networking blade](./media/vnet-subnet-move-instance/how-to-select-subnet.png) - - -Monitor instance move operations from the **Overview** blade of the Azure portal. Select the notification to open an additional blade containing information about the current step, the total steps, and a button to cancel the operation. 
- -> [!div class="mx-imgBorder"] -> ![How to monitor instance move operation](./media/vnet-subnet-move-instance/monitor-subnet-move-operation.png) - - -# [PowerShell](#tab/azure-powershell) - -Use the Azure PowerShell command [Set-AzSqlInstance](/powershell/module/az.sql/set-azsqlinstance) to move an instance after you create your subnet in the same virtual network as your destination subnet. If you want to use an existing subnet, provide that subnet name in the PowerShell command. - -The example PowerShell commands in this section prepare the destination subnet for instance deployment and move the managed instance. - - -Use the following PowerShell command to specify your parameters: - -```powershell-interactive -### PART 1 - DEFINE PARAMETERS - -#Generating basic parameters -$currentSubscriptionID = 'subscription-id' -$sqlMIResourceGroupName = 'resource-group-name-of-sql-mi' -$sqlMIName = 'sql-mi-name' -$sqlMIResourceVnetName = 'vnet-name-of-sql-mi' -$destinationSubnetName = 'name-of-the-destination-subnet-for-sql-mi' -``` - -Skip this command if your subnet already has instances deployed to it. 
If you are using a new subnet, use the following Azure PowerShell command to prepare your subnet: - -```powershell-interactive -### PART 2 - PREPARE DESTINATION SUBNET - -#Loading the url of script used for preparing the subnet for SQL MI deployment -$scriptUrlBase = 'https://raw.githubusercontent.com/Microsoft/sql-server-samples/master/samples/manage/azure-sql-db-managed-instance/delegate-subnet' - -#Generating destination subnet parameters -$parameters = @{ - subscriptionId = $currentSubscriptionID - resourceGroupName = $sqlMIResourceGroupName - virtualNetworkName = $sqlMIResourceVnetName - subnetName = $destinationSubnetName -} - -#Initiating subnet prepartion script -Invoke-Command -ScriptBlock ([Scriptblock]::Create((iwr ($scriptUrlBase+'/delegateSubnet.ps1?t='+ [DateTime]::Now.Ticks)).Content)) -ArgumentList $parameters -``` - -> [!Note] -> To learn more about the script that prepares the subnet, see [Configure an existing virtual network for Azure SQL Managed Instance](vnet-existing-add-subnet.md). 
- -The following Azure PowerShell command moves the instance to the source subnet: - -```powershell-interactive -### PART 3 - MOVE INSTANCE TO THE NEW SUBNET - -Set-AzSqlInstance -Name $sqlMIName -ResourceGroupName $sqlMIResourceGroupName ` --SubnetId "/subscriptions/$currentSubscriptionID/resourceGroups/$sqlMIResourceGroupName/providers/Microsoft.Network/virtualNetworks/$sqlMIResourceVnetName/subnets/$destinationSubnetName" -``` - -The following Azure PowerShell command moves the instance, and also provides a way to monitor progress: - -```powershell-interactive -###PART 3 EXTENDED - MOVE INSTANCE AND MONITOR PROGRESS - -# Extend the Set-AzSqlInstance command with -AsJob -Force parameters to be able to monitor the progress or proceed with script execution as moving the instance to another subnet is long running operation -Set-AzSqlInstance -Name $sqlMIName -ResourceGroupName $sqlMIResourceGroupName ` --SubnetId "/subscriptions/$currentSubscriptionID/resourceGroups/$sqlMIResourceGroupName/providers/Microsoft.Network/virtualNetworks/$sqlMIResourceVnetName/subnets/$destinationSubnetName" -AsJob -Force - -$operationProgress = Get-AzSqlInstanceOperation -ManagedInstanceName $sqlMIName -ResourceGroupName $sqlMIResourceGroupName -#checking the operation step status -Write-Host "Checking the ongoing step" -ForegroundColor Yellow -$operationProgress.OperationSteps.StepsList -``` - - -# [Azure CLI](#tab/azure-cli) - -Use the Azure CLI [az sql mi update](/cli/azure/sql/mi#az-sql-mi-update) command to move your instance to another subnet. - -Provide the destination by either specifying the subnet ID as the `--subnet` property, or by specifying the virtual network name as the `--vnet-name` property, and subnet name as the `--subnet` property. 
- -The following example moves the managed instance to another subnet by specifying the subnet ID: - - -```azurecli-interactive -az sql mi update -g myResourceGroup -n mySqlManagedInstance --subnet /subscriptions/xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVirtualNetworkName/subnets/destinationSubnetName -``` - -The following example moves the managed instance to another subnet by specifying the virtual network name and subnet name: - -```azurecli-interactive -az sql mi update -g myResourceGroup -n mySqlManagedInstance --vnet-name myVirtualNetworkName --subnet destinationSubnetName -``` - -Use the following command to monitor the progress of the management operation: - -```azurecli-interactive -az sql mi op list -g myResourceGroup --mi mySqlManagedInstance -``` ---- - -## Next steps - -- To learn how to create your first managed instance, see [Quickstart guide](instance-create-quickstart.md). -- For a features and comparison list, see [common SQL features](../database/features-comparison.md). -- For more information about VNet configuration, see [SQL Managed Instance VNet configuration](connectivity-architecture-overview.md). -- For a quickstart that creates a managed instance and restores a database from a backup file, see [Create a managed instance](instance-create-quickstart.md). -- For a tutorial about using Azure Database Migration Service for migration, see [SQL Managed Instance migration using Database Migration Service](../../dms/tutorial-sql-server-to-managed-instance.md). 
diff --git a/articles/azure-sql/managed-instance/winauth-azuread-kerberos-managed-instance.md b/articles/azure-sql/managed-instance/winauth-azuread-kerberos-managed-instance.md deleted file mode 100644 index dc026e78fe919..0000000000000 --- a/articles/azure-sql/managed-instance/winauth-azuread-kerberos-managed-instance.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Configure Azure SQL Managed Instance for Windows Authentication for Azure Active Directory (Preview) -titleSuffix: Azure SQL Managed Instance -description: Learn how to configure Azure SQL Managed Instance for Windows Authentication for Azure Active Directory. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.devlang: -ms.topic: how-to -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: mathoma, bonova, urmilano, wiassaf, kendralittle -ms.date: 03/01/2022 ---- - -# Configure Azure SQL Managed Instance for Windows Authentication for Azure Active Directory (Preview) - -This article describes how to configure a managed instance to support [Windows Authentication for Azure AD principals](winauth-azuread-overview.md). The steps to set up Azure SQL Managed Instance are the same for both the [incoming trust-based authentication flow](winauth-azuread-setup-incoming-trust-based-flow.md) and the [modern interactive authentication flow](winauth-azuread-setup-modern-interactive-flow.md). - -## Prerequisites - -The following prerequisites are required to configure a managed instance for Windows Authentication for Azure AD principals: - -|Prerequisite | Description | -|---------|---------| -|Az.Sql PowerShell module | This PowerShell module provides management cmdlets for Azure SQL resources.

    Install this module by running the following PowerShell command: `Install-Module -Name Az.Sql` | -|Azure Active Directory PowerShell Module | This module provides management cmdlets for Azure AD administrative tasks such as user and service principal management.

    Install this module by running the following PowerShell command: `Install-Module –Name AzureAD` | -| A managed instance | You may [create a new managed instance](../../azure-arc/data/create-sql-managed-instance.md) or use an existing managed instance. You must [enable Azure AD authentication](../database/authentication-aad-configure.md) on the managed instance. | - -## Configure Azure AD Authentication for Azure SQL Managed Instance - -To enable Windows Authentication for Azure AD Principals, you need to enable a system assigned service principal on each managed instance. The system assigned service principal allows managed instance users to authenticate using the Kerberos protocol. You also need to grant admin consent to each service principal. -### Enable a system assigned service principal - -To enable a system assigned service principal for a managed instance: - -1. Sign in to the [Azure portal](https://portal.azure.com). -1. Navigate to your managed instance -1. Select **Identity**. -1. Set **System assigned service principal** to **On**. - :::image type="content" source="media/winauth-azuread/azure-portal-managed-instance-identity-enable-system-assigned-service-principal.png" alt-text="Screenshot of the identity pane for a managed instance in the Azure portal. Under 'System assigned service principal' the radio button next to the 'Status' label has been set to 'On'." lightbox="media/winauth-azuread/azure-portal-managed-instance-identity-enable-system-assigned-service-principal.png"::: -1. Select **Save**. - -### Grant admin consent to a system assigned service principal - -1. Sign in to the [Azure portal](https://portal.azure.com). -1. Open Azure Active Directory. -1. Select **App registrations**. -1. Select **All applications**. - :::image type="content" source="media/winauth-azuread/azure-portal-azuread-app-registrations.png" alt-text="Screenshot of the Azure portal. Azure Active Directory is open. App registrations is selected in the left pane. 
App applications is highlighted in the right pane." lightbox="media/winauth-azuread/azure-portal-azuread-app-registrations.png"::: -1. Select the application with the display name matching your managed instance. The name will be in the format: ` principal`. -1. Select **API permissions**. -1. Select **Grant admin consent**. - - :::image type="content" source="media/winauth-azuread/azure-portal-configure-permissions-admin-consent.png" alt-text="Screenshot from the Azure portal of the configured permissions for applications. The status for the example application is 'Granted for aadsqlmi'." lightbox="media/winauth-azuread/azure-portal-configure-permissions-admin-consent.png"::: -1. Select **Yes** on the prompt to **Grant admin consent confirmation**. - -## Connect to the managed instance with Windows Authentication - -If you have already implemented either the incoming [trust-based authentication flow](winauth-azuread-setup-incoming-trust-based-flow.md) or the [modern interactive authentication flow](winauth-azuread-setup-modern-interactive-flow.md), depending on the version of your client, you can now test connecting to your managed instance with Windows Authentication. - -To test the connection with [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) (SSMS), follow the steps in [Quickstart: Use SSMS to connect to and query Azure SQL Database or Azure SQL Managed Instance](../database/connect-query-ssms.md). Select **Windows Authentication** as your authentication type. - -:::image type="content" source="media/winauth-azuread/winauth-connect-to-managed-instance.png" alt-text="Dialog box from SQL Server Management Studio with a managed instance name in the 'Server Name' area and 'Authentication' set to 'Windows Authentication'." 
::: - -## Next steps - -Learn more about implementing Windows Authentication for Azure AD principals on Azure SQL Managed Instance: - -- [Troubleshoot Windows Authentication for Azure AD principals on Azure SQL Managed Instance](winauth-azuread-troubleshoot.md) -- [What is Windows Authentication for Azure Active Directory principals on Azure SQL Managed Instance? (Preview)](winauth-azuread-overview.md) -- [How to set up Windows Authentication for Azure SQL Managed Instance using Azure Active Directory and Kerberos (Preview)](winauth-azuread-setup.md) \ No newline at end of file diff --git a/articles/azure-sql/managed-instance/winauth-azuread-overview.md b/articles/azure-sql/managed-instance/winauth-azuread-overview.md deleted file mode 100644 index 9e13a3491d745..0000000000000 --- a/articles/azure-sql/managed-instance/winauth-azuread-overview.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: What is Windows Authentication for Azure Active Directory principals on Azure SQL Managed Instance? (Preview) -titleSuffix: Azure SQL Managed Instance -description: Learn about Windows Authentication for Azure Active Directory principals on Azure SQL Managed Instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.devlang: -ms.topic: overview -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: mathoma, bonova, urmilano, wiassaf, kendralittle -ms.date: 03/01/2022 ---- - -# What is Windows Authentication for Azure Active Directory principals on Azure SQL Managed Instance? (Preview) - -[Azure SQL Managed Instance](sql-managed-instance-paas-overview.md) is the intelligent, scalable cloud database service that combines the broadest SQL Server database engine compatibility with the benefits of a fully managed and evergreen platform as a service. Kerberos authentication for Azure Active Directory (Azure AD) enables Windows Authentication access to Azure SQL Managed Instance. 
Windows Authentication for managed instances empowers customers to move existing services to the cloud while maintaining a seamless user experience and provides the basis for infrastructure modernization. - -## Key capabilities and scenarios - -As customers modernize their infrastructure, application, and data tiers, they also modernize their identity management capabilities by shifting to Azure AD. Azure SQL offers multiple [Azure AD Authentication](../database/authentication-aad-overview.md) options: - -- 'Azure Active Directory - Password' offers authentication with Azure AD credentials -- 'Azure Active Directory - Universal with MFA' adds multi-factor authentication -- 'Azure Active Directory – Integrated' uses federation providers like [Active Directory Federation Services](/windows-server/identity/active-directory-federation-services) (ADFS) to enable Single Sign-On experiences - -However, some legacy apps can't change their authentication to Azure AD: legacy application code may no longer be available, there may be a dependency on legacy drivers, clients may not be able to be changed, and so on. Windows Authentication for Azure AD principals removes this migration blocker and provides support for a broader range of customer applications. - -Windows Authentication for Azure AD principals on managed instances is available for devices or virtual machines (VMs) joined to Active Directory (AD), Azure AD, or hybrid Azure AD. An Azure AD hybrid user whose user identity exists both in Azure AD and AD can access a managed instance in Azure using Azure AD Kerberos. - -Enabling Windows Authentication for a managed instance doesn't require customers to deploy new on-premises infrastructure or manage the overhead of setting up Domain Services. - -Windows Authentication for Azure AD principals on Azure SQL Managed Instance enables two key scenarios: migrating on-premises SQL Servers to Azure with minimal changes and modernizing security infrastructure. 
- -### Lift and shift on-premises SQL Servers to Azure with minimal changes - -By enabling Windows Authentication for Azure Active Directory principals, customers can migrate to Azure SQL Managed Instance without implementing changes to application authentication stacks or deploying Azure AD Domain Services. Customers can also use Windows Authentication to access a managed instance from their AD or Azure AD joined devices. - -Windows Authentication for Azure Active Directory principals also enables the following patterns on managed instances. These patterns are frequently used in traditional on-premises SQL Servers: - - -- **"Double hop" authentication**: Web applications use IIS identity impersonation to run queries against an instance in the security context of the end user. -- **Traces using extended events and SQL Server Profiler** can be launched using Windows authentication, providing ease of use for database administrators and developers accustomed to this workflow. Learn how to [run a trace against Azure SQL Managed Instance using Windows Authentication for Azure Active Directory principals](winauth-azuread-run-trace-managed-instance.md). - -### Modernize security infrastructure - -Enabling Windows Authentication for Azure Active Directory principals on Azure SQL Managed Instance equips customers to modernize their security practices. - -For example, a customer can enable a mobile analyst, using proven tools that rely on Windows Authentication, to authenticate to a managed instance using biometric credentials. This can be accomplished even if the mobile analyst works from a laptop that is joined to Azure AD. 
- -## Next steps - -Learn more about implementing Windows Authentication for Azure AD principals on Azure SQL Managed Instance: - -- [How Windows Authentication for Azure SQL Managed Instance is implemented with Azure Active Directory and Kerberos (Preview)](winauth-implementation-aad-kerberos.md) -- [How to set up Windows Authentication for Azure SQL Managed Instance using Azure Active Directory and Kerberos (Preview)](winauth-azuread-setup.md) \ No newline at end of file diff --git a/articles/azure-sql/managed-instance/winauth-azuread-run-trace-managed-instance.md b/articles/azure-sql/managed-instance/winauth-azuread-run-trace-managed-instance.md deleted file mode 100644 index 2079f7ed6a851..0000000000000 --- a/articles/azure-sql/managed-instance/winauth-azuread-run-trace-managed-instance.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Run a trace against Azure SQL Managed Instance using Windows Authentication for Azure Active Directory principals (preview) -description: Learn how to run a trace against Azure SQL Managed Instance using Authentication for Azure Active Directory principals -author: srdan-bozovic-msft -ms.author: srbozovi -ms.service: sql-managed-instance -ms.topic: how-to -ms.custom: template-how-to -ms.reviewer: mathoma, bonova, urmilano, wiassaf, kendralittle -ms.date: 03/01/2022 ---- - -# Run a trace against Azure SQL Managed Instance using Windows Authentication for Azure Active Directory principals (preview) - -This article shows how to connect and run a trace against Azure SQL Managed Instance using Windows Authentication for Azure Active Directory (Azure AD) principals. Windows authentication provides a convenient way for customers to connect to a managed instance, especially for database administrators and developers who are accustomed to launching [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) (SSMS) with their Windows credentials. 
- -This article shares two options to run a trace against a managed instance: you can trace with [extended events](/sql/relational-databases/extended-events/extended-events) or with [SQL Server Profiler](/sql/tools/sql-server-profiler/sql-server-profiler). While SQL Server Profiler may still be used, the trace functionality used by SQL Server Profiler is deprecated and will be removed in a future version of Microsoft SQL Server. - -## Prerequisites - -To use Windows Authentication to connect to and run a trace against a managed instance, you must first meet the following prerequisites: - -- [Set up Windows Authentication for Azure SQL Managed Instance using Azure Active Directory and Kerberos (Preview)](winauth-azuread-setup.md). -- Install [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) (SSMS) on the client that is connecting to the managed instance. The SSMS installation includes SQL Server Profiler and built-in components to create and run extended events traces. -- Enable tooling on your client machine to connect to the managed instance. This may be done by any of the following: - - [Configure an Azure VM to connect to Azure SQL Managed Instance](connect-vm-instance-configure.md). - - [Configure a point-to-site connection to Azure SQL Managed Instance from on-premises](point-to-site-p2s-configure.md). - - [Configure a public endpoint in Azure SQL Managed Instance](public-endpoint-configure.md). -- To create or modify extended events sessions, ensure that your account has the [server permission](/sql/t-sql/statements/grant-server-permissions-transact-sql) of ALTER ANY EVENT SESSION on the managed instance. -- To create or modify traces in SQL Server Profiler, ensure that your account has the [server permission](/sql/t-sql/statements/grant-server-permissions-transact-sql) of ALTER TRACE on the managed instance. 
- -If you have not yet enabled Windows authentication for Azure AD principals against your managed instance, you may run a trace against a managed instance using an [Azure AD Authentication](../database/authentication-aad-overview.md) option, including: - -- 'Azure Active Directory - Password' -- 'Azure Active Directory - Universal with MFA' -- 'Azure Active Directory – Integrated' - -## Run a trace with extended events - -To run a trace with extended events against a managed instance using Windows Authentication, you will first connect Object Explorer to your managed instance using Windows Authentication. - -1. Launch SQL Server Management Studio from a client machine where you have logged in using Windows Authentication. -1. The 'Connect to Server' dialog box should automatically appear. If it does not, ensure that **Object Explorer** is open and select **Connect**. -1. Enter the name of your managed instance as the **Server name**. The name of your managed instance should be in a format similar to `managedinstancename.12a34b5c67ce.database.windows.net`. -1. After **Authentication**, select **Windows Authentication**. - - :::image type="content" source="media/winauth-azuread/winauth-connect-to-managed-instance.png" alt-text="Dialog box from SQL Server Management Studio with a managed instance name in the 'Server Name' area and 'Authentication' set to 'Windows Authentication'."::: - -1. Select **Connect**. - -Now that **Object Explorer** is connected, you can create and run an extended events trace. Follow the steps in [Quick Start: Extended events in SQL Server](/sql/relational-databases/extended-events/quick-start-extended-events-in-sql-server) to learn how to create, test, and display the results of an extended events session. - -## Run a trace with Profiler - -To run a trace with SQL Server Profiler against a managed instance using Windows Authentication, launch the Profiler application. 
Profiler may be [run from the Windows Start menu or from SQL Server Management Studio](/sql/tools/sql-server-profiler/start-sql-server-profiler). - -1. On the File menu, select **New Trace**. -1. Enter the name of your managed instance as the **Server name**. The name of your managed instance should be in a format similar to `managedinstancename.12a34b5c67ce.database.windows.net`. -1. After **Authentication**, select **Windows Authentication**. - - :::image type="content" source="media/winauth-azuread/winauth-connect-to-managed-instance.png" alt-text="Dialog box from SQL Server Management Studio with a managed instance name in the 'Server Name' area and 'Authentication' set to 'Windows Authentication'."::: - -1. Select **Connect**. -1. Follow the steps in [Create a Trace (SQL Server Profiler)](/sql/tools/sql-server-profiler/create-a-trace-sql-server-profiler) to configure the trace. -1. Select **Run** after configuring the trace. - -## Next steps - -Learn more about Windows Authentication for Azure AD principals with Azure SQL Managed Instance: - -- [What is Windows Authentication for Azure Active Directory principals on Azure SQL Managed Instance? 
(Preview)](winauth-azuread-overview.md) -- [How to set up Windows Authentication for Azure SQL Managed Instance using Azure Active Directory and Kerberos (Preview)](winauth-azuread-setup.md) -- [How Windows Authentication for Azure SQL Managed Instance is implemented with Azure Active Directory and Kerberos (Preview)](winauth-implementation-aad-kerberos.md) -- [Extended Events](/sql/relational-databases/extended-events/extended-events) \ No newline at end of file diff --git a/articles/azure-sql/managed-instance/winauth-azuread-setup-incoming-trust-based-flow.md b/articles/azure-sql/managed-instance/winauth-azuread-setup-incoming-trust-based-flow.md deleted file mode 100644 index 45dd3b235392b..0000000000000 --- a/articles/azure-sql/managed-instance/winauth-azuread-setup-incoming-trust-based-flow.md +++ /dev/null @@ -1,256 +0,0 @@ ---- -title: How to set up Windows Authentication for Azure Active Directory with the incoming trust-based flow (Preview) -titleSuffix: Azure SQL Managed Instance -description: Learn how to set up Windows authentication for Azure Active Directory with the incoming trust-based flow. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.devlang: -ms.topic: how-to -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: mathoma, bonova, urmilano, wiassaf, kendralittle -ms.date: 03/01/2022 ---- - -# How to set up Windows Authentication for Azure AD with the incoming trust-based flow (Preview) - -This article describes how to implement the incoming trust-based authentication flow to allow Active Directory (AD) joined clients running Windows 10, Windows Server 2012, or higher versions of Windows to authenticate to an Azure SQL Managed Instance using Windows Authentication. 
This article also shares steps to rotate a Kerberos Key for your Azure Active Directory (Azure AD) service account and Trusted Domain Object, and steps to remove a Trusted Domain Object and all Kerberos settings, if desired. - -Enabling the incoming trust-based authentication flow is one step in [setting up Windows Authentication for Azure SQL Managed Instance using Azure Active Directory and Kerberos (Preview)](winauth-azuread-setup.md). The [modern interactive flow (Preview)](winauth-azuread-setup-modern-interactive-flow.md) is available for enlightened clients running Windows 10 20H1, Windows Server 2022, or a higher version of Windows. - -## Permissions - -To complete the steps outlined in this article, you will need: - -- An on-premises Active Directory administrator username and password. -- Azure AD global administrator account username and password. - -## Prerequisites - -To implement the incoming trust-based authentication flow, first ensure that the following prerequisites have been met: - -|Prerequisite |Description | -|---------|---------| -|Client must run Windows 10, Windows Server 2012, or a higher version of Windows. | | -|Clients must be joined to AD. The domain must have a functional level of Windows Server 2012 or higher. | You can determine if the client is joined to AD by running the [dsregcmd command](../../active-directory/devices/troubleshoot-device-dsregcmd.md): `dsregcmd.exe /status` | -|Azure AD Hybrid Authentication Management Module. | This PowerShell module provides management features for on-premises setup. | -|Azure tenant. | | -|Azure subscription under the same Azure AD tenant you plan to use for authentication.| | -|Azure AD Connect installed. | Hybrid environments where identities exist both in Azure AD and AD. 
| - - -## Create and configure the Azure AD Kerberos Trusted Domain Object - -To create and configure the Azure AD Kerberos Trusted Domain Object, you will install the Azure AD Hybrid Authentication Management PowerShell module. - -You will then use the Azure AD Hybrid Authentication Management PowerShell module to set up a Trusted Domain Object in the on-premises AD domain and register trust information with Azure AD. This creates an in-bound trust relationship into the on-premises AD, which enables on-premises AD to trust Azure AD. - -### Set up the Trusted Domain Object - -To set up the Trusted Domain Object, first install the Azure AD Hybrid Authentication Management PowerShell module. - -#### Install the Azure AD Hybrid Authentication Management PowerShell module - -1. Start a Windows PowerShell session with the **Run as administrator** option. - -1. Install the Azure AD Hybrid Authentication Management PowerShell module using the following script. The script: - - - Enables TLS 1.2 for communication. - - Installs the NuGet package provider. - - Registers the PSGallery repository. - - Installs the PowerShellGet module. - - Installs the Azure AD Hybrid Authentication Management PowerShell module. - - The Azure AD Hybrid Authentication Management PowerShell uses the AzureADPreview module, which provides advanced Azure AD management features. - - To protect against unnecessary installation conflicts with AzureAD PowerShell module, this command includes the -AllowClobber option flag. - -```powershell -[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 - -Install-PackageProvider -Name NuGet -Force - -if (@(Get-PSRepository | ? {$_.Name -eq "PSGallery"}).Count -eq 0){ - Register-PSRepository -Default -} - -Set-PSRepository -Name "PSGallery" -InstallationPolicy Trusted - -Install-Module -Name PowerShellGet -Force - -Install-Module -Name AzureADHybridAuthenticationManagement -AllowClobber -``` - -#### Create the Trusted Domain Object - -1. 
Start a Windows PowerShell session with the **Run as administrator** option. - -1. Set the common parameters. Customize the script below prior to running it. - - - Set the `$domain` parameter to your on-premises Active Directory domain name. - - When prompted by `Get-Credential`, enter an on-premises Active Directory administrator username and password. - - Set the `$cloudUserName` parameter to the username of a Global Administrator privileged account for Azure AD cloud access. - - > [!NOTE] - > If you wish to use your current Windows login account for your on-premises Active Directory access, you can skip the step where credentials are assigned to the `$domainCred` parameter. If you take this approach, do not include the `-DomainCredential` parameter in the PowerShell commands following this step. - - - ```powershell - $domain = "your on-premises domain name, for example contoso.com" - - $domainCred = Get-Credential - - $cloudUserName = "Azure AD user principal name, for example admin@contoso.onmicrosoft.com" - ``` - -1. Check the current Kerberos Domain Settings. - - Run the following command to check your domain's current Kerberos settings: - - ```powershell - Get-AzureAdKerberosServer -Domain $domain ` - -DomainCredential $domainCred ` - -UserPrincipalName $cloudUserName - ``` - - If this is the first time calling any Azure AD Kerberos command, you will be prompted for Azure AD cloud access. - - Enter the password for your Azure AD global administrator account. - - If your organization uses other modern authentication methods such as MFA (Azure Multi-Factor Authentication) or Smart Card, follow the instructions as requested for sign in. 
- - If this is the first time you're configuring Azure AD Kerberos settings, the [Get-AzureAdKerberosServer cmdlet](../../active-directory/authentication/howto-authentication-passwordless-security-key-on-premises.md#view-and-verify-the-azure-ad-kerberos-server) will display empty information, as in the following sample output: - - ``` - ID : - UserAccount : - ComputerAccount : - DisplayName : - DomainDnsName : - KeyVersion : - KeyUpdatedOn : - KeyUpdatedFrom : - CloudDisplayName : - CloudDomainDnsName : - CloudId : - CloudKeyVersion : - CloudKeyUpdatedOn : - CloudTrustDisplay : - ``` - - If your domain already supports FIDO authentication, the `Get-AzureAdKerberosServer` cmdlet will display Azure AD Service account information, as in the following sample output. Note that the `CloudTrustDisplay` field returns an empty value. - - ``` - ID : 25614 - UserAccount : CN=krbtgt-AzureAD, CN=Users, DC=aadsqlmi, DC=net - ComputerAccount : CN=AzureADKerberos, OU=Domain Controllers, DC=aadsqlmi, DC=net - DisplayName : krbtgt_25614 - DomainDnsName : aadsqlmi.net - KeyVersion : 53325 - KeyUpdatedOn : 2/24/2022 9:03:15 AM - KeyUpdatedFrom : ds-aad-auth-dem.aadsqlmi.net - CloudDisplayName : krbtgt_25614 - CloudDomainDnsName : aadsqlmi.net - CloudId : 25614 - CloudKeyVersion : 53325 - CloudKeyUpdatedOn : 2/24/2022 9:03:15 AM - CloudTrustDisplay : - ``` - -1. Add the Trusted Domain Object. - - Run the [Set-AzureAdKerberosServer PowerShell cmdlet](../../active-directory/authentication/howto-authentication-passwordless-security-key-on-premises.md#create-a-kerberos-server-object) to add the Trusted Domain Object. Be sure to include `-SetupCloudTrust` parameter. If there is no Azure AD service account, this command will create a new Azure AD service account. If there is an Azure AD service account already, this command will only create the requested Trusted Domain object. 
- - ```powershell - Set-AzureAdKerberosServer -Domain $domain ` - -DomainCredential $domainCred ` - -UserPrincipalName $cloudUserName ` - -SetupCloudTrust - ``` - - After creating the Trusted Domain Object, you can check the updated Kerberos Settings using the `Get-AzureAdKerberosServer` PowerShell cmdlet, as shown in the previous step. If the `Set-AzureAdKerberosServer` cmdlet has been run successfully with the `-SetupCloudTrust` parameter, the `CloudTrustDisplay` field should now return `Microsoft.AzureAD.Kdc.Service.TrustDisplay`, as in the following sample output: - - ``` - ID : 25614 - UserAccount : CN=krbtgt-AzureAD, CN=Users, DC=aadsqlmi, DC=net - ComputerAccount : CN=AzureADKerberos, OU=Domain Controllers, DC=aadsqlmi, DC=net - DisplayName : krbtgt_25614 - DomainDnsName : aadsqlmi.net - KeyVersion : 53325 - KeyUpdatedOn : 2/24/2022 9:03:15 AM - KeyUpdatedFrom : ds-aad-auth-dem.aadsqlmi.net - CloudDisplayName : krbtgt_25614 - CloudDomainDnsName : aadsqlmi.net - CloudId : 25614 - CloudKeyVersion : 53325 - CloudKeyUpdatedOn : 2/24/2022 9:03:15 AM - CloudTrustDisplay : Microsoft.AzureAD.Kdc.Service.TrustDisplay - ``` - -## Configure the Group Policy Object (GPO) - -1. Identify your [Azure AD tenant ID](../../active-directory/fundamentals/active-directory-how-to-find-tenant.md). - -1. Deploy the following Group Policy setting to client machines using the incoming trust-based flow: - - 1. Edit the **Administrative Templates\System\Kerberos\Specify KDC proxy servers for Kerberos clients** policy setting. - 1. Select **Enabled**. - 1. Under **Options**, select **Show...**. This opens the Show Contents dialog box. - - :::image type="content" source="media/winauth-azuread/configure-policy-kdc-proxy.png" alt-text="Screenshot of dialog box to enable 'Specify KDC proxy servers for Kerberos clients'. The 'Show Contents' dialog allows input of a value name and the related value." lightbox="media/winauth-azuread/configure-policy-kdc-proxy.png"::: - - 1. 
Define the KDC proxy servers settings using mappings as follows. Substitute your Azure AD tenant ID for the `your_Azure_AD_tenant_id` placeholder. Note the space following `https` and the space prior to the closing `/` in the value mapping. - - |Value name |Value | - |---------|---------| - |KERBEROS.MICROSOFTONLINE.COM | `<https login.microsoftonline.com:443:your_Azure_AD_tenant_id/kerberos />` | - - :::image type="content" source="media/winauth-azuread/configure-policy-kdc-proxy-server-settings-detail.png" alt-text="Screenshot of the 'Define KDC proxy server settings' dialog box. A table allows input of multiple rows. Each row consists of a value name and a value."::: - - 1. Select **OK** to close the 'Show Contents' dialog box. - 1. Select **Apply** on the 'Specify KDC proxy servers for Kerberos clients' dialog box. - -## Rotate the Kerberos Key - -You may periodically rotate the Kerberos Key for the created Azure AD Service account and Trusted Domain Object for management purposes. - -```powershell -Set-AzureAdKerberosServer -Domain $domain ` - -DomainCredential $domainCred ` - -UserPrincipalName $cloudUserName -SetupCloudTrust ` - -RotateServerKey -``` - -Once the key is rotated, it takes several hours to propagate the changed key between the Kerberos KDC servers. Due to this key distribution timing, you are limited to rotating the key once within 24 hours. If you need to rotate the key again within 24 hours for any reason, for example, just after creating the Trusted Domain Object, you can add the `-Force` parameter: - -```powershell -Set-AzureAdKerberosServer -Domain $domain ` - -DomainCredential $domainCred ` - -UserPrincipalName $cloudUserName -SetupCloudTrust ` - -RotateServerKey -Force -``` - -## Remove the Trusted Domain Object - -You can remove the added Trusted Domain Object using the following command: - -```powershell -Remove-AzureADKerberosTrustedDomainObject -Domain $domain ` - -DomainCredential $domainCred ` - -UserPrincipalName $cloudUserName -``` - -This command will only remove the Trusted Domain Object. 
If your domain supports FIDO authentication, you can remove the Trusted Domain Object while maintaining the Azure AD Service account required for the FIDO authentication service. - -## Remove all Kerberos Settings - -You can remove both the Azure AD Service account and the Trusted Domain Object using the following command: - -```powershell -Remove-AzureAdKerberosServer -Domain $domain ` - -DomainCredential $domainCred ` - -UserPrincipalName $cloudUserName -``` - -## Next steps - -Learn more about implementing Windows Authentication for Azure AD principals on Azure SQL Managed Instance: - -- [Configure Azure SQL Managed Instance for Windows Authentication for Azure Active Directory (Preview)](winauth-azuread-kerberos-managed-instance.md) -- [What is Windows Authentication for Azure Active Directory principals on Azure SQL Managed Instance? (Preview)](winauth-azuread-overview.md) -- [How to set up Windows Authentication for Azure SQL Managed Instance using Azure Active Directory and Kerberos (Preview)](winauth-azuread-setup.md) \ No newline at end of file diff --git a/articles/azure-sql/managed-instance/winauth-azuread-setup-modern-interactive-flow.md b/articles/azure-sql/managed-instance/winauth-azuread-setup-modern-interactive-flow.md deleted file mode 100644 index 0893ef57cd3b7..0000000000000 --- a/articles/azure-sql/managed-instance/winauth-azuread-setup-modern-interactive-flow.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: How to set up Windows authentication for Azure Active Directory with the modern interactive flow (Preview) -titleSuffix: Azure SQL Managed Instance -description: Learn how to set up Windows Authentication for Azure Active Directory with the modern interactive flow. 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.devlang: -ms.topic: how-to -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: mathoma, bonova, urmilano, wiassaf, kendralittle -ms.date: 03/01/2022 ---- - -# How to set up Windows Authentication for Azure Active Directory with the modern interactive flow (Preview) - -This article describes how to implement the modern interactive authentication flow to allow enlightened clients running Windows 10 20H1, Windows Server 2022, or a higher version of Windows to authenticate to Azure SQL Managed Instance using Windows Authentication. Clients must be joined to Azure Active Directory (Azure AD) or Hybrid Azure AD. - -Enabling the modern interactive authentication flow is one step in [setting up Windows Authentication for Azure SQL Managed Instance using Azure Active Directory and Kerberos (Preview)](winauth-azuread-setup.md). The [incoming trust-based flow (Preview)](winauth-azuread-setup-incoming-trust-based-flow.md) is available for AD joined clients running Windows 10 / Windows Server 2012 and higher. - -With this preview, Azure AD is now its own independent Kerberos realm. Windows 10 21H1 clients are already enlightened and will redirect clients to access Azure AD Kerberos to request a Kerberos ticket. The capability for clients to access Azure AD Kerberos is switched off by default and can be enabled by modifying group policy. Group policy can be used to deploy this feature in a staged manner by choosing specific clients you want to pilot on and then expanding it to all the clients across your environment. - -## Prerequisites - -There is no AD to Azure AD set up required for enabling software running on Azure AD Joined VMs to access Azure SQL Managed Instance using Windows Authentication. 
The following prerequisites are required to implement the modern interactive authentication flow: - -|Prerequisite |Description | -|---------|---------| -|Clients must run Windows 10 20H1, Windows Server 2022, or a higher version of Windows. | | -|Clients must be joined to Azure AD or Hybrid Azure AD. | You can determine if this prerequisite is met by running the [dsregcmd command](../../active-directory/devices/troubleshoot-device-dsregcmd.md): `dsregcmd.exe /status` | -|Application must connect to the managed instance via an interactive session. | This supports applications such as SQL Server Management Studio (SSMS) and web applications, but won't work for applications that run as a service. | -|Azure AD tenant. | | -|Azure AD Connect installed. | Hybrid environments where identities exist both in Azure AD and AD. | - - - -## Configure group policy - -Enable the following group policy setting `Administrative Templates\System\Kerberos\Allow retrieving the cloud Kerberos ticket during the logon`: - -1. Open the group policy editor. -1. Navigate to `Administrative Templates\System\Kerberos\`. -1. Select the **Allow retrieving the cloud kerberos ticket during the logon** setting. - - :::image type="content" source="media/winauth-azuread/policy-allow-retrieving-cloud-kerberos-ticket-during-logon.png" alt-text="A list of kerberos policy settings in the Windows policy editor. The 'Allow retrieving the cloud kerberos ticket during the logon' policy is highlighted with a red box." lightbox="media/winauth-azuread/policy-allow-retrieving-cloud-kerberos-ticket-during-logon.png"::: -1. In the setting dialog, select **Enabled**. -1. Select **OK**. - - :::image type="content" source="media/winauth-azuread/policy-enable-cloud-kerberos-ticket-during-logon-setting.png" alt-text="Screenshot of the 'Allow retrieving the cloud kerberos ticket during the logon' dialog. Select 'Enabled' and then 'OK' to enable the policy setting." 
lightbox="media/winauth-azuread/policy-enable-cloud-kerberos-ticket-during-logon-setting.png"::: - -## Refresh PRT (optional) - -Users with existing logon sessions may need to refresh their Azure AD Primary Refresh Token (PRT) if they attempt to use this feature immediately after it has been enabled. It can take up to a few hours for the PRT to refresh on its own. - -To refresh PRT manually, run this command from a command prompt: - -``` dos -dsregcmd.exe /RefreshPrt -``` - -## Next steps - -Learn more about implementing Windows Authentication for Azure AD principals on Azure SQL Managed Instance: - -- [What is Windows Authentication for Azure Active Directory principals on Azure SQL Managed Instance? (Preview)](winauth-azuread-overview.md) -- [How Windows Authentication for Azure SQL Managed Instance is implemented with Azure Active Directory and Kerberos (Preview)](winauth-implementation-aad-kerberos.md) -- [How to set up Windows Authentication for Azure AD with the incoming trust-based flow (Preview)](winauth-azuread-setup-incoming-trust-based-flow.md) -- [Configure Azure SQL Managed Instance for Windows Authentication for Azure Active Directory (Preview)](winauth-azuread-kerberos-managed-instance.md) -- [Troubleshoot Windows Authentication for Azure AD principals on Azure SQL Managed Instance](winauth-azuread-troubleshoot.md) \ No newline at end of file diff --git a/articles/azure-sql/managed-instance/winauth-azuread-setup.md b/articles/azure-sql/managed-instance/winauth-azuread-setup.md deleted file mode 100644 index bdabeec17224f..0000000000000 --- a/articles/azure-sql/managed-instance/winauth-azuread-setup.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: How to set up Windows Authentication for Azure SQL Managed Instance using Azure Active Directory and Kerberos (Preview) -titleSuffix: Azure SQL Managed Instance -description: Learn how to set up Windows Authentication access to Azure SQL Managed Instance using Azure Active Directory and Kerberos. 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.devlang: -ms.topic: conceptual -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: mathoma, bonova, urmilano, wiassaf, kendralittle -ms.date: 03/01/2022 ---- - - -# How to set up Windows Authentication for Azure SQL Managed Instance using Azure Active Directory and Kerberos (Preview) - -This article gives an overview of how to set up infrastructure and managed instances to implement [Windows Authentication for Azure AD principals on Azure SQL Managed Instance](winauth-azuread-overview.md). - -There are two phases to set up Windows Authentication for Azure SQL Managed Instance using Azure Active Directory (Azure AD) and Kerberos. - -- **One-time infrastructure setup.** - - Synchronize Active Directory (AD) and Azure AD, if this hasn't already been done. - - Enable the modern interactive authentication flow, when available. The modern interactive flow is recommended for organizations with Azure AD joined or Hybrid AD joined clients running Windows 10 20H1 / Windows Server 2022 and higher where clients are joined to Azure AD or Hybrid AD. - - Set up the incoming trust-based authentication flow. This is recommended for customers who can’t use the modern interactive flow, but who have AD joined clients running Windows 10 / Windows Server 2012 and higher. -- **Configuration of Azure SQL Managed Instance.** - - Create a system assigned service principal for each managed instance. - -## One-time infrastructure setup - -The first step in infrastructure setup is to synchronize AD with Azure AD, if this hasn't already been completed. - -Following this, a system administrator configures authentication flows. 
Two authentication flows are available to implement Windows Authentication for Azure AD principals on Azure SQL Managed Instance: the incoming trust-based flow supports AD joined clients running Windows server 2012 or higher, and the modern interactive flow supports Azure AD joined clients running Windows 10 21H1 or higher. - -### Synchronize AD with Azure AD - -Customers should first implement [Azure AD Connect](../../active-directory/hybrid/whatis-azure-ad-connect.md) to integrate on-premises directories with Azure AD. - -### Select which authentication flow(s) you will implement - -The following diagram shows eligibility and the core functionality of the modern interactive flow and the incoming trust-based flow: - -:::image type="complex" source="media/winauth-azuread/decision-authentication.svg" alt-text="A decision tree showing criteria to select authentication flows." ::: -"A decision tree showing that the modern interactive flow is suitable for clients running Windows 10 20H1 or Windows Server 2022 or higher, where clients are Azure AD joined or Hybrid AD joined. The incoming trust-based flow is suitable for clients running Windows 10 or Windows Server 2012 or higher where clients are AD joined." -:::image-end::: - -The modern interactive flow works with enlightened clients running Windows 10 21H1 and higher that are Azure AD or Hybrid Azure AD joined. In the modern interactive flow, users can access Azure SQL Managed Instance without requiring a line of sight to Domain Controllers (DCs). There is no need for a trust object to be created in the customer's AD. To enable the modern interactive flow, an administrator will set group policy for Kerberos authentication tickets (TGT) to be used during login. - -The incoming trust-based flow works for clients running Windows 10 or Windows Server 2012 and higher. This flow requires that clients be joined to AD and have a line of sight to AD from on-premises. 
In the incoming trust-based flow, a trust object is created in the customer's AD and is registered in Azure AD. To enable the incoming trust-based flow, an administrator will set up an incoming trust with Azure AD and set up Kerberos Proxy via group policy. - -### Modern interactive authentication flow - -The following prerequisites are required to implement the modern interactive authentication flow: - -|Prerequisite |Description | -|---------|---------| -|Clients must run Windows 10 20H1, Windows Server 2022, or a higher version of Windows. | | -|Clients must be joined to Azure AD or Hybrid Azure AD. | You can determine if this prerequisite is met by running the [dsregcmd command](../../active-directory/devices/troubleshoot-device-dsregcmd.md): `dsregcmd.exe /status` | -|Application must connect to the managed instance via an interactive session. | This supports applications such as SQL Server Management Studio (SSMS) and web applications, but won't work for applications that run as a service. | -|Azure AD tenant. | | -|Azure AD Connect installed. | Hybrid environments where identities exist both in Azure AD and AD. | - - -See [How to set up Windows Authentication for Azure Active Directory with the modern interactive flow (Preview)](winauth-azuread-setup-modern-interactive-flow.md) for steps to enable this authentication flow. - -### Incoming trust-based authentication flow - -The following prerequisites are required to implement the incoming trust-based authentication flow: - -|Prerequisite |Description | -|---------|---------| -|Client must run Windows 10, Windows Server 2012, or a higher version of Windows. | | -|Clients must be joined to AD. The domain must have a functional level of Windows Server 2012 or higher. | You can determine if the client is joined to AD by running the [dsregcmd command](../../active-directory/devices/troubleshoot-device-dsregcmd.md): `dsregcmd.exe /status` | -|Azure AD Hybrid Authentication Management Module. 
| This PowerShell module provides management features for on-premises setup. | -|Azure tenant. | | -|Azure subscription under the same Azure AD tenant you plan to use for authentication.| | -|Azure AD Connect installed. | Hybrid environments where identities exist both in Azure AD and AD. | - - -See [How to set up Windows Authentication for Azure Active Directory with the incoming trust-based flow (Preview)](winauth-azuread-setup-incoming-trust-based-flow.md) for instructions on enabling this authentication flow. - - -## Configure Azure SQL Managed Instance - -The steps to set up Azure SQL Managed Instance are the same for both the incoming trust-based authentication flow and the modern interactive authentication flow. - -#### Prerequisites to configure a managed instance - -The following prerequisites are required to configure a managed instance for Windows Authentication for Azure AD principals: - -|Prerequisite | Description | -|---------|---------| -|Az.Sql PowerShell module | This PowerShell module provides management cmdlets for Azure SQL resources. Install this module by running the following PowerShell command: `Install-Module -Name Az.Sql` | -|Azure Active Directory PowerShell Module | This module provides management cmdlets for Azure AD administrative tasks such as user and service principal management. Install this module by running the following PowerShell command: `Install-Module -Name AzureAD` | -| A managed instance | You may [create a new managed instance](../../azure-arc/data/create-sql-managed-instance.md) or use an existing managed instance. | - -#### Configure each managed instance - -See [Configure Azure SQL Managed Instance for Windows Authentication for Azure Active Directory](winauth-azuread-kerberos-managed-instance.md) for steps to configure each managed instance. 
- -## Limitations - -The following limitations apply to Windows Authentication for Azure AD principals on Azure SQL Managed Instance: - -### Not available for Linux clients - -Windows Authentication for Azure AD principals is currently supported only for client machines running Windows. - -### Azure AD cached logon - -Windows limits how often it connects to Azure AD, so there is a potential for user accounts to not have a refreshed Kerberos Ticket Granting Ticket (TGT) within 4 hours of an upgrade or fresh deployment of a client machine. Ticket requests from Azure AD fail for user accounts that do not have a refreshed TGT. - -As an administrator, you can trigger an online logon immediately to handle upgrade scenarios by running the following command on the client machine, then locking and unlocking the user session to get a refreshed TGT: - -```dos -dsregcmd.exe /RefreshPrt -``` - -## Next steps - -Learn more about implementing Windows Authentication for Azure AD principals on Azure SQL Managed Instance: - -- [What is Windows Authentication for Azure Active Directory principals on Azure SQL Managed Instance? 
(Preview)](winauth-azuread-overview.md) -- [How Windows Authentication for Azure SQL Managed Instance is implemented with Azure Active Directory and Kerberos (Preview)](winauth-implementation-aad-kerberos.md) -- [How to set up Windows Authentication for Azure Active Directory with the modern interactive flow (Preview)](winauth-azuread-setup-modern-interactive-flow.md) -- [How to set up Windows Authentication for Azure AD with the incoming trust-based flow (Preview)](winauth-azuread-setup-incoming-trust-based-flow.md) -- [Configure Azure SQL Managed Instance for Windows Authentication for Azure Active Directory (Preview)](winauth-azuread-kerberos-managed-instance.md) \ No newline at end of file diff --git a/articles/azure-sql/managed-instance/winauth-azuread-troubleshoot.md b/articles/azure-sql/managed-instance/winauth-azuread-troubleshoot.md deleted file mode 100644 index b22030a277f10..0000000000000 --- a/articles/azure-sql/managed-instance/winauth-azuread-troubleshoot.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: Troubleshoot Windows Authentication for Azure AD principals on Azure SQL Managed Instance -titleSuffix: Azure SQL Managed Instance -description: Learn to troubleshoot Azure Active Directory Kerberos authentication for Azure SQL Managed Instance. -services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.devlang: -ms.topic: how-to -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: mathoma, bonova, urmilano, wiassaf, kendralittle -ms.date: 03/01/2022 ---- - - -# Troubleshoot Windows Authentication for Azure AD principals on Azure SQL Managed Instance - -This article contains troubleshooting steps for use when implementing [Windows Authentication for Azure AD principals](winauth-azuread-overview.md). - -## Verify tickets are getting cached - -Use the [klist](/windows-server/administration/windows-commands/klist) command to display a list of currently cached Kerberos tickets. 
- -The `klist get krbtgt` command should return a ticket from the on-premises Active Directory realm. - -```dos -klist get krbtgt/kerberos.microsoftonline.com -``` - -The `klist get MSSQLSvc` command should return a ticket from the `kerberos.microsoftonline.com` realm with a Service Principal Name (SPN) to `MSSQLSvc/<miname>.<dnszone>.database.windows.net:1433`. - -```dos -klist get MSSQLSvc/<miname>.<dnszone>.database.windows.net:1433 -``` - - -The following are some well-known error codes: - -- **0x6fb: SQL SPN not found** - Check that you’ve entered a valid SPN. If you've implemented the incoming trust-based authentication flow, revisit steps to [create and configure the Azure AD Kerberos Trusted Domain Object](winauth-azuread-setup-incoming-trust-based-flow.md#create-and-configure-the-azure-ad-kerberos-trusted-domain-object) to validate that you’ve performed all the configuration steps. -- **0x51f** - This error is likely related to a conflict with the Fiddler tool. Turn off Fiddler to mitigate the issue. - -## Investigate message flow failures - -Use Wireshark, or the network traffic analyzer of your choice, to monitor traffic between the client and on-prem Kerberos Key Distribution Center (KDC). - -When using Wireshark the following is expected: - -- AS-REQ: Client => on-prem KDC => returns on-prem TGT. -- TGS-REQ: Client => on-prem KDC => returns referral to `kerberos.microsoftonline.com`. - -## Next steps - -Learn more about implementing Windows Authentication for Azure AD principals on Azure SQL Managed Instance: - -- [What is Windows Authentication for Azure Active Directory principals on Azure SQL Managed Instance? 
(Preview)](winauth-azuread-overview.md) -- [How to set up Windows Authentication for Azure SQL Managed Instance using Azure Active Directory and Kerberos (Preview)](winauth-azuread-setup.md) -- [How Windows Authentication for Azure SQL Managed Instance is implemented with Azure Active Directory and Kerberos (Preview)](winauth-implementation-aad-kerberos.md) -- [How to set up Windows Authentication for Azure Active Directory with the modern interactive flow (Preview)](winauth-azuread-setup-modern-interactive-flow.md) -- [How to set up Windows Authentication for Azure AD with the incoming trust-based flow (Preview)](winauth-azuread-setup-incoming-trust-based-flow.md) \ No newline at end of file diff --git a/articles/azure-sql/managed-instance/winauth-implementation-aad-kerberos.md b/articles/azure-sql/managed-instance/winauth-implementation-aad-kerberos.md deleted file mode 100644 index be0ff04fd4a58..0000000000000 --- a/articles/azure-sql/managed-instance/winauth-implementation-aad-kerberos.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: How Windows Authentication for Azure SQL Managed Instance is implemented with Azure AD and Kerberos (Preview) -titleSuffix: Azure SQL Managed Instance -description: Learn how Windows Authentication for Azure SQL Managed Instance is implemented with Azure Active Directory (Azure AD) and Kerberos. 
-services: sql-database -ms.service: sql-managed-instance -ms.subservice: deployment-configuration -ms.devlang: -ms.topic: conceptual -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: mathoma, bonova, urmilano, wiassaf, kendralittle -ms.date: 03/01/2022 ---- - -# How Windows Authentication for Azure SQL Managed Instance is implemented with Azure Active Directory and Kerberos (Preview) - -[Windows Authentication for Azure AD principals on Azure SQL Managed Instance](winauth-azuread-overview.md) enables customers to move existing services to the cloud while maintaining a seamless user experience and provides the basis for security infrastructure modernization. To enable Windows Authentication for Azure Active Directory (Azure AD) principals, you will turn your Azure AD tenant into an independent Kerberos realm and create an incoming trust in the customer domain. - -This configuration allows users in the customer domain to access resources in your Azure AD tenant. It will not allow users in the Azure AD tenant to access resources in the customer domain. - -The following diagram gives an overview of how Windows Authentication is implemented for a managed instance using Azure AD and Kerberos: - -:::image type="content" source="media/winauth-azuread/auth-kerberos.svg" alt-text="An overview of authentication: a client submits an encrypted Kerberos ticket as part of an authentication request to a managed instance. The managed instance submits the encrypted Kerberos ticket to Azure AD, who exchanges it for an Azure AD token that is returned the managed instance. The managed instance uses this token to authenticate the user."::: - - -## How Azure AD provides Kerberos authentication - -To create an independent Kerberos realm for an Azure AD tenant, customers install the Azure AD Hybrid Authentication Management PowerShell module on any Windows server and run a cmdlet to create an Azure AD Kerberos object in their cloud and Active Directory. 
Trust created in this way enables existing Windows clients to access Azure AD with Kerberos. - -Windows 10 21H1 clients and above have been enlightened for interactive mode and do not need configuration for interactive login flows to work. Clients running previous versions of Windows can be configured to use Kerberos Key Distribution Center (KDC) proxy servers to use Kerberos authentication. - -Kerberos authentication in Azure AD enables: - -- Traditional on-premises applications to move to the cloud without changing their fundamental authentication scheme. - -- Applications running on enlightened clients authenticate using Azure AD directly. - - -## How Azure SQL Managed Instance works with Azure AD and Kerberos - -Customers use the Azure portal to enable a system assigned service principal on each managed instance. The service principal allows managed instance users to authenticate using the Kerberos protocol. - -## Next steps - -Learn more about enabling Windows Authentication for Azure AD principals on Azure SQL Managed Instance: - -- [How to set up Windows Authentication for Azure Active Directory with the modern interactive flow (Preview)](winauth-azuread-setup-modern-interactive-flow.md) -- [How to set up Windows Authentication for Azure AD with the incoming trust-based flow (Preview)](winauth-azuread-setup-incoming-trust-based-flow.md) -- [Configure Azure SQL Managed Instance for Windows Authentication for Azure Active Directory (Preview)](winauth-azuread-kerberos-managed-instance.md) -- [Troubleshoot Windows Authentication for Azure AD principals on Azure SQL Managed Instance](winauth-azuread-troubleshoot.md) diff --git a/articles/azure-sql/media/accelerated-database-recovery/adr-recovery-process.png b/articles/azure-sql/media/accelerated-database-recovery/adr-recovery-process.png deleted file mode 100644 index d59bfa60d7d18..0000000000000 Binary files a/articles/azure-sql/media/accelerated-database-recovery/adr-recovery-process.png and /dev/null differ 
diff --git a/articles/azure-sql/media/accelerated-database-recovery/current-recovery-process.png b/articles/azure-sql/media/accelerated-database-recovery/current-recovery-process.png deleted file mode 100644 index 8e50f8650c822..0000000000000 Binary files a/articles/azure-sql/media/accelerated-database-recovery/current-recovery-process.png and /dev/null differ diff --git a/articles/azure-sql/media/applies-to/no.png b/articles/azure-sql/media/applies-to/no.png deleted file mode 100644 index 1aa084e6a3326..0000000000000 Binary files a/articles/azure-sql/media/applies-to/no.png and /dev/null differ diff --git a/articles/azure-sql/media/applies-to/yes.png b/articles/azure-sql/media/applies-to/yes.png deleted file mode 100644 index dd2030fe2cb27..0000000000000 Binary files a/articles/azure-sql/media/applies-to/yes.png and /dev/null differ diff --git a/articles/azure-sql/media/azure-hybrid-benefit/pricing.png b/articles/azure-sql/media/azure-hybrid-benefit/pricing.png deleted file mode 100644 index 8f585fffb817c..0000000000000 Binary files a/articles/azure-sql/media/azure-hybrid-benefit/pricing.png and /dev/null differ diff --git a/articles/azure-sql/media/azure-sql-iaas-vs-paas-what-is-overview/sqliaas_sql_server_cloud_continuum.png b/articles/azure-sql/media/azure-sql-iaas-vs-paas-what-is-overview/sqliaas_sql_server_cloud_continuum.png deleted file mode 100644 index 18bc78eb26dbc..0000000000000 Binary files a/articles/azure-sql/media/azure-sql-iaas-vs-paas-what-is-overview/sqliaas_sql_server_cloud_continuum.png and /dev/null differ diff --git a/articles/azure-sql/media/capacity-errors-troubleshoot/register-with-sql-rp.png b/articles/azure-sql/media/capacity-errors-troubleshoot/register-with-sql-rp.png deleted file mode 100644 index 322b5223fe813..0000000000000 Binary files a/articles/azure-sql/media/capacity-errors-troubleshoot/register-with-sql-rp.png and /dev/null differ diff --git a/articles/azure-sql/media/identify-query-performance-issues/workload-states.png 
b/articles/azure-sql/media/identify-query-performance-issues/workload-states.png deleted file mode 100644 index df818b0203d98..0000000000000 Binary files a/articles/azure-sql/media/identify-query-performance-issues/workload-states.png and /dev/null differ diff --git a/articles/azure-sql/media/multi-model-features/image_1.png b/articles/azure-sql/media/multi-model-features/image_1.png deleted file mode 100644 index 1562a24737ec3..0000000000000 Binary files a/articles/azure-sql/media/multi-model-features/image_1.png and /dev/null differ diff --git a/articles/azure-sql/media/temporal-tables/azuretemporal1.png b/articles/azure-sql/media/temporal-tables/azuretemporal1.png deleted file mode 100644 index 78734bda1314a..0000000000000 Binary files a/articles/azure-sql/media/temporal-tables/azuretemporal1.png and /dev/null differ diff --git a/articles/azure-sql/media/temporal-tables/azuretemporal2.png b/articles/azure-sql/media/temporal-tables/azuretemporal2.png deleted file mode 100644 index 2f614ae72caf8..0000000000000 Binary files a/articles/azure-sql/media/temporal-tables/azuretemporal2.png and /dev/null differ diff --git a/articles/azure-sql/media/temporal-tables/azuretemporal3.png b/articles/azure-sql/media/temporal-tables/azuretemporal3.png deleted file mode 100644 index f50138caa9cf1..0000000000000 Binary files a/articles/azure-sql/media/temporal-tables/azuretemporal3.png and /dev/null differ diff --git a/articles/azure-sql/media/temporal-tables/azuretemporal4.png b/articles/azure-sql/media/temporal-tables/azuretemporal4.png deleted file mode 100644 index fe7155b875ac5..0000000000000 Binary files a/articles/azure-sql/media/temporal-tables/azuretemporal4.png and /dev/null differ diff --git a/articles/azure-sql/media/temporal-tables/azuretemporal5.png b/articles/azure-sql/media/temporal-tables/azuretemporal5.png deleted file mode 100644 index 50fecb4faf18b..0000000000000 Binary files a/articles/azure-sql/media/temporal-tables/azuretemporal5.png and /dev/null differ 
diff --git a/articles/azure-sql/media/temporal-tables/azuretemporal6.png b/articles/azure-sql/media/temporal-tables/azuretemporal6.png deleted file mode 100644 index 08c0310ee85e3..0000000000000 Binary files a/articles/azure-sql/media/temporal-tables/azuretemporal6.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/access-to-sql-database-guide.md b/articles/azure-sql/migration-guides/database/access-to-sql-database-guide.md deleted file mode 100644 index 35a8ba3ccd299..0000000000000 --- a/articles/azure-sql/migration-guides/database/access-to-sql-database-guide.md +++ /dev/null @@ -1,188 +0,0 @@ ---- -title: "Access to Azure SQL Database: Migration guide" -description: In this guide, you learn how to migrate your Microsoft Access databases to an Azure SQL database by using SQL Server Migration Assistant for Access (SSMA for Access). -ms.service: sql-database -ms.subservice: migration-guide -ms.custom: -ms.devlang: -ms.topic: how-to -author: mokabiru -ms.author: mokabiru -ms.reviewer: mathoma, kendralittle -ms.date: 03/19/2021 ---- - -# Migration guide: Access to Azure SQL Database - -In this guide, you learn [how to migrate](https://azure.microsoft.com/migration/migration-journey) your Microsoft Access database to an Azure SQL database by using [SQL Server Migration](https://azure.microsoft.com/migration/sql-server/) Assistant for Access (SSMA for Access). - -For other migration guides, see [Azure Database Migration Guide](/data-migration). - -## Prerequisites - -Before you begin migrating your Access database to a SQL database, do the following: - -- Verify that your source environment is supported. -- Download and install [SQL Server Migration Assistant for Access](https://www.microsoft.com/download/details.aspx?id=54255). -- Ensure that you have connectivity and sufficient permissions to access both source and target. 
- -## Pre-migration - -After you've met the prerequisites, you're ready to discover the topology of your environment and assess the feasibility of your [Azure cloud migration](https://azure.microsoft.com/migration). - - -### Assess - -Use SSMA for Access to review database objects and data, and assess databases for migration. - -To create an assessment, do the following: - -1. Open [SSMA for Access](https://www.microsoft.com/download/details.aspx?id=54255). -1. Select **File**, and then select **New Project**. -1. Provide a project name and a location for your project and then, in the drop-down list, select **Azure SQL Database** as the migration target. -1. Select **OK**. - - ![Screenshot of the "New Project" pane for entering your migration project name and location.](./media/access-to-sql-database-guide/new-project.png) - -1. Select **Add Databases**, and then select the databases to be added to your new project. - - ![Screenshot of the "Add Databases" tab in SSMA for Access.](./media/access-to-sql-database-guide/add-databases.png) - -1. On the **Access Metadata Explorer** pane, right-click a database, and then select **Create Report**. Alternatively, you can select the **Create Report** tab at the upper right. - - ![Screenshot of the "Create Report" command in Access Metadata Explorer.](./media/access-to-sql-database-guide/create-report.png) - -1. Review the HTML report to understand the conversion statistics and any errors or warnings. You can also open the report in Excel to get an inventory of Access objects and understand the effort required to perform schema conversions. The default location for the report is in the report folder within SSMAProjects. 
For example: - - `drive:\\Documents\SSMAProjects\MyAccessMigration\report\report_` - - ![Screenshot of an example database report assessment in SSMA.](./media/access-to-sql-database-guide/sample-assessment.png) - -### Validate the data types - -Validate the default data type mappings, and change them based on your requirements, if necessary. To do so: - -1. In SSMA for Access, select **Tools**, and then select **Project Settings**. -1. Select the **Type Mapping** tab. - - ![Screenshot of the "Type Mapping" pane in SSMA for Access.](./media/access-to-sql-database-guide/type-mappings.png) - -1. You can change the type mapping for each table by selecting the table name on the **Access Metadata Explorer** pane. - - -### Convert the schema - -To convert database objects, do the following: - -1. Select the **Connect to Azure SQL Database** tab, and then do the following: - - a. Enter the details for connecting to your SQL database. - b. In the drop-down list, select your target SQL database. Or you can enter a new name, in which case a database will be created on the target server. - c. Provide authentication details. - d. Select **Connect**. - - ![Screenshot of the "Connect to Azure SQL Database" pane for entering connection details.](./media/access-to-sql-database-guide/connect-to-sqldb.png) - -1. On the **Access Metadata Explorer** pane, right-click the database, and then select **Convert Schema**. Alternatively, you can select your database and then select the **Convert Schema** tab. - - ![Screenshot of the "Convert Schema" command on the "Access Metadata Explorer" pane.](./media/access-to-sql-database-guide/convert-schema.png) - -1. After the conversion is completed, compare the converted objects to the original objects to identify potential problems, and address the problems based on the recommendations. 
- - ![Screenshot showing a comparison of the converted objects to the source objects.](./media/access-to-sql-database-guide/table-comparison.png) - - Compare the converted Transact-SQL text to the original code, and review the recommendations. - - ![Screenshot showing a comparison of converted queries to the source code.](./media/access-to-sql-database-guide/query-comparison.png) - -1. (Optional) To convert an individual object, right-click the object, and then select **Convert Schema**. Converted objects appear in bold text in **Access Metadata Explorer**: - - ![Screenshot showing that the objects in Access Metadata Explorer are converted.](./media/access-to-sql-database-guide/converted-items.png) - -1. On the **Output** pane, select the **Review results** icon, and review the errors on the **Error list** pane. -1. Save the project locally for an offline schema remediation exercise. To do so, select **File** > **Save Project**. This gives you an opportunity to evaluate the source and target schemas offline and perform remediation before you publish them to your SQL database. - -## Migrate the databases - -After you've assessed your databases and addressed any discrepancies, you can run the migration process. Migrating data is a bulk-load operation that moves rows of data into an Azure SQL database in transactions. The number of rows to be loaded into your SQL database in each transaction is configured in the project settings. - -To publish your schema and migrate the data by using SSMA for Access, do the following: - -1. If you haven't already done so, select **Connect to Azure SQL Database**, and provide connection details. - -1. Publish the schema. On the **Azure SQL Database Metadata Explorer** pane, right-click the database you're working with, and then select **Synchronize with Database**. This action publishes the Access database schema to the SQL database. - -1. 
On the **Synchronize with the Database** pane, review the mapping between your source project and your target: - - ![Screenshot of the "Synchronize with the Database" pane for reviewing the synchronization with the database.](./media/access-to-sql-database-guide/synchronize-with-database-review.png) - -1. On the **Access Metadata Explorer** pane, select the check boxes next to the items you want to migrate. To migrate the entire database, select the check box next to the database. - -1. Migrate the data. Right-click the database or object you want to migrate, and then select **Migrate Data**. Alternatively, you can select the **Migrate Data** tab at the upper right. - - To migrate data for an entire database, select the check box next to the database name. To migrate data from individual tables, expand the database, expand **Tables**, and then select the check box next to the table. To omit data from individual tables, clear the check box. - - ![Screenshot of the "Migrate Data" command on the "Access Metadata Explorer" pane.](./media/access-to-sql-database-guide/migrate-data.png) - -1. After migration is completed, view the **Data Migration Report**. - - ![Screenshot of the "Migrate Data Report" pane showing an example report for review.](./media/access-to-sql-database-guide/migrate-data-review.png) - -1. Connect to your Azure SQL database by using [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms), and validate the migration by reviewing the data and schema. - - ![Screenshot of SQL Server Management Studio Object Explorer for validating your migration in SSMA.](./media/access-to-sql-database-guide/validate-data.png) - -## Post-migration - -After you've successfully completed the *migration* stage, you need to complete a series of post-migration tasks to ensure that everything is functioning as smoothly and efficiently as possible. 
- -### Remediate applications - -After the data is migrated to the target environment, all the applications that formerly consumed the source need to start consuming the target. Accomplishing this will in some cases require changes to the applications. - -### Perform tests - -The test approach to database migration consists of the following activities: - -1. **Develop validation tests**: To test the database migration, you need to use SQL queries. You must create the validation queries to run against both the source and target databases. Your validation queries should cover the scope you've defined. - -1. **Set up a test environment**: The test environment should contain a copy of the source database and the target database. Be sure to isolate the test environment. - -1. **Run validation tests**: Run validation tests against the source and the target, and then analyze the results. - -1. **Run performance tests**: Run performance tests against the source and the target, and then analyze and compare the results. - - -### Optimize - -The post-migration phase is crucial for reconciling any data accuracy issues, verifying completeness, and addressing performance issues with the workload. - -For more information about these issues and the steps to mitigate them, see the [Post-migration validation and optimization guide](/sql/relational-databases/post-migration-validation-and-optimization-guide). - -## Migration assets - -For more assistance with completing this migration scenario, see the following resource. It was developed in support of a real-world migration project engagement. - -| Title | Description | -| --- | --- | -| [Data workload assessment model and tool](https://www.microsoft.com/download/details.aspx?id=103130) | Provides suggested “best fit” target platforms, cloud readiness, and application/database remediation levels for specified workloads. 
It offers simple, one-click calculation and report generation that helps to accelerate large estate assessments by providing an automated, uniform target-platform decision process. | - -The Data SQL Engineering team developed these resources. This team's core charter is to unblock and accelerate complex modernization for data platform migration projects to Microsoft's Azure data platform. - -## Next steps - -- For a matrix of Microsoft and third-party services and tools that are available to assist you with various database and data migration scenarios and specialty tasks, see [Service and tools for data migration](../../../dms/dms-tools-matrix.md). - -- To learn more about Azure SQL Database see: - - [An overview of SQL Database](../../database/sql-database-paas-overview.md) - - [Azure total cost of ownership calculator](https://azure.microsoft.com/pricing/tco/calculator/) - - -- To learn more about the framework and adoption cycle for cloud migrations, see: - - [Cloud Adoption Framework for Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/contoso-migration-scale) - - [Best practices for costing and sizing workloads for migration to Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/migrate-best-practices-costs) - - [Cloud Migration Resources](https://azure.microsoft.com/migration/resources) - - -- To assess the application access layer, see [Data Access Migration Toolkit (preview)](https://marketplace.visualstudio.com/items?itemName=ms-databasemigration.data-access-migration-toolkit). -- For information about how to perform Data Access Layer A/B testing, see [Overview of Database Experimentation Assistant](/sql/dea/database-experimentation-assistant-overview). 
\ No newline at end of file diff --git a/articles/azure-sql/migration-guides/database/db2-to-sql-database-guide.md b/articles/azure-sql/migration-guides/database/db2-to-sql-database-guide.md deleted file mode 100644 index 6252f36b972b4..0000000000000 --- a/articles/azure-sql/migration-guides/database/db2-to-sql-database-guide.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -title: "Db2 to Azure SQL Database: Migration guide" -description: This guide teaches you to migrate your IMB Db2 databases to Azure SQL Database, by using the SQL Server Migration Assistant for Db2 (SSMA for Db2). -ms.service: sql-database -ms.subservice: migration-guide -ms.custom: -ms.devlang: -ms.topic: how-to -author: mokabiru -ms.author: mokabiru -ms.reviewer: mathoma, kendralittle -ms.date: 05/14/2021 ---- -# Migration guide: IBM Db2 to Azure SQL Database -[!INCLUDE[appliesto-sqldb-sqlmi](../../includes/appliesto-sqldb.md)] - -In this guide, you learn [how to migrate](https://azure.microsoft.com/migration/migration-journey) your IBM Db2 databases to Azure SQL Database, by using [SQL Server Migration](https://azure.microsoft.com/migration/sql-server/) Assistant for Db2. - -For other migration guides, see [Azure Database Migration Guides](/data-migration). - -## Prerequisites - -To migrate your Db2 database to SQL Database, you need: - -- To verify that your [source environment is supported](/sql/ssma/db2/installing-ssma-for-db2-client-db2tosql#prerequisites). -- To download [SQL Server Migration Assistant (SSMA) for Db2](https://www.microsoft.com/download/details.aspx?id=54254). -- A target database in [Azure SQL Database](../../database/single-database-create-quickstart.md). -- Connectivity and sufficient permissions to access both source and target. - -## Pre-migration - -After you have met the prerequisites, you're ready to discover the topology of your environment and assess the feasibility of your [Azure cloud migration](https://azure.microsoft.com/migration). 
- -### Assess and convert - -Use SSMA for DB2 to review database objects and data, and assess databases for migration. - -To create an assessment, follow these steps: - -1. Open [SSMA for Db2](https://www.microsoft.com/download/details.aspx?id=54254). -1. Select **File** > **New Project**. -1. Provide a project name and a location to save your project. Then select Azure SQL Database as the migration target from the drop-down list, and select **OK**. - - :::image type="content" source="media/db2-to-sql-database-guide/new-project.png" alt-text="Screenshot that shows project details to specify."::: - - -1. On **Connect to Db2**, enter values for the Db2 connection details. - - :::image type="content" source="media/db2-to-sql-database-guide/connect-to-db2.png" alt-text="Screenshot that shows options to connect to your Db2 instance."::: - - -1. Right-click the Db2 schema you want to migrate, and then choose **Create report**. This will generate an HTML report. Alternatively, you can choose **Create report** from the navigation bar after selecting the schema. - - :::image type="content" source="media/db2-to-sql-database-guide/create-report.png" alt-text="Screenshot that shows how to create a report."::: - -1. Review the HTML report to understand conversion statistics and any errors or warnings. You can also open the report in Excel to get an inventory of Db2 objects and the effort required to perform schema conversions. The default location for the report is in the report folder within *SSMAProjects*. - - For example: `drive:\\Documents\SSMAProjects\MyDb2Migration\report\report_`. - - :::image type="content" source="media/db2-to-sql-database-guide/report.png" alt-text="Screenshot of the report that you review to identify any errors or warnings."::: - - -### Validate data types - -Validate the default data type mappings, and change them based on requirements if necessary. To do so, follow these steps: - -1. Select **Tools** from the menu. -1. Select **Project Settings**. 
-1. Select the **Type mappings** tab. - - :::image type="content" source="media/db2-to-sql-database-guide/type-mapping.png" alt-text="Screenshot that shows selecting the schema and type mapping."::: - -1. You can change the type mapping for each table by selecting the table in the **Db2 Metadata Explorer**. - -### Convert schema - -To convert the schema, follow these steps: - -1. (Optional) Add dynamic or ad-hoc queries to statements. Right-click the node, and then choose **Add statements**. -1. Select **Connect to Azure SQL Database**. - 1. Enter connection details to connect your database in Azure SQL Database. - 1. Choose your target SQL Database from the drop-down list, or provide a new name, in which case a database will be created on the target server. - 1. Provide authentication details. - 1. Select **Connect**. - - :::image type="content" source="media/db2-to-sql-database-guide/connect-to-sql-database.png" alt-text="Screenshot that shows the details needed to connect to the logical server in Azure."::: - - -1. Right-click the schema, and then choose **Convert Schema**. Alternatively, you can choose **Convert Schema** from the top navigation bar after selecting your schema. - - :::image type="content" source="media/db2-to-sql-database-guide/convert-schema.png" alt-text="Screenshot that shows selecting the schema and converting it."::: - -1. After the conversion completes, compare and review the structure of the schema to identify potential problems. Address the problems based on the recommendations. - - :::image type="content" source="media/db2-to-sql-database-guide/compare-review-schema-structure.png" alt-text="Screenshot that shows comparing and reviewing the structure of the schema to identify potential problems."::: - -1. In the **Output** pane, select **Review results**. In the **Error list** pane, review errors. -1. Save the project locally for an offline schema remediation exercise. From the **File** menu, select **Save Project**. 
This gives you an opportunity to evaluate the source and target schemas offline, and perform remediation before you can publish the schema to SQL Database. - -## Migrate - -After you have completed assessing your databases and addressing any discrepancies, the next step is to execute the migration process. - -To publish your schema and migrate your data, follow these steps: - -1. Publish the schema. In **Azure SQL Database Metadata Explorer**, from the **Databases** node, right-click the database. Then select **Synchronize with Database**. - - :::image type="content" source="media/db2-to-sql-database-guide/synchronize-with-database.png" alt-text="Screenshot that shows the option to synchronize with database."::: - -1. Migrate the data. Right-click the database or object you want to migrate in **Db2 Metadata Explorer**, and choose **Migrate data**. Alternatively, you can select **Migrate Data** from the navigation bar. To migrate data for an entire database, select the check box next to the database name. To migrate data from individual tables, expand the database, expand **Tables**, and then select the check box next to the table. To omit data from individual tables, clear the check box. - - :::image type="content" source="media/db2-to-sql-database-guide/migrate-data.png" alt-text="Screenshot that shows selecting the schema and choosing to migrate data."::: - -1. Provide connection details for both Db2 and Azure SQL Database. -1. After migration completes, view the **Data Migration Report**. - - :::image type="content" source="media/db2-to-sql-database-guide/data-migration-report.png" alt-text="Screenshot that shows where to review the data migration report."::: - -1. Connect to your database in Azure SQL Database by using [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms). Validate the migration by reviewing the data and schema. 
- - :::image type="content" source="media/db2-to-sql-database-guide/compare-schema-in-ssms.png" alt-text="Screenshot that shows comparing the schema in SQL Server Management Studio."::: - -## Post-migration - -After the migration is complete, you need to go through a series of post-migration tasks to ensure that everything is functioning as smoothly and efficiently as possible. - -### Remediate applications - -After the data is migrated to the target environment, all the applications that formerly consumed the source need to start consuming the target. Accomplishing this will in some cases require changes to the applications. - -### Perform tests - -Testing consists of the following activities: - -1. **Develop validation tests**: To test database migration, you need to use SQL queries. You must create the validation queries to run against both the source and the target databases. Your validation queries should cover the scope you have defined. -1. **Set up the test environment**: The test environment should contain a copy of the source database and the target database. Be sure to isolate the test environment. -1. **Run validation tests**: Run the validation tests against the source and the target, and then analyze the results. -1. **Run performance tests**: Run performance tests against the source and the target, and then analyze and compare the results. - -## Advanced features - -Be sure to take advantage of the advanced cloud-based features offered by SQL Database, such as [built-in high availability](../../database/high-availability-sla.md), [threat detection](../../database/azure-defender-for-sql.md), and [monitoring and tuning your workload](../../database/monitor-tune-overview.md). - -Some SQL Server features are only available when the [database compatibility level](/sql/relational-databases/databases/view-or-change-the-compatibility-level-of-a-database) is changed to the latest compatibility level. 
- -## Migration assets - -For additional assistance, see the following resources, which were developed in support of a real-world migration project engagement: - -|Asset |Description | -|---------|---------| -|[Data workload assessment model and tool](https://www.microsoft.com/download/details.aspx?id=103130)| This tool provides suggested "best fit" target platforms, cloud readiness, and application/database remediation level for a given workload. It offers simple, one-click calculation and report generation that helps to accelerate large estate assessments by providing and automated and uniform target platform decision process.| -|[Db2 zOS data assets discovery and assessment package](https://www.microsoft.com/download/details.aspx?id=103108)|After running the SQL script on a database, you can export the results to a file on the file system. Several file formats are supported, including \*.csv, so that you can capture the results in external tools such as spreadsheets. This method can be useful if you want to easily share results with teams that do not have the workbench installed.| -|[IBM Db2 LUW inventory scripts and artifacts](https://www.microsoft.com/download/details.aspx?id=103109)|This asset includes a SQL query that hits IBM Db2 LUW version 11.1 system tables and provides a count of objects by schema and object type, a rough estimate of "raw data" in each schema, and the sizing of tables in each schema, with results stored in a CSV format.| -|[IBM Db2 to SQL DB - Database Compare utility](https://www.microsoft.com/download/details.aspx?id=103016)|The Database Compare utility is a Windows console application that you can use to verify that the data is identical both on source and target platforms. You can use the tool to efficiently compare data down to the row or column level in all or selected tables, rows, and columns.| - -The Data SQL Engineering team developed these resources. 
This team's core charter is to unblock and accelerate complex modernization for data platform migration projects to Microsoft's Azure data platform. - - - -## Next steps - -- For Microsoft and third-party services and tools to assist you with various database and data migration scenarios, see [Service and tools for data migration](../../../dms/dms-tools-matrix.md). - -- To learn more about Azure SQL Database, see: - - [An overview of SQL Database](../../database/sql-database-paas-overview.md) - - [Azure total cost of ownership calculator](https://azure.microsoft.com/pricing/tco/calculator/) - -- To learn more about the framework and adoption cycle for cloud migrations, see: - - [Cloud Adoption Framework for Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/contoso-migration-scale) - - [Best practices for costing and sizing workloads migrated to Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/migrate-best-practices-costs) - - [Cloud Migration Resources](https://azure.microsoft.com/migration/resources) - -- To assess the application access layer, see [Data Access Migration Toolkit](https://marketplace.visualstudio.com/items?itemName=ms-databasemigration.data-access-migration-toolkit). -- For details on how to perform data access layer A/B testing, see [Database Experimentation Assistant](/sql/dea/database-experimentation-assistant-overview). 
diff --git a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/add-databases.png b/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/add-databases.png deleted file mode 100644 index 0c540d7eb6c9d..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/add-databases.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/connect-to-sqldb.png b/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/connect-to-sqldb.png deleted file mode 100644 index e80b0ef936408..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/connect-to-sqldb.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/convert-schema.png b/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/convert-schema.png deleted file mode 100644 index f8f3834182ab1..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/convert-schema.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/converted-items.png b/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/converted-items.png deleted file mode 100644 index 1a3e14b1a379c..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/converted-items.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/create-report.png b/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/create-report.png deleted file mode 100644 index c6828bf9b6bff..0000000000000 Binary files 
a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/create-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/migrate-data-review.png b/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/migrate-data-review.png deleted file mode 100644 index 073a25ac0e2a1..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/migrate-data-review.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/migrate-data.png b/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/migrate-data.png deleted file mode 100644 index d34794b3863f7..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/migrate-data.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/new-project.png b/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/new-project.png deleted file mode 100644 index 4ae4fae0c7d14..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/new-project.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/query-comparison.png b/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/query-comparison.png deleted file mode 100644 index f36c7766d3aaf..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/query-comparison.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/sample-assessment.png b/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/sample-assessment.png deleted file mode 
100644 index 8bce3f17c4d9e..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/sample-assessment.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/synchronize-with-database-review.png b/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/synchronize-with-database-review.png deleted file mode 100644 index 0c0c3d75d4188..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/synchronize-with-database-review.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/synchronize-with-database.png b/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/synchronize-with-database.png deleted file mode 100644 index 99c7c7fcc06e0..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/synchronize-with-database.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/table-comparison.png b/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/table-comparison.png deleted file mode 100644 index 7c73540deab58..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/table-comparison.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/type-mappings.png b/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/type-mappings.png deleted file mode 100644 index 42572bbf9e046..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/type-mappings.png and /dev/null differ diff --git 
a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/validate-data.png b/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/validate-data.png deleted file mode 100644 index fbf53278ef5b9..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/access-to-sql-database-guide/validate-data.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/compare-review-schema-structure.png b/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/compare-review-schema-structure.png deleted file mode 100644 index 2b354a9fc6af1..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/compare-review-schema-structure.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/compare-schema-in-ssms.png b/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/compare-schema-in-ssms.png deleted file mode 100644 index c2c441340eb4b..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/compare-schema-in-ssms.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/connect-to-db2.png b/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/connect-to-db2.png deleted file mode 100644 index f49395ebf0fd6..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/connect-to-db2.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/connect-to-sql-database.png b/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/connect-to-sql-database.png deleted file mode 100644 index 7a9b8637c3b7f..0000000000000 Binary files 
a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/connect-to-sql-database.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/convert-schema.png b/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/convert-schema.png deleted file mode 100644 index e7ab6356eeda5..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/convert-schema.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/create-report.png b/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/create-report.png deleted file mode 100644 index c61f2d28b7c10..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/create-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/data-migration-report.png b/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/data-migration-report.png deleted file mode 100644 index 1caa74599d5a5..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/data-migration-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/migrate-data.png b/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/migrate-data.png deleted file mode 100644 index b3c7d5a45709f..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/migrate-data.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/new-project.png b/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/new-project.png deleted file mode 100644 index c654ffec05413..0000000000000 
Binary files a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/new-project.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/report.png b/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/report.png deleted file mode 100644 index a7a4b56dcf216..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/synchronize-with-database.png b/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/synchronize-with-database.png deleted file mode 100644 index 259bac0b3846e..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/synchronize-with-database.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/type-mapping.png b/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/type-mapping.png deleted file mode 100644 index 2f7519117bb28..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/db2-to-sql-database-guide/type-mapping.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/connect-to-mysql.png b/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/connect-to-mysql.png deleted file mode 100644 index 18ca238f3e524..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/connect-to-mysql.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/connect-to-sqldb.png b/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/connect-to-sqldb.png deleted file mode 100644 index 
4eb38b4ce5bf0..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/connect-to-sqldb.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/conversion-report.png b/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/conversion-report.png deleted file mode 100644 index c151c9850e3d2..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/conversion-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/convert-schema.png b/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/convert-schema.png deleted file mode 100644 index ccb930d47bb9f..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/convert-schema.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/create-report.png b/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/create-report.png deleted file mode 100644 index 1f7392beb00c4..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/create-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/data-migration-report.png b/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/data-migration-report.png deleted file mode 100644 index ac6b613417488..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/data-migration-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/migrate-data.png 
b/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/migrate-data.png deleted file mode 100644 index 8c8563f379707..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/migrate-data.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/new-project.png b/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/new-project.png deleted file mode 100644 index a87d08e0af5f4..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/new-project.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/procedure-comparison.png b/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/procedure-comparison.png deleted file mode 100644 index 227e39b7d19f1..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/procedure-comparison.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/select-database.png b/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/select-database.png deleted file mode 100644 index ac88f6560dff2..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/select-database.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/synchronize-database-review.png b/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/synchronize-database-review.png deleted file mode 100644 index f636e6e6d0dcb..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/synchronize-database-review.png and /dev/null differ diff --git 
a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/synchronize-database.png b/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/synchronize-database.png deleted file mode 100644 index 552ffd9ae3766..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/synchronize-database.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/table-comparison.png b/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/table-comparison.png deleted file mode 100644 index 75e0b6cfa0667..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/table-comparison.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/type-mappings.png b/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/type-mappings.png deleted file mode 100644 index d0265495ccefc..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/type-mappings.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/validate-in-ssms.png b/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/validate-in-ssms.png deleted file mode 100644 index d5926937cc31e..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/mysql-to-sql-database-guide/validate-in-ssms.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/assessment-report.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/assessment-report.png deleted file mode 100644 index 0ea3750877804..0000000000000 Binary files 
a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/assessment-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/connect-to-oracle.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/connect-to-oracle.png deleted file mode 100644 index 76140c987c9f4..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/connect-to-oracle.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/connect-to-sql-database.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/connect-to-sql-database.png deleted file mode 100644 index b44f56f12efae..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/connect-to-sql-database.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/convert-schema-review.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/convert-schema-review.png deleted file mode 100644 index a9c0fcc72bf75..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/convert-schema-review.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/convert-schema.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/convert-schema.png deleted file mode 100644 index 25b8b2d392424..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/convert-schema.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/create-report.png 
b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/create-report.png deleted file mode 100644 index 98dcb21cc29bf..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/create-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/data-migration-report.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/data-migration-report.png deleted file mode 100644 index 9116dd0645678..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/data-migration-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/migrate-data-report.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/migrate-data-report.png deleted file mode 100644 index 9f8f3625c357f..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/migrate-data-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/migrate-data.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/migrate-data.png deleted file mode 100644 index c55dc3e16102d..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/migrate-data.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/new-project.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/new-project.png deleted file mode 100644 index 3149011329a16..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/new-project.png and /dev/null differ diff --git 
a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/procedure-comparison.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/procedure-comparison.png deleted file mode 100644 index 0e0ef23acd977..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/procedure-comparison.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/select-schema.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/select-schema.png deleted file mode 100644 index 3ba953289427b..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/select-schema.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/ssma-tester-new.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/ssma-tester-new.png deleted file mode 100644 index e1758e05b28de..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/ssma-tester-new.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/synchronize-with-database-review.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/synchronize-with-database-review.png deleted file mode 100644 index 47b8ede83c2f5..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/synchronize-with-database-review.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/synchronize-with-database.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/synchronize-with-database.png deleted file mode 100644 index 161a29d11cc30..0000000000000 Binary 
files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/synchronize-with-database.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/table-mapping.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/table-mapping.png deleted file mode 100644 index e6101db19c908..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/table-mapping.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/test-call-ordering.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/test-call-ordering.png deleted file mode 100644 index c479a8c763962..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/test-call-ordering.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-finalize-case.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-finalize-case.png deleted file mode 100644 index de1eee246279e..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-finalize-case.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-init-test-case.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-init-test-case.png deleted file mode 100644 index 2e5a1df10f3b8..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-init-test-case.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-oracle-connect.png 
b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-oracle-connect.png deleted file mode 100644 index e190527dd6ecf..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-oracle-connect.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-repo-run.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-repo-run.png deleted file mode 100644 index 05daa7a7f739e..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-repo-run.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-run-status.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-run-status.png deleted file mode 100644 index e5bb3046c70ba..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-run-status.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-run-test-case.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-run-test-case.png deleted file mode 100644 index a99512c2a3c80..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-run-test-case.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-select-configure-affected.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-select-configure-affected.png deleted file mode 100644 index f1a66ed10d5b1..0000000000000 Binary files 
a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-select-configure-affected.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-select-configure-objects.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-select-configure-objects.png deleted file mode 100644 index 15d1b666152bf..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-select-configure-objects.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-sql-connect.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-sql-connect.png deleted file mode 100644 index c6d5f6e3772d8..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-sql-connect.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-test-failed.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-test-failed.png deleted file mode 100644 index dcb5be28184b3..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-test-failed.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-test-repo.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-test-repo.png deleted file mode 100644 index 9db1b8913da8a..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-test-repo.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-test-result.png 
b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-test-result.png deleted file mode 100644 index 84b9360dd793d..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-test-result.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-test-success.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-test-success.png deleted file mode 100644 index b9f09614c671e..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/tester-test-success.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/type-mappings.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/type-mappings.png deleted file mode 100644 index e9826bae137c5..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/type-mappings.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/validate-data.png b/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/validate-data.png deleted file mode 100644 index a109d7ecca6bd..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/oracle-to-sql-database-guide/validate-data.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/sql-server-to-database-overview/migration-process-flow-small.png b/articles/azure-sql/migration-guides/database/media/sql-server-to-database-overview/migration-process-flow-small.png deleted file mode 100644 index 1b04502dc966a..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/sql-server-to-database-overview/migration-process-flow-small.png and /dev/null differ 
diff --git a/articles/azure-sql/migration-guides/database/media/sql-server-to-database-overview/sql-server-import-database-settings.png b/articles/azure-sql/migration-guides/database/media/sql-server-to-database-overview/sql-server-import-database-settings.png deleted file mode 100644 index 6b2c725eea172..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/sql-server-to-database-overview/sql-server-import-database-settings.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/media/sql-server-to-database-overview/sql-server-import-database.png b/articles/azure-sql/migration-guides/database/media/sql-server-to-database-overview/sql-server-import-database.png deleted file mode 100644 index 7ca417684ecd9..0000000000000 Binary files a/articles/azure-sql/migration-guides/database/media/sql-server-to-database-overview/sql-server-import-database.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/database/mysql-to-sql-database-guide.md b/articles/azure-sql/migration-guides/database/mysql-to-sql-database-guide.md deleted file mode 100644 index b1a0907d53f4f..0000000000000 --- a/articles/azure-sql/migration-guides/database/mysql-to-sql-database-guide.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: "MySQL to Azure SQL Database: Migration guide" -description: In this guide, you learn how to migrate your MySQL databases to an Azure SQL database by using SQL Server Migration Assistant for MySQL (SSMA for MySQL). 
-ms.service: sql-database -ms.subservice: migration-guide -ms.custom: -ms.devlang: -ms.topic: how-to -author: mokabiru -ms.author: chadam -ms.reviewer: mathoma, kendralittle -ms.date: 03/19/2021 ---- - -# Migration guide: MySQL to Azure SQL Database -[!INCLUDE[appliesto-sqldb-sqlmi](../../includes/appliesto-sqldb.md)] - -In this guide, you learn [how to migrate](https://azure.microsoft.com/migration/migration-journey) your MySQL database to an Azure SQL database by using [SQL Server Migration](https://azure.microsoft.com/migration/sql-server/) Assistant for MySQL (SSMA for MySQL). - -For other migration guides, see [Azure Database Migration Guide](/data-migration). - -## Prerequisites - -Before you begin migrating your MySQL database to a SQL database, do the following: - -- Verify that your source environment is supported. Currently, MySQL 5.6 and 5.7 are supported. -- Download and install [SQL Server Migration Assistant for MySQL](https://www.microsoft.com/download/details.aspx?id=54257). -- Ensure that you have connectivity and sufficient permissions to access both the source and the target. - -## Pre-migration - -After you've met the prerequisites, you're ready to discover the topology of your environment and assess the feasibility of your [Azure cloud migration](https://azure.microsoft.com/migration). - -### Assess - -Use SQL Server Migration Assistant (SSMA) for MySQL to review database objects and data, and assess databases for migration. - -To create an assessment, do the following: - -1. Open [SSMA for MySQL](https://www.microsoft.com/download/details.aspx?id=54257). -1. Select **File**, and then select **New Project**. -1. In the **New Project** pane, enter a name and location for your project and then, in the **Migrate To** drop-down list, select **Azure SQL Database**. -1. Select **OK**. - - ![Screenshot of the "New Project" pane for entering your migration project name, location, and target.](./media/mysql-to-sql-database-guide/new-project.png) - -1. 
Select the **Connect to MySQL** tab, and then provide details for connecting your MySQL server. - - ![Screenshot of the "Connect to MySQL" pane for specifying connections to the source.](./media/mysql-to-sql-database-guide/connect-to-mysql.png) - -1. On the **MySQL Metadata Explorer** pane, right-click the MySQL schema, and then select **Create Report**. Alternatively, you can select the **Create Report** tab at the upper right. - - ![Screenshot of the "Create Report" links in SSMA for MySQL.](./media/mysql-to-sql-database-guide/create-report.png) - -1. Review the HTML report to understand the conversion statistics, errors, and warnings. Analyze it to understand the conversion issues and resolutions. - You can also open the report in Excel to get an inventory of MySQL objects and understand the effort that's required to perform schema conversions. The default location for the report is in the report folder within SSMAProjects. For example: - - `drive:\Users\\Documents\SSMAProjects\MySQLMigration\report\report_2016_11_12T02_47_55\` - - ![Screenshot of an example conversion report in SSMA.](./media/mysql-to-sql-database-guide/conversion-report.png) - -### Validate the data types - -Validate the default data type mappings and change them based on requirements, if necessary. To do so: - -1. Select **Tools**, and then select **Project Settings**. -1. Select the **Type Mappings** tab. - - ![Screenshot of the "Type Mapping" pane in SSMA for MySQL.](./media/mysql-to-sql-database-guide/type-mappings.png) - -1. You can change the type mapping for each table by selecting the table name on the **MySQL Metadata Explorer** pane. - -### Convert the schema - -To convert the schema, do the following: - -1. (Optional) To convert dynamic or specialized queries, right-click the node, and then select **Add statement**. - -1. Select the **Connect to Azure SQL Database** tab, and then do the following: - - a. Enter the details for connecting to your SQL database. - b. 
In the drop-down list, select your target SQL database. Or you can provide a new name, in which case a database will be created on the target server. - c. Provide authentication details. - d. Select **Connect**. - - ![Screenshot of the "Connect to Azure SQL Database" pane in SSMA for MySQL.](./media/mysql-to-sql-database-guide/connect-to-sqldb.png) - -1. Right-click the schema you're working with, and then select **Convert Schema**. Alternatively, you can select the **Convert schema** tab at the upper right. - - ![Screenshot of the "Convert Schema" command on the "MySQL Metadata Explorer" pane.](./media/mysql-to-sql-database-guide/convert-schema.png) - -1. After the conversion is completed, review and compare the converted objects to the original objects to identify potential problems and address them based on the recommendations. - - ![Screenshot showing a comparison of the converted objects to the original objects.](./media/mysql-to-sql-database-guide/table-comparison.png) - - Compare the converted Transact-SQL text to the original code, and review the recommendations. - - ![Screenshot showing a comparison of converted queries to the source code.](./media/mysql-to-sql-database-guide/procedure-comparison.png) - -1. On the **Output** pane, select **Review results**, and then review any errors on the **Error list** pane. -1. Save the project locally for an offline schema remediation exercise. To do so, select **File** > **Save Project**. This gives you an opportunity to evaluate the source and target schemas offline and perform remediation before you publish the schema to your SQL database. - - Compare the converted procedures to the original procedures, as shown here: - - ![Screenshot showing a comparison of the converted procedures to the original procedures.](./media/mysql-to-sql-database-guide/procedure-comparison.png) - - -## Migrate the databases - -After you've assessed your databases and addressed any discrepancies, you can run the migration process. 
Migration involves two steps: publishing the schema and migrating the data. - -To publish the schema and migrate the data, do the following: - -1. Publish the schema. On the **Azure SQL Database Metadata Explorer** pane, right-click the database, and then select **Synchronize with Database**. This action publishes the MySQL schema to your SQL database. - - ![Screenshot of the "Synchronize with the Database" pane for reviewing database mapping.](./media/mysql-to-sql-database-guide/synchronize-database-review.png) - -1. Migrate the data. On the **MySQL Metadata Explorer** pane, right-click the MySQL schema you want to migrate, and then select **Migrate Data**. Alternatively, you can select the **Migrate Data** tab at the upper right. - - To migrate data for an entire database, select the check box next to the database name. To migrate data from individual tables, expand the database, expand **Tables**, and then select the check box next to the table. To omit data from individual tables, clear the check box. - - ![Screenshot of the "Migrate Data" command on the "MySQL Metadata Explorer" pane.](./media/mysql-to-sql-database-guide/migrate-data.png) - -1. After the migration is completed, view the **Data Migration Report**. - - ![Screenshot of the Data Migration Report.](./media/mysql-to-sql-database-guide/data-migration-report.png) - -1. Connect to your SQL database by using [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) and validate the migration by reviewing the data and schema. - - ![Screenshot of SQL Server Management Studio.](./media/mysql-to-sql-database-guide/validate-in-ssms.png) - -## Post-migration - -After you've successfully completed the *migration* stage, you need to complete a series of post-migration tasks to ensure that everything is functioning as smoothly and efficiently as possible. 
- -### Remediate applications - -After the data is migrated to the target environment, all the applications that formerly consumed the source need to start consuming the target. Accomplishing this will in some cases require changes to the applications. - -### Perform tests - -The test approach to database migration consists of the following activities: - -1. **Develop validation tests**: To test the database migration, you need to use SQL queries. You must create the validation queries to run against both the source and target databases. Your validation queries should cover the scope you've defined. - -1. **Set up a test environment**: The test environment should contain a copy of the source database and the target database. Be sure to isolate the test environment. - -1. **Run validation tests**: Run validation tests against the source and the target, and then analyze the results. - -1. **Run performance tests**: Run performance tests against the source and the target, and then analyze and compare the results. - -### Optimize - -The post-migration phase is crucial for reconciling any data accuracy issues, verifying completeness, and addressing performance issues with the workload. - -For more information about these issues and the steps to mitigate them, see the [Post-migration validation and optimization guide](/sql/relational-databases/post-migration-validation-and-optimization-guide). - -## Migration assets - -For more assistance with completing this migration scenario, see the following resource. It was developed in support of a real-world migration project engagement. - -| Title | Description | -| --- | --- | -| [Data workload assessment model and tool](https://www.microsoft.com/download/details.aspx?id=103130) | Provides suggested “best fit” target platforms, cloud readiness, and application/database remediation levels for specified workloads. 
It offers simple, one-click calculation and report generation that helps to accelerate large estate assessments by providing an automated, uniform target-platform decision process. | -|[MySQL to SQL DB - Database Compare utility](https://www.microsoft.com/download/details.aspx?id=103016)|The Database Compare utility is a Windows console application that you can use to verify that the data is identical both on source and target platforms. You can use the tool to efficiently compare data down to the row or column level in all or selected tables, rows, and columns.| - -The Data SQL Engineering team developed these resources. This team's core charter is to unblock and accelerate complex modernization for data platform migration projects to Microsoft's Azure data platform. - -## Next steps - -- To help estimate the cost savings you can realize by migrating your workloads to Azure, see the [Azure total cost of ownership calculator](https://aka.ms/azure-tco). - -- For a matrix of Microsoft and third-party services and tools that are available to assist you with various database and data migration scenarios and specialty tasks, see [Service and tools for data migration](../../../dms/dms-tools-matrix.md). - -- For other migration guides, see [Azure Database Migration Guide](https://datamigration.microsoft.com/). - -- For migration videos, see [Overview of the migration journey and recommended migration and assessment tools and services](https://azure.microsoft.com/resources/videos/overview-of-migration-and-recommended-tools-services/). - -- For more [cloud migration resources](https://azure.microsoft.com/migration/resources/), see [cloud migration solutions](https://azure.microsoft.com/migration). 
diff --git a/articles/azure-sql/migration-guides/database/oracle-to-sql-database-guide.md b/articles/azure-sql/migration-guides/database/oracle-to-sql-database-guide.md deleted file mode 100644 index 667946d257f28..0000000000000 --- a/articles/azure-sql/migration-guides/database/oracle-to-sql-database-guide.md +++ /dev/null @@ -1,275 +0,0 @@ ---- -title: "Oracle to Azure SQL Database: Migration guide" -description: In this guide, you learn how to migrate your Oracle schema to Azure SQL Database by using SQL Server Migration Assistant for Oracle. -ms.service: sql-database -ms.subservice: migration-guide -ms.custom: -ms.devlang: -ms.topic: how-to -author: mokabiru -ms.author: chadam -ms.reviewer: mathoma, kendralittle -ms.date: 08/25/2020 ---- - -# Migration guide: Oracle to Azure SQL Database - -[!INCLUDE[appliesto-sqldb-sqlmi](../../includes/appliesto-sqldb.md)] - -This guide teaches you [to migrate](https://azure.microsoft.com/migration/migration-journey) your Oracle schemas to Azure SQL Database by using [SQL Server Migration](https://azure.microsoft.com/migration/sql-server/) Assistant for Oracle (SSMA for Oracle). - -For other migration guides, see [Azure Database Migration Guides](/data-migration). - -## Prerequisites - -Before you begin migrating your Oracle schema to SQL Database: - -- Verify that your source environment is supported. -- Download [SSMA for Oracle](https://www.microsoft.com/download/details.aspx?id=54258). -- Have a target [SQL Database](../../database/single-database-create-quickstart.md) instance. -- Obtain the [necessary permissions for SSMA for Oracle](/sql/ssma/oracle/connecting-to-oracle-database-oracletosql) and [provider](/sql/ssma/oracle/connect-to-oracle-oracletosql). - -## Pre-migration - -After you've met the prerequisites, you're ready to discover the topology of your environment and assess the feasibility of your [Azure cloud migration](https://azure.microsoft.com/migration). 
This part of the process involves conducting an inventory of the databases that you need to migrate, assessing those databases for potential migration issues or blockers, and then resolving any items you might have uncovered. - -### Assess - -By using SSMA for Oracle, you can review database objects and data, assess databases for migration, migrate database objects to SQL Database, and then finally migrate data to the database. - -To create an assessment: - -1. Open [SSMA for Oracle](https://www.microsoft.com/download/details.aspx?id=54258). -1. Select **File**, and then select **New Project**. -1. Enter a project name and a location to save your project. Then select **Azure SQL Database** as the migration target from the drop-down list and select **OK**. - - ![Screenshot that shows Connect to Oracle.](./media/oracle-to-sql-database-guide/connect-to-oracle.png) - -1. Select **Connect to Oracle**. Enter values for Oracle connection details in the **Connect to Oracle** dialog box. - -1. Select the Oracle schemas you want to migrate. - - ![Screenshot that shows selecting Oracle schema.](./media/oracle-to-sql-database-guide/select-schema.png) - -1. In **Oracle Metadata Explorer**, right-click the Oracle schema you want to migrate and then select **Create Report** to generate an HTML report. Instead, you can select a database and then select the **Create Report** tab. - - ![Screenshot that shows Create Report.](./media/oracle-to-sql-database-guide/create-report.png) - -1. Review the HTML report to understand conversion statistics and any errors or warnings. You can also open the report in Excel to get an inventory of Oracle objects and the effort required to perform schema conversions. The default location for the report is in the report folder within SSMAProjects. - - For example, see `drive:\\Documents\SSMAProjects\MyOracleMigration\report\report_2020_11_12T02_47_55\`. 
- - ![Screenshot that shows an Assessment report.](./media/oracle-to-sql-database-guide/assessment-report.png) - -### Validate the data types - -Validate the default data type mappings and change them based on requirements if necessary. To do so, follow these steps: - -1. In SSMA for Oracle, select **Tools**, and then select **Project Settings**. -1. Select the **Type Mapping** tab. - - ![Screenshot that shows Type Mapping.](./media/oracle-to-sql-database-guide/type-mappings.png) - -1. You can change the type mapping for each table by selecting the table in **Oracle Metadata Explorer**. - -### Convert the schema - -To convert the schema: - -1. (Optional) Add dynamic or ad-hoc queries to statements. Right-click the node, and then select **Add statements**. -1. Select the **Connect to Azure SQL Database** tab. - 1. In **SQL Database**, enter connection details to connect your database. - 1. Select your target SQL Database instance from the drop-down list, or enter a new name, in which case a database will be created on the target server. - 1. Enter authentication details, and select **Connect**. - - ![Screenshot that shows Connect to Azure SQL Database.](./media/oracle-to-sql-database-guide/connect-to-sql-database.png) - -1. In **Oracle Metadata Explorer**, right-click the Oracle schema and then select **Convert Schema**. Or, you can select your schema and then select the **Convert Schema** tab. - - ![Screenshot that shows Convert Schema.](./media/oracle-to-sql-database-guide/convert-schema.png) - -1. After the conversion finishes, compare and review the converted objects to the original objects to identify potential problems and address them based on the recommendations. - - ![Screenshot that shows the Review recommendations schema.](./media/oracle-to-sql-database-guide/table-mapping.png) - -1. Compare the converted Transact-SQL text to the original stored procedures, and review the recommendations. 
- - ![Screenshot that shows the Review recommendations.](./media/oracle-to-sql-database-guide/procedure-comparison.png) - -1. In the output pane, select **Review results** and review the errors in the **Error List** pane. -1. Save the project locally for an offline schema remediation exercise. On the **File** menu, select **Save Project**. This step gives you an opportunity to evaluate the source and target schemas offline and perform remediation before you publish the schema to SQL Database. - -## Migrate - -After you've assessed your databases and addressed any discrepancies, the next step is to run the migration process. Migration involves two steps: publishing the schema and migrating the data. - -To publish your schema and migrate your data: - -1. Publish the schema by right-clicking the database from the **Databases** node in **Azure SQL Database Metadata Explorer** and selecting **Synchronize with Database**. - - ![Screenshot that shows Synchronize with Database.](./media/oracle-to-sql-database-guide/synchronize-with-database.png) - -1. Review the mapping between your source project and your target. - - ![Screenshot that shows Synchronize with the Database review.](./media/oracle-to-sql-database-guide/synchronize-with-database-review.png) - -1. Migrate the data by right-clicking the database or object you want to migrate in **Oracle Metadata Explorer** and selecting **Migrate Data**. Or, you can select the **Migrate Data** tab. To migrate data for an entire database, select the check box next to the database name. To migrate data from individual tables, expand the database, expand **Tables**, and then select the checkboxes next to the tables. To omit data from individual tables, clear the checkboxes. - - ![Screenshot that shows Migrate Data.](./media/oracle-to-sql-database-guide/migrate-data.png) - -1. Enter connection details for both Oracle and SQL Database. -1. After the migration is completed, view the **Data Migration Report**. 
- - ![Screenshot that shows the Data Migration Report.](./media/oracle-to-sql-database-guide/data-migration-report.png) - -1. Connect to your SQL Database instance by using [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms), and validate the migration by reviewing the data and schema. - - ![Screenshot that shows validation in SQL Server Management Studio.](./media/oracle-to-sql-database-guide/validate-data.png) - -Or, you can also use SQL Server Integration Services to perform the migration. To learn more, see: - -- [Getting started with SQL Server Integration Services](/sql/integration-services/sql-server-integration-services) -- [SQL Server Integration Services for Azure and Hybrid Data Movement](https://download.microsoft.com/download/D/2/0/D20E1C5F-72EA-4505-9F26-FEF9550EFD44/SSIS%20Hybrid%20and%20Azure.docx) - -## Post-migration - -After you've successfully completed the *migration* stage, you need to complete a series of post-migration tasks to ensure that everything is functioning as smoothly and efficiently as possible. - -### Remediate applications - -After the data is migrated to the target environment, all the applications that formerly consumed the source need to start consuming the target. Accomplishing this task will require changes to the applications in some cases. - -The [Data Access Migration Toolkit](https://marketplace.visualstudio.com/items?itemName=ms-databasemigration.data-access-migration-toolkit) is an extension for Visual Studio Code that allows you to analyze your Java source code and detect data access API calls and queries. The toolkit provides you with a single-pane view of what needs to be addressed to support the new database back end. To learn more, see the [Migrate your Java applications from Oracle](https://techcommunity.microsoft.com/t5/microsoft-data-migration/migrate-your-java-applications-from-oracle-to-sql-server-with/ba-p/368727) blog post. 
- -### Perform tests - -The test approach to database migration consists of the following activities: - -1. **Develop validation tests**: To test the database migration, you need to use SQL queries. You must create the validation queries to run against both the source and the target databases. Your validation queries should cover the scope you've defined. -1. **Set up a test environment**: The test environment should contain a copy of the source database and the target database. Be sure to isolate the test environment. -1. **Run validation tests**: Run validation tests against the source and the target, and then analyze the results. -1. **Run performance tests**: Run performance tests against the source and the target, and then analyze and compare the results. - -### Validate migrated objects - -Microsoft SQL Server Migration Assistant for Oracle Tester (SSMA Tester) allows you to test migrated database objects. The SSMA Tester is used to verify that converted objects behave in the same way. - -#### Create test case - -1. Open SSMA for Oracle, select **Tester** followed by **New Test Case**. - ![Screenshot that shows to create new test case.](./media/oracle-to-sql-database-guide/ssma-tester-new.png) - -1. Provide the following information for the new test case: - - **Name:** Enter the name to identify the test case. - - **Creation date:** Today's current date, defined automatically. - - **Last Modified date:** Filled in automatically, should not be changed. - - **Description:** Enter any additional information to identify the purpose of the test case. - - ![Screenshot that shows steps to initialize a test case .](./media/oracle-to-sql-database-guide/tester-init-test-case.png) - -1. Select the objects that are part of the test case from the Oracle object tree located in the left side. 
- - :::image type="content" source="./media/oracle-to-sql-database-guide/tester-select-configure-objects.png" alt-text="Screenshot that shows step to select and configure object."::: - - In this example, stored procedure `ADD_REGION` and table `REGION` is selected. - - To learn more, see [Selecting and configuring objects to test.](/sql/ssma/oracle/selecting-and-configuring-objects-to-test-oracletosql) - -1. Next, select the tables, foreign keys, and other dependent objects from the Oracle object tree in the left window. - - :::image type="content" source="./media//oracle-to-sql-database-guide/tester-select-configure-affected.png" alt-text="Screenshot that shows step to select and configure affected object."::: - - To learn more, see [Selecting and configuring affected objects.](/sql/ssma/oracle/selecting-and-configuring-affected-objects-oracletosql) - -1. Review the evaluation sequence of objects. Change the order by clicking the buttons in the grid. - - :::image type="content" source="./media/oracle-to-sql-database-guide/test-call-ordering.png" alt-text="Screenshot that shows step to sequence test object execution."::: - -1. Finalize the test case by reviewing the information provided in the previous steps. Configure the test execution options based on the test scenario. - - :::image type="content" source="./media/oracle-to-sql-database-guide/tester-finalize-case.png" alt-text="Screenshot that shows step to finalize object."::: - - For more information on test case settings,[Finishing test case preparation](/sql/ssma/oracle/finishing-test-case-preparation-oracletosql) - -1. Click on finish to create the test case. - - :::image type="content" source="./media//oracle-to-sql-database-guide/tester-test-repo.png" alt-text="Screenshot that shows step to test repo."::: - -#### Run test case - -When SSMA Tester runs a test case, the test engine executes the objects selected for testing and generates a verification report. - -1. 
Select the test case from test repository and then click run. - - :::image type="content" source="./media/oracle-to-sql-database-guide/tester-repo-run.png" alt-text="Screenshot that shows to review test repo."::: - -1. Review the launch test case and click run. - - :::image type="content" source="./media/oracle-to-sql-database-guide/tester-run-test-case.png" alt-text="Screenshot that shows step to run test case"::: - -1. Next, provide Oracle source credentials. Click connect after entering the credentials. - - :::image type="content" source="./media/oracle-to-sql-database-guide/tester-oracle-connect.png" alt-text="Screenshot that shows step to connect to oracle source"::: - -1. Provide target SQL Server credentials and click connect. - - :::image type="content" source="./media/oracle-to-sql-database-guide/tester-sql-connect.png" alt-text="Screenshot that shows step to connect to sql target."::: - - On success, the test case moves to initialization stage. - -1. A real-time progress bar shows the execution status of the test run. - - :::image type="content" source="./media/oracle-to-sql-database-guide/tester-run-status.png" alt-text="Screenshot that shows tester test progress."::: - -1. Review the report after the test is completed. The report provides the statistics, any errors during the test run and a detail report. - - :::image type="content" source="./media/oracle-to-sql-database-guide/tester-test-result.png" alt-text="Screenshot that shows a sample tester test report"::: - -1. Click details to get more information. - - Example of positive data validation. - :::image type="content" source="./media/oracle-to-sql-database-guide/tester-test-success.png" alt-text="Screenshot that shows a sample tester success report."::: - - Example of failed data validation. 
- - :::image type="content" source="./media/oracle-to-sql-database-guide/tester-test-failed.png" alt-text="Screenshot that shows tester failure report."::: - -### Optimize - -The post-migration phase is crucial for reconciling any data accuracy issues, verifying completeness, and addressing performance issues with the workload. - -> [!NOTE] -> For more information about these issues and the steps to mitigate them, see the [Post-migration validation and optimization guide](/sql/relational-databases/post-migration-validation-and-optimization-guide). - -## Migration assets - -For more assistance with completing this migration scenario, see the following resources. They were developed in support of a real-world migration project engagement. - -| **Title/link** | **Description** | -| ------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [Data Workload Assessment Model and Tool](https://www.microsoft.com/download/details.aspx?id=103130) | This tool provides suggested "best fit" target platforms, cloud readiness, and application or database remediation level for a given workload. It offers simple, one-click calculation and report generation that helps to accelerate large estate assessments by providing an automated and uniform target platform decision process. 
| -| [Oracle Inventory Script Artifacts](https://www.microsoft.com/download/details.aspx?id=103121) | This asset includes a PL/SQL query that hits Oracle system tables and provides a count of objects by schema type, object type, and status. It also provides a rough estimate of raw data in each schema and the sizing of tables in each schema, with results stored in a CSV format. | -| [Automate SSMA Oracle Assessment Collection & Consolidation](https://www.microsoft.com/download/details.aspx?id=103120) | This set of resources uses a .csv file as entry (sources.csv in the project folders) to produce the xml files that are needed to run an SSMA assessment in console mode. The source.csv is provided by the customer based on an inventory of existing Oracle instances. The output files are AssessmentReportGeneration_source_1.xml, ServersConnectionFile.xml, and VariableValueFile.xml.| -| [Oracle to SQL DB - Database Compare utility](https://www.microsoft.com/download/details.aspx?id=103016)|SSMA for Oracle Tester is the recommended tool to automatically validate the database object conversion and data migration, and it's a superset of Database Compare functionality.

    If you're looking for an alternative data validation option, you can use the Database Compare utility to compare data down to the row or column level in all or selected tables, rows, and columns.| - -The Data SQL Engineering team developed these resources. This team's core charter is to unblock and accelerate complex modernization for data platform migration projects to Microsoft's Azure data platform. - -## Next steps - -- For a matrix of Microsoft and third-party services and tools that are available to assist you with various database and data migration scenarios and specialty tasks, see [Services and tools for data migration](../../../dms/dms-tools-matrix.md). - -- To learn more about SQL Database, see: - - [An overview of Azure SQL Database](../../database/sql-database-paas-overview.md) - - [Azure Total Cost of Ownership (TCO) Calculator](https://azure.microsoft.com/pricing/tco/calculator/) - -- To learn more about the framework and adoption cycle for cloud migrations, see: - - [Cloud Adoption Framework for Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/contoso-migration-scale) - - [Best practices for costing and sizing workloads for migration to Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/migrate-best-practices-costs) - - [Cloud Migration Resources](https://azure.microsoft.com/migration/resources) - -- For video content, see: - - [Overview of the migration journey and the tools and services recommended for performing assessment and migration](https://azure.microsoft.com/resources/videos/overview-of-migration-and-recommended-tools-services/) diff --git a/articles/azure-sql/migration-guides/database/sap-ase-to-sql-database.md b/articles/azure-sql/migration-guides/database/sap-ase-to-sql-database.md deleted file mode 100644 index bef10736b50c9..0000000000000 --- a/articles/azure-sql/migration-guides/database/sap-ase-to-sql-database.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: "SAP ASE to Azure SQL Database: 
Migration guide" -description: In this guide you learn how to migrate your SAP ASE databases to an Azure SQL database by using SQL Server Migration Assistant for SAP Adapter Server Enterprise. -ms.service: sql-database -ms.subservice: migration-guide -ms.custom: -ms.devlang: -ms.topic: how-to -author: mokabiru -ms.author: chadam -ms.reviewer: mathoma, kendralittle -ms.date: 03/19/2021 ---- - -# Migration guide: SAP ASE to Azure SQL Database - -[!INCLUDE[appliesto-sqldb-sqlmi](../../includes/appliesto-sqldb.md)] - -In this guide, you learn [how to migrate](https://azure.microsoft.com/migration/migration-journey) your SAP Adapter Server Enterprise (ASE) databases to an Azure SQL database by using [SQL Server Migration](https://azure.microsoft.com/migration/sql-server/) Assistant for SAP Adapter Server Enterprise. - -For other migration guides, see [Azure Database Migration Guide](/data-migration). - -## Prerequisites - -Before you begin migrating your SAP SE database to your SQL database, do the following: - -- Verify that your source environment is supported. -- Download and install [SQL Server Migration Assistant for SAP Adaptive Server Enterprise (formerly SAP Sybase ASE)](https://www.microsoft.com/download/details.aspx?id=54256). -- Ensure that you have connectivity and sufficient permissions to access both source and target. - -## Pre-migration - -After you've met the prerequisites, you're ready to discover the topology of your environment and assess the feasibility of your [Azure cloud migration](https://azure.microsoft.com/migration). - -### Assess - -By using [SQL Server Migration Assistant (SSMA) for SAP Adaptive Server Enterprise (formally SAP Sybase ASE)](https://www.microsoft.com/download/details.aspx?id=54256), you can review database objects and data, assess databases for migration, migrate Sybase database objects to your SQL database, and then migrate data to the SQL database. 
To learn more, see [SQL Server Migration Assistant for Sybase (SybaseToSQL)](/sql/ssma/sybase/sql-server-migration-assistant-for-sybase-sybasetosql). - -To create an assessment, do the following: - -1. Open SSMA for Sybase. -1. Select **File**, and then select **New Project**. -1. In the **New Project** pane, enter a name and location for your project and then, in the **Migrate To** drop-down list, select **Azure SQL Database**. -1. Select **OK**. -1. On the **Connect to Sybase** pane, enter the SAP connection details. -1. Right-click the SAP database you want to migrate, and then select **Create report**. This generates an HTML report. Alternatively, you can select the **Create report** tab at the upper right. -1. Review the HTML report to understand the conversion statistics and any errors or warnings. You can also open the report in Excel to get an inventory of SAP ASE objects and the effort that's required to perform schema conversions. The default location for the report is in the report folder within SSMAProjects. For example: - - `drive:\\Documents\SSMAProjects\MySAPMigration\report\report_` - -### Validate the type mappings - -Before you perform schema conversion, validate the default data-type mappings or change them based on requirements. You can do so by selecting **Tools** > **Project Settings**, or you can change the type mapping for each table by selecting the table in the **SAP ASE Metadata Explorer**. - -### Convert the schema - -To convert the schema, do the following: - -1. (Optional) To convert dynamic or specialized queries, right-click the node, and then select **Add statement**. -1. Select the **Connect to Azure SQL Database** tab, and then enter the details for your SQL database. You can choose to connect to an existing database or provide a new name, in which case a database will be created on the target server. -1. 
On the **Sybase Metadata Explorer** pane, right-click the SAP ASE schema you're working with, and then select **Convert Schema**. -1. After the schema has been converted, compare and review the converted structure to the original structure identify potential problems. - - After the schema conversion, you can save this project locally for an offline schema remediation exercise. To do so, select **File** > **Save Project**. This gives you an opportunity to evaluate the source and target schemas offline and perform remediation before you publish the schema to your SQL database. - -1. On the **Output** pane, select **Review results**, and review any errors in the **Error list** pane. -1. Save the project locally for an offline schema remediation exercise. To do so, select **File** > **Save Project**. This gives you an opportunity to evaluate the source and target schemas offline and perform remediation before you publish the schema to your SQL database. - -## Migrate the databases - -After you have the necessary prerequisites in place and have completed the tasks associated with the *pre-migration* stage, you're ready to run the schema and data migration. - -To publish the schema and migrate the data, do the following: - -1. Publish the schema. On the **Azure SQL Database Metadata Explorer** pane, right-click the database, and then select **Synchronize with Database**. This action publishes the SAP ASE schema to your SQL database. - -1. Migrate the data. On the **SAP ASE Metadata Explorer** pane, right-click the SAP ASE database or object you want to migrate, and then select **Migrate Data**. Alternatively, you can select the **Migrate Data** tab at the upper right. - - To migrate data for an entire database, select the check box next to the database name. To migrate data from individual tables, expand the database, expand **Tables**, and then select the check box next to the table. To omit data from individual tables, clear the check box. -1. 
After the migration is completed, view the **Data Migration Report**. -1. Validate the migration by reviewing the data and schema. To do so, connect to your SQL database by using [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms). - -## Post-migration - -After you've successfully completed the *migration* stage, you need to complete a series of post-migration tasks to ensure that everything is functioning as smoothly and efficiently as possible. - -### Remediate applications - -After the data is migrated to the target environment, all the applications that formerly consumed the source need to start consuming the target. Accomplishing this will in some cases require changes to the applications. - -### Perform tests - -The test approach to database migration consists of the following activities: - -1. **Develop validation tests**: To test the database migration, you need to use SQL queries. You must create the validation queries to run against both the source and target databases. Your validation queries should cover the scope you've defined. - -1. **Set up a test environment**: The test environment should contain a copy of the source database and the target database. Be sure to isolate the test environment. - -1. **Run validation tests**: Run validation tests against the source and the target, and then analyze the results. - -1. **Run performance tests**: Run performance tests against the source and the target, and then analyze and compare the results. - - -### Optimize - -The post-migration phase is crucial for reconciling any data accuracy issues, verifying completeness, and addressing performance issues with the workload. - -For more information about these issues and the steps to mitigate them, see the [Post-migration validation and optimization guide](/sql/relational-databases/post-migration-validation-and-optimization-guide). 
- - -## Next steps - -- For a matrix of Microsoft and third-party services and tools that are available to assist you with various database and data migration scenarios and specialty tasks, see [Service and tools for data migration](../../../dms/dms-tools-matrix.md). - -- To learn more about Azure SQL Database, see: - - [An overview of SQL Database](../../database/sql-database-paas-overview.md) - - [Azure total cost of ownership calculator](https://azure.microsoft.com/pricing/tco/calculator/) - -- To learn more about the framework and adoption cycle for cloud migrations, see: - - [Cloud Adoption Framework for Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/contoso-migration-scale) - - [Best practices for costing and sizing workloads for migration to Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/migrate-best-practices-costs) - - [Cloud Migration Resources](https://azure.microsoft.com/migration/resources) - -- To assess the application access layer, see [Data Access Migration Toolkit (preview)](https://marketplace.visualstudio.com/items?itemName=ms-databasemigration.data-access-migration-toolkit). -- For details on how to perform Data Access Layer A/B testing see [Database Experimentation Assistant](/sql/dea/database-experimentation-assistant-overview). \ No newline at end of file diff --git a/articles/azure-sql/migration-guides/database/sql-server-to-sql-database-assessment-rules.md b/articles/azure-sql/migration-guides/database/sql-server-to-sql-database-assessment-rules.md deleted file mode 100644 index 8808ee84bcae5..0000000000000 --- a/articles/azure-sql/migration-guides/database/sql-server-to-sql-database-assessment-rules.md +++ /dev/null @@ -1,509 +0,0 @@ ---- -title: "Assessment rules for SQL Server to Azure SQL Database migration" -description: Assessment rules to identify issues with the source SQL Server instance that must be addressed before migrating to Azure SQL Database. 
-ms.service: sql-database -ms.subservice: migration-guide -ms.custom: ignite-fall-2021 -ms.devlang: -ms.topic: how-to -author: rajeshsetlem -ms.author: rsetlem -ms.reviewer: mathoma, kendralittle -ms.date: 12/15/2020 ---- -# Assessment rules for SQL Server to Azure SQL Database migration -[!INCLUDE[appliesto--sqldb](../../includes/appliesto-sqldb.md)] - -Migration tools validate your source SQL Server instance by running a number of assessment rules to identify issues that must be addressed before migrating your SQL Server database to Azure SQL Database. - -This article provides a list of the rules used to assess the feasibility of migrating your SQL Server database to Azure SQL Database. - -## Rules Summary - - | Rule Title | Level | Category | Details | - | - | - | - | - | - | AgentJobs | Instance | Warning | [SQL Server Agent jobs aren't available in Azure SQL Database.](#AgentJobs) | - | BulkInsert | Database | Issue | [BULK INSERT with non-Azure blob data source isn't supported in Azure SQL Database.](#BulkInsert) | - | ClrAssemblies | Database | Issue | [SQL CLR assemblies aren't supported in Azure SQL Database.](#ClrAssemblies) | - | ComputeClause | Database | Warning | [COMPUTE clause is no longer supported and has been removed.](#ComputeClause) | - | CrossDatabaseReferences | Database | Issue | [Cross-database queries aren't supported in Azure SQL Database.](#CrossDatabaseReferences) | - | CryptographicProvider | Database | Issue | [A use of CREATE CRYPTOGRAPHIC PROVIDER or ALTER CRYPTOGRAPHIC PROVIDER was found, which isn't supported in Azure SQL Database.](#CryptographicProvider) | - | DatabaseMail | Instance | Warning | [Database Mail isn't supported in Azure SQL Database.](#DatabaseMail) | - | DatabasePrincipalAlias | Database | Issue | [SYS.DATABASE_PRINCIPAL_ALIASES is no longer supported and has been removed.](#DatabasePrincipalAlias) | - | DbCompatLevelLowerThan100 | Database | Warning | [Azure SQL Database doesn’t support compatibility levels 
below 100.](#DbCompatLevelLowerThan100) | - | DisableDefCNSTCHK | Database | Issue | [SET option DISABLE_DEF_CNST_CHK is no longer supported and has been removed.](#DisableDefCNSTCHK) | - | FastFirstRowHint | Database | Warning | [FASTFIRSTROW query hint is no longer supported and has been removed.](#FastFirstRowHint) | - | FileStream | Database | Issue | [Filestream isn't supported in Azure SQL Database.](#FileStream) | - | LinkedServer | Database | Issue | [Linked server functionality isn't supported in Azure SQL Database.](#LinkedServer) | - | MSDTCTransactSQL | Database | Issue | [BEGIN DISTRIBUTED TRANSACTION isn't supported in Azure SQL Database. ](#MSDTCTransactSQL) | - | NextColumn | Database | Issue | [Tables and Columns named NEXT will lead to an error In Azure SQL Database.](#NextColumn) | - | NonANSILeftOuterJoinSyntax | Database | Warning | [Non-ANSI style left outer join is no longer supported and has been removed.](#NonANSILeftOuterJoinSyntax) | - | NonANSIRightOuterJoinSyntax | Database | Warning | [Non-ANSI style right outer join is no longer supported and has been removed.](#NonANSIRightOuterJoinSyntax) | - | OpenRowsetWithNonBlobDataSourceBulk | Database | Issue | [OpenRowSet used in bulk operation with non-Azure blob storage data source isn't supported in Azure SQL Database.](#OpenRowsetWithNonBlobDataSourceBulk) | - | OpenRowsetWithSQLAndNonSQLProvider | Database | Issue | [OpenRowSet with SQL or non-SQL provider isn't supported in Azure SQL Database. 
](#OpenRowsetWithSQLAndNonSQLProvider) | - | RAISERROR | Database | Warning | [Legacy style RAISERROR calls should be replaced with modern equivalents.](#RAISERROR) | - | ServerAudits | Instance | Warning | [Server Audits isn't supported in Azure SQL Database.](#ServerAudits) | - | ServerCredentials | Instance | Warning | [Server scoped credential isn't supported in Azure SQL Database.](#ServerCredentials) | - | ServerScopedTriggers | Instance | Warning | [Server-scoped trigger isn't supported in Azure SQL Database.](#ServerScopedTriggers) | - | ServiceBroker | Database | Issue | [Service Broker feature isn't supported in Azure SQL Database.](#ServiceBroker) | - | SQLDBDatabaseSize | Database | Issue | [Azure SQL Database does not support database size greater than 100 TB.](#SQLDBDatabaseSize) | - | SqlMail | Database | Warning | [SQL Mail has been discontinued.](#SqlMail) | - | SystemProcedures110 | Database | Warning | [Detected statements that reference removed system stored procedures that aren't available in Azure SQL Database.](#SystemProcedures110) | - | TraceFlags | Instance | Warning | [Azure SQL Database does not support trace flags.](#TraceFlags) | - | WindowsAuthentication | Instance | Warning | [Database users mapped with Windows authentication (integrated security) aren't supported in Azure SQL Database. ](#WindowsAuthentication) | - | XpCmdshell | Database | Issue | [xp_cmdshell isn't supported in Azure SQL Database.](#XpCmdshell) | - -## Bulk insert - -**Title: BULK INSERT with non-Azure blob data source isn't supported in Azure SQL Database.** -**Category**: Issue - -**Description** -Azure SQL Database cannot access file shares or Windows folders. See the "Impacted Objects" section for the specific uses of BULK INSERT statements that do not reference an Azure blob. Objects with 'BULK INSERT' where the source isn't Azure blob storage will not work after migrating to Azure SQL Database. 
- - -**Recommendation** -You will need to convert BULK INSERT statements that use local files or file shares to use files from Azure blob storage instead, when migrating to Azure SQL Database. Alternatively, migrate to SQL Server on Azure Virtual Machine. - -## Compute clause - -**Title: COMPUTE clause is no longer supported and has been removed.** -**Category**: Warning - -**Description** -The COMPUTE clause generates totals that appear as additional summary columns at the end of the result set. However, this clause is no longer supported in Azure SQL Database. - - -**Recommendation** -The T-SQL module needs to be rewritten using the ROLLUP operator instead. The code below demonstrates how COMPUTE can be replaced with ROLLUP: - -```sql -USE AdventureWorks -GO; - -SELECT SalesOrderID, UnitPrice, UnitPriceDiscount -FROM Sales.SalesOrderDetail -ORDER BY SalesOrderID COMPUTE SUM(UnitPrice), SUM(UnitPriceDiscount) BY SalesOrderID GO; - -SELECT SalesOrderID, UnitPrice, UnitPriceDiscount,SUM(UnitPrice) as UnitPrice , -SUM(UnitPriceDiscount) as UnitPriceDiscount -FROM Sales.SalesOrderDetail -GROUP BY SalesOrderID, UnitPrice, UnitPriceDiscount WITH ROLLUP; -``` - -More information: [Discontinued Database Engine functionality in SQL Server ](/previous-versions/sql/2014/database-engine/discontinued-database-engine-functionality-in-sql-server-2016#Denali) - -## CLR assemblies - -**Title: SQL CLR assemblies aren't supported in Azure SQL Database** -**Category**: Issue - - -**Description** -Azure SQL Database does not support SQL CLR assemblies. - - -**Recommendation** -Currently, there is no way to achieve this in Azure SQL Database. The recommended alternative solutions will require application code and database changes to use only assemblies supported by Azure SQL Database. 
Alternatively, migrate to Azure SQL Managed Instance or SQL Server on Azure Virtual Machine.
- -More information: [Check Azure SQL Database elastic database query (Preview)](../../database/elastic-query-overview.md) - -## Database compatibility - -**Title: Azure SQL Database doesn't support compatibility levels below 100.** -**Category**: Warning - -**Description** -Database compatibility level is a valuable tool to assist in database modernization, by allowing the SQL Server Database Engine to be upgraded, while keeping connecting applications functional status by maintaining the same pre-upgrade database compatibility level. Azure SQL Database doesn't support compatibility levels below 100. - - -**Recommendation** -Evaluate if the application functionality is intact when the database compatibility level is upgraded to 100 on Azure SQL Managed Instance. Alternatively, migrate to SQL Server on Azure Virtual Machine - -## Database mail - -**Title: Database Mail isn't supported in Azure SQL Database.** -**Category**: Warning - -**Description** -This server uses the Database Mail feature, which isn't supported in Azure SQL Database. - - -**Recommendation** -Consider migrating to Azure SQL Managed Instance that supports Database Mail. Alternatively, consider using Azure functions and Sendgrid to accomplish mail functionality on Azure SQL Database. - -## Database principal alias - -**Title: SYS.DATABASE_PRINCIPAL_ALIASES is no longer supported and has been removed.** -**Category**: Issue - -**Description** -SYS.DATABASE_PRINCIPAL_ALIASES is no longer supported and has been removed in Azure SQL Database. - - -**Recommendation** -Use roles instead of aliases. 
- -More information: [Discontinued Database Engine functionality in SQL Server](/previous-versions/sql/2014/database-engine/discontinued-database-engine-functionality-in-sql-server-2016#Denali) - - -## DISABLE_DEF_CNST_CHK option - -**Title: SET option DISABLE_DEF_CNST_CHK is discontinued and has been removed.** -**Category**: Issue - -**Description** -SET option DISABLE_DEF_CNST_CHK is discontinued and has been removed in Azure SQL Database. - - -More information: [Discontinued Database Engine functionality in SQL Server](/previous-versions/sql/2014/database-engine/discontinued-database-engine-functionality-in-sql-server-2016#Denali) - -## FASTFIRSTROW hint - -**Title: FASTFIRSTROW query hint is no longer supported and has been removed.** -**Category**: Warning - -**Description** -FASTFIRSTROW query hint is no longer supported and has been removed in Azure SQL Database. - - -**Recommendation** -Instead of FASTFIRSTROW query hint use OPTION (FAST n). - -More information: [Discontinued Database Engine functionality in SQL Server](/previous-versions/sql/2014/database-engine/discontinued-database-engine-functionality-in-sql-server-2016#Denali) - -## FileStream - -**Title: Filestream isn't supported in Azure SQL Database** -**Category**: Issue - -**Description** -The Filestream feature, which allows you to store unstructured data such as text documents, images, and videos in NTFS file system, isn't supported in Azure SQL Database. - - -**Recommendation** -Upload the unstructured files to Azure Blob storage and store metadata related to these files (name, type, URL location, storage key etc.) in Azure SQL Database. You may have to re-engineer your application to enable streaming blobs to and from Azure SQL Database. Alternatively, migrate to SQL Server on Azure Virtual Machine. 
- -More information: [Streaming blobs to and from Azure SQL blog](https://azure.microsoft.com/blog/streaming-blobs-to-and-from-sql-azure/) - - -## Linked server - -**Title: Linked server functionality isn't supported in Azure SQL Database** -**Category**: Issue - -**Description** -Linked servers enable the SQL Server Database Engine to execute commands against OLE DB data sources outside of the instance of SQL Server. - - -**Recommendation** -Azure SQL Database does not support linked server functionality. The following actions are recommended to eliminate the need for linked servers: -- Identify the dependent datasets from remote SQL servers and consider moving these into the database being migrated. -- Migrate the dependent database(s) to Azure and use Elastic Database Query (preview) functionality to query across databases in Azure SQL Database. - -More information: [Check Azure SQL Database elastic query (Preview)](../../database/elastic-query-overview.md) - -## MS DTC - -**Title: BEGIN DISTRIBUTED TRANSACTION isn't supported in Azure SQL Database.** -**Category**: Issue - -**Description** -Distributed transaction started by Transact SQL BEGIN DISTRIBUTED TRANSACTION and managed by Microsoft Distributed Transaction Coordinator (MS DTC) isn't supported in Azure SQL Database. - - -**Recommendation** -Review impacted objects section in Azure Migrate to see all objects using BEGIN DISTRIBUTED TRANSACTION. Consider migrating the participant databases to Azure SQL Managed Instance where distributed transactions across multiple instances are supported (Currently in preview). Alternatively, migrate to SQL Server on Azure Virtual Machine. 
- -More information: [Transactions across multiple servers for Azure SQL Managed Instance ](../../database/elastic-transactions-overview.md#transactions-for-sql-managed-instance) - - -## OPENROWSET (bulk) - -**Title: OpenRowSet used in bulk operation with non-Azure blob storage data source isn't supported in Azure SQL Database.** -**Category**: Issue - -**Description** -OPENROWSET supports bulk operations through a built-in BULK provider that enables data from a file to be read and returned as a rowset. OPENROWSET with non-Azure blob storage data source isn't supported in Azure SQL Database. - - -**Recommendation** -Azure SQL Database cannot access file shares and Windows folders, so the files must be imported from Azure blob storage. Therefore, only blob type DATASOURCE is supported in OPENROWSET function. Alternatively, migrate to SQL Server on Azure Virtual Machine - -More information: [Resolving Transact-SQL differences during migration to SQL Database](../../database/transact-sql-tsql-differences-sql-server.md#t-sql-syntax-not-supported-in-azure-sql-database) - - -## OPENROWSET (provider) - -**Title: OpenRowSet with SQL or non-SQL provider isn't supported in Azure SQL Database.** -**Category**: Issue - -**Description** -OpenRowSet with SQL or non-SQL provider is an alternative to accessing tables in a linked server and is a one-time, ad hoc method of connecting and accessing remote data by using OLE DB. OpenRowSet with SQL or non-SQL provider isn't supported in Azure SQL Database. - - -**Recommendation** -Azure SQL Database supports OPENROWSET only to import from Azure blob storage. 
Alternatively, migrate to SQL Server on Azure Virtual Machine - -More information: [Resolving Transact-SQL differences during migration to SQL Database](../../database/transact-sql-tsql-differences-sql-server.md#t-sql-syntax-not-supported-in-azure-sql-database) - - -## Non-ANSI left outer join - -**Title: Non-ANSI style left outer join is no longer supported and has been removed.** -**Category**: Warning - -**Description** -Non-ANSI style left outer join is no longer supported and has been removed in Azure SQL Database. - - -**Recommendation** -Use ANSI join syntax. - -More information: [Discontinued Database Engine functionality in SQL Server](/previous-versions/sql/2014/database-engine/discontinued-database-engine-functionality-in-sql-server-2016#Denali) - - -## Non-ANSI right outer join - -**Title: Non-ANSI style right outer join is no longer supported and has been removed.** -**Category**: Warning - -**Description** -Non-ANSI style right outer join is no longer supported and has been removed in Azure SQL Database. - - -**Recommendation** -Use ANSI join syntax. - -More information: [Discontinued Database Engine functionality in SQL Server](/previous-versions/sql/2014/database-engine/discontinued-database-engine-functionality-in-sql-server-2016#Denali) - -## Next column - -**Title: Tables and Columns named NEXT will lead to an error In Azure SQL Database.** -**Category**: Issue - -**Description** -Tables or columns named NEXT were detected. Sequences, introduced in Microsoft SQL Server, use the ANSI standard NEXT VALUE FOR function. If a table or a column is named NEXT and the column is aliased as VALUE, and if the ANSI standard AS is omitted, the resulting statement can cause an error. - - -**Recommendation** -Rewrite statements to include the ANSI standard AS keyword when aliasing a table or column. 
For example, when a column is named NEXT and that column is aliased as VALUE, the query `SELECT NEXT VALUE FROM TABLE` will cause an error and should be rewritten as `SELECT NEXT AS VALUE FROM TABLE`. Similarly, when a table is named NEXT and that table is aliased as VALUE, the query `SELECT Col1 FROM NEXT VALUE` will cause an error and should be rewritten as `SELECT Col1 FROM NEXT AS VALUE`. - -## RAISERROR - -**Title: Legacy style RAISERROR calls should be replaced with modern equivalents.** -**Category**: Warning - -**Description** -RAISERROR calls like the below example are termed as legacy-style because they do not include the commas and the parenthesis. `RAISERROR 50001 'this is a test'`. This method of calling RAISERROR is no longer supported and removed in Azure SQL Database. - - -**Recommendation** -Rewrite the statement using the current RAISERROR syntax, or evaluate if the modern approach of `BEGIN TRY { } END TRY BEGIN CATCH { THROW; } END CATCH` is feasible. - -More information: [Discontinued Database Engine functionality in SQL Server](/previous-versions/sql/2014/database-engine/discontinued-database-engine-functionality-in-sql-server-2016#Denali) - -## Server audits - -**Title: Use Azure SQL Database audit features to replace Server Audits** -**Category**: Warning - -**Description** -Server Audits isn't supported in Azure SQL Database. - - -**Recommendation** -Consider Azure SQL Database audit features to replace Server Audits. Azure SQL supports audit and the features are richer than SQL Server. Azure SQL Database can audit various database actions and events, including: Access to data, Schema changes (DDL), Data changes (DML), Accounts, roles, and permissions (DCL), Security exceptions. Azure SQL Database Auditing increases an organization's ability to gain deep insight into events and changes that occur within their database, including updates and queries against the data. 
Alternatively migrate to Azure SQL Managed Instance or SQL Server on Azure Virtual Machine. - -More information: [Auditing for Azure SQL Database ](../../database/auditing-overview.md) - -## Server credentials - -**Title: Server scoped credential isn't supported in Azure SQL Database** -**Category**: Warning - -**Description** -A credential is a record that contains the authentication information (credentials) required to connect to a resource outside SQL Server. Azure SQL Database supports database credentials, but not the ones created at the SQL Server scope. - - -**Recommendation** -Azure SQL Database supports database scoped credentials. Convert server scoped credentials to database scoped credentials. Alternatively migrate to Azure SQL Managed Instance or SQL Server on Azure Virtual Machine - -More information: [Creating database scoped credential](/sql/t-sql/statements/create-database-scoped-credential-transact-sql) - -## Service Broker - -**Title: Service Broker feature isn't supported in Azure SQL Database** -**Category**: Issue - -**Description** -SQL Server Service Broker provides native support for messaging and queuing applications in the SQL Server Database Engine. Service Broker feature isn't supported in Azure SQL Database. - - -**Recommendation** -Service Broker feature isn't supported in Azure SQL Database. Consider migrating to Azure SQL Managed Instance that supports service broker within the same instance. Alternatively, migrate to SQL Server on Azure Virtual Machine. - -## Server-scoped triggers - -**Title: Server-scoped trigger isn't supported in Azure SQL Database** -**Category**: Warning - -**Description** -A trigger is a special kind of stored procedure that executes in response to certain action on a table like insertion, deletion, or updating of data. Server-scoped triggers aren't supported in Azure SQL Database. 
Azure SQL Database does not support the following options for triggers: FOR LOGON, ENCRYPTION, WITH APPEND, NOT FOR REPLICATION, EXTERNAL NAME option (there is no external method support), ALL SERVER Option (DDL Trigger), Trigger on a LOGON event (Logon Trigger), Azure SQL Database does not support CLR-triggers. - - -**Recommendation** -Use database level trigger instead. Alternatively migrate to Azure SQL Managed Instance or SQL Server on Azure Virtual Machine - -More information: [Resolving Transact-SQL differences during migration to SQL Database](../../database/transact-sql-tsql-differences-sql-server.md#t-sql-syntax-not-supported-in-azure-sql-database) - - -## SQL Agent jobs - -**Title: SQL Server Agent jobs aren't available in Azure SQL Database** -**Category**: Warning - -**Description** -SQL Server Agent is a Microsoft Windows service that executes scheduled administrative tasks, which are called jobs in SQL Server. SQL Server Agent jobs aren't available in Azure SQL Database. - - -**Recommendation** -Use elastic jobs (preview), which are the replacement for SQL Server Agent jobs in Azure SQL Database. Elastic Database jobs for Azure SQL Database allow you to reliably execute T-SQL scripts that span multiple databases while automatically retrying and providing eventual completion guarantees. Alternatively consider migrating to Azure SQL Managed Instance or SQL Server on Azure Virtual Machines. - -More information: [Getting started with Elastic Database jobs (Preview) ](../../database/elastic-jobs-overview.md) - -## SQL Database size - -**Title: Azure SQL Database does not support database size greater than 100 TB.** -**Category**: Issue - -**Description** -The size of the database is greater than the maximum supported size of 100 TB. - - -**Recommendation** -Evaluate if the data can be archived or compressed or sharded into multiple databases. Alternatively, migrate to SQL Server on Azure Virtual Machine. 
- -More information: [vCore resource limits](../../database/resource-limits-vcore-single-databases.md) - -## SQL Mail - -**Title: SQL Mail has been discontinued.** -**Category**: Warning - -**Description** -SQL Mail has been discontinued and removed in Azure SQL Database. - - -**Recommendation** -Consider migrating to Azure SQL Managed Instance or SQL Server on Azure Virtual Machines and use Database Mail. - -More information: [Discontinued Database Engine functionality in SQL Server](/previous-versions/sql/2014/database-engine/discontinued-database-engine-functionality-in-sql-server-2016#Denali) - -## SystemProcedures110 - -**Title: Detected statements that reference removed system stored procedures that aren't available in Azure SQL Database.** -**Category**: Warning - -**Description** -Following unsupported system and extended stored procedures cannot be used in Azure SQL Database - `sp_dboption`, `sp_addserver`, `sp_dropalias`,`sp_activedirectory_obj`, `sp_activedirectory_scp`, `sp_activedirectory_start`. - - -**Recommendation** -Remove references to unsupported system procedures that have been removed in Azure SQL Database. - -More information: [Discontinued Database Engine functionality in SQL Server](/previous-versions/sql/2014/database-engine/discontinued-database-engine-functionality-in-sql-server-2016#Denali) - -## Trace flags - -**Title: Azure SQL Database does not support trace flags** -**Category**: Warning - -**Description** -Trace flags are used to temporarily set specific server characteristics or to switch off a particular behavior. Trace flags are frequently used to diagnose performance issues or to debug stored procedures or complex computer systems. Azure SQL Database does not support trace flags. - - -**Recommendation** -Review impacted objects section in Azure Migrate to see all trace flags that aren't supported in Azure SQL Database and evaluate if they can be removed. 
Alternatively, migrate to Azure SQL Managed Instance, which supports a limited number of global trace flags, or SQL Server on Azure Virtual Machine. - -More information: [Resolving Transact-SQL differences during migration to SQL Database](../../database/transact-sql-tsql-differences-sql-server.md#t-sql-syntax-not-supported-in-azure-sql-database) - - -## Windows authentication - -**Title: Database users mapped with Windows authentication (integrated security) aren't supported in Azure SQL Database.** -**Category**: Warning - -**Description** -Azure SQL Database supports two types of authentication -- SQL Authentication: uses a username and password -- Azure Active Directory Authentication: uses identities managed by Azure Active Directory and is supported for managed and integrated domains. - -Database users mapped with Windows authentication (integrated security) aren't supported in Azure SQL Database. - - - -**Recommendation** -Federate the local Active Directory with Azure Active Directory. The Windows identity can then be replaced with the equivalent Azure Active Directory identities. Alternatively, migrate to SQL Server on Azure Virtual Machine. - -More information: [SQL Database security capabilities](../../database/security-overview.md#authentication) - -## XP_cmdshell - -**Title: xp_cmdshell isn't supported in Azure SQL Database.** -**Category**: Issue - -**Description** -xp_cmdshell which spawns a Windows command shell and passes in a string for execution isn't supported in Azure SQL Database. - - -**Recommendation** -Review impacted objects section in Azure Migrate to see all objects using xp_cmdshell and evaluate if the reference to xp_cmdshell or the impacted object can be removed. Also consider exploring Azure Automation that delivers cloud-based automation and configuration service. Alternatively, migrate to SQL Server on Azure Virtual Machine. 
- -## Next steps - -To start migrating your SQL Server to Azure SQL Database, see the [SQL Server to SQL Database migration guide](sql-server-to-sql-database-guide.md). - -- For a matrix of available Microsoft and third-party services and tools to assist you with various database and data migration scenarios as well as specialty tasks, see [Service and tools for data migration](../../../dms/dms-tools-matrix.md). - -- To learn more about SQL Database, see: - - [Overview of Azure SQL Database](../../database/sql-database-paas-overview.md) - - [Azure total Cost of Ownership Calculator](https://azure.microsoft.com/pricing/tco/calculator/) - -- To learn more about the framework and adoption cycle for Cloud migrations, see - - [Cloud Adoption Framework for Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/contoso-migration-scale) - - [Best practices for costing and sizing workloads migrate to Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/migrate-best-practices-costs) - - -- To assess the Application access layer, see [Data Access Migration Toolkit (Preview)](https://marketplace.visualstudio.com/items?itemName=ms-databasemigration.data-access-migration-toolkit) -- For details on how to perform Data Access Layer A/B testing see [Database Experimentation Assistant](/sql/dea/database-experimentation-assistant-overview). diff --git a/articles/azure-sql/migration-guides/database/sql-server-to-sql-database-guide.md b/articles/azure-sql/migration-guides/database/sql-server-to-sql-database-guide.md deleted file mode 100644 index 7ea0cad9dbd68..0000000000000 --- a/articles/azure-sql/migration-guides/database/sql-server-to-sql-database-guide.md +++ /dev/null @@ -1,213 +0,0 @@ ---- -title: "SQL Server to Azure SQL Database: Migration guide" -description: Follow this guide to migrate your SQL Server databases to Azure SQL Database. 
-ms.service: sql-database -ms.subservice: migration-guide -ms.custom: -ms.devlang: -ms.topic: how-to -author: mokabiru -ms.author: mokabiru -ms.reviewer: mathoma, kendralittle -ms.date: 03/19/2021 ---- -# Migration guide: SQL Server to Azure SQL Database -[!INCLUDE[appliesto--sqldb](../../includes/appliesto-sqldb.md)] - -In this guide, you learn [how to migrate](https://azure.microsoft.com/migration/migration-journey) your SQL Server instance to Azure SQL Database. - -You can migrate SQL Server running on-premises or on: - -- SQL Server on Virtual Machines -- Amazon Web Services (AWS) EC2 -- Amazon Relational Database Service (AWS RDS) -- Compute Engine (Google Cloud Platform - GCP) -- Cloud SQL for SQL Server (Google Cloud Platform – GCP) - -For more migration information, see the [migration overview](sql-server-to-sql-database-overview.md). For other migration guides, see [Database Migration](/data-migration). - -:::image type="content" source="media/sql-server-to-database-overview/migration-process-flow-small.png" alt-text="Migration process flow"::: - -## Prerequisites - -For your [SQL Server migration](https://azure.microsoft.com/migration/sql-server/) to Azure SQL Database, make sure you have: - -- Chosen [migration method](sql-server-to-sql-database-overview.md#compare-migration-options) and corresponding tools . -- Installed [Data Migration Assistant (DMA)](https://www.microsoft.com/download/details.aspx?id=53595) on a machine that can connect to your source SQL Server. -- Created a target [Azure SQL Database](../../database/single-database-create-quickstart.md). -- Configured connectivity and proper permissions to access both source and target. -- Reviewed the database engine features [available in Azure SQL Database](../../database/features-comparison.md). - - - -## Pre-migration - -After you've verified that your source environment is supported, start with the pre-migration stage. 
Discover all of the existing data sources, assess migration feasibility, and identify any blocking issues that might prevent your [Azure cloud migration](https://azure.microsoft.com/migration). - -### Discover - -In the Discover phase, scan the network to identify all SQL Server instances and features used by your organization. - -Use [Azure Migrate](../../../migrate/migrate-services-overview.md) to assess migration suitability of on-premises servers, perform performance-based sizing, and provide cost estimations for running them in Azure. - -Alternatively, use the [Microsoft Assessment and Planning Toolkit (the "MAP Toolkit")](https://www.microsoft.com/download/details.aspx?id=7826) to assess your current IT infrastructure. The toolkit provides a powerful inventory, assessment, and reporting tool to simplify the migration planning process. - -For more information about tools available to use for the Discover phase, see [Services and tools available for data migration scenarios](../../../dms/dms-tools-matrix.md). - -### Assess - -[!INCLUDE [assess-estate-with-azure-migrate](../../../../includes/azure-migrate-to-assess-sql-data-estate.md)] - -After data sources have been discovered, assess any on-premises SQL Server database(s) that can be migrated to Azure SQL Database to identify migration blockers or compatibility issues. - -You can use the Data Migration Assistant (version 4.1 and later) to assess databases to get: - -- [Azure target recommendations](/sql/dma/dma-assess-sql-data-estate-to-sqldb) -- [Azure SKU recommendations](/sql/dma/dma-sku-recommend-sql-db) - -To assess your environment using the Database Migration Assessment, follow these steps: - -1. Open the [Data Migration Assistant (DMA)](https://www.microsoft.com/download/details.aspx?id=53595). -1. Select **File** and then choose **New assessment**. -1. Specify a project name, select SQL Server as the source server type, and then select Azure SQL Database as the target server type. -1. 
Select the type(s) of assessment reports that you want to generate. For example, database compatibility and feature parity. Based on the type of assessment, the permissions required on the source SQL Server can be different. DMA will highlight the permissions required for the chosen advisor before running the assessment. - - The **feature parity** category provides a comprehensive set of recommendations, alternatives available in Azure, and mitigating steps to help you plan your migration project. (sysadmin permissions required) - - The **compatibility issues** category identifies partially supported or unsupported feature compatibility issues that might block migration as well as recommendations to address them (`CONNECT SQL`, `VIEW SERVER STATE`, and `VIEW ANY DEFINITION` permissions required). -1. Specify the source connection details for your SQL Server and connect to the source database. -1. Select **Start assessment**. -1. After the process completes, select and review the assessment reports for migration blocking and feature parity issues. The assessment report can also be exported to a file that can be shared with other teams or personnel in your organization. -1. Determine the database compatibility level that minimizes post-migration efforts. -1. Identify the best Azure SQL Database SKU for your on-premises workload. - -To learn more, see [Perform a SQL Server migration assessment with Data Migration Assistant](/sql/dma/dma-assesssqlonprem). - -If the assessment encounters multiple blockers to confirm that your database is not ready for an Azure SQL Database migration, then alternatively consider: - -- [Azure SQL Managed Instance](../managed-instance/sql-server-to-managed-instance-overview.md) if there are multiple instance-scoped dependencies -- [SQL Server on Azure Virtual Machines](../virtual-machines/sql-server-to-sql-on-azure-vm-migration-overview.md) if both SQL Database and SQL Managed Instance fail to be suitable targets. 
- - - -#### Scaled Assessments and Analysis -Data Migration Assistant supports performing scaled assessments and consolidation of the assessment reports for analysis. - -If you have multiple servers and databases that need to be assessed and analyzed at scale to provide a wider view of the data estate, see the following links to learn more: - -- [Performing scaled assessments using PowerShell](/sql/dma/dma-consolidatereports) -- [Analyzing assessment reports using Power BI](/sql/dma/dma-consolidatereports#dma-reports) - -> [!IMPORTANT] -> Running assessments at scale for multiple databases, especially large ones, can also be automated using the [DMA Command Line Utility](/sql/dma/dma-commandline) and uploaded to [Azure Migrate](/sql/dma/dma-assess-sql-data-estate-to-sqldb#view-target-readiness-assessment-results) for further analysis and target readiness. - -## Migrate - -After you have completed tasks associated with the Pre-migration stage, you are ready to perform the schema and data migration. - -Migrate your data using your chosen [migration method](sql-server-to-sql-database-overview.md#compare-migration-options). - -This guide describes the two most popular options - Data Migration Assistant and Azure Database Migration Service. - -### Data Migration Assistant (DMA) - -To migrate a database from SQL Server to Azure SQL Database using DMA, follow these steps: - -1. Download and install the [Database Migration Assistant](https://www.microsoft.com/download/details.aspx?id=53595). -1. Create a new project and select **Migration** as the project type. -1. Set the source server type to **SQL Server** and the target server type to **Azure SQL Database**, select the migration scope as **Schema and data** and select **Create**. -1. In the migration project, specify the source server details such as the server name, credentials to connect to the server and the source database to migrate. -1. 
In the target server details, specify the Azure SQL Database server name, credentials to connect to the server and the target database to migrate to. -1. Select the schema objects and deploy them to the target Azure SQL Database. -1. Finally, select **Start data migration** and monitor the progress of migration. - -For a detailed tutorial, see [Migrate on-premises SQL Server or SQL Server on Azure VMs to Azure SQL Database using the Data Migration Assistant](/sql/dma/dma-migrateonpremsqltosqldb). - - -> [!NOTE] -> - Scale your database to a higher service tier and compute size during the import process to maximize import speed by providing more resources. You can then scale down after the import is successful.
    -> - The compatibility level of the imported database is based on the compatibility level of your source database. - - -### Azure Database Migration Service (DMS) - -To migrate databases from SQL Server to Azure SQL Database using DMS, follow the steps below: - -1. If you haven't already, register the **Microsoft.DataMigration** resource provider in your subscription. -1. Create an Azure Database Migration Service Instance in a desired location of your choice (preferably in the same region as your target Azure SQL Database). Select an existing virtual network or create a new one to host your DMS instance. -1. After your DMS instance is created, create a new migration project and specify the source server type as **SQL Server** and the target server type as **Azure SQL Database**. Choose **Offline data migration** as the activity type in the migration project creation blade. -1. Specify the source SQL Server details on the **Migration source** details page and the target Azure SQL Database details on the **Migration target** details page. -1. Map the source and target databases for migration and then select the tables you want to migrate. -1. Review the migration summary and select **Run migration**. You can then monitor the migration activity and check the progress of your database migration. - -For a detailed tutorial, see [Migrate SQL Server to an Azure SQL Database using DMS](../../../dms/tutorial-sql-server-to-azure-sql.md). - -## Data sync and cutover - -When using migration options that continuously replicate / sync data changes from source to the target, the source data and schema can change and drift from the target. During data sync, ensure that all changes on the source are captured and applied to the target during the migration process. - -After you verify that data is same on both the source and the target, you can cutover from the source to the target environment. 
It is important to plan the cutover process with business / application teams to ensure minimal interruption during cutover does not affect business continuity. - -> [!IMPORTANT] -> For details on the specific steps associated with performing a cutover as part of migrations using DMS, see [Performing migration cutover](../../../dms/tutorial-sql-server-to-azure-sql.md). - -## Migration recommendations - -To speed up migration to Azure SQL Database, you should consider the following recommendations: - -| | Resource contention | Recommendation | -|--|--|--| -| **Source (typically on premises)** |Primary bottleneck during migration in source is DATA I/O and latency on DATA file which needs to be monitored carefully. |Based on DATA IO and DATA file latency and depending on whether it’s a virtual machine or physical server, you will have to engage storage admin and explore options to mitigate the bottleneck. | -|**Target (Azure SQL Database)**|Biggest limiting factor is the log generation rate and latency on log file. With Azure SQL Database, you can get a maximum of 96-MB/s log generation rate. | To speed up migration, scale up the target SQL DB to Business Critical Gen5 8 vCore to get the maximum log generation rate of 96 MB/s and also achieve low latency for log file. The [Hyperscale](../../database/service-tier-hyperscale.md) service tier provides 100-MB/s log rate regardless of chosen service level | -|**Network** |Network bandwidth needed is equal to max log ingestion rate 96 MB/s (768 Mb/s) |Depending on network connectivity from your on-premises data center to Azure, check your network bandwidth (typically [Azure ExpressRoute](../../../expressroute/expressroute-introduction.md#bandwidth-options)) to accommodate for the maximum log ingestion rate. | -|**Virtual machine used for Data Migration Assistant (DMA)** |CPU is the primary bottleneck for the virtual machine running DMA |Things to consider to speed up data migration by using
    - Azure compute-intensive VMs
    - Use at least F8s_v2 (8 vCore) VM for running DMA
    - Ensure the VM is running in the same Azure region as target | -|**Azure Database Migration Service (DMS)** |Compute resource contention and database objects consideration for DMS |Use Premium 4 vCore. DMS automatically takes care of database objects like foreign keys, triggers, constraints, and non-clustered indexes and doesn't need manual intervention. | - - -## Post-migration - -After you have successfully completed the migration stage, go through a series of post-migration tasks to ensure that everything is functioning smoothly and efficiently. - -The post-migration phase is crucial for reconciling any data accuracy issues and verifying completeness, as well as addressing performance issues with the workload. - -### Remediate applications - -After the data is migrated to the target environment, all the applications that formerly consumed the source need to start consuming the target. Accomplishing this will, in some cases, require changes to the applications. - -### Perform tests - -The test approach for database migration consists of the following activities: - -1. **Develop validation tests**: To test database migration, you need to use SQL queries. You must create the validation queries to run against both the source and the target databases. Your validation queries should cover the scope you have defined. -1. **Set up test environment**: The test environment should contain a copy of the source database and the target database. Be sure to isolate the test environment. -1. **Run validation tests**: Run the validation tests against the source and the target, and then analyze the results. -1. **Run performance tests**: Run performance test against the source and the target, and then analyze and compare the results. 
- - -## Leverage advanced features - -Be sure to take advantage of the advanced cloud-based features offered by SQL Database, such as [built-in high availability](../../database/high-availability-sla.md), [threat detection](../../database/azure-defender-for-sql.md), and [monitoring and tuning your workload](../../database/monitor-tune-overview.md). - -Some SQL Server features are only available once the [database compatibility level](/sql/relational-databases/databases/view-or-change-the-compatibility-level-of-a-database) is changed to the latest compatibility level (150). - -To learn more, see [managing Azure SQL Database after migration](../../database/manage-data-after-migrating-to-database.md) - - -## Next steps - -- For a matrix of the Microsoft and third-party services and tools that are available to assist you with various database and data migration scenarios as well as specialty tasks, see [Service and tools for data migration](../../../dms/dms-tools-matrix.md). - - -- To learn more about [Azure Migrate](https://azure.microsoft.com/services/azure-migrate) see - - [Azure Migrate](../../../migrate/migrate-services-overview.md) - -- To learn more about SQL Database see: - - [An Overview of Azure SQL Database](../../database/sql-database-paas-overview.md) - - [Azure total Cost of Ownership Calculator](https://azure.microsoft.com/pricing/tco/calculator/) - - -- To learn more about the framework and adoption cycle for Cloud migrations, see - - [Cloud Adoption Framework for Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/contoso-migration-scale) - - [Best practices for costing and sizing workloads for migration to Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/migrate-best-practices-costs) - - [Cloud Migration Resources](https://azure.microsoft.com/migration/resources) - -- To assess the Application access layer, see [Data Access Migration Toolkit 
(Preview)](https://marketplace.visualstudio.com/items?itemName=ms-databasemigration.data-access-migration-toolkit) -- For details on how to perform Data Access Layer A/B testing see [Database Experimentation Assistant](/sql/dea/database-experimentation-assistant-overview). \ No newline at end of file diff --git a/articles/azure-sql/migration-guides/database/sql-server-to-sql-database-overview.md b/articles/azure-sql/migration-guides/database/sql-server-to-sql-database-overview.md deleted file mode 100644 index e849e134f6d2d..0000000000000 --- a/articles/azure-sql/migration-guides/database/sql-server-to-sql-database-overview.md +++ /dev/null @@ -1,205 +0,0 @@ ---- -title: "SQL Server to Azure SQL Database: Migration overview" -description: Learn about the tools and options available to migrate your SQL Server databases to Azure SQL Database. -ms.service: sql-database -ms.subservice: migration-guide -ms.custom: -ms.devlang: -ms.topic: how-to -author: mokabiru -ms.author: mokabiru -ms.reviewer: mathoma, kendralittle -ms.date: 11/06/2020 ---- -# Migration overview: SQL Server to Azure SQL Database -[!INCLUDE[appliesto--sqldb](../../includes/appliesto-sqldb.md)] - -Learn about the options and considerations for migrating your SQL Server databases to Azure SQL Database. - -You can migrate existing SQL Server databases running on: - -- SQL Server on-premises. -- SQL Server on Azure Virtual Machines. -- Amazon Web Services (AWS) Elastic Compute Cloud (EC2). -- AWS Relational Database Service (RDS). -- Compute Engine in Google Cloud Platform (GCP). -- Cloud SQL for SQL Server in GCP. - -For other migration guides, see [Database Migration](/data-migration). - -## Overview - -[Azure SQL Database](../../database/sql-database-paas-overview.md) is a recommended target option for SQL Server workloads that require a fully managed platform as a service (PaaS). SQL Database handles most database management functions. 
It also has built-in high availability, intelligent query processing, scalability, and performance capabilities to suit many application types. - -SQL Database provides flexibility with multiple [deployment models](../../database/sql-database-paas-overview.md#deployment-models) and [service tiers](../../database/service-tiers-vcore.md#service-tiers) that cater to different types of applications or workloads. - -One of the key benefits of migrating to SQL Database is that you can modernize your application by using the PaaS capabilities. You can then eliminate any dependency on technical components that are scoped at the instance level, such as SQL Agent jobs. - -You can also save costs by using the [Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/) for SQL Server to migrate your SQL Server on-premises licenses to Azure SQL Database. This option is available if you choose the [vCore-based purchasing model](../../database/service-tiers-vcore.md). - -Be sure to review the SQL Server database engine features [available in Azure SQL Database](../../database/features-comparison.md) to validate the supportability of your migration target. - -## Considerations - -The key factors to consider when you're evaluating migration options are: - -- Number of servers and databases -- Size of databases -- Acceptable business downtime during the migration process - -The migration options listed in this guide take these factors into account. For logical data migration to Azure SQL Database, the time to migrate can depend on both the number of objects in a database and the size of the database. - -Tools are available for various workloads and user preferences. Some tools can be used to perform a quick migration of a single database through a UI-based tool. Other tools can automate the migration of multiple databases to handle migrations at scale. 
- -## Choose an appropriate target - -Consider general guidelines to help you choose the right deployment model and service tier of Azure SQL Database. You can choose compute and storage resources during deployment and then [change them afterward by using the Azure portal](../../database/scale-resources.md) without incurring downtime for your application. - -**Deployment models**: Understand your application workload and the usage pattern to decide between a single database or an elastic pool. - -- A [single database](../../database/single-database-overview.md) represents a fully managed database that's suitable for most modern cloud applications and microservices. -- An [elastic pool](../../database/elastic-pool-overview.md) is a collection of single databases with a shared set of resources, such as CPU or memory. It's suitable for combining databases in a pool with predictable usage patterns that can effectively share the same set of resources. - -**Purchasing models**: Choose between the vCore, database transaction unit (DTU), or serverless purchasing models. - -- The [vCore model](../../database/service-tiers-vcore.md) lets you choose the number of vCores for Azure SQL Database, so it's the easiest choice when you're translating from on-premises SQL Server. This is the only option that supports saving license costs with the [Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/). -- The [DTU model](../../database/service-tiers-dtu.md) abstracts the underlying compute, memory, and I/O resources to provide a blended DTU. -- The [serverless model](../../database/serverless-tier-overview.md) is for workloads that require automatic on-demand scaling with compute resources billed per second of usage. The serverless compute tier automatically pauses databases during inactive periods (where only storage is billed). It automatically resumes databases when activity returns. 
- -**Service tiers**: Choose between three service tiers designed for different types of applications. - -- [General Purpose/standard service tier](../../database/service-tier-general-purpose.md) offers a balanced budget-oriented option with compute and storage suitable to deliver applications in the middle and lower tiers. Redundancy is built in at the storage layer to recover from failures. It's designed for most database workloads. -- [Business Critical/premium service tier](../../database/service-tier-business-critical.md) is for high-tier applications that require high transaction rates, low-latency I/O, and a high level of resiliency. Secondary replicas are available for failover and to offload read workloads. -- [Hyperscale service tier](../../database/service-tier-hyperscale.md) is for databases that have growing data volumes and need to automatically scale up to 100 TB in database size. It's designed for very large databases. - -> [!IMPORTANT] -> [Transaction log rate is governed](../../database/resource-limits-logical-server.md#transaction-log-rate-governance) in Azure SQL Database to limit high ingestion rates. As such, during migration, you might have to scale target database resources (vCores or DTUs) to ease pressure on CPU or throughput. Choose the appropriately sized target database, but plan to scale resources up for the migration if necessary. - - -### SQL Server VM alternative - -Your business might have requirements that make [SQL Server on Azure Virtual Machines](../../virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md) a more suitable target than Azure SQL Database. - -If one of the following conditions applies to your business, consider moving to a SQL Server virtual machine (VM) instead: - -- You require direct access to the operating system or file system, such as to install third-party or custom agents on the same virtual machine with SQL Server. 
-- You have strict dependency on features that are still not supported, such as FileStream/FileTable, PolyBase, and cross-instance transactions. -- You need to stay at a specific version of SQL Server (2012, for example). -- Your compute requirements are much lower than a managed instance offers (one vCore, for example), and database consolidation is not an acceptable option. - - -## Migration tools - -We recommend the following migration tools: - -|Technology | Description| -|---------|---------| -| [Azure Migrate](../../../migrate/how-to-create-azure-sql-assessment.md) | This Azure service helps you discover and assess your SQL data estate at scale on VMware. It provides Azure SQL deployment recommendations, target sizing, and monthly estimates. | -|[Data Migration Assistant](/sql/dma/dma-migrateonpremsqltosqldb)|This desktop tool from Microsoft provides seamless assessments of SQL Server and single-database migrations to Azure SQL Database (both schema and data).

    The tool can be installed on a server on-premises or on your local machine that has connectivity to your source databases. The migration process is a logical data movement between objects in the source and target databases.| -|[Azure Database Migration Service](../../../dms/tutorial-sql-server-to-azure-sql.md)|This Azure service can migrate SQL Server databases to Azure SQL Database through the Azure portal or automatically through PowerShell. Database Migration Service requires you to select a preferred Azure virtual network during provisioning to ensure connectivity to your source SQL Server databases. You can migrate single databases or at scale. | - - - -The following table lists alternative migration tools: - -|Technology |Description | -|---------|---------| -|[Transactional replication](../../database/replication-to-sql-database.md)|Replicate data from source SQL Server database tables to Azure SQL Database by providing a publisher-subscriber type migration option while maintaining transactional consistency. Incremental data changes are propagated to subscribers as they occur on the publishers.| -|[Import Export Service/BACPAC](../../database/database-import.md)|[BACPAC](/sql/relational-databases/data-tier-applications/data-tier-applications#bacpac) is a Windows file with a .bacpac extension that encapsulates a database's schema and data. You can use BACPAC to both export data from a SQL Server source and import the data into Azure SQL Database. A BACPAC file can be imported to a new SQL database through the Azure portal.

    For scale and performance with large database sizes or a large number of databases, consider using the [SqlPackage](../../database/database-import.md#using-sqlpackage) command-line tool to export and import databases.| -|[Bulk copy](/sql/relational-databases/import-export/import-and-export-bulk-data-by-using-the-bcp-utility-sql-server)|The [bulk copy program (bcp) tool](/sql/tools/bcp-utility) copies data from an instance of SQL Server into a data file. Use the tool to export the data from your source and import the data file into the target SQL database.

    For high-speed bulk copy operations to move data to Azure SQL Database, you can use the [Smart Bulk Copy tool](/samples/azure-samples/smartbulkcopy/smart-bulk-copy/) to maximize transfer speed by taking advantage of parallel copy tasks.| -|[Azure Data Factory](../../../data-factory/connector-azure-sql-database.md)|The [Copy activity](../../../data-factory/copy-activity-overview.md) in Azure Data Factory migrates data from source SQL Server databases to Azure SQL Database by using built-in connectors and an [integration runtime](../../../data-factory/concepts-integration-runtime.md).

    Data Factory supports a wide range of [connectors](../../../data-factory/connector-overview.md) to move data from SQL Server sources to Azure SQL Database.| -|[SQL Data Sync](../../database/sql-data-sync-data-sql-server-sql-database.md)|SQL Data Sync is a service built on Azure SQL Database that lets you synchronize selected data bidirectionally across multiple databases, both on-premises and in the cloud.
    Data Sync is useful in cases where data needs to be kept updated across several databases in Azure SQL Database or SQL Server.| - - -## Compare migration options - -Compare migration options to choose the path that's appropriate to your business needs. - -The following table compares the migration options that we recommend: - -|Migration option |When to use |Considerations | -|---------|---------|---------| -|[Data Migration Assistant](/sql/dma/dma-migrateonpremsqltosqldb) | - Migrate single databases (both schema and data).
    - Can accommodate downtime during the data migration process.

    Supported sources:
    - SQL Server (2005 to 2019) on-premises or Azure VM
    - AWS EC2
    - AWS RDS
    - GCP Compute SQL Server VM | - Migration activity performs data movement between database objects (from source to target), so we recommend that you run it during off-peak times.
    - Data Migration Assistant reports the status of migration per database object, including the number of rows migrated.
    - For large migrations (number of databases or size of database), use Azure Database Migration Service.| -|[Azure Database Migration Service](../../../dms/tutorial-sql-server-to-azure-sql.md)| - Migrate single databases or at scale.
    - Can accommodate downtime during the migration process.

    Supported sources:
    - SQL Server (2005 to 2019) on-premises or Azure VM
    - AWS EC2
    - AWS RDS
    - GCP Compute SQL Server VM | - Migrations at scale can be automated via [PowerShell](../../../dms/howto-sql-server-to-azure-sql-powershell.md).
    - Time to complete migration depends on database size and the number of objects in the database.
    - Requires the source database to be set as read-only. | - - -The following table compares the alternative migration options: - -|Method or technology |When to use |Considerations | -|---------|---------|---------| -|[Transactional replication](../../database/replication-to-sql-database.md)| - Migrate by continuously publishing changes from source database tables to target SQL Database tables.
    - Do full or partial database migrations of selected tables (subset of a database).

    Supported sources:
    - [SQL Server (2016 to 2019) with some limitations](/sql/relational-databases/replication/replication-backward-compatibility)
    - AWS EC2
    - GCP Compute SQL Server VM | - Setup is relatively complex compared to other migration options.
    - Provides a continuous replication option to migrate data (without taking the databases offline).
    - Transactional replication has limitations to consider when you're setting up the publisher on the source SQL Server instance. See [Limitations on publishing objects](/sql/relational-databases/replication/publish/publish-data-and-database-objects#limitations-on-publishing-objects) to learn more.
    - It's possible to [monitor replication activity](/sql/relational-databases/replication/monitor/monitoring-replication). | -|[Import Export Service/BACPAC](../../database/database-import.md)| - Migrate individual line-of-business application databases.
    - Suited for smaller databases.
    - Does not require a separate migration service or tool.

    Supported sources:
    - SQL Server (2005 to 2019) on-premises or Azure VM
    - AWS EC2
    - AWS RDS
    - GCP Compute SQL Server VM | - Requires downtime because data needs to be exported at the source and imported at the destination.
    - The file formats and data types used in the export or import need to be consistent with table schemas to avoid truncation or data-type mismatch errors.
    - Time taken to export a database with a large number of objects can be significantly higher. | -|[Bulk copy](/sql/relational-databases/import-export/import-and-export-bulk-data-by-using-the-bcp-utility-sql-server)| - Do full or partial data migrations.
    - Can accommodate downtime.

    Supported sources:
    - SQL Server (2005 to 2019) on-premises or Azure VM
    - AWS EC2
    - AWS RDS
    - GCP Compute SQL Server VM | - Requires downtime for exporting data from the source and importing into the target.
    - The file formats and data types used in the export or import need to be consistent with table schemas. | -|[Azure Data Factory](../../../data-factory/connector-azure-sql-database.md)| - Migrate and/or transform data from source SQL Server databases.
    - Merging data from multiple data sources to Azure SQL Database is typically for business intelligence (BI) workloads. | - Requires creating data movement pipelines in Data Factory to move data from source to destination.
    - [Cost](https://azure.microsoft.com/pricing/details/data-factory/data-pipeline/) is an important consideration and is based on factors like pipeline triggers, activity runs, and duration of data movement. | -|[SQL Data Sync](../../database/sql-data-sync-data-sql-server-sql-database.md)| - Synchronize data between source and target databases.
    - Suitable to run continuous sync between Azure SQL Database and on-premises SQL Server in a bidirectional flow. | - Azure SQL Database must be the hub database for sync with an on-premises SQL Server database as a member database.
    - Compared to transactional replication, SQL Data Sync supports bidirectional data sync between on-premises and Azure SQL Database.
    - Can have a higher performance impact, depending on the workload.| - - -## Feature interoperability - -There are more considerations when you're migrating workloads that rely on other SQL Server features. - -### SQL Server Integration Services -Migrate SQL Server Integration Services (SSIS) packages to Azure by redeploying the packages to the Azure-SSIS runtime in [Azure Data Factory](../../../data-factory/introduction.md). Azure Data Factory [supports migration of SSIS packages](../../../data-factory/scenario-ssis-migration-overview.md#azure-sql-database-as-database-workload-destination) by providing a runtime built to run SSIS packages in Azure. Alternatively, you can rewrite the SSIS ETL (extract, transform, load) logic natively in Azure Data Factory by using [data flows](../../../data-factory/concepts-data-flow-overview.md). - - -### SQL Server Reporting Services -Migrate SQL Server Reporting Services (SSRS) reports to paginated reports in Power BI. Use the [RDL Migration Tool](https://github.com/microsoft/RdlMigration) to help prepare and migrate your reports. Microsoft developed this tool to help customers migrate Report Definition Language (RDL) reports from their SSRS servers to Power BI. It's available on GitHub, and it documents an end-to-end walkthrough of the migration scenario. - -### High availability -Manual setup of SQL Server high-availability features like Always On failover cluster instances and Always On availability groups becomes obsolete on the target SQL database. High-availability architecture is already built into both [General Purpose (standard availability model)](../../database/high-availability-sla.md#basic-standard-and-general-purpose-service-tier-locally-redundant-availability) and [Business Critical (premium availability model)](../../database/high-availability-sla.md#premium-and-business-critical-service-tier-locally-redundant-availability) service tiers for Azure SQL Database. 
The Business Critical/premium service tier also provides read scale-out that allows connecting into one of the secondary nodes for read-only purposes. - -Beyond the high-availability architecture that's included in Azure SQL Database, the [auto-failover groups](../../database/auto-failover-group-overview.md) feature allows you to manage the replication and failover of databases in a managed instance to another region. - -### Logins and groups - -Windows logins are not supported in Azure SQL Database, create an Azure Active Directory login instead. Manually recreate any SQL logins. - -### SQL Agent jobs -SQL Agent jobs are not directly supported in Azure SQL Database and need to be deployed to [elastic database jobs (preview)](../../database/job-automation-overview.md). - -### System databases -For Azure SQL Database, the only applicable system databases are [master](/sql/relational-databases/databases/master-database) and tempdb. To learn more, see [Tempdb in Azure SQL Database](/sql/relational-databases/databases/tempdb-database#tempdb-database-in-sql-database). - -## Advanced features - -Be sure to take advantage of the advanced cloud-based features in SQL Database. For example, you don't need to worry about managing backups because the service does it for you. You can restore to any [point in time within the retention period](../../database/recovery-using-backups.md#point-in-time-restore). - -To strengthen security, consider using [Azure AD authentication](../../database/authentication-aad-overview.md), [auditing](../../database/auditing-overview.md), [threat detection](../../database/azure-defender-for-sql.md), [row-level security](/sql/relational-databases/security/row-level-security), and [dynamic data masking](/sql/relational-databases/security/dynamic-data-masking). - -In addition to advanced management and security features, SQL Database provides tools that can help you [monitor and tune your workload](../../database/monitor-tune-overview.md). 
[Azure SQL Analytics (Preview)](../../../azure-monitor/insights/azure-sql.md) is an advanced solution for monitoring the performance of all of your databases in Azure SQL Database at scale and across multiple subscriptions in a single view. Azure SQL Analytics collects and visualizes key performance metrics with built-in intelligence for performance troubleshooting. - -[Automatic tuning](/sql/relational-databases/automatic-tuning/automatic-tuning#automatic-plan-correction) continuously monitors performance of your SQL execution plan and automatically fixes identified performance issues. - - -## Migration assets - -For more assistance, see the following resources that were developed for real-world migration projects. - -|Asset |Description | -|---------|---------| -|[Data workload assessment model and tool](https://www.microsoft.com/download/details.aspx?id=103130)| This tool provides suggested "best fit" target platforms, cloud readiness, and an application/database remediation level for a workload. It offers simple, one-click calculation and report generation that helps to accelerate large estate assessments by providing an automated and uniform decision process for target platforms.| -|[Bulk database creation with PowerShell](https://www.microsoft.com/download/details.aspx?id=103107)|You can use a set of three PowerShell scripts that create a resource group (create_rg.ps1), the [logical server in Azure](../../database/logical-servers.md) (create_sqlserver.ps1), and a SQL database (create_sqldb.ps1). 
The scripts include loop capabilities so you can iterate and create as many servers and databases as necessary.| -|[Bulk schema deployment with MSSQL-Scripter and PowerShell](https://www.microsoft.com/download/details.aspx?id=103032)|This asset creates a resource group, creates one or multiple [logical servers in Azure](../../database/logical-servers.md) to host Azure SQL Database, exports every schema from an on-premises SQL Server instance (or multiple SQL Server 2005+ instances), and imports the schemas to Azure SQL Database.| -|[Convert SQL Server Agent jobs into elastic database jobs](https://www.microsoft.com/download/details.aspx?id=103123)|This script migrates your source SQL Server Agent jobs to elastic database jobs.| -|[Utility to move on-premises SQL Server logins to Azure SQL Database](https://www.microsoft.com/download/details.aspx?id=103111)|A PowerShell script can create a T-SQL command script to re-create logins and select database users from on-premises SQL Server to Azure SQL Database. The tool allows automatic mapping of Windows Server Active Directory accounts to Azure AD accounts, along with optionally migrating SQL Server native logins.| -|[Perfmon data collection automation by using Logman](https://www.microsoft.com/download/details.aspx?id=103114)|You can use the Logman tool to collect Perfmon data (to help you understand baseline performance) and get migration target recommendations. This tool uses logman.exe to create the command that will create, start, stop, and delete performance counters set on a remote SQL Server instance.| - -The Data SQL Engineering team developed these resources. This team's core charter is to unblock and accelerate complex modernization for data platform migration projects to Microsoft's Azure data platform. - -## Next steps - -- To start migrating your SQL Server databases to Azure SQL Database, see the [SQL Server to Azure SQL Database migration guide](sql-server-to-sql-database-guide.md). 
- -- For a matrix of services and tools that can help you with database and data migration scenarios as well as specialty tasks, see [Services and tools for data migration](../../../dms/dms-tools-matrix.md). - -- To learn more about SQL Database, see: - - [Overview of Azure SQL Database](../../database/sql-database-paas-overview.md) - - [Azure Total Cost of Ownership Calculator](https://azure.microsoft.com/pricing/tco/calculator/) - -- To learn more about the framework and adoption cycle for cloud migrations, see: - - [Cloud Adoption Framework for Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/contoso-migration-scale) - - [Best practices for costing and sizing workloads migrated to Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/migrate-best-practices-costs) - -- To assess the application access layer, see [Data Access Migration Toolkit (Preview)](https://marketplace.visualstudio.com/items?itemName=ms-databasemigration.data-access-migration-toolkit). - -- For details on how to perform A/B testing for the data access layer, see [Database Experimentation Assistant](/sql/dea/database-experimentation-assistant-overview). diff --git a/articles/azure-sql/migration-guides/index.yml b/articles/azure-sql/migration-guides/index.yml deleted file mode 100644 index f60eeaed56f68..0000000000000 --- a/articles/azure-sql/migration-guides/index.yml +++ /dev/null @@ -1,94 +0,0 @@ -### YamlMime:Landing - -title: Migrate to Azure SQL -summary: "Find documentation on how to migrate to the Azure SQL family of SQL Server database engine products in the cloud: Azure SQL Database, Azure SQL Managed Instance, and SQL Server on Azure VM. 
" - -metadata: - title: Migrate to Azure SQL - description: "Find documentation to help you migrate to Azure SQL, which is a family of SQL Server database engine products in the cloud, from a fully managed database in Azure SQL Database, a fully managed instance in Azure SQL Managed Instance, or SQL Server installed to a virtual machine in Azure." - services: sql-database - ms.service: sql-database - ms.subservice: migration-guide - ms.tgt_pltfrm: na - ms.devlang: - ms.topic: landing-page - author: mokabiru - ms.author: mokabiru - ms.reviewer: mathoma, kendralittle - ms.date: 11/06/2020 - -# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new - -landingContent: -# Cards and links should be based on top customer tasks or top subjects -# Start card title with a verb - - # Card - - title: Azure SQL Database - linkLists: - - linkListType: get-started - links: - - text: Overview - url: database/sql-server-to-sql-database-overview.md - - text: From SQL Server - url: database/sql-server-to-sql-database-guide.md - - text: From Access - url: database/access-to-sql-database-guide.md - - text: From DB2 - url: database/db2-to-sql-database-guide.md - - text: From Oracle - url: database/oracle-to-sql-database-guide.md - - text: From MySQL - url: database/mysql-to-sql-database-guide.md - - text: From SAP ASE - url: database/sap-ase-to-sql-database.md - - # Card - - title: Azure SQL Managed Instance - linkLists: - - linkListType: get-started - links: - - text: Overview - url: managed-instance/sql-server-to-managed-instance-overview.md - - text: From SQL Server - url: managed-instance/sql-server-to-managed-instance-guide.md - - text: From DB2 - url: managed-instance/db2-to-managed-instance-guide.md - - text: From Oracle - url: managed-instance/oracle-to-managed-instance-guide.md - - # Card - - title: SQL Server on Azure VM - linkLists: - - linkListType: get-started - links: 
- - text: Overview - url: virtual-machines/sql-server-to-sql-on-azure-vm-migration-overview.md - - text: From SQL Server - url: virtual-machines/sql-server-to-sql-on-azure-vm-individual-databases-guide.md - - text: From DB2 - url: virtual-machines/db2-to-sql-on-azure-vm-guide.md - - text: From Oracle - url: virtual-machines/oracle-to-sql-on-azure-vm-guide.md - - # Card - - title: Migration tools - linkLists: - - linkListType: deploy - links: - - text: Azure Migrate - url: ../../migrate/migrate-services-overview.md - - text: Azure Database Migration Service (DMS) - url: ../../dms/dms-overview.md - - text: Data Migration Assistant (DMA) - url: /sql/dma/dma-migrateonpremsqltosqldb - - text: Transactional replication - url: /sql/relational-databases/replication/transactional/transactional-replication - - text: Import & export service / BACPAC - url: ../database/database-import.md - - text: Bulk copy - url: /sql/relational-databases/import-export/import-and-export-bulk-data-by-using-the-bcp-utility-sql-server - - text: Azure Data Factory - url: ../../data-factory/connector-azure-sql-database.md - - text: SQL Data Sync - url: ../database/sql-data-sync-data-sql-server-sql-database.md \ No newline at end of file diff --git a/articles/azure-sql/migration-guides/managed-instance/db2-to-managed-instance-guide.md b/articles/azure-sql/migration-guides/managed-instance/db2-to-managed-instance-guide.md deleted file mode 100644 index a4863e2cb12e9..0000000000000 --- a/articles/azure-sql/migration-guides/managed-instance/db2-to-managed-instance-guide.md +++ /dev/null @@ -1,174 +0,0 @@ ---- -title: "Db2 to Azure SQL Managed Instance: Migration guide" -description: This guide teaches you to migrate your IBM Db2 databases to Azure SQL Managed Instance, by using SQL Server Migration Assistant for Db2. 
-ms.service: sql-managed-instance -ms.subservice: migration-guide -ms.custom: -ms.devlang: -ms.topic: how-to -author: mokabiru -ms.author: mokabiru -ms.reviewer: mathoma -ms.date: 05/14/2021 ---- -# Migration guide: IBM Db2 to Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](../../includes/appliesto-sqlmi.md)] - -This guide teaches you to migrate your IBM Db2 databases to Azure SQL Managed Instance, by using the SQL Server Migration Assistant for Db2. - -For other migration guides, see [Azure Database Migration Guides](/data-migration). - -## Prerequisites - -To migrate your Db2 database to SQL Managed Instance, you need: - -- To verify that your [source environment is supported](/sql/ssma/db2/installing-ssma-for-db2-client-db2tosql#prerequisites). -- To download [SQL Server Migration Assistant (SSMA) for Db2](https://www.microsoft.com/download/details.aspx?id=54254). -- A target instance of [Azure SQL Managed Instance](../../managed-instance/instance-create-quickstart.md). -- Connectivity and sufficient permissions to access both source and target. - -## Pre-migration - -After you have met the prerequisites, you're ready to discover the topology of your environment and assess the feasibility of your migration. - -### Assess and convert - -Create an assessment by using SQL Server Migration Assistant. - -To create an assessment, follow these steps: - -1. Open [SSMA for Db2](https://www.microsoft.com/download/details.aspx?id=54254). -1. Select **File** > **New Project**. -1. Provide a project name and a location to save your project. Then select Azure SQL Managed Instance as the migration target from the drop-down list, and select **OK**. - - :::image type="content" source="media/db2-to-managed-instance-guide/new-project.png" alt-text="Screenshot that shows project details to specify."::: - - -1. On **Connect to Db2**, enter values for the Db2 connection details. 
- - :::image type="content" source="media/db2-to-managed-instance-guide/connect-to-db2.png" alt-text="Screenshot that shows options to connect to your Db2 instance."::: - - -1. Right-click the Db2 schema you want to migrate, and then choose **Create report**. This will generate an HTML report. Alternatively, you can choose **Create report** from the navigation bar after selecting the schema. - - :::image type="content" source="media/db2-to-managed-instance-guide/create-report.png" alt-text="Screenshot that shows how to create a report."::: - -1. Review the HTML report to understand conversion statistics and any errors or warnings. You can also open the report in Excel to get an inventory of Db2 objects and the effort required to perform schema conversions. The default location for the report is in the report folder within *SSMAProjects*. - - For example: `drive:\\Documents\SSMAProjects\MyDb2Migration\report\report_`. - - :::image type="content" source="media/db2-to-managed-instance-guide/report.png" alt-text="Screenshot of the report that you review to identify any errors or warnings"::: - - -### Validate data types - -Validate the default data type mappings, and change them based on requirements if necessary. To do so, follow these steps: - -1. Select **Tools** from the menu. -1. Select **Project Settings**. -1. Select the **Type mappings** tab. - - :::image type="content" source="media/db2-to-managed-instance-guide/type-mapping.png" alt-text="Screenshot that shows selecting the schema and type mapping."::: - -1. You can change the type mapping for each table by selecting the table in the **Db2 Metadata Explorer**. - -### Convert schema - -To convert the schema, follow these steps: - -1. (Optional) Add dynamic or ad-hoc queries to statements. Right-click the node, and then choose **Add statements**. -1. Select **Connect to Azure SQL Managed Instance**. - 1. Enter connection details to connect to Azure SQL Managed Instance. - 1. 
Choose your target database from the drop-down list, or provide a new name, in which case a database will be created on the target server. - 1. Provide authentication details. - 1. Select **Connect**. - - :::image type="content" source="media/db2-to-managed-instance-guide/connect-to-sql-managed-instance.png" alt-text="Screenshot that shows the details needed to connect to SQL Server."::: - - -1. Right-click the schema, and then choose **Convert Schema**. Alternatively, you can choose **Convert Schema** from the top navigation bar after selecting your schema. - - :::image type="content" source="media/db2-to-managed-instance-guide/convert-schema.png" alt-text="Screenshot that shows selecting the schema and converting it."::: - -1. After the conversion completes, compare and review the structure of the schema to identify potential problems. Address the problems based on the recommendations. - - :::image type="content" source="media/db2-to-managed-instance-guide/compare-review-schema-structure.png" alt-text="Screenshot that shows comparing and reviewing the structure of the schema to identify potential problems."::: - -1. In the **Output** pane, select **Review results**. In the **Error list** pane, review errors. -1. Save the project locally for an offline schema remediation exercise. From the **File** menu, select **Save Project**. This gives you an opportunity to evaluate the source and target schemas offline, and perform remediation before you can publish the schema to SQL Managed Instance. - -## Migrate - -After you have completed assessing your databases and addressing any discrepancies, the next step is to execute the migration process. - -To publish your schema and migrate your data, follow these steps: - -1. Publish the schema. In **Azure SQL Managed Instance Metadata Explorer**, from the **Databases** node, right-click the database. Then select **Synchronize with Database**. 
- - :::image type="content" source="media/db2-to-managed-instance-guide/synchronize-with-database.png" alt-text="Screenshot that shows the option to synchronize with database."::: - -1. Migrate the data. Right-click the database or object you want to migrate in **Db2 Metadata Explorer**, and choose **Migrate data**. Alternatively, you can select **Migrate Data** from the navigation bar. To migrate data for an entire database, select the check box next to the database name. To migrate data from individual tables, expand the database, expand **Tables**, and then select the check box next to the table. To omit data from individual tables, clear the check box. - - :::image type="content" source="media/db2-to-managed-instance-guide/migrate-data.png" alt-text="Screenshot that shows selecting the schema and choosing to migrate data."::: - -1. Provide connection details for both Db2 and SQL Managed Instance. -1. After migration completes, view the **Data Migration Report**. - - :::image type="content" source="media/db2-to-managed-instance-guide/data-migration-report.png" alt-text="Screenshot that shows where to review the data migration report."::: - -1. Connect to your instance of Azure SQL Managed Instance by using [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms). Validate the migration by reviewing the data and schema: - - :::image type="content" source="media/db2-to-managed-instance-guide/compare-schema-in-ssms.png" alt-text="Screenshot that shows comparing the schema in SQL Server Management Studio."::: - -## Post-migration - -After the migration is complete, you need to go through a series of post-migration tasks to ensure that everything is functioning as smoothly and efficiently as possible. - -### Remediate applications - -After the data is migrated to the target environment, all the applications that formerly consumed the source need to start consuming the target. 
Accomplishing this will in some cases require changes to the applications. - - -### Perform tests - -Testing consists of the following activities: - -1. **Develop validation tests**: To test database migration, you need to use SQL queries. You must create the validation queries to run against both the source and the target databases. Your validation queries should cover the scope you have defined. -1. **Set up the test environment**: The test environment should contain a copy of the source database and the target database. Be sure to isolate the test environment. -1. **Run validation tests**: Run the validation tests against the source and the target, and then analyze the results. -1. **Run performance tests**: Run performance tests against the source and the target, and then analyze and compare the results. - -## Advanced features - -Be sure to take advantage of the advanced cloud-based features offered by Azure SQL Managed Instance, such as [built-in high availability](../../database/high-availability-sla.md), [threat detection](../../database/azure-defender-for-sql.md), and [monitoring and tuning your workload](../../database/monitor-tune-overview.md). - -Some SQL Server features are only available when the [database compatibility level](/sql/relational-databases/databases/view-or-change-the-compatibility-level-of-a-database) is changed to the latest compatibility level. - -## Migration assets - -For additional assistance, see the following resources, which were developed in support of a real-world migration project engagement: - -|Asset |Description | -|---------|---------| -|[Data workload assessment model and tool](https://www.microsoft.com/download/details.aspx?id=103130)| This tool provides suggested "best fit" target platforms, cloud readiness, and application/database remediation level for a given workload. 
It offers simple, one-click calculation and report generation that helps to accelerate large estate assessments by providing and automated and uniform target platform decision process.| -|[Db2 zOS data assets discovery and assessment package](https://www.microsoft.com/download/details.aspx?id=103108)|After running the SQL script on a database, you can export the results to a file on the file system. Several file formats are supported, including \*.csv, so that you can capture the results in external tools such as spreadsheets. This method can be useful if you want to easily share results with teams that do not have the workbench installed.| -|[IBM Db2 LUW inventory scripts and artifacts](https://www.microsoft.com/download/details.aspx?id=103109)|This asset includes a SQL query that hits IBM Db2 LUW version 11.1 system tables and provides a count of objects by schema and object type, a rough estimate of "raw data" in each schema, and the sizing of tables in each schema, with results stored in a CSV format.| -|[IBM Db2 to SQL MI - Database Compare utility](https://www.microsoft.com/download/details.aspx?id=103016)|The Database Compare utility is a Windows console application that you can use to verify that the data is identical both on source and target platforms. You can use the tool to efficiently compare data down to the row or column level in all or selected tables, rows, and columns.| - -The Data SQL Engineering team developed these resources. This team's core charter is to unblock and accelerate complex modernization for data platform migration projects to Microsoft's Azure data platform. - -## Next steps - -- For Microsoft and third-party services and tools to assist you with various database and data migration scenarios, see [Service and tools for data migration](../../../dms/dms-tools-matrix.md). 
- -- To learn more about Azure SQL Managed Instance, see: - - [An overview of SQL Managed Instance](../../managed-instance/sql-managed-instance-paas-overview.md) - - [Azure total cost of ownership calculator](https://azure.microsoft.com/pricing/tco/calculator/) - - -- To learn more about the framework and adoption cycle for cloud migrations, see: - - [Cloud Adoption Framework for Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/contoso-migration-scale) - - [Best practices for costing and sizing workloads migrated to Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/migrate-best-practices-costs) - -- To assess the application access layer, see [Data Access Migration Toolkit](https://marketplace.visualstudio.com/items?itemName=ms-databasemigration.data-access-migration-toolkit). -- For details on how to perform data access layer A/B testing, see [Database Experimentation Assistant](/sql/dea/database-experimentation-assistant-overview). diff --git a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/compare-review-schema-structure.png b/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/compare-review-schema-structure.png deleted file mode 100644 index 927eb9d8654ed..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/compare-review-schema-structure.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/compare-schema-in-ssms.png b/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/compare-schema-in-ssms.png deleted file mode 100644 index c7689a5df9192..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/compare-schema-in-ssms.png and /dev/null differ diff --git 
a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/connect-to-db2.png b/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/connect-to-db2.png deleted file mode 100644 index b12d5b5a9d70d..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/connect-to-db2.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/connect-to-sql-managed-instance.png b/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/connect-to-sql-managed-instance.png deleted file mode 100644 index 151074af8ba5c..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/connect-to-sql-managed-instance.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/convert-schema.png b/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/convert-schema.png deleted file mode 100644 index d3843a5fb3976..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/convert-schema.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/create-report.png b/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/create-report.png deleted file mode 100644 index 36dbb44521353..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/create-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/data-migration-report.png 
b/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/data-migration-report.png deleted file mode 100644 index 3af14bab177e8..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/data-migration-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/migrate-data.png b/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/migrate-data.png deleted file mode 100644 index 2ef0a8d8ae521..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/migrate-data.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/new-project.png b/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/new-project.png deleted file mode 100644 index 21d24207304a3..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/new-project.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/report.png b/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/report.png deleted file mode 100644 index ddc49a827920b..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/synchronize-with-database.png b/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/synchronize-with-database.png deleted file mode 100644 index 91d87653a0833..0000000000000 Binary files 
a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/synchronize-with-database.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/type-mapping.png b/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/type-mapping.png deleted file mode 100644 index 5ffc452ba3d90..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/db2-to-managed-instance-guide/type-mapping.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/assessment-report.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/assessment-report.png deleted file mode 100644 index ea37c13dc18ce..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/assessment-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/connect-to-oracle.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/connect-to-oracle.png deleted file mode 100644 index 43a435dfdba07..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/connect-to-oracle.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/connect-to-sql-managed-instance.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/connect-to-sql-managed-instance.png deleted file mode 100644 index 67ce0c5846bfe..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/connect-to-sql-managed-instance.png and /dev/null differ diff --git 
a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/convert-schema.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/convert-schema.png deleted file mode 100644 index 5a2990b25ca83..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/convert-schema.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/create-report.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/create-report.png deleted file mode 100644 index 154c37ea51e6e..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/create-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/data-migration-report.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/data-migration-report.png deleted file mode 100644 index 5e3a791feab93..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/data-migration-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/migrate-data.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/migrate-data.png deleted file mode 100644 index b74a727907b5c..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/migrate-data.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/new-project.png 
b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/new-project.png deleted file mode 100644 index 1a3d6333ca83d..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/new-project.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/procedure-comparison.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/procedure-comparison.png deleted file mode 100644 index 3807b8344bdff..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/procedure-comparison.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/report-review.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/report-review.png deleted file mode 100644 index 4e2a1ac036018..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/report-review.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/select-schema.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/select-schema.png deleted file mode 100644 index 3b01bf01e2dd6..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/select-schema.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/ssma-tester-new.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/ssma-tester-new.png deleted file mode 100644 index e1758e05b28de..0000000000000 Binary files 
a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/ssma-tester-new.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/synchronize-with-database-review.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/synchronize-with-database-review.png deleted file mode 100644 index 5f36d383b2c13..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/synchronize-with-database-review.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/synchronize-with-database.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/synchronize-with-database.png deleted file mode 100644 index dcfea9bfa8186..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/synchronize-with-database.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/table-comparison.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/table-comparison.png deleted file mode 100644 index 2b9e61a6515ab..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/table-comparison.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/test-call-ordering.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/test-call-ordering.png deleted file mode 100644 index c479a8c763962..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/test-call-ordering.png and 
/dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-finalize-case.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-finalize-case.png deleted file mode 100644 index de1eee246279e..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-finalize-case.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-init-test-case.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-init-test-case.png deleted file mode 100644 index 2e5a1df10f3b8..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-init-test-case.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-oracle-connect.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-oracle-connect.png deleted file mode 100644 index e190527dd6ecf..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-oracle-connect.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-repo-run.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-repo-run.png deleted file mode 100644 index 05daa7a7f739e..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-repo-run.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-run-status.png 
b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-run-status.png deleted file mode 100644 index e5bb3046c70ba..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-run-status.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-run-test-case.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-run-test-case.png deleted file mode 100644 index a99512c2a3c80..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-run-test-case.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-select-configure-affected.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-select-configure-affected.png deleted file mode 100644 index f1a66ed10d5b1..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-select-configure-affected.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-select-configure-objects.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-select-configure-objects.png deleted file mode 100644 index 15d1b666152bf..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-select-configure-objects.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-sqlmi-connect.png 
b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-sqlmi-connect.png deleted file mode 100644 index a9889ce8f999f..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-sqlmi-connect.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-test-failed.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-test-failed.png deleted file mode 100644 index dcb5be28184b3..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-test-failed.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-test-repo.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-test-repo.png deleted file mode 100644 index 9db1b8913da8a..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-test-repo.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-test-result.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-test-result.png deleted file mode 100644 index 84b9360dd793d..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-test-result.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-test-success.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-test-success.png deleted file mode 100644 index 
b9f09614c671e..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/tester-test-success.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/type-mappings.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/type-mappings.png deleted file mode 100644 index 29ec392bef112..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/type-mappings.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/validate-data.png b/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/validate-data.png deleted file mode 100644 index 26f002b0298fa..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/oracle-to-managed-instance-guide/validate-data.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/sql-server-to-managed-instance-overview/managed-instance-sizing.png b/articles/azure-sql/migration-guides/managed-instance/media/sql-server-to-managed-instance-overview/managed-instance-sizing.png deleted file mode 100644 index df6cb150be211..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/sql-server-to-managed-instance-overview/managed-instance-sizing.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/sql-server-to-managed-instance-overview/migration-flow.png b/articles/azure-sql/migration-guides/managed-instance/media/sql-server-to-managed-instance-overview/migration-flow.png deleted file mode 100644 index 3d3b7fe601e52..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/sql-server-to-managed-instance-overview/migration-flow.png 
and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/sql-server-to-managed-instance-overview/migration-process-flow-small.png b/articles/azure-sql/migration-guides/managed-instance/media/sql-server-to-managed-instance-overview/migration-process-flow-small.png deleted file mode 100644 index 1b04502dc966a..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/sql-server-to-managed-instance-overview/migration-process-flow-small.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/sql-server-to-managed-instance-overview/migration-process-sql-managed-instance-steps.png b/articles/azure-sql/migration-guides/managed-instance/media/sql-server-to-managed-instance-overview/migration-process-sql-managed-instance-steps.png deleted file mode 100644 index 82aedd7e1de95..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/sql-server-to-managed-instance-overview/migration-process-sql-managed-instance-steps.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/sql-server-to-managed-instance-overview/migration-process.png b/articles/azure-sql/migration-guides/managed-instance/media/sql-server-to-managed-instance-overview/migration-process.png deleted file mode 100644 index 82aedd7e1de95..0000000000000 Binary files a/articles/azure-sql/migration-guides/managed-instance/media/sql-server-to-managed-instance-overview/migration-process.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/media/sql-server-to-managed-instance-overview/migration-restore.png b/articles/azure-sql/migration-guides/managed-instance/media/sql-server-to-managed-instance-overview/migration-restore.png deleted file mode 100644 index b304ed5c7aaff..0000000000000 Binary files 
a/articles/azure-sql/migration-guides/managed-instance/media/sql-server-to-managed-instance-overview/migration-restore.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/managed-instance/oracle-to-managed-instance-guide.md b/articles/azure-sql/migration-guides/managed-instance/oracle-to-managed-instance-guide.md deleted file mode 100644 index 104310312b4f9..0000000000000 --- a/articles/azure-sql/migration-guides/managed-instance/oracle-to-managed-instance-guide.md +++ /dev/null @@ -1,278 +0,0 @@ ---- -title: "Oracle to Azure SQL Managed Instance: Migration guide" -description: In this guide, you learn how to migrate your Oracle schemas to Azure SQL Managed Instance by using SQL Server Migration Assistant for Oracle. -ms.service: sql-managed-instance -ms.subservice: migration-guide -ms.custom: -ms.devlang: -ms.topic: how-to -author: mokabiru -ms.author: mokabiru -ms.reviewer: mathoma -ms.date: 11/06/2020 ---- -# Migration guide: Oracle to Azure SQL Managed Instance - -[!INCLUDE[appliesto-sqldb-sqlmi](../../includes/appliesto-sqlmi.md)] - - This guide teaches you to migrate your Oracle schemas to Azure SQL Managed Instance by using SQL Server Migration Assistant for Oracle. - -For other migration guides, see [Azure Database Migration Guides](/data-migration). - -## Prerequisites - -Before you begin migrating your Oracle schema to SQL Managed Instance: - -- Verify your source environment is supported. -- Download [SSMA for Oracle](https://www.microsoft.com/download/details.aspx?id=54258). -- Have a [SQL Managed Instance](../../managed-instance/instance-create-quickstart.md) target. -- Obtain the [necessary permissions for SSMA for Oracle](/sql/ssma/oracle/connecting-to-oracle-database-oracletosql) and [provider](/sql/ssma/oracle/connect-to-oracle-oracletosql). - -## Pre-migration - -After you've met the prerequisites, you're ready to discover the topology of your environment and assess the feasibility of your migration. 
This part of the process involves conducting an inventory of the databases that you need to migrate, assessing those databases for potential migration issues or blockers, and then resolving any items you might have uncovered. - -### Assess - -By using SSMA for Oracle, you can review database objects and data, assess databases for migration, migrate database objects to SQL Managed Instance, and then finally migrate data to the database. - -To create an assessment: - -1. Open [SSMA for Oracle](https://www.microsoft.com/download/details.aspx?id=54258). -1. Select **File**, and then select **New Project**. -1. Enter a project name and a location to save your project. Then select **Azure SQL Managed Instance** as the migration target from the drop-down list and select **OK**. - - ![Screenshot that shows New Project.](./media/oracle-to-managed-instance-guide/new-project.png) - -1. Select **Connect to Oracle**. Enter values for Oracle connection details in the **Connect to Oracle** dialog box. - - ![Screenshot that shows Connect to Oracle.](./media/oracle-to-managed-instance-guide/connect-to-oracle.png) - -1. Select the Oracle schemas you want to migrate. - - ![Screenshot that shows selecting Oracle schema.](./media/oracle-to-managed-instance-guide/select-schema.png) - -1. In **Oracle Metadata Explorer**, right-click the Oracle schema you want to migrate and then select **Create Report** to generate an HTML report. Instead, you can select a database and then select the **Create Report** tab. - - ![Screenshot that shows Create Report.](./media/oracle-to-managed-instance-guide/create-report.png) - -1. Review the HTML report to understand conversion statistics and any errors or warnings. You can also open the report in Excel to get an inventory of Oracle objects and the effort required to perform schema conversions. The default location for the report is in the report folder within SSMAProjects. 
- - For example, see `drive:\\Documents\SSMAProjects\MyOracleMigration\report\report_2020_11_12T02_47_55\`. - - ![Screenshot that shows an Assessment report.](./media/oracle-to-managed-instance-guide/assessment-report.png) - -### Validate the data types - -Validate the default data type mappings and change them based on requirements if necessary. To do so, follow these steps: - -1. In SSMA for Oracle, select **Tools**, and then select **Project Settings**. -1. Select the **Type Mapping** tab. - - ![Screenshot that shows Type Mapping.](./media/oracle-to-managed-instance-guide/type-mappings.png) - -1. You can change the type mapping for each table by selecting the table in **Oracle Metadata Explorer**. - -### Convert the schema - -To convert the schema: - -1. (Optional) Add dynamic or ad-hoc queries to statements. Right-click the node, and then select **Add statements**. -1. Select the **Connect to Azure SQL Managed Instance** tab. - 1. Enter connection details to connect your database in **SQL Database Managed Instance**. - 1. Select your target database from the drop-down list, or enter a new name, in which case a database will be created on the target server. - 1. Enter authentication details, and select **Connect**. - - ![Screenshot that shows Connect to Azure SQL Managed Instance.](./media/oracle-to-managed-instance-guide/connect-to-sql-managed-instance.png) - -1. In **Oracle Metadata Explorer**, right-click the Oracle schema and then select **Convert Schema**. Or, you can select your schema and then select the **Convert Schema** tab. - - ![Screenshot that shows Convert Schema.](./media/oracle-to-managed-instance-guide/convert-schema.png) - -1. After the conversion finishes, compare and review the converted objects to the original objects to identify potential problems and address them based on the recommendations. - - ![Screenshot that shows comparing table recommendations.](./media/oracle-to-managed-instance-guide/table-comparison.png) - -1. 
Compare the converted Transact-SQL text to the original code, and review the recommendations. - - ![Screenshot that shows comparing procedure recommendations.](./media/oracle-to-managed-instance-guide/procedure-comparison.png) - -1. In the output pane, select **Review results** and review the errors in the **Error List** pane. -1. Save the project locally for an offline schema remediation exercise. On the **File** menu, select **Save Project**. This step gives you an opportunity to evaluate the source and target schemas offline and perform remediation before you publish the schema to SQL Managed Instance. - -## Migrate - -After you've completed assessing your databases and addressing any discrepancies, the next step is to run the migration process. Migration involves two steps: publishing the schema and migrating the data. - -To publish your schema and migrate your data: - -1. Publish the schema by right-clicking the database from the **Databases** node in **Azure SQL Managed Instance Metadata Explorer** and selecting **Synchronize with Database**. - - ![Screenshot that shows Synchronize with Database.](./media/oracle-to-managed-instance-guide/synchronize-with-database.png) - - -1. Review the mapping between your source project and your target. - - ![Screenshot that shows Synchronize with the Database review.](./media/oracle-to-managed-instance-guide/synchronize-with-database-review.png) - -1. Migrate the data by right-clicking the schema or object you want to migrate in **Oracle Metadata Explorer** and selecting **Migrate Data**. Or, you can select the **Migrate Data** tab. To migrate data for an entire database, select the check box next to the database name. To migrate data from individual tables, expand the database, expand **Tables**, and then select the checkboxes next to the tables. To omit data from individual tables, clear the checkboxes. - - ![Screenshot that shows Migrate Data.](./media/oracle-to-managed-instance-guide/migrate-data.png) - -1. 
Enter connection details for both Oracle and SQL Managed Instance. -1. After the migration is completed, view the **Data Migration Report**. - - ![Screenshot that shows Data Migration Report.](./media/oracle-to-managed-instance-guide/data-migration-report.png) - -1. Connect to your instance of SQL Managed Instance by using [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms), and validate the migration by reviewing the data and schema. - - ![Screenshot that shows validation in SSMA for Oracle.](./media/oracle-to-managed-instance-guide/validate-data.png) - -Or, you can also use SQL Server Integration Services to perform the migration. To learn more, see: - -- [Getting started with SQL Server Integration Services](/sql/integration-services/sql-server-integration-services) -- [SQL Server Integration Services for Azure and Hybrid Data Movement](https://download.microsoft.com/download/D/2/0/D20E1C5F-72EA-4505-9F26-FEF9550EFD44/SSIS%20Hybrid%20and%20Azure.docx) - -## Post-migration - -After you've successfully completed the *migration* stage, you need to complete a series of post-migration tasks to ensure that everything is functioning as smoothly and efficiently as possible. - -### Remediate applications - -After the data is migrated to the target environment, all the applications that formerly consumed the source need to start consuming the target. Accomplishing this step will require changes to the applications in some cases. - -The [Data Access Migration Toolkit](https://marketplace.visualstudio.com/items?itemName=ms-databasemigration.data-access-migration-toolkit) is an extension for Visual Studio Code that allows you to analyze your Java source code and detect data access API calls and queries. The toolkit provides you with a single-pane view of what needs to be addressed to support the new database back end. 
To learn more, see the [Migrate our Java application from Oracle](https://techcommunity.microsoft.com/t5/microsoft-data-migration/migrate-your-java-applications-from-oracle-to-sql-server-with/ba-p/368727) blog post. - -### Perform tests - -The test approach to database migration consists of the following activities: - -1. **Develop validation tests**: To test the database migration, you need to use SQL queries. You must create the validation queries to run against both the source and the target databases. Your validation queries should cover the scope you've defined. -2. **Set up a test environment**: The test environment should contain a copy of the source database and the target database. Be sure to isolate the test environment. -3. **Run validation tests**: Run validation tests against the source and the target, and then analyze the results. -4. **Run performance tests**: Run performance tests against the source and the target, and then analyze and compare the results. - -### Validate migrated objects - -Microsoft SQL Server Migration Assistant for Oracle Tester (SSMA Tester) allows you to test migrated database objects. The SSMA Tester is used to verify that converted objects behave in the same way. - -#### Create test case - -1. Open SSMA for Oracle, select **Tester** followed by **New Test Case**. - - :::image type="content" source="./media/oracle-to-managed-instance-guide/ssma-tester-new.png" alt-text="Screenshot that shows new test case."::: - -1. On the Test Case wizard, provide the following information: - - **Name:** Enter the name to identify the test case. - - **Creation date:** Today's current date, defined automatically. - - **Last Modified date:** Filled in automatically, should not be changed. - - **Description:** Enter any additional information to identify the purpose of the test case. 
- - :::image type="content" source="./media/oracle-to-managed-instance-guide/tester-init-test-case.png" alt-text="Screenshot that shows step to initialize a test case."::: - -1. Select the objects that are part of the test case from the Oracle object tree located in the left side. - - :::image type="content" source="./media/oracle-to-managed-instance-guide/tester-select-configure-objects.png" alt-text="Screenshot that shows step to select and configure object."::: - - In this example, stored procedure `ADD_REGION` and table `REGION` is selected. - - To learn more, see [Selecting and configuring objects to test.](/sql/ssma/oracle/selecting-and-configuring-objects-to-test-oracletosql) - -1. Next, select the tables, foreign keys and other dependent objects from the Oracle object tree in the left window. - - :::image type="content" source="./media/oracle-to-managed-instance-guide/tester-select-configure-affected.png" alt-text="Screenshot that shows step to select and configure affected object."::: - - To learn more, see [Selecting and configuring affected objects.](/sql/ssma/oracle/selecting-and-configuring-affected-objects-oracletosql) - -1. Review the evaluation sequence of objects. Change the order by clicking the buttons in the grid. - - :::image type="content" source="./media/oracle-to-managed-instance-guide/test-call-ordering.png" alt-text="Screenshot that shows step to sequence test object execution."::: - -1. Finalize the test case by reviewing the information provided in the previous steps.Configure the test execution options based on the test scenario. - - :::image type="content" source="./media//oracle-to-managed-instance-guide/tester-finalize-case.png" alt-text="Screenshot that shows step to finalize object."::: - - For more information on test case settings,[Finishing test case preparation](/sql/ssma/oracle/finishing-test-case-preparation-oracletosql) - -1. Click on finish to create the test case. 
- - :::image type="content" source="./media/oracle-to-managed-instance-guide/tester-test-repo.png" alt-text="Screenshot that shows step to test repo."::: - -#### Run test case - -When SSMA Tester runs a test case, the test engine executes the objects selected for testing and generates a verification report. - -1. Select the test case from test repository and then click run. - - :::image type="content" source="./media/oracle-to-managed-instance-guide/tester-repo-run.png" alt-text="Screenshot that shows to review test repo."::: - -1. Review the launch test case and click run. - - :::image type="content" source="./media/oracle-to-managed-instance-guide/tester-run-test-case.png" alt-text="Screenshot that shows step to launch test case."::: - -1. Next, provide Oracle source credentials. Click connect after entering the credentials. - - :::image type="content" source="./media/oracle-to-managed-instance-guide/tester-oracle-connect.png" alt-text="Screenshot that shows step to connect to oracle source."::: - -1. Provide target SQL Server credentials and click connect. - - :::image type="content" source="./media/oracle-to-managed-instance-guide/tester-sqlmi-connect.png" alt-text="Screenshot that shows step to connect to sql target."::: - - On success, the test case moves to initialization stage. - -1. A real-time progress bar shows the execution status of the test run. - - :::image type="content" source="./media/oracle-to-managed-instance-guide/tester-run-status.png" alt-text="Screenshot that shows tester test progress."::: - -1. Review the report after the test is completed. The report provides the statistics, any errors during the test run and a detail report. - - :::image type="content" source="./media/oracle-to-managed-instance-guide/tester-test-result.png" alt-text="Screenshot that shows a sample tester test report"::: - -1. Click details to get more information. - - Example of positive data validation. 
- - :::image type="content" source="./media/oracle-to-managed-instance-guide/tester-test-success.png" alt-text="Screenshot that shows a sample tester success report."::: - - Example of failed data validation. - - :::image type="content" source="./media/oracle-to-managed-instance-guide/tester-test-failed.png" alt-text="Screenshot that shows tester failure report."::: - -### Optimize - -The post-migration phase is crucial for reconciling any data accuracy issues, verifying completeness, and addressing performance issues with the workload. - -> [!NOTE] -> For more information about these issues and the steps to mitigate them, see the [Post-migration validation and optimization guide](/sql/relational-databases/post-migration-validation-and-optimization-guide). - -## Migration assets - -For more assistance with completing this migration scenario, see the following resources. They were developed in support of a real-world migration project engagement. - -| **Title/link** | **Description** | -| ------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [Data Workload Assessment Model and Tool](https://www.microsoft.com/download/details.aspx?id=103130) | This tool provides suggested "best fit" target platforms, cloud readiness, and application or database remediation level for a given workload. It offers simple, one-click calculation and report generation that helps to accelerate large estate assessments by providing an automated and uniform target platform decision process. 
| -| [Oracle Inventory Script Artifacts](https://www.microsoft.com/download/details.aspx?id=103121) | This asset includes a PL/SQL query that hits Oracle system tables and provides a count of objects by schema type, object type, and status. It also provides a rough estimate of raw data in each schema and the sizing of tables in each schema, with results stored in a CSV format. | -| [Automate SSMA Oracle Assessment Collection & Consolidation](https://www.microsoft.com/download/details.aspx?id=103120) | This set of resources uses a .csv file as entry (sources.csv in the project folders) to produce the xml files that are needed to run an SSMA assessment in console mode. The source.csv is provided by the customer based on an inventory of existing Oracle instances. The output files are AssessmentReportGeneration_source_1.xml, ServersConnectionFile.xml, and VariableValueFile.xml.| -|[Oracle to SQL MI - Database Compare utility](https://www.microsoft.com/download/details.aspx?id=103016)|SSMA for Oracle Tester is the recommended tool to automatically validate the database object conversion and data migration, and it's a superset of Database Compare functionality.

    If you're looking for an alternative data validation option, you can use the Database Compare utility to compare data down to the row or column level in all or selected tables, rows, and columns.| - -The Data SQL Engineering team developed these resources. This team's core charter is to unblock and accelerate complex modernization for data platform migration projects to Microsoft's Azure data platform. - -## Next steps - -- For a matrix of Microsoft and third-party services and tools that are available to assist you with various database and data migration scenarios and specialty tasks, see [Services and tools for data migration](../../../dms/dms-tools-matrix.md). - -- To learn more about SQL Managed Instance, see: - - [An overview of Azure SQL Managed Instance](../../managed-instance/sql-managed-instance-paas-overview.md) - - [Azure Total Cost of Ownership (TCO) Calculator](https://azure.microsoft.com/pricing/tco/calculator/) - -- To learn more about the framework and adoption cycle for cloud migrations, see: - - [Cloud Adoption Framework for Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/contoso-migration-scale) - - [Best practices for costing and sizing workloads for migration to Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/migrate-best-practices-costs) - -- For video content, see: - - [Overview of the migration journey and the tools and services recommended for performing assessment and migration](https://azure.microsoft.com/resources/videos/overview-of-migration-and-recommended-tools-services/) diff --git a/articles/azure-sql/migration-guides/managed-instance/sql-server-to-managed-instance-guide.md b/articles/azure-sql/migration-guides/managed-instance/sql-server-to-managed-instance-guide.md deleted file mode 100644 index 0df3593ec9230..0000000000000 --- a/articles/azure-sql/migration-guides/managed-instance/sql-server-to-managed-instance-guide.md +++ /dev/null @@ -1,291 +0,0 @@ ---- -title: "SQL Server to Azure 
SQL Managed Instance: Migration guide" -description: This guide teaches you to migrate your SQL Server databases to Azure SQL Managed Instance. -ms.service: sql-managed-instance -ms.subservice: migration-guide -ms.custom: -ms.devlang: -ms.topic: how-to -author: mokabiru -ms.author: mokabiru -ms.reviewer: mathoma, danil, randolphwest -ms.date: 04/11/2022 ---- -# Migration guide: SQL Server to Azure SQL Managed Instance - -[!INCLUDE[appliesto-sqldb-sqlmi](../../includes/appliesto-sqlmi.md)] - -This guide helps you migrate your SQL Server instance to Azure SQL Managed Instance. - -You can migrate SQL Server running on-premises or on: - -- SQL Server on Virtual Machines -- Amazon Web Services (AWS) EC2 -- Amazon Relational Database Service (AWS RDS) -- Compute Engine (Google Cloud Platform - GCP) -- Cloud SQL for SQL Server (Google Cloud Platform – GCP) - -For more migration information, see the [migration overview](sql-server-to-managed-instance-overview.md). For other migration guides, see [Database Migration](/data-migration). - -:::image type="content" source="media/sql-server-to-managed-instance-overview/migration-process-flow-small.png" alt-text="Migration process flow"::: - -## Prerequisites - -To migrate your SQL Server to Azure SQL Managed Instance, make sure you have: - -- Chosen a [migration method](sql-server-to-managed-instance-overview.md#compare-migration-options) and the corresponding tools for your method. -- Install the [Azure SQL migration extension for Azure Data Studio](/sql/azure-data-studio/extensions/azure-sql-migration-extension). -- Installed the [Data Migration Assistant (DMA)](https://www.microsoft.com/download/details.aspx?id=53595) on a machine that can connect to your source SQL Server. -- Created a target [Azure SQL Managed Instance](../../managed-instance/instance-create-quickstart.md) -- Configured connectivity and proper permissions to access both source and target. 
-- Reviewed the SQL Server database engine features [available in Azure SQL Managed Instance](../../database/features-comparison.md). - -## Pre-migration - -After you've verified that your source environment is supported, start with the pre-migration stage. Discover all of the existing data sources, assess migration feasibility, and identify any blocking issues that might prevent your migration. - -### Discover - -In the Discover phase, scan the network to identify all SQL Server instances and features used by your organization. - -Use [Azure Migrate](../../../migrate/migrate-services-overview.md) to assess migration suitability of on-premises servers, perform performance-based sizing, and provide cost estimations for running them in Azure. - -Alternatively, use the [Microsoft Assessment and Planning Toolkit (the "MAP Toolkit")](https://www.microsoft.com/download/details.aspx?id=7826) to assess your current IT infrastructure. The toolkit provides a powerful inventory, assessment, and reporting tool to simplify the migration planning process. - -For more information about tools available to use for the Discover phase, see [Services and tools available for data migration scenarios](../../../dms/dms-tools-matrix.md). - -After data sources have been discovered, assess any on-premises SQL Server instance(s) that can be migrated to Azure SQL Managed Instance to identify migration blockers or compatibility issues. -Proceed to the following steps to assess and migrate databases to Azure SQL Managed Instance: - -:::image type="content" source="media/sql-server-to-managed-instance-overview/migration-process-sql-managed-instance-steps.png" alt-text="Steps for migration to Azure SQL Managed Instance"::: - -- [Assess SQL Managed Instance compatibility](#assess) where you should ensure that there are no blocking issues that can prevent your migrations. 
- This step also includes creation of a [performance baseline](sql-server-to-managed-instance-performance-baseline.md#create-a-baseline) to determine resource usage on your source SQL Server instance. This step is needed if you want to deploy a properly sized managed instance and verify that performance after migration isn't affected. -- [Choose app connectivity options](../../managed-instance/connect-application-instance.md). -- [Deploy to an optimally sized managed instance](#deploy-to-an-optimally-sized-managed-instance) where you'll choose technical characteristics (number of vCores, amount of memory) and performance tier (Business Critical, General Purpose) of your managed instance. -- [Select migration method and migrate](sql-server-to-managed-instance-overview.md#compare-migration-options) where you migrate your databases using offline migration or online migration options. -- [Monitor and remediate applications](#monitor-and-remediate-applications) to ensure that you have expected performance. - -### Assess - -[!INCLUDE [assess-estate-with-azure-migrate](../../../../includes/azure-migrate-to-assess-sql-data-estate.md)] - -Determine whether SQL Managed Instance is compatible with the database requirements of your application. SQL Managed Instance is designed to provide easy lift and shift migration for most existing applications that use SQL Server. However, you may sometimes require features or capabilities that aren't yet supported and the cost of implementing a workaround is too high. - -The [Azure SQL migration extension for Azure Data Studio](../../../dms/migration-using-azure-data-studio.md) provides a seamless wizard based experience to assess, get Azure recommendations and migrate your SQL Server databases on-premises to SQL Server on Azure Virtual Machines. 
Besides, highlighting any migration blockers or warnings, the extension also includes an option for Azure recommendations to collect your databases' performance data [to recommend a right-sized Azure SQL Managed Instance](../../../dms/ads-sku-recommend.md) to meet the performance needs of your workload (with the least price). - -You can also use the Data Migration Assistant (version 4.1 and later) to assess databases to get: - -- [Azure target recommendations](/sql/dma/dma-assess-sql-data-estate-to-sqldb) -- [Azure SKU recommendations](/sql/dma/dma-sku-recommend-sql-db) - -To assess your environment using the Database Migration Assessment, follow these steps: - -1. Open the [Data Migration Assistant (DMA)](https://www.microsoft.com/download/details.aspx?id=53595). -1. Select **File** and then choose **New assessment**. -1. Specify a project name, select SQL Server as the source server type, and then select Azure SQL Managed Instance as the target server type. -1. Select the type(s) of assessment reports that you want to generate. For example, database compatibility and feature parity. Based on the type of assessment, the permissions required on the source SQL Server can be different. DMA will highlight the permissions required for the chosen advisor before running the assessment. - - The **feature parity** category provides a comprehensive set of recommendations, alternatives available in Azure, and mitigating steps to help you plan your migration project. (sysadmin permissions required) - - The **compatibility issues** category identifies partially supported or unsupported feature compatibility issues that might block migration, and recommendations to address them (`CONNECT SQL`, `VIEW SERVER STATE`, and `VIEW ANY DEFINITION` permissions required). -1. Specify the source connection details for your SQL Server and connect to the source database. -1. Select **Start assessment**. -1. 
When the process is complete, select and review the assessment reports for migration blocking and feature parity issues. The assessment report can also be exported to a file that can be shared with other teams or personnel in your organization. -1. Determine the database compatibility level that minimizes post-migration efforts. -1. Identify the best Azure SQL Managed Instance SKU for your on-premises workload. - -To learn more, see [Perform a SQL Server migration assessment with Data Migration Assistant](/sql/dma/dma-assesssqlonprem). - -If SQL Managed Instance isn't a suitable target for your workload, SQL Server on Azure VMs might be a viable alternative target for your business. - -#### Scaled assessments and analysis - -If you have multiple servers or databases that require Azure readiness assessment, you can automate the process by using scripts using one of the following options. To learn more about using scripting see [Migrate databases at scale using automation](../../../dms/migration-dms-powershell-cli.md). - -- [Az.DataMigration PowerShell module](/powershell/module/az.datamigration) -- [az datamigration CLI extension](/cli/azure/datamigration) -- [Data Migration Assistant command-line interface](/sql/dma/dma-commandline) - -Data Migration Assistant also supports consolidation of the assessment reports for analysis. If you have multiple servers and databases that need to be assessed and analyzed at scale to provide a wider view of the data estate, see the following links to learn more. 
- -- [Performing scaled assessments using PowerShell](/sql/dma/dma-consolidatereports) -- [Analyzing assessment reports using Power BI](/sql/dma/dma-consolidatereports#dma-reports) - -> [!IMPORTANT] -> ->Running assessments at scale for multiple databases can also be automated using [DMA's Command Line Utility](/sql/dma/dma-commandline) which also allows the results to be uploaded to [Azure Migrate](/sql/dma/dma-assess-sql-data-estate-to-sqldb#view-target-readiness-assessment-results) for further analysis and target readiness. - -### Deploy to an optimally sized managed instance - -You can use the [Azure SQL migration extension for Azure Data Studio](/sql/azure-data-studio/extensions/azure-sql-migration-extension) to get right-sized Azure SQL Managed Instance recommendation. The extension collects performance data from your source SQL Server instance to provide right-sized Azure recommendation that meets your workload's performance needs with minimal cost. To learn more, see [Get right-sized Azure recommendation for your on-premises SQL Server database(s)](../../../dms/ads-sku-recommend.md) - -Based on the information in the discover and assess phase, create an appropriately sized target SQL Managed Instance. You can do so by using the [Azure portal](../../managed-instance/instance-create-quickstart.md), [PowerShell](../../managed-instance/scripts/create-configure-managed-instance-powershell.md), or an [Azure Resource Manager (ARM) Template](../../managed-instance/create-template-quickstart.md). - -SQL Managed Instance is tailored for on-premises workloads that are planning to move to the cloud. It introduces a [purchasing model](../../database/service-tiers-vcore.md) that provides greater flexibility in selecting the right level of resources for your workloads. In the on-premises world, you're probably accustomed to sizing these workloads by using physical cores and IO bandwidth. 
The purchasing model for managed instance is based upon virtual cores, or "vCores," with additional storage and IO available separately. The vCore model is a simpler way to understand your compute requirements in the cloud versus what you use on-premises today. This purchasing model enables you to right-size your destination environment in the cloud. Some general guidelines that might help you to choose the right service tier and characteristics are described here: - -- Based on the baseline CPU usage, you can provision a managed instance that matches the number of cores that you're using on SQL Server, having in mind that CPU characteristics might need to be scaled to match [VM characteristics where the managed instance is installed](../../managed-instance/resource-limits.md#hardware-configuration-characteristics). -- Based on the baseline memory usage, choose [the service tier that has matching memory](../../managed-instance/resource-limits.md#hardware-configuration-characteristics). The amount of memory can't be directly chosen, so you would need to select the managed instance with the amount of vCores that has matching memory (for example, 5.1 GB/vCore in Gen5). -- Based on the baseline IO latency of the file subsystem, choose between the General Purpose (latency greater than 5 ms) and Business Critical (latency less than 3 ms) service tiers. -- Based on baseline throughput, pre-allocate the size of data or log files to get expected IO performance. - -You can choose compute and storage resources at deployment time and then change it afterward without introducing downtime for your application using the [Azure portal](../../database/scale-resources.md): - -:::image type="content" source="media/sql-server-to-managed-instance-overview/managed-instance-sizing.png" alt-text="Managed Instance Sizing"::: - -To learn how to create the VNet infrastructure and a managed instance, see [Create a managed instance](../../managed-instance/instance-create-quickstart.md). 
- -> [!IMPORTANT] -> -> It is important to keep your destination VNet and subnet in accordance with [managed instance VNet requirements](../../managed-instance/connectivity-architecture-overview.md#network-requirements). Any incompatibility can prevent you from creating new instances or using those that you already created. Learn more about [creating new](../../managed-instance/virtual-network-subnet-create-arm-template.md) and [configuring existing](../../managed-instance/vnet-existing-add-subnet.md) networks. - -## Migrate - -After you have completed tasks associated with the Pre-migration stage, you're ready to perform the schema and data migration. - -Migrate your data using your chosen [migration method](sql-server-to-managed-instance-overview.md#compare-migration-options). - -SQL Managed Instance targets user scenarios requiring mass database migration from on-premises or Azure VM database implementations. They are the optimal choice when you need to lift and shift the back end of the applications that regularly use instance level and/or cross-database functionalities. If this is your scenario, you can move an entire instance to a corresponding environment in Azure without the need to rearchitect your applications. - -To move SQL instances, you need to plan carefully: - -- The migration of all databases that need to be collocated (ones running on the same instance). -- The migration of instance-level objects that your application depends on, including logins, credentials, SQL Agent jobs and operators, and server-level triggers. - -SQL Managed Instance is a managed service that allows you to delegate some of the regular DBA activities to the platform as they're built in. Therefore, some instance-level data doesn't need to be migrated, such as maintenance jobs for regular backups or Always On configuration, as [high availability](../../database/high-availability-sla.md) is built in. 
- -This article covers two of the recommended migration options: - -- Azure SQL migration extension for Azure Data Studio - migration with near-zero downtime. -- Native `RESTORE DATABASE FROM URL` - uses native backups from SQL Server and requires some downtime. - -This guide describes these two most popular options - the Azure SQL migration extension for Azure Data Studio and native backup and restore. - -For other migration tools, see [Compare migration options](sql-server-to-managed-instance-overview.md#compare-migration-options). - -### Migrate using the Azure SQL migration extension for Azure Data Studio (minimal downtime) - -To perform a minimal downtime migration using Azure Data Studio, follow the high-level steps below. For a detailed step-by-step tutorial, see [Migrate SQL Server to an Azure SQL Managed Instance online using Azure Data Studio](../../../dms/tutorial-sql-server-managed-instance-online-ads.md): - -1. Download and install [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio) and the [Azure SQL migration extension](/sql/azure-data-studio/extensions/azure-sql-migration-extension). -1. Launch the Migrate to Azure SQL wizard in the extension in Azure Data Studio. -1. Select databases for assessment and view migration readiness or issues (if any). Additionally, collect performance data and get right-sized Azure recommendation. -1. Select your Azure account and your target Azure SQL Managed Instance from your subscription. -1. Select the location of your database backups. Your database backups can either be located on an on-premises network share or in an Azure storage blob container. -1. Create a new Azure Database Migration Service using the wizard in Azure Data Studio. If you've previously created an Azure Database Migration Service using Azure Data Studio, you can reuse the same service if desired. -1. 
*Optional*: If your backups are on an on-premises network share, download and install [self-hosted integration runtime](https://www.microsoft.com/download/details.aspx?id=39717) on a machine that can connect to the source SQL Server, and the location containing the backup files. -1. Start the database migration and monitor the progress in Azure Data Studio. You can also monitor the progress under the Azure Database Migration Service resource in Azure portal. -1. Complete the cutover. - 1. Stop all incoming transactions to the source database. - 1. Make application configuration changes to point to the target database in Azure SQL Managed Instance. - 1. Take any tail log backups for the source database in the backup location specified. - 1. Ensure all database backups have the status Restored in the monitoring details page. - 1. Select Complete cutover in the monitoring details page. - - -### Backup and restore - -One of the key capabilities of Azure SQL Managed Instance to enable quick and easy database migration is the native restore of database backup (`.bak`) files stored on [Azure Storage](https://azure.microsoft.com/services/storage/). Backing up and restoring are asynchronous operations based on the size of your database. - -The following diagram provides a high-level overview of the process: - -:::image type="content" source="./media/sql-server-to-managed-instance-overview/migration-restore.png" alt-text="Diagram shows SQL Server with an arrow labeled BACKUP / Upload to URL flowing to Azure Storage and a second arrow labeled RESTORE from URL flowing from Azure Storage to a SQL Managed Instance."::: - -> [!NOTE] -> -> The time to take the backup, upload it to Azure storage, and perform a native restore operation to Azure SQL Managed Instance is based on the size of the database. Factor a sufficient downtime to accommodate the operation for large databases. 
- -The following table provides more information regarding the methods you can use depending on -source SQL Server version you're running: - -|Step|SQL Engine and version|Backup/restore method| -|---|---|---| -|Put backup to Azure Storage|Prior to 2012 SP1 CU2|Upload .bak file directly to Azure Storage| -| |2012 SP1 CU2 - 2016|Direct backup using deprecated [WITH CREDENTIAL](/sql/t-sql/statements/restore-statements-transact-sql) syntax| -| |2016 and above|Direct backup using [WITH SAS CREDENTIAL](/sql/relational-databases/backup-restore/sql-server-backup-to-url)| -|Restore from Azure Storage to a managed instance| |[RESTORE FROM URL with SAS CREDENTIAL](../../managed-instance/restore-sample-database-quickstart.md)| - -> [!IMPORTANT] -> -> - When you're migrating a database protected by [Transparent Data Encryption](../../database/transparent-data-encryption-tde-overview.md) to a managed instance using native restore option, the corresponding certificate from the on-premises or Azure VM SQL Server needs to be migrated before database restore. For detailed steps, see [Migrate a TDE cert to a managed instance](../../managed-instance/tde-certificate-migrate.md). -> - Restore of system databases is not supported. To migrate instance-level objects (stored in `master` or `msdb` databases), we recommend to script them out and run T-SQL scripts on the destination instance. - -To migrate using backup and restore, follow these steps: - -1. Back up your database to Azure blob storage. For example, use [backup to url](/sql/relational-databases/backup-restore/sql-server-backup-to-url) in [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms). Use the [Microsoft Azure Tool](https://go.microsoft.com/fwlink/?LinkID=324399) to support databases earlier than SQL Server 2012 SP1 CU2. -1. Connect to your Azure SQL Managed Instance using SQL Server Management Studio. -1. 
Create a credential using a Shared Access Signature to access your Azure Blob storage account with your database backups. For example: - - ```sql - CREATE CREDENTIAL [https://mitutorials.blob.core.windows.net/databases] - WITH IDENTITY = 'SHARED ACCESS SIGNATURE' - , SECRET = 'sv=2017-11-09&ss=bfqt&srt=sco&sp=rwdlacup&se=2028-09-06T02:52:55Z&st=2018-09-04T18:52:55Z&spr=https&sig=WOTiM%2FS4GVF%2FEEs9DGQR9Im0W%2BwndxW2CQ7%2B5fHd7Is%3D' - ``` -1. Restore the backup from the Azure storage blob container. For example: - - ```sql - RESTORE DATABASE [TargetDatabaseName] FROM URL = - 'https://mitutorials.blob.core.windows.net/databases/WideWorldImporters-Standard.bak' - ``` - -1. Once restore completes, view the database in **Object Explorer** within SQL Server Management Studio. - -To learn more about this migration option, see [Restore a database to Azure SQL Managed Instance with SSMS](../../managed-instance/restore-sample-database-quickstart.md). - -> [!NOTE] -> -> A database restore operation is asynchronous and can be retried. You might get an error in SQL Server Management Studio if the connection breaks or a time-out expires. Azure SQL Database will keep trying to restore database in the background, and you can track the progress of the restore using the [sys.dm_exec_requests](/sql/relational-databases/system-dynamic-management-views/sys-dm-exec-requests-transact-sql) and [sys.dm_operation_status](/sql/relational-databases/system-dynamic-management-views/sys-dm-operation-status-azure-sql-database) views. - -## Data sync and cutover - -When using migration options that continuously replicate / sync data changes from source to the target, the source data and schema can change and drift from the target. During data sync, ensure that all changes on the source are captured and applied to the target during the migration process. - -After you verify that data is the same on both source and target, you can cut over from the source to the target environment. 
It's important to plan the cutover process with business / application teams to ensure that any interruption during cutover is minimal and doesn't affect business continuity. - -> [!IMPORTANT] -> -> For details on the specific steps associated with performing a cutover as part of migrations using DMS, see [Performing migration cutover](../../../dms/tutorial-sql-server-managed-instance-online.md#performing-migration-cutover). - -## Post-migration - -After you've successfully completed the migration stage, go through a series of post-migration tasks to ensure that everything is functioning smoothly and efficiently. - -The post-migration phase is crucial for reconciling any data accuracy issues, verifying completeness, and addressing performance issues with the workload. - -### Monitor and remediate applications -Once you've completed the migration to a managed instance, you should track the application behavior and performance of your workload. This process includes the following activities: - -- [Compare performance of the workload running on the managed instance](sql-server-to-managed-instance-performance-baseline.md#compare-performance) with the [performance baseline that you created on the source SQL Server instance](sql-server-to-managed-instance-performance-baseline.md#create-a-baseline). -- Continuously [monitor performance of your workload](sql-server-to-managed-instance-performance-baseline.md#monitor-performance) to identify potential issues and improvements. - -### Perform tests - -The test approach for database migration consists of the following activities: - -1. **Develop validation tests**: To test database migration, you need to use SQL queries. You must create the validation queries to run against both the source and the target databases. Your validation queries should cover the scope you've defined. -1. **Set up test environment**: The test environment should contain a copy of the source database and the target database. Be sure to isolate the test environment. 
-1. **Run validation tests**: Run the validation tests against the source and the target, and then analyze the results. -1. **Run performance tests**: Run performance tests against the source and the target, and then analyze and compare the results. - -## Use advanced features - -You can take advantage of the advanced cloud-based features offered by SQL Managed Instance, such as [built-in high availability](../../database/high-availability-sla.md), [threat detection](../../database/azure-defender-for-sql.md), and [monitoring and tuning your workload](../../database/monitor-tune-overview.md). - -[Azure SQL Analytics](/azure/azure-sql/database/monitor-tune-overview) allows you to monitor a large set of managed instances in a centralized manner. - -Some SQL Server features are only available once the [database compatibility level](/sql/relational-databases/databases/view-or-change-the-compatibility-level-of-a-database) is changed to the latest compatibility level (150). - -## Next steps - -- See [Services and tools for data migration](../../../dms/dms-tools-matrix.md) for a matrix of the Microsoft and third-party services and tools that are available to assist you with various database and data migration scenarios as well as specialty tasks. 
- -- To learn more about Azure SQL Managed Instance see: - - [Service Tiers in Azure SQL Managed Instance](../../managed-instance/sql-managed-instance-paas-overview.md#service-tiers) - - [Differences between SQL Server and Azure SQL Managed Instance](../../managed-instance/transact-sql-tsql-differences-sql-server.md) - - [Azure total Cost of Ownership Calculator](https://azure.microsoft.com/pricing/tco/calculator/) - -- To learn more about the framework and adoption cycle for Cloud migrations, see - - [Cloud Adoption Framework for Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/contoso-migration-scale) - - [Best practices for costing and sizing workloads migrate to Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/migrate-best-practices-costs) - -- To assess the Application access layer, see [Data Access Migration Toolkit (Preview)](https://marketplace.visualstudio.com/items?itemName=ms-databasemigration.data-access-migration-toolkit) - -- For details on how to perform Data Access Layer A/B testing see [Database Experimentation Assistant](/sql/dea/database-experimentation-assistant-overview). diff --git a/articles/azure-sql/migration-guides/managed-instance/sql-server-to-managed-instance-overview.md b/articles/azure-sql/migration-guides/managed-instance/sql-server-to-managed-instance-overview.md deleted file mode 100644 index 7c7f168391ba4..0000000000000 --- a/articles/azure-sql/migration-guides/managed-instance/sql-server-to-managed-instance-overview.md +++ /dev/null @@ -1,243 +0,0 @@ ---- -title: "SQL Server to SQL Managed Instance: Migration overview" -description: Learn about the tools and options available to migrate your SQL Server databases to Azure SQL Managed Instance. 
-ms.service: sql-managed-instance -ms.subservice: migration-guide -ms.custom: -ms.devlang: -ms.topic: how-to -author: mokabiru -ms.author: mokabiru -ms.reviewer: mathoma, danil, randolphwest -ms.date: 04/11/2022 ---- -# Migration overview: SQL Server to Azure SQL Managed Instance - -[!INCLUDE[appliesto--sqlmi](../../includes/appliesto-sqlmi.md)] - -Learn about the options and considerations for migrating your SQL Server databases to Azure SQL Managed Instance. - -You can migrate SQL Server databases running on-premises or on: - -- SQL Server on Azure Virtual Machines. -- Amazon Web Services (AWS) Elastic Compute Cloud (EC2). -- AWS Relational Database Service (RDS). -- Compute Engine in Google Cloud Platform (GCP). -- Cloud SQL for SQL Server in GCP. - -For other migration guides, see [Database Migration](/data-migration). - -## Overview - -[Azure SQL Managed Instance](../../managed-instance/sql-managed-instance-paas-overview.md) is a recommended target option for SQL Server workloads that require a fully managed service without having to manage virtual machines or their operating systems. SQL Managed Instance enables you to move your on-premises applications to Azure with minimal application or database changes. It offers complete isolation of your instances with native virtual network support. - -Be sure to review the SQL Server database engine features [available in Azure SQL Managed Instance](../../database/features-comparison.md) to validate the supportability of your migration target. - -## Considerations - -The key factors to consider when you're evaluating migration options are: -- Number of servers and databases -- Size of databases -- Acceptable business downtime during the migration process - -One of the key benefits of migrating your SQL Server databases to SQL Managed Instance is that you can choose to migrate the entire instance or just a subset of individual databases. 
Carefully plan to include the following in your migration process: -- All databases that need to be colocated to the same instance -- Instance-level objects required for your application, including logins, credentials, SQL Agent jobs and operators, and server-level triggers - -> [!NOTE] -> -> Azure SQL Managed Instance guarantees 99.99 percent availability, even in critical scenarios. Overhead caused by some features in SQL Managed Instance can't be disabled. For more information, see the [Key causes of performance differences between SQL Managed Instance and SQL Server](https://azure.microsoft.com/blog/key-causes-of-performance-differences-between-sql-managed-instance-and-sql-server/) blog entry. - -## Choose an appropriate target - -You can use the [Azure SQL migration extension for Azure Data Studio](/sql/azure-data-studio/extensions/azure-sql-migration-extension) to get right-sized Azure SQL Managed Instance recommendation. The extension collects performance data from your source SQL Server instance to provide right-sized Azure recommendation that meets your workload's performance needs with minimal cost. To learn more, see [Get right-sized Azure recommendation for your on-premises SQL Server database(s)](../../../dms/ads-sku-recommend.md) - -The following general guidelines can help you choose the right service tier and characteristics of SQL Managed Instance to help match your [performance baseline](sql-server-to-managed-instance-performance-baseline.md): - -- Use the CPU usage baseline to provision a managed instance that matches the number of cores that your instance of SQL Server uses. It might be necessary to scale resources to match the [hardware configuration characteristics](../../managed-instance/resource-limits.md#hardware-configuration-characteristics). -- Use the memory usage baseline to choose a [vCore option](../../managed-instance/resource-limits.md#service-tier-characteristics) that appropriately matches your memory allocation. 
-- Use the baseline I/O latency of the file subsystem to choose between the General Purpose (latency greater than 5 ms) and Business Critical (latency less than 3 ms) service tiers. -- Use the baseline throughput to preallocate the size of the data and log files to achieve expected I/O performance. - -You can choose compute and storage resources during deployment and then [change them afterward by using the Azure portal](../../database/scale-resources.md), without incurring downtime for your application. - -> [!IMPORTANT] -> -> Any discrepancy in the [virtual network requirements for managed instances](../../managed-instance/connectivity-architecture-overview.md#network-requirements) can prevent you from creating new instances or using existing ones. Learn more about [creating new](../../managed-instance/virtual-network-subnet-create-arm-template.md) and [configuring existing](../../managed-instance/vnet-existing-add-subnet.md) networks. - -Another key consideration in the selection of the target service tier in Azure SQL Managed Instance (General Purpose versus Business Critical) is the availability of certain features, like In-Memory OLTP, that are available only in the Business Critical tier. - -### SQL Server VM alternative - -Your business might have requirements that make [SQL Server on Azure Virtual Machines](../../virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md) a more suitable target than Azure SQL Managed Instance. - -If one of the following conditions applies to your business, consider moving to a SQL Server virtual machine (VM) instead: - -- You require direct access to the operating system or file system, such as to install third-party or custom agents on the same virtual machine with SQL Server. -- You have strict dependency on features that are still not supported, such as FileStream/FileTable, PolyBase, and cross-instance transactions. -- You need to stay at a specific version of SQL Server (2012, for example). 
-- Your compute requirements are much lower than a managed instance offers (one vCore, for example), and database consolidation is not an acceptable option. - -## Migration tools - -We recommend the following migration tools: - -|Technology | Description| -|---------|---------| -| [Azure SQL migration extension for Azure Data Studio](../../../dms/migration-using-azure-data-studio.md) | The Azure SQL migration extension for Azure Data Studio provides both the SQL Server assessment and migration capabilities in Azure Data Studio. It supports migrations in either online (for migrations that require minimal downtime) or offline (for migrations where downtime persists through the duration of the migration) modes. | -| [Azure Migrate](../../../migrate/how-to-create-azure-sql-assessment.md) | This Azure service helps you discover and assess your SQL data estate at scale on VMware. It provides Azure SQL deployment recommendations, target sizing, and monthly estimates. | -|[Azure Database Migration Service](../../../dms/tutorial-sql-server-to-managed-instance.md) | This Azure service supports migration in the offline mode for applications that can afford downtime during the migration process. Unlike the continuous migration in online mode, offline mode migration runs a one-time restore of a full database backup from the source to the target. | -|[Native backup and restore](../../managed-instance/restore-sample-database-quickstart.md) | SQL Managed Instance supports restore of native SQL Server database backups (.bak files). It's the easiest migration option for customers who can provide full database backups to Azure Storage.| -|[Log Replay Service](../../managed-instance/log-replay-service-migrate.md) | This cloud service is enabled for SQL Managed Instance based on SQL Server log-shipping technology. It's a migration option for customers who can provide full, differential, and log database backups to Azure Storage. 
Log Replay Service is used to restore backup files from Azure Blob Storage to SQL Managed Instance.| -|[Managed Instance link](../../managed-instance/managed-instance-link-feature-overview.md) | This feature enables online migration to Managed Instance using Always On technology. It’s a migration option for customers who require database on Managed Instance to be accessible in R/O mode while migration is in progress, who need to keep the migration running for prolonged periods of time (weeks or months at the time), who require true online replication to Business Critical service tier, and for customers who require the most performant minimum downtime migration. | - -The following table lists alternative migration tools: - -|**Technology** |**Description** | -|---------|---------| -|[Transactional replication](../../managed-instance/replication-transactional-overview.md) | Replicate data from source SQL Server database tables to SQL Managed Instance by providing a publisher-subscriber type migration option while maintaining transactional consistency. | -|[Bulk copy](/sql/relational-databases/import-export/import-and-export-bulk-data-by-using-the-bcp-utility-sql-server)| The [bulk copy program (bcp) tool](/sql/tools/bcp-utility) copies data from an instance of SQL Server into a data file. Use the tool to export the data from your source and import the data file into the target SQL managed instance.

    For high-speed bulk copy operations to move data to Azure SQL Managed Instance, you can use the [Smart Bulk Copy tool](/samples/azure-samples/smartbulkcopy/smart-bulk-copy/) to maximize transfer speed by taking advantage of parallel copy tasks. | -|[Import Export Wizard/BACPAC](../../database/database-import.md?tabs=azure-powershell)| [BACPAC](/sql/relational-databases/data-tier-applications/data-tier-applications#bacpac) is a Windows file with a .bacpac extension that encapsulates a database's schema and data. You can use BACPAC to both export data from a SQL Server source and import the data back into Azure SQL Managed Instance. | -|[Azure Data Factory](../../../data-factory/connector-azure-sql-managed-instance.md)| The [Copy activity](../../../data-factory/copy-activity-overview.md) in Azure Data Factory migrates data from source SQL Server databases to SQL Managed Instance by using built-in connectors and an [integration runtime](../../../data-factory/concepts-integration-runtime.md).

    Data Factory supports a wide range of [connectors](../../../data-factory/connector-overview.md) to move data from SQL Server sources to SQL Managed Instance. | - -## Compare migration options - -Compare migration options to choose the path that's appropriate to your business needs. - -The following table compares the recommended migration options: - -|Migration option |When to use |Considerations | -|---------|---------|---------| -|[Azure SQL migration extension for Azure Data Studio](../../../dms/migration-using-azure-data-studio.md) | - Migrate single databases or multiple databases at scale.
    - Can run in both online (minimal downtime) and offline (acceptable downtime) modes.

    Supported sources:
    - SQL Server (2005 to 2019) on-premises or Azure VM
    - AWS EC2
    - AWS RDS
    - GCP Compute SQL Server VM | - Easy to setup and get started.
    - Requires setup of self-hosted integration runtime to access on-premises SQL Server and backups.
    - Includes both assessment and migration capabilities. | -|[Azure Database Migration Service](../../../dms/tutorial-sql-server-to-managed-instance.md) | - Migrate single databases or multiple databases at scale.
    - Can accommodate downtime during the migration process.

    Supported sources:
    - SQL Server (2005 to 2019) on-premises or Azure VM
    - AWS EC2
    - AWS RDS
    - GCP Compute SQL Server VM | - Migrations at scale can be automated via [PowerShell](../../../dms/howto-sql-server-to-azure-sql-managed-instance-powershell-offline.md).
    - Time to complete migration depends on database size and is affected by backup and restore time.
    - Sufficient downtime might be required. | -|[Native backup and restore](../../managed-instance/restore-sample-database-quickstart.md) | - Migrate individual line-of-business application databases.
    - Quick and easy migration without a separate migration service or tool.

    Supported sources:
    - SQL Server (2005 to 2019) on-premises or Azure VM
    - AWS EC2
    - AWS RDS
    - GCP Compute SQL Server VM | - Database backup uses multiple threads to optimize data transfer to Azure Blob Storage, but partner bandwidth and database size can affect transfer rate.
    - Downtime should accommodate the time required to perform a full backup and restore (which is a size of data operation).| -|[Log Replay Service](../../managed-instance/log-replay-service-migrate.md) | - Migrate individual line-of-business application databases.
    - More control is needed for database migrations.

    Supported sources:
    - SQL Server (2008 to 2019) on-premises or Azure VM
    - AWS EC2
    - AWS RDS
    - GCP Compute SQL Server VM | - The migration entails making full database backups on SQL Server and copying backup files to Azure Blob Storage. Log Replay Service is used to restore backup files from Azure Blob Storage to SQL Managed Instance.
    - Databases being restored during the migration process will be in a restoring mode and can't be used for read or write workloads until the process is complete.| -|[Link feature for Azure SQL Managed Instance](../../managed-instance/managed-instance-link-feature-overview.md) | - Migrate individual line-of-business application databases.
    - More control is needed for database migrations.
    - Minimum downtime migration is needed.

    Supported sources:
    - SQL Server (2016 to 2019) on-premises or Azure VM
    - AWS EC2
    - GCP Compute SQL Server VM | - The migration entails establishing a network connection between SQL Server and SQL Managed Instance, and opening communication ports.
    - Uses [Always On availability group](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server) technology to replicate database near real-time, making an exact replica of the SQL Server database on SQL Managed Instance.
    - The database can be used for read-only access on SQL Managed Instance while migration is in progress.
    - Provides the best performance during migration with minimum downtime. | - -The following table compares the alternative migration options: - -|Method or technology |When to use |Considerations | -|---------|---------|---------| -|[Transactional replication](../../managed-instance/replication-transactional-overview.md) | - Migrate by continuously publishing changes from source database tables to target SQL Managed Instance database tables.
    - Do full or partial database migrations of selected tables (subset of a database).

    Supported sources:
    - SQL Server (2012 to 2019) with some limitations
    - AWS EC2
    - GCP Compute SQL Server VM |
    - Setup is relatively complex compared to other migration options.
    - Provides a continuous replication option to migrate data (without taking the databases offline).
    - Transactional replication has limitations to consider when you're setting up the publisher on the source SQL Server instance. See [Limitations on publishing objects](/sql/relational-databases/replication/publish/publish-data-and-database-objects#limitations-on-publishing-objects) to learn more.
    - Capability to [monitor replication activity](/sql/relational-databases/replication/monitor/monitoring-replication) is available. | -|[Bulk copy](/sql/relational-databases/import-export/import-and-export-bulk-data-by-using-the-bcp-utility-sql-server)| - Do full or partial data migrations.
    - Can accommodate downtime.

    Supported sources:
    - SQL Server (2005 to 2019) on-premises or Azure VM
    - AWS EC2
    - AWS RDS
    - GCP Compute SQL Server VM | - Requires downtime for exporting data from the source and importing into the target.
    - The file formats and data types used in the export or import need to be consistent with table schemas. | -|[Import Export Wizard/BACPAC](../../database/database-import.md)| - Migrate individual line-of-business application databases.
    - Suited for smaller databases.
    Doesn't require a separate migration service or tool.

    Supported sources:
    - SQL Server (2005 to 2019) on-premises or Azure VM
    - AWS EC2
    - AWS RDS
    - GCP Compute SQL Server VM |
    - Requires downtime because data needs to be exported at the source and imported at the destination.
    - The file formats and data types used in the export or import need to be consistent with table schemas to avoid truncation or data-type mismatch errors.
    - Time taken to export a database with a large number of objects can be significantly higher. | -|[Azure Data Factory](../../../data-factory/connector-azure-sql-managed-instance.md)| - Migrate and/or transform data from source SQL Server databases.
    - Merging data from multiple sources of data to Azure SQL Managed Instance is typically for business intelligence (BI) workloads.
    - Requires creating data movement pipelines in Data Factory to move data from source to destination.
    - [Cost](https://azure.microsoft.com/pricing/details/data-factory/data-pipeline/) is an important consideration and is based on factors like pipeline triggers, activity runs, and duration of data movement. | - -## Feature interoperability - -There are more considerations when you're migrating workloads that rely on other SQL Server features. - -### SQL Server Integration Services - -Migrate SQL Server Integration Services (SSIS) packages and projects in SSISDB to Azure SQL Managed Instance by using [Azure Database Migration Service](../../../dms/how-to-migrate-ssis-packages-managed-instance.md). - -Only SSIS packages in SSISDB starting with SQL Server 2012 are supported for migration. Convert older SSIS packages before migration. See the [project conversion tutorial](/sql/integration-services/lesson-6-2-converting-the-project-to-the-project-deployment-model) to learn more. - -### SQL Server Reporting Services - -You can migrate SQL Server Reporting Services (SSRS) reports to paginated reports in Power BI. Use the [RDL Migration Tool](https://github.com/microsoft/RdlMigration) to help prepare and migrate your reports. Microsoft developed this tool to help customers migrate Report Definition Language (RDL) reports from their SSRS servers to Power BI. It's available on GitHub, and it documents an end-to-end walkthrough of the migration scenario. - -### SQL Server Analysis Services - -SQL Server Analysis Services tabular models from SQL Server 2012 and later can be migrated to Azure Analysis Services, which is a platform as a service (PaaS) deployment model for the Analysis Services tabular model in Azure. You can learn more about migrating on-premises models to Azure Analysis Services in [this video tutorial](https://azure.microsoft.com/resources/videos/azure-analysis-services-moving-models/). 
- -Alternatively, you can consider migrating your on-premises Analysis Services tabular models to [Power BI Premium by using the new XMLA read/write endpoints](/power-bi/admin/service-premium-connect-tools). - -### High availability - -The SQL Server high-availability features Always On failover cluster instances and Always On availability groups become obsolete on the target SQL managed instance. High-availability architecture is already built into both [General Purpose (standard availability model)](../../database/high-availability-sla.md#basic-standard-and-general-purpose-service-tier-locally-redundant-availability) and [Business Critical (premium availability model)](../../database/high-availability-sla.md#premium-and-business-critical-service-tier-locally-redundant-availability) service tiers for SQL Managed Instance. The premium availability model also provides read scale-out that allows connecting into one of the secondary nodes for read-only purposes. - -Beyond the high-availability architecture that's included in SQL Managed Instance, the [auto-failover groups](../../database/auto-failover-group-overview.md) feature allows you to manage the replication and failover of databases in a managed instance to another region. - -### SQL Agent jobs - -Use the offline Azure Database Migration Service option to migrate [SQL Agent jobs](../../../dms/howto-sql-server-to-azure-sql-managed-instance-powershell-offline.md). Otherwise, script the jobs in Transact-SQL (T-SQL) by using SQL Server Management Studio and then manually re-create them on the target SQL managed instance. - -> [!IMPORTANT] -> -> Currently, Azure Database Migration Service supports only jobs with T-SQL subsystem steps. Jobs with SSIS package steps have to be manually migrated. - -### Logins and groups - -Move SQL logins from the SQL Server source to Azure SQL Managed Instance by using Database Migration Service in offline mode. 
Use the [Select logins](../../../dms/tutorial-sql-server-to-managed-instance.md#select-logins) pane in the Migration Wizard to migrate logins to your target SQL managed instance. - -By default, Azure Database Migration Service supports migrating only SQL logins. However, you can enable the migration of Windows logins by: - -- Ensuring that the target SQL managed instance has Azure Active Directory (Azure AD) read access. A user who has the Global Administrator role can configure that access via the Azure portal. -- Configuring Azure Database Migration Service to enable Windows user or group login migrations. You set this up via the Azure portal, on the **Configuration** page. After you enable this setting, restart the service for the changes to take effect. - -After you restart the service, Windows user or group logins appear in the list of logins available for migration. For any Windows user or group logins that you migrate, you're prompted to provide the associated domain name. Service user accounts (accounts with the domain name NT AUTHORITY) and virtual user accounts (accounts with the domain name NT SERVICE) aren't supported. To learn more, see [How to migrate Windows users and groups in a SQL Server instance to Azure SQL Managed Instance using T-SQL](../../managed-instance/migrate-sql-server-users-to-instance-transact-sql-tsql-tutorial.md). - -Alternatively, you can use the [PowerShell utility](https://www.microsoft.com/download/details.aspx?id=103111) specially designed by Microsoft data migration architects. The utility uses PowerShell to create a T-SQL script to re-create logins and select database users from the source to the target. - -The PowerShell utility automatically maps Windows Server Active Directory accounts to Azure AD accounts, and it can do a UPN lookup for each login against the source Active Directory instance. The utility scripts custom server and database roles, along with role membership and user permissions. 
Contained databases aren't yet supported, and only a subset of possible SQL Server permissions is scripted. - -### Encryption - -When you're migrating databases protected by [Transparent Data Encryption](../../database/transparent-data-encryption-tde-overview.md) to a managed instance by using the native restore option, [migrate the corresponding certificate](../../managed-instance/tde-certificate-migrate.md) from the source SQL Server instance to the target SQL managed instance *before* database restore. - -### System databases - -Restore of system databases isn't supported. To migrate instance-level objects (stored in the `master` and `msdb` databases), script them by using T-SQL and then re-create them on the target managed instance. - -### In-Memory OLTP (memory-optimized tables) - -SQL Server provides an In-Memory OLTP capability. It allows usage of memory-optimized tables, memory-optimized table types, and natively compiled SQL modules to run workloads that have high-throughput and low-latency requirements for transactional processing. - -> [!IMPORTANT] -> -> In-Memory OLTP is supported only in the Business Critical tier in Azure SQL Managed Instance. It's not supported in the General Purpose tier. - -If you have memory-optimized tables or memory-optimized table types in your on-premises SQL Server instance and you want to migrate to Azure SQL Managed Instance, you should either: - -- Choose the Business Critical tier for your target SQL managed instance that supports In-Memory OLTP. -- If you want to migrate to the General Purpose tier in Azure SQL Managed Instance, remove memory-optimized tables, memory-optimized table types, and natively compiled SQL modules that interact with memory-optimized objects before migrating your databases. 
You can use the following T-SQL query to identify all objects that need to be removed before migration to the General Purpose tier: - - ```tsql - SELECT * FROM sys.tables WHERE is_memory_optimized=1 - SELECT * FROM sys.table_types WHERE is_memory_optimized=1 - SELECT * FROM sys.sql_modules WHERE uses_native_compilation=1 - ``` - -To learn more about in-memory technologies, see [Optimize performance by using in-memory technologies in Azure SQL Database and Azure SQL Managed Instance](../../in-memory-oltp-overview.md). - -## Advanced features - -Be sure to take advantage of the advanced cloud-based features in SQL Managed Instance. For example, you don't need to worry about managing backups because the service does it for you. You can restore to any [point in time within the retention period](../../database/recovery-using-backups.md#point-in-time-restore). Additionally, you don't need to worry about setting up high availability, because [high availability is built in](../../database/high-availability-sla.md). - -To strengthen security, consider using [Azure AD authentication](../../database/authentication-aad-overview.md), [auditing](../../managed-instance/auditing-configure.md), [threat detection](../../database/azure-defender-for-sql.md), [row-level security](/sql/relational-databases/security/row-level-security), and [dynamic data masking](/sql/relational-databases/security/dynamic-data-masking). - -In addition to advanced management and security features, SQL Managed Instance provides advanced tools that can help you [monitor and tune your workload](../../database/monitor-tune-overview.md). [Azure SQL Analytics](../../../azure-monitor/insights/azure-sql.md) allows you to monitor a large set of managed instances in a centralized way. 
[Automatic tuning](/sql/relational-databases/automatic-tuning/automatic-tuning#automatic-plan-correction) in managed instances continuously monitors performance of your SQL plan execution and automatically fixes the identified performance problems. - -Some features are available only after the [database compatibility level](/sql/relational-databases/databases/view-or-change-the-compatibility-level-of-a-database) is changed to the latest compatibility level (150). - -## Migration assets - -For more assistance, see the following resources that were developed for real-world migration projects. - -|Asset |Description | -|---------|---------| -|[Data workload assessment model and tool](https://www.microsoft.com/download/details.aspx?id=103130)| This tool provides suggested "best fit" target platforms, cloud readiness, and an application/database remediation level for a workload. It offers simple, one-click calculation and report generation that helps to accelerate large estate assessments by providing an automated and uniform decision process for target platforms.| -|[Utility to move on-premises SQL Server logins to Azure SQL Managed Instance](https://www.microsoft.com/download/details.aspx?id=103111)|A PowerShell script can create a T-SQL command script to re-create logins and select database users from on-premises SQL Server to Azure SQL Managed Instance. The tool allows automatic mapping of Windows Server Active Directory accounts to Azure AD accounts, along with optionally migrating SQL Server native logins.| -|[Perfmon data collection automation by using Logman](https://www.microsoft.com/download/details.aspx?id=103114)|You can use the Logman tool to collect Perfmon data (to help you understand baseline performance) and get migration target recommendations. This tool uses logman.exe to create the command that will create, start, stop, and delete performance counters set on a remote SQL Server instance.| - -The Data SQL Engineering team developed these resources. 
This team's core charter is to unblock and accelerate complex modernization for data platform migration projects to Microsoft's Azure data platform. - -## Next steps - -- To start migrating your SQL Server databases to Azure SQL Managed Instance, see the [SQL Server to Azure SQL Managed Instance migration guide](sql-server-to-managed-instance-guide.md). - -- For a matrix of services and tools that can help you with database and data migration scenarios as well as specialty tasks, see [Services and tools for data migration](../../../dms/dms-tools-matrix.md). - -- To learn more about Azure SQL Managed Instance, see: - - [Service tiers in Azure SQL Managed Instance](../../managed-instance/sql-managed-instance-paas-overview.md#service-tiers) - - [Differences between SQL Server and Azure SQL Managed Instance](../../managed-instance/transact-sql-tsql-differences-sql-server.md) - - [Azure Total Cost of Ownership Calculator](https://azure.microsoft.com/pricing/tco/calculator/) - -- To learn more about the framework and adoption cycle for cloud migrations, see: - - [Cloud Adoption Framework for Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/contoso-migration-scale) - - [Best practices for costing and sizing workloads migrated to Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/migrate-best-practices-costs) - -- To assess the application access layer, see [Data Access Migration Toolkit (Preview)](https://marketplace.visualstudio.com/items?itemName=ms-databasemigration.data-access-migration-toolkit). - -- For details on how to perform A/B testing at the data access layer, see [Database Experimentation Assistant](/sql/dea/database-experimentation-assistant-overview). 
diff --git a/articles/azure-sql/migration-guides/managed-instance/sql-server-to-managed-instance-performance-baseline.md b/articles/azure-sql/migration-guides/managed-instance/sql-server-to-managed-instance-performance-baseline.md deleted file mode 100644 index 15a6f9b925291..0000000000000 --- a/articles/azure-sql/migration-guides/managed-instance/sql-server-to-managed-instance-performance-baseline.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: "SQL Server to Azure SQL Managed Instance: Performance baseline" -description: Learn to create and compare a performance baseline when migrating your SQL Server databases to Azure SQL Managed Instance. -ms.service: sql-managed-instance -ms.subservice: migration-guide -ms.custom: -ms.devlang: -ms.topic: how-to -author: mokabiru -ms.author: mokabiru -ms.reviewer: mathoma, wiassaf -ms.date: 11/06/2020 ---- -# Migration performance: SQL Server to Azure SQL Managed Instance performance baseline -[!INCLUDE[appliesto-sqldb-sqlmi](../../includes/appliesto-sqlmi.md)] - -Create a performance baseline to compare the performance of your workload on a SQL Managed Instance with your original workload running on SQL Server. - -## Create a baseline - -Ideally, performance is similar or better after migration, so it is important to measure and record baseline performance values on the source and then compare them to the target environment. A performance baseline is a set of parameters that define your average workload on your source. - -Select a set of queries that are important to, and representative of your business workload. Measure and document the min/average/max duration and CPU usage for these queries, as well as performance metrics on the source server, such as average/max CPU usage, average/max disk IO latency, throughput, IOPS, average / max page life expectancy, and average max size of tempdb. 
- -The following resources can help define a performance baseline: - - - [Monitor CPU usage ](https://techcommunity.microsoft.com/t5/azure-sql-database/monitor-cpu-usage-on-sql-server-and-azure-sql/ba-p/680777#M131) - - [Monitor memory usage](/sql/relational-databases/performance-monitor/monitor-memory-usage) and determine the amount of memory used by different components such as buffer pool, plan cache, column-store pool, [In-Memory OLTP](/sql/relational-databases/in-memory-oltp/monitor-and-troubleshoot-memory-usage), etc. In addition, you should find average and peak values of the Page Life Expectancy memory performance counter. - - Monitor disk IO usage on the source SQL Server instance using the [sys.dm_io_virtual_file_stats](/sql/relational-databases/system-dynamic-management-views/sys-dm-io-virtual-file-stats-transact-sql) view or [performance counters](/sql/relational-databases/performance-monitor/monitor-disk-usage). - - Monitor workload and query performance by examining Dynamic Management Views (or Query Store if you are migrating from SQL Server 2016 and later). Identify average duration and CPU usage of the most important queries in your workload. - -Any performance issues on the source SQL Server should be addressed prior to migration. Migrating known issues to any new system might cause unexpected results and invalidate any performance comparison. - - -## Compare performance - -After you have defined a baseline, compare similar workload performance on the target SQL Managed Instance. For accuracy, it is important that the SQL Managed Instance environment is comparable to the SQL Server environment as much as possible. - -There are SQL Managed Instance infrastructure differences that make matching performance exactly unlikely. Some queries may run faster than expected, while others may be slower. 
The goal of this comparison is to verify that workload performance in the managed instance matches the performance on SQL Server (on average) and to identify any critical queries with performance that don’t match your original performance. - -Performance comparison is likely to result in the following outcomes: - -- Workload performance on the managed instance is aligned or better than the workload performance on your source SQL Server. In this case, you have successfully confirmed that migration is successful. - -- The majority of performance parameters and queries in the workload perform as expected, with some exceptions resulting in degraded performance. In this case, identify the differences and their importance. If there are some important queries with degraded performance, investigate whether the underlying SQL plans have changed or whether queries are hitting resource limits. You can mitigate this by applying some hints on critical queries (for example, change compatibility level, legacy cardinality estimator) either directly or using plan guides. Ensure statistics and indexes are up to date and equivalent in both environments. - -- Most queries are slower on a managed instance compared to your source SQL Server instance. In this case, try to identify the root causes of the difference such as [reaching some resource limit](../../managed-instance/resource-limits.md#service-tier-characteristics) such as IO, memory, or instance log rate limits. If there are no resource limits causing the difference, try changing the compatibility level of the database or change database settings like legacy cardinality estimation and rerun the test. Review the recommendations provided by the managed instance or Query Store views to identify the queries with regressed performance. - -SQL Managed Instance has a built-in automatic plan correction feature that is enabled by default. This feature ensures that queries that worked fine in the past do not degrade in the future. 
If this feature is not enabled, run the workload with the old settings so SQL Managed Instance can learn the performance baseline. Then, enable the feature and run the workload again with the new settings. - -Make changes in the parameters of your test or upgrade to higher service tiers to reach the optimal configuration for the workload performance that fits your needs. - -## Monitor performance - -SQL Managed Instance provides advanced tools for monitoring and troubleshooting, and you should use them to monitor performance on your instance. Some of the key metrics to monitor are: - -- CPU usage on the instance to determine if the number of vCores that you provisioned is the right match for your workload. -- Page-life expectancy on your managed instance to determine if you need [additional memory](https://techcommunity.microsoft.com/t5/azure-sql-database/do-you-need-more-memory-on-azure-sql-managed-instance/ba-p/563444). -- Statistics like INSTANCE_LOG_GOVERNOR or PAGEIOLATCH that identify storage IO issues, especially on the General Purpose tier, where you might need to pre-allocate files to get better IO performance. - - -## Considerations - -When comparing performance, consider the following: - -- Settings match between source and target. Validate that various instance, database, and tempdb settings are equivalent between the two environments. Differences in configuration, compatibility levels, encryption settings, trace flags etc., can all skew performance. - -- Storage is configured according to [best practices](https://techcommunity.microsoft.com/t5/datacat/storage-performance-best-practices-and-considerations-for-azure/ba-p/305525). For example, for General Purpose, you may need to pre-allocate the size of the files to improve performance. 
- -- There are [key environment differences](https://azure.microsoft.com/blog/key-causes-of-performance-differences-between-sql-managed-instance-and-sql-server/) that might cause the performance differences between a managed instance and SQL Server. Identify risks relevant to your environment that might contribute to a performance issue. - -- Query store and automatic tuning should be enabled on your SQL Managed Instance as they help you measure workload performance and automatically mitigate potential performance issues. - - - -## Next steps - -For more information to optimize your new Azure SQL Managed Instance environment, see the following resources: - -- [How to identify why workload performance on Azure SQL Managed Instance is different than SQL Server?](https://medium.com/azure-sqldb-managed-instance/what-to-do-when-azure-sql-managed-instance-is-slower-than-sql-server-dd39942aaadd) -- [Key causes of performance differences between SQL Managed Instance and SQL Server](https://azure.microsoft.com/blog/key-causes-of-performance-differences-between-sql-managed-instance-and-sql-server/) -- [Storage performance best practices and considerations for Azure SQL Managed Instance (General Purpose)](https://techcommunity.microsoft.com/t5/datacat/storage-performance-best-practices-and-considerations-for-azure/ba-p/305525) -- [Real-time performance monitoring for Azure SQL Managed Instance (this is archived, is this the intended target?)](/archive/blogs/sqlcat/real-time-performance-monitoring-for-azure-sql-database-managed-instance) \ No newline at end of file diff --git a/articles/azure-sql/migration-guides/managed-instance/sql-server-to-sql-managed-instance-assessment-rules.md b/articles/azure-sql/migration-guides/managed-instance/sql-server-to-sql-managed-instance-assessment-rules.md deleted file mode 100644 index 8bbb3f950919f..0000000000000 --- a/articles/azure-sql/migration-guides/managed-instance/sql-server-to-sql-managed-instance-assessment-rules.md +++ /dev/null 
@@ -1,610 +0,0 @@ ---- -title: "Assessment rules for SQL Server to Azure SQL Managed Instance migration" -description: Assessment rules to identify issues with the source SQL Server instance that must be addressed before migrating to Azure SQL Managed Instance. -ms.service: sql-managed-instance -ms.subservice: migration-guide -ms.custom: ignite-fall-2021 -ms.devlang: -ms.topic: how-to -author: rajeshsetlem -ms.author: rsetlem -ms.reviewer: mathoma -ms.date: 04/06/2022 ---- -# Assessment rules for SQL Server to Azure SQL Managed Instance migration -[!INCLUDE[appliesto--sqlmi](../../includes/appliesto-sqlmi.md)] - -Migration tools validate your source SQL Server instance by running a number of assessment rules. The rules identify issues that must be addressed before migrating your SQL Server database to Azure SQL Managed Instance. - -This article provides a list of the rules used to assess the feasibility of migrating your SQL Server database to Azure SQL Managed Instance. - -## Rules Summary - - | Rule Title | Level | Category | Details | - | - | - | - | - | - | AnalysisCommandJob | Instance | Warning | [AnalysisCommand job step isn't supported in Azure SQL Managed Instance.](#AnalysisCommandJob) | - | AnalysisQueryJob | Instance | Warning | [AnalysisQuery job step isn't supported in Azure SQL Managed Instance.](#AnalysisQueryJob) | - | AssemblyFromFile | Database | Issue | ['CREATE ASSEMBLY' and 'ALTER ASSEMBLY' with a file parameter are unsupported in Azure SQL Managed Instance.](#AssemblyFromFile) | - | BulkInsert | Database | Issue | [BULK INSERT with non-Azure blob data source isn't supported in Azure SQL Managed Instance.](#BulkInsert) | - | ClrStrictSecurity | Database | Warning | [CLR assemblies marked as SAFE or EXTERNAL_ACCESS are considered UNSAFE.](#ClrStrictSecurity) | - | ComputeClause | Database | Warning | [COMPUTE clause is no longer supported and has been removed.](#ComputeClause) | - | CryptographicProvider | Database | Issue | [A use of CREATE 
CRYPTOGRAPHIC PROVIDER or ALTER CRYPTOGRAPHIC PROVIDER was found. This isn't supported in Azure SQL Managed Instance.](#CryptographicProvider) | - | DatabasePrincipalAlias | Database | Issue | [SYS.DATABASE_PRINCIPAL_ALIASES is no longer supported and has been removed.](#DatabasePrincipalAlias) | - | DbCompatLevelLowerThan100 | Database | Warning | [Database compatibility level below 100 isn't supported.](#DbCompatLevelLowerThan100) | - | DisableDefCNSTCHK | Database | Issue | [SET option DISABLE_DEF_CNST_CHK is no longer supported and has been removed.](#DisableDefCNSTCHK) | - | FastFirstRowHint | Database | Warning | [FASTFIRSTROW query hint is no longer supported and has been removed.](#FastFirstRowHint) | - | FileStream | Database | Issue | [Filestream and Filetable are not supported in Azure SQL Managed Instance.](#FileStream) | - | LinkedServerWithNonSQLProvider | Database | Issue | [Linked server with non-SQL Server Provider isn't supported in Azure SQL Managed Instance.](#LinkedServerWithNonSQLProvider) | - | MergeJob | Instance | Warning | [Merge job step isn't supported in Azure SQL Managed Instance.](#MergeJob) | - | MIDatabaseSize | Database | Issue | [Azure SQL Managed Instance does not support database size greater than 8 TB.](#MIDatabaseSize<) | - | MIHeterogeneousMSDTCTransactSQL | Database | Issue | [BEGIN DISTRIBUTED TRANSACTION with non-SQL Server remote server isn't supported in Azure SQL Managed Instance.](#MIHeterogeneousMSDTCTransactSQL) | - | MIHomogeneousMSDTCTransactSQL | Database | Issue | [BEGIN DISTRIBUTED TRANSACTION is supported across multiple servers for Azure SQL Managed Instance.](#MIHomogeneousMSDTCTransactSQL) | - | MIInstanceSize | Instance | Warning | [Maximum instance storage size in Azure SQL Managed Instance cannot be greater than 8 TB.](#MIInstanceSize<) | - | MultipleLogFiles | Database | Issue | [Azure SQL Managed Instance does not support databases with multiple log files.](#MultipleLogFiles<) | - | NextColumn | 
Database | Issue | [Tables and Columns named NEXT will lead to an error In Azure SQL Managed Instance.](#NextColumn) | - | NonANSILeftOuterJoinSyntax | Database | Warning | [Non-ANSI style left outer join is no longer supported and has been removed.](#NonANSILeftOuterJoinSyntax) | - | NonANSIRightOuterJoinSyntax | Database | Warning | [Non-ANSI style right outer join is no longer supported and has been removed.](#NonANSIRightOuterJoinSyntax) | - | NumDbExceeds100 | Instance | Warning | [Azure SQL Managed Instance supports a maximum of 100 databases per instance.](#NumDbExceeds100) | - | OpenRowsetWithNonBlobDataSourceBulk | Database | Issue | [OpenRowSet used in bulk operation with non-Azure blob storage data source isn't supported in Azure SQL Managed Instance.](#OpenRowsetWithNonBlobDataSourceBulk) | - | OpenRowsetWithNonSQLProvider | Database | Issue | [OpenRowSet with non-SQL provider isn't supported in Azure SQL Managed Instance.](#OpenRowsetWithNonSQLProvider) | - | PowerShellJob | Instance | Warning | [PowerShell job step isn't supported in Azure SQL Managed Instance.](#PowerShellJob) | - | QueueReaderJob | Instance | Warning | [Queue Reader job step isn't supported in Azure SQL Managed Instance.](#QueueReaderJob) | - | RAISERROR | Database | Warning | [Legacy style RAISERROR calls should be replaced with modern equivalents.](#RAISERROR) | - | SqlMail | Database | Warning | [SQL Mail is no longer supported.](#SqlMail) | - | SystemProcedures110 | Database | Warning | [Detected statements that reference removed system stored procedures that are not available in Azure SQL Managed Instance.](#SystemProcedures110) | - | TraceFlags | Instance | Warning | [Trace flags not supported in Azure SQL Managed Instance were found.](#TraceFlags) | - | TransactSqlJob | Instance | Warning | [TSQL job step includes unsupported commands in Azure SQL Managed Instance.](#TransactSqlJob) | - | WindowsAuthentication | Instance | Warning | [Database users mapped with Windows 
authentication (integrated security) are not supported in Azure SQL Managed Instance.](#WindowsAuthentication) | - | XpCmdshell | Database | Issue | [xp_cmdshell is not supported in Azure SQL Managed Instance.](#XpCmdshell) | - -## AnalysisCommand job - -**Title: AnalysisCommand job step is not supported in Azure SQL Managed Instance.** -**Category**: Warning - - -**Description** -It is a job step that runs an Analysis Services command. AnalysisCommand job step is not supported in Azure SQL Managed Instance. - -**Recommendation** -Review impacted objects section in Azure Migrate to see all jobs using Analysis Service Command job step and evaluate if the job step or the impacted object can be removed. Alternatively, migrate to SQL Server on Azure Virtual Machine. - -More information: [SQL Server Agent differences in Azure SQL Managed Instance ](../../managed-instance/transact-sql-tsql-differences-sql-server.md#sql-server-agent) - -## AnalysisQuery job - -**Title: AnalysisQuery job step is not supported in Azure SQL Managed Instance.** -**Category**: Warning - -**Description** -It is a job step that runs an Analysis Services query. AnalysisQuery job step is not supported in Azure SQL Managed Instance. - - -**Recommendation** -Review impacted objects section in Azure Migrate to see all jobs using Analysis Service Query job step and evaluate if the job step or the impacted object can be removed. Alternatively, migrate to SQL Server on Azure Virtual Machine. - -More information: [SQL Server Agent differences in Azure SQL Managed Instance ](../../managed-instance/transact-sql-tsql-differences-sql-server.md#sql-server-agent) - - -## Assembly from file - -**Title: 'CREATE ASSEMBLY' and 'ALTER ASSEMBLY' with a file parameter are unsupported in Azure SQL Managed Instance.** -**Category**: Issue - -**Description** -Azure SQL Managed Instance does not support 'CREATE ASSEMBLY' or 'ALTER ASSEMBLY' with a file parameter. A binary parameter is supported. 
See the Impacted Objects section for the specific object where the file parameter is used. - -**Recommendation** -Review objects using 'CREATE ASSEMBLY' or 'ALTER ASSEMBLY with a file parameter. If any such objects that are required, convert the file parameter to a binary parameter. Alternatively, migrate to SQL Server on Azure Virtual Machine. - -More information: [CLR differences in Azure SQL Managed Instance ](../../managed-instance/transact-sql-tsql-differences-sql-server.md#clr) - -## Bulk insert - -**Title: BULK INSERT with non-Azure blob data source is not supported in Azure SQL Managed Instance.** -**Category**: Issue - -**Description** -Azure SQL Managed Instance cannot access file shares or Windows folders. See the "Impacted Objects" section for the specific uses of BULK INSERT statements that do not reference an Azure blob. Objects with 'BULK INSERT' where the source is not Azure blob storage will not work after migrating to Azure SQL Managed Instance. - - -**Recommendation** -You will need to convert BULK INSERT statements that use local files or file shares to use files from Azure blob storage instead, when migrating to Azure SQL Managed Instance. - -More information: [Bulk Insert and OPENROWSET differences in Azure SQL Managed Instance ](../../managed-instance/transact-sql-tsql-differences-sql-server.md#bulk-insert--openrowset) - - -## CLR Security - -**Title: CLR assemblies marked as SAFE or EXTERNAL_ACCESS are considered UNSAFE** -**Category**: Warning - -**Description** -CLR Strict Security mode is enforced in Azure SQL Managed Instance. This mode is enabled by default and introduces breaking changes for databases containing user-defined CLR assemblies marked either SAFE or EXTERNAL_ACCESS. - - -**Recommendation** -CLR uses Code Access Security (CAS) in the .NET Framework, which is no longer supported as a security boundary. 
Beginning with SQL Server 2017 (14.x) database engine, an `sp_configure` option called clr strict security is introduced to enhance the security of CLR assemblies. Clr strict security is enabled by default, and treats SAFE and EXTERNAL_ACCESS CLR assemblies as if they were marked UNSAFE. When clr strict security is disabled, a CLR assembly created with PERMISSION_SET = SAFE may be able to access external system resources, call unmanaged code, and acquire sysadmin privileges. After enabling strict security, any assemblies that are not signed will fail to load. Also, if a database has SAFE or EXTERNAL_ACCESS assemblies, RESTORE or ATTACH DATABASE statements can complete, but the assemblies may fail to load. To load the assemblies, you must either alter or drop and recreate each assembly so that it is signed with a certificate or asymmetric key that has a corresponding login with the UNSAFE ASSEMBLY permission on the server. - -More information: [CLR strict security](/sql/database-engine/configure-windows/clr-strict-security) - -## Compute clause - -**Title: COMPUTE clause is no longer supported and has been removed.** -**Category**: Warning - -**Description** -The COMPUTE clause generates totals that appear as additional summary columns at the end of the result set. However, this clause is no longer supported in Azure SQL Managed Instance. - - - -**Recommendation** -The T-SQL module needs to be rewritten using the ROLLUP operator instead. 
The code below demonstrates how COMPUTE can be replaced with ROLLUP: - -```sql -USE AdventureWorks GO; - -SELECT SalesOrderID, UnitPrice, UnitPriceDiscount -FROM Sales.SalesOrderDetail -ORDER BY SalesOrderID COMPUTE SUM(UnitPrice), SUM(UnitPriceDiscount) -BY SalesOrderID GO; - -SELECT SalesOrderID, UnitPrice, UnitPriceDiscount,SUM(UnitPrice) as UnitPrice , -SUM(UnitPriceDiscount) as UnitPriceDiscount -FROM Sales.SalesOrderDetail -GROUP BY SalesOrderID, UnitPrice, UnitPriceDiscount WITH ROLLUP; -``` - -More information: [Discontinued Database Engine Functionality in SQL Server](/previous-versions/sql/2014/database-engine/discontinued-database-engine-functionality-in-sql-server-2016#Denali) - -## Cryptographic provider - -**Title: A use of CREATE CRYPTOGRAPHIC PROVIDER or ALTER CRYPTOGRAPHIC PROVIDER was found, which is not supported in Azure SQL Managed Instance.** -**Category**: Issue - -**Description** -Azure SQL Managed Instance does not support CRYPTOGRAPHIC PROVIDER statements because it cannot access files. See the Impacted Objects section for the specific uses of CRYPTOGRAPHIC PROVIDER statements. Objects with 'CREATE CRYPTOGRAPHIC PROVIDER' or 'ALTER CRYPTOGRAPHIC PROVIDER' will not work correctly after migrating to Azure SQL Managed Instance. - - -**Recommendation** -Review objects with 'CREATE CRYPTOGRAPHIC PROVIDER' or 'ALTER CRYPTOGRAPHIC PROVIDER'. In any such objects that are required, remove the uses of these features. Alternatively, migrate to SQL Server on Azure Virtual Machine. 
- -More information: [Cryptographic provider differences in Azure SQL Managed Instance ](../../managed-instance/transact-sql-tsql-differences-sql-server.md#cryptographic-providers) - -## Database compatibility - -**Title: Database compatibility level below 100 is not supported** -**Category**: Warning - -**Description** -Database Compatibility Level is a valuable tool to assist in database modernization, by allowing the SQL Server Database Engine to be upgraded, while keeping connecting applications functional status by maintaining the same pre-upgrade Database Compatibility Level. Azure SQL Managed Instance doesn't support compatibility levels below 100. When the database with compatibility level below 100 is restored on Azure SQL Managed Instance, the compatibility level is upgraded to 100. - - -**Recommendation** -Evaluate if the application functionality is intact when the database compatibility level is upgraded to 100 on Azure SQL Managed Instance. Alternatively, migrate to SQL Server on Azure Virtual Machine. - -More information: [Supported compatibility levels in Azure SQL Managed Instance ](../../managed-instance/transact-sql-tsql-differences-sql-server.md#compatibility-levels) - -## Database principal alias - -**Title: SYS.DATABASE_PRINCIPAL_ALIASES is no longer supported and has been removed.** -**Category**: Issue - -**Description** -SYS.DATABASE_PRINCIPAL_ALIASES is no longer supported and has been removed in Azure SQL Managed Instance. - - -**Recommendation** -Use roles instead of aliases. - -More information: [Discontinued Database Engine Functionality in SQL Server](/previous-versions/sql/2014/database-engine/discontinued-database-engine-functionality-in-sql-server-2016#Denali) - -## DISABLE_DEF_CNST_CHK option - -**Title: SET option DISABLE_DEF_CNST_CHK is no longer supported and has been removed.** -**Category**: Issue - -**Description** -SET option DISABLE_DEF_CNST_CHK is no longer supported and has been removed in Azure SQL Managed Instance. 
- - -More information: [Discontinued Database Engine Functionality in SQL Server](/previous-versions/sql/2014/database-engine/discontinued-database-engine-functionality-in-sql-server-2016#Denali) - -## FASTFIRSTROW hint - -**Title: FASTFIRSTROW query hint is no longer supported and has been removed.** -**Category**: Warning - -**Description** -FASTFIRSTROW query hint is no longer supported and has been removed in Azure SQL Managed Instance. - - -**Recommendation** -Instead of FASTFIRSTROW query hint use OPTION (FAST n). - -More information: [Discontinued Database Engine Functionality in SQL Server](/previous-versions/sql/2014/database-engine/discontinued-database-engine-functionality-in-sql-server-2016#Denali) - -## FileStream - -**Title: Filestream and Filetable are not supported in Azure SQL Managed Instance.** -**Category**: Issue - -**Description** -The Filestream feature, which allows you to store unstructured data such as text documents, images, and videos in NTFS file system, is not supported in Azure SQL Managed Instance. **This database can't be migrated as the backup containing Filestream filegroups can't be restored on Azure SQL Managed Instance.** - - -**Recommendation** -Upload the unstructured files to Azure Blob storage and store metadata related to these files (name, type, URL location, storage key etc.) in Azure SQL Managed Instance. You may have to re-engineer your application to enable streaming blobs to and from Azure SQL Managed Instance. Alternatively, migrate to SQL Server on Azure Virtual Machine. 
- -More information: [Streaming Blobs To and From SQL Azure blog](https://azure.microsoft.com/blog/streaming-blobs-to-and-from-sql-azure/) - -## Heterogeneous MS DTC - -**Title: BEGIN DISTRIBUTED TRANSACTION with non-SQL Server remote server is not supported in Azure SQL Managed Instance.** -**Category**: Issue - -**Description** -Distributed transaction started by Transact SQL BEGIN DISTRIBUTED TRANSACTION and managed by Microsoft Distributed Transaction Coordinator (MS DTC) is not supported in Azure SQL Managed Instance if the remote server is not SQL Server. - - -**Recommendation** -Review impacted objects section in Azure Migrate to see all objects using BEGIN DISTRIBUTED TRANSACTION. Consider migrating the participant databases to Azure SQL Managed Instance where distributed transactions across multiple instances are supported (Currently in preview). Alternatively, migrate to SQL Server on Azure Virtual Machine. - -More information: [Transactions across multiple servers for Azure SQL Managed Instance ](../../database/elastic-transactions-overview.md#transactions-for-sql-managed-instance) - -## Homogenous MS DTC - -**Title: BEGIN DISTRIBUTED TRANSACTION is supported across multiple servers for Azure SQL Managed Instance.** -**Category**: Issue - -**Description** -Distributed transaction started by Transact SQL BEGIN DISTRIBUTED TRANSACTION and managed by Microsoft Distributed Transaction Coordinator (MS DTC) is supported across multiple servers for Azure SQL Managed Instance. - - -**Recommendation** -Review impacted objects section in Azure Migrate to see all objects using BEGIN DISTRIBUTED TRANSACTION. Consider migrating the participant databases to Azure SQL Managed Instance where distributed transactions across multiple instances are supported (Currently in preview). Alternatively, migrate to SQL Server on Azure Virtual Machine. 
- -More information: [Transactions across multiple servers for Azure SQL Managed Instance](../../database/elastic-transactions-overview.md#transactions-for-sql-managed-instance) - - -## Linked server (non-SQL provider) - -**Title: Linked server with non-SQL Server Provider is not supported in Azure SQL Managed Instance.** -**Category**: Issue - -**Description** -Linked servers enable the SQL Server Database Engine to execute commands against OLE DB data sources outside of the instance of SQL Server. Linked server with non-SQL Server Provider is not supported in Azure SQL Managed Instance. - - -**Recommendation** -Azure SQL Managed Instance does not support linked server functionality if the remote server provider is non-SQL Server like Oracle, Sybase etc. - -The following actions are recommended to eliminate the need for linked servers: -- Identify the dependent database(s) from remote non-SQL servers and consider moving these into the database being migrated. -- Migrate the dependent database(s) to supported targets like SQL Managed Instance, SQL Database, Azure Synapse SQL and SQL Server instances. -- Consider creating linked server between Azure SQL Managed Instance and SQL Server on Azure Virtual Machine (SQL VM). Then from SQL VM create linked server to Oracle, Sybase etc. This approach does involve two hops but can be used as temporary workaround. -- Alternatively, migrate to SQL Server on Azure Virtual Machine. - -More information: [Linked Server differences in Azure SQL Managed Instance](../../managed-instance/transact-sql-tsql-differences-sql-server.md#linked-servers) - -## Merge job - -**Title: Merge job step is not supported in Azure SQL Managed Instance.** -**Category**: Warning - -**Description** -It is a job step that activates the replication Merge Agent. The Replication Merge Agent is a utility executable that applies the initial snapshot held in the database tables to the Subscribers. 
It also merges incremental data changes that occurred at the Publisher after the initial snapshot was created, and reconciles conflicts either according to the rules you configure or using a custom resolver you create. Merge job step is not supported in Azure SQL Managed Instance. - - -**Recommendation** -Review impacted objects section in Azure Migrate to see all jobs using Merge job step and evaluate if the job step or the impacted object can be removed. Alternatively, migrate to SQL Server on Azure Virtual Machine - -More information: [SQL Server Agent differences in Azure SQL Managed Instance ](../../managed-instance/transact-sql-tsql-differences-sql-server.md#sql-server-agent) - - -## MI database size - -**Title: Azure SQL Managed Instance does not support database size greater than 8 TB.** -**Category**: Issue - -**Description** -The size of the database is greater than maximum instance reserved storage. **This database can't be selected for migration as the size exceeded the allowed limit.** - - -**Recommendation** -Evaluate if the data can be archived compressed or sharded into multiple databases. Alternatively, migrate to SQL Server on Azure Virtual Machine. - -More information: [Hardware characteristics of Azure SQL Managed Instance ](../../managed-instance/resource-limits.md#hardware-configuration-characteristics) - - - -## MI instance size - -**Title: Maximum instance storage size in Azure SQL Managed Instance cannot be greater than 8 TB.** -**Category**: Warning - -**Description** -The size of all databases is greater than maximum instance reserved storage. - - -**Recommendation** -Consider migrating the databases to different Azure SQL Managed Instances or to SQL Server on Azure Virtual Machine if all the databases must exist on the same instance. 
- -More information: [Hardware characteristics of Azure SQL Managed Instance ](../../managed-instance/resource-limits.md#hardware-configuration-characteristics) - - -## Multiple log files - -**Title: Azure SQL Managed Instance does not support multiple log files.** -**Category**: Issue - -**Description** -SQL Server allows a database to log to multiple files. This database has multiple log files, which is not supported in Azure SQL Managed Instance. **This database can't be migrated as the backup can't be restored on Azure SQL Managed Instance. -** - -**Recommendation** -Azure SQL Managed Instance supports only a single log per database. You need to delete all but one of the log files before migrating this database to Azure: - -```sql -ALTER DATABASE [database_name] REMOVE FILE [log_file_name] -``` - -More information: [Unsupported database options in Azure SQL Managed Instance ](../../managed-instance/transact-sql-tsql-differences-sql-server.md#database-options) - - - -## Next column - -**Title: Tables and Columns named NEXT will lead to an error In Azure SQL Managed Instance.** -**Category**: Issue - -**Description** -Tables or columns named NEXT were detected. Sequences, introduced in Microsoft SQL Server, use the ANSI standard NEXT VALUE FOR function. Tables or columns named NEXT and column aliased as VALUE with the ANSI standard AS omitted can cause an error. - - -**Recommendation** -Rewrite statements to include the ANSI standard AS keyword when aliasing a table or column. For example, when a column is named NEXT and that column is aliased as VALUE, the query SELECT NEXT VALUE FROM TABLE will cause an error and should be rewritten as SELECT NEXT AS VALUE FROM TABLE. Similarly, for a table named NEXT and aliased as VALUE, the query SELECT Col1 FROM NEXT VALUE will cause an error and should be rewritten as SELECT Col1 FROM NEXT AS VALUE. 
- - - -## Non-ANSI style left outer join - -**Title: Non-ANSI style left outer join is no longer supported and has been removed.** -**Category**: Warning - -**Description** -Non-ANSI style left outer join is no longer supported and has been removed in Azure SQL Managed Instance. - - -**Recommendation** -Use ANSI join syntax. - -More information: [Discontinued Database Engine Functionality in SQL Server](/previous-versions/sql/2014/database-engine/discontinued-database-engine-functionality-in-sql-server-2016#Denali) - -## Non-ANSI style right outer join - -**Title: Non-ANSI style right outer join is no longer supported and has been removed.** -**Category**: Warning - -**Description** -Non-ANSI style right outer join is no longer supported and has been removed in Azure SQL Managed Instance. - - - -More information: [Discontinued Database Engine Functionality in SQL Server](/previous-versions/sql/2014/database-engine/discontinued-database-engine-functionality-in-sql-server-2016#Denali) - -**Recommendation** -Use ANSI join syntax. - -## Databases exceed 100 - -**Title: Azure SQL Managed Instance supports a maximum of 100 databases per instance.** -**Category**: Warning - -**Description** -Maximum number of databases supported in Azure SQL Managed Instance is 100, unless the instance storage size limit has been reached. - - - -**Recommendation** -Consider migrating the databases to different Azure SQL Managed Instances or to SQL Server on Azure Virtual Machine if all the databases must exist on the same instance. 
- -More information: [Azure SQL Managed Instance Resource Limits ](../../managed-instance/resource-limits.md#service-tier-characteristics) - -## OPENROWSET (non-blob data source) - -**Title: OpenRowSet used in bulk operation with non-Azure Blob Storage data source is not supported in Azure SQL Managed Instance.** -**Category**: Issue - -**Description** -OPENROWSET supports bulk operations through a built-in BULK provider that enables data from a file to be read and returned as a rowset. OPENROWSET with non-Azure blob storage data source is not supported in Azure SQL Managed Instance. - -**Recommendation** -Azure SQL Managed Instance cannot access file shares and Windows folders, so the files must be imported from Azure blob storage. Therefore, only blob type DATASOURCE is supported in OPENROWSET function. Alternatively, migrate to SQL Server on Azure Virtual Machine. - -More information: [Bulk Insert and OPENROWSET differences in Azure SQL Managed Instance ](../../managed-instance/transact-sql-tsql-differences-sql-server.md#bulk-insert--openrowset) - -## OPENROWSET (non-SQL provider) - -**Title: OpenRowSet with non-SQL provider is not supported in Azure SQL Managed Instance.** -**Category**: Issue - -**Description** -This method is an alternative to accessing tables in a linked server and is a one-time, ad hoc method of connecting and accessing remote data by using OLE DB. OpenRowSet with non-SQL provider is not supported in Azure SQL Managed Instance. - - - -**Recommendation** -OPENROWSET function can be used to execute queries only on SQL Server instances (either managed, on-premises, or in Virtual Machines). Only SQLNCLI, SQLNCLI11, and SQLOLEDB values are supported as provider. Therefore, the recommended action is to identify the dependent database(s) from remote non-SQL Servers and consider moving these into the database being migrated. 
- -More information: [Bulk Insert and OPENROWSET differences in Azure SQL Managed Instance ](../../managed-instance/transact-sql-tsql-differences-sql-server.md#bulk-insert--openrowset) - - -## PowerShell job - -**Title: PowerShell job step is not supported in Azure SQL Managed Instance.** -**Category**: Warning - -**Description** -It is a job step that runs a PowerShell script. PowerShell job step is not supported in Azure SQL Managed Instance. - - - -**Recommendation** -Review impacted objects section in Azure Migrate to see all jobs using PowerShell job step and evaluate if the job step or the impacted object can be removed. Evaluate if Azure Automation can be used. Alternatively, migrate to SQL Server on Azure Virtual Machine - -More information: [SQL Server Agent differences in Azure SQL Managed Instance ](../../managed-instance/transact-sql-tsql-differences-sql-server.md#sql-server-agent) - -## Queue Reader job - -**Title: Queue Reader job step is not supported in Azure SQL Managed Instance.** -**Category**: Warning - -**Description** -It is a job step that activates the replication Queue Reader Agent. The Replication Queue Reader Agent is an executable that reads messages stored in a Microsoft SQL Server queue or a Microsoft Message Queue and then applies those messages to the Publisher. Queue Reader Agent is used with snapshot and transactional publications that allow queued updating. Queue Reader job step is not supported in Azure SQL Managed Instance. - - -**Recommendation** -Review impacted objects section in Azure Migrate to see all jobs using Queue Reader job step and evaluate if the job step or the impacted object can be removed. Alternatively, migrate to SQL Server on Azure Virtual Machine. 
- -More information: [SQL Server Agent differences in Azure SQL Managed Instance ](../../managed-instance/transact-sql-tsql-differences-sql-server.md#sql-server-agent) - - -## RAISERROR - -**Title: Legacy style RAISERROR calls should be replaced with modern equivalents.** -**Category**: Warning - -**Description** -RAISERROR calls like the below example are termed as legacy-style because they do not include the commas and the parenthesis. RAISERROR 50001 'this is a test'. This method of calling RAISERROR is no longer supported and removed in Azure SQL Managed Instance. - - - -**Recommendation** -Rewrite the statement using the current RAISERROR syntax, or evaluate if the modern approach of `BEGIN TRY { } END TRY BEGIN CATCH { THROW; } END CATCH` is feasible. - -More information: [Discontinued Database Engine Functionality in SQL Server](/previous-versions/sql/2014/database-engine/discontinued-database-engine-functionality-in-sql-server-2016#Denali) - - -## SQL Mail - -**Title: SQL Mail is no longer supported.** -**Category**: Warning - - -**Description** -SQL Mail is no longer supported and has been removed in Azure SQL Managed Instance. - - - -**Recommendation** -Use Database Mail. - -More information: [Discontinued Database Engine Functionality in SQL Server](/previous-versions/sql/2014/database-engine/discontinued-database-engine-functionality-in-sql-server-2016#Denali) - - -## SystemProcedures110 - - -**Title: Detected statements that reference removed system stored procedures that are not available in Azure SQL Managed Instance.** -**Category**: Warning - -**Description** -The following unsupported system and extended stored procedures cannot be used in Azure SQL Managed Instance - `sp_dboption`, `sp_addserver`, `sp_dropalias`,`sp_activedirectory_obj`, `sp_activedirectory_scp`, and `sp_activedirectory_start`. - - - - -**Recommendation** -Remove references to unsupported system procedures that have been removed in Azure SQL Managed Instance. 
- -More information: [Discontinued Database Engine Functionality in SQL Server](/previous-versions/sql/2014/database-engine/discontinued-database-engine-functionality-in-sql-server-2016#Denali) - -## Transact-SQL job - -**Title: TSQL job step includes unsupported commands in Azure SQL Managed Instance** -**Category**: Warning - - -**Description** -It is a job step that runs TSQL scripts at scheduled time. TSQL job step includes unsupported commands which are not supported in Azure SQL Managed Instance. - - - -**Recommendation** -Review impacted objects section in Azure Migrate to see all jobs that include unsupported commands in Azure SQL Managed Instance and evaluate if the job step or the impacted object can be removed. Alternatively, migrate to SQL Server on Azure Virtual Machine. - -More information: [SQL Server Agent differences in Azure SQL Managed Instance ](../../managed-instance/transact-sql-tsql-differences-sql-server.md#sql-server-agent) - - -## Trace flags - -**Title: Trace flags not supported in Azure SQL Managed Instance were found** -**Category**: Warning - - -**Description** -Azure SQL Managed Instance supports only limited number of global trace flags. Session trace flags aren't supported. - - - -**Recommendation** -Review impacted objects section in Azure Migrate to see all trace flags that are not supported in Azure SQL Managed Instance and evaluate if they can be removed. Alternatively, migrate to SQL Server on Azure Virtual Machine. 
- -More information: [Trace flags](/sql/t-sql/database-console-commands/dbcc-traceon-trace-flags-transact-sql#trace-flags) - - -## Windows authentication - -**Title: Database users mapped with Windows authentication (integrated security) are not supported in Azure SQL Managed Instance** -**Category**: Warning - - -**Description** -Azure SQL Managed Instance supports two types of authentication: -- SQL Authentication, which uses a username and password -- Azure Active Directory Authentication, which uses identities managed by Azure Active Directory and is supported for managed and integrated domains. - -Database users mapped with Windows authentication (integrated security) are not supported in Azure SQL Managed Instance. - - -**Recommendation** -Federate the local Active Directory with Azure Active Directory. The Windows identity can then be replaced with the equivalent Azure Active Directory identities. Alternatively, migrate to SQL Server on Azure Virtual Machine. - -More information: [SQL Managed Instance security capabilities](../../database/security-overview.md#authentication) - - -## XP_cmdshell - -**Title: xp_cmdshell is not supported in Azure SQL Managed Instance.** -**Category**: Issue - - -**Description** -Xp_cmdshell, which spawns a Windows command shell and passes in a string for execution isn't supported in Azure SQL Managed Instance. - - - -**Recommendation** -Review impacted objects section in Azure Migrate to see all objects using xp_cmdshell and evaluate if the reference to xp_cmdshell or the impacted object can be removed. Consider exploring Azure Automation that delivers cloud-based automation and configuration service. Alternatively, migrate to SQL Server on Azure Virtual Machine. 
- -More information: [Stored Procedure differences in Azure SQL Managed Instance](../../managed-instance/transact-sql-tsql-differences-sql-server.md#stored-procedures-functions-and-triggers) - -## Next steps - -To start migrating your SQL Server to Azure SQL Managed Instance, see the [SQL Server to SQL Managed Instance migration guide](sql-server-to-managed-instance-guide.md). - -- For a matrix of the Microsoft and third-party services and tools that are available to assist you with various database and data migration scenarios as well as specialty tasks, see [Service and tools for data migration](../../../dms/dms-tools-matrix.md). - -- To learn more about Azure SQL Managed Instance, see: - - [Service Tiers in Azure SQL Managed Instance](../../managed-instance/sql-managed-instance-paas-overview.md#service-tiers) - - [Differences between SQL Server and Azure SQL Managed Instance](../../managed-instance/transact-sql-tsql-differences-sql-server.md) - - [Azure total Cost of Ownership Calculator](https://azure.microsoft.com/pricing/tco/calculator/) - - -- To learn more about the framework and adoption cycle for Cloud migrations, see - - [Cloud Adoption Framework for Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/contoso-migration-scale) - - [Best practices for costing and sizing workloads migrate to Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/migrate-best-practices-costs) - -- To assess the Application access layer, see [Data Access Migration Toolkit (Preview)](https://marketplace.visualstudio.com/items?itemName=ms-databasemigration.data-access-migration-toolkit) -- For details on how to perform Data Access Layer A/B testing see [Database Experimentation Assistant](/sql/dea/database-experimentation-assistant-overview). 
diff --git a/articles/azure-sql/migration-guides/virtual-machines/db2-to-sql-on-azure-vm-guide.md b/articles/azure-sql/migration-guides/virtual-machines/db2-to-sql-on-azure-vm-guide.md deleted file mode 100644 index 2eddcd70ac2ef..0000000000000 --- a/articles/azure-sql/migration-guides/virtual-machines/db2-to-sql-on-azure-vm-guide.md +++ /dev/null @@ -1,159 +0,0 @@ ---- -title: "Db2 to SQL Server on Azure VM: Migration guide" -titleSuffix: SQL Server on Azure VMs -description: This guide teaches you to migrate your IBM Db2 databases to SQL Server on Azure VM, by using SQL Server Migration Assistant for Db2. -ms.custom: "" -ms.service: virtual-machines-sql -ms.subservice: migration-guide -ms.devlang: -ms.topic: how-to -author: markjones-msft -ms.author: markjon -ms.reviewer: chadam, mathoma -ms.date: 05/14/2021 ---- -# Migration guide: IBM Db2 to SQL Server on Azure VM -[!INCLUDE[appliesto--sqlmi](../../includes/appliesto-sqlvm.md)] - -This guide teaches you to migrate your user databases from IBM Db2 to SQL Server on Azure VM, by using the SQL Server Migration Assistant for Db2. - -For other migration guides, see [Azure Database Migration Guides](/data-migration). - -## Prerequisites - -To migrate your Db2 database to SQL Server, you need: - -- To verify that your [source environment is supported](/sql/ssma/db2/installing-ssma-for-Db2-client-Db2tosql#prerequisites). -- [SQL Server Migration Assistant (SSMA) for Db2](https://www.microsoft.com/download/details.aspx?id=54254). -- [Connectivity](../../virtual-machines/windows/ways-to-connect-to-sql.md) between your source environment and your SQL Server VM in Azure. -- A target [SQL Server on Azure VM](../../virtual-machines/windows/create-sql-vm-portal.md). - -## Pre-migration - -After you have met the prerequisites, you're ready to discover the topology of your environment and assess the feasibility of your migration. 
- -### Assess - -Use SSMA for DB2 to review database objects and data, and assess databases for migration. - -To create an assessment, follow these steps: - -1. Open [SSMA for Db2](https://www.microsoft.com/download/details.aspx?id=54254). -1. Select **File** > **New Project**. -1. Provide a project name and a location to save your project. Then select a SQL Server migration target from the drop-down list, and select **OK**. - - :::image type="content" source="media/db2-to-sql-on-azure-vm-guide/new-project.png" alt-text="Screenshot that shows project details to specify."::: - - -1. On **Connect to Db2**, enter values for the Db2 connection details. - - :::image type="content" source="media/db2-to-sql-on-azure-vm-guide/connect-to-Db2.png" alt-text="Screenshot that shows options to connect to your Db2 instance."::: - - -1. Right-click the Db2 schema you want to migrate, and then choose **Create report**. This will generate an HTML report. Alternatively, you can choose **Create report** from the navigation bar after selecting the schema. - - :::image type="content" source="media/db2-to-sql-on-azure-vm-guide/create-report.png" alt-text="Screenshot that shows how to create a report."::: - -1. Review the HTML report to understand conversion statistics and any errors or warnings. You can also open the report in Excel to get an inventory of Db2 objects and the effort required to perform schema conversions. The default location for the report is in the report folder within *SSMAProjects*. - - For example: `drive:\\Documents\SSMAProjects\MyDb2Migration\report\report_`. - - :::image type="content" source="media/db2-to-sql-on-azure-vm-guide/report.png" alt-text="Screenshot of the report that you review to identify any errors or warnings."::: - - -### Validate data types - -Validate the default data type mappings, and change them based on requirements if necessary. To do so, follow these steps: - -1. Select **Tools** from the menu. -1. Select **Project Settings**. -1. 
Select the **Type mappings** tab. - - :::image type="content" source="media/db2-to-sql-on-azure-vm-guide/type-mapping.png" alt-text="Screenshot that shows selecting the schema and type mapping."::: - -1. You can change the type mapping for each table by selecting the table in the **Db2 Metadata Explorer**. - -### Convert schema - -To convert the schema, follow these steps: - -1. (Optional) Add dynamic or ad hoc queries to statements. Right-click the node, and then choose **Add statements**. -1. Select **Connect to SQL Server**. - 1. Enter connection details to connect to your instance of SQL Server on your Azure VM. - 1. Choose to connect to an existing database on the target server, or provide a new name to create a new database on the target server. - 1. Provide authentication details. - 1. Select **Connect**. - - :::image type="content" source="../../../../includes/media/virtual-machines-sql-server-connection-steps/rm-ssms-connect.png" alt-text="Screenshot that shows the details needed to connect to your SQL Server on Azure VM."::: - -1. Right-click the schema and then choose **Convert Schema**. Alternatively, you can choose **Convert Schema** from the top navigation bar after selecting your schema. - - :::image type="content" source="media/db2-to-sql-on-azure-vm-guide/convert-schema.png" alt-text="Screenshot that shows selecting the schema and converting it."::: - -1. After the conversion finishes, compare and review the structure of the schema to identify potential problems. Address the problems based on the recommendations. - - :::image type="content" source="media/db2-to-sql-on-azure-vm-guide/compare-review-schema-structure.png" alt-text="Screenshot that shows comparing and reviewing the structure of the schema to identify potential problems."::: - -1. In the **Output** pane, select **Review results**. In the **Error list** pane, review errors. -1. Save the project locally for an offline schema remediation exercise. 
From the **File** menu, select **Save Project**. This gives you an opportunity to evaluate the source and target schemas offline, and perform remediation before you can publish the schema to SQL Server on Azure VM. - -## Migrate - -After you have completed assessing your databases and addressing any discrepancies, the next step is to execute the migration process. - -To publish your schema and migrate your data, follow these steps: - -1. Publish the schema. In **SQL Server Metadata Explorer**, from the **Databases** node, right-click the database. Then select **Synchronize with Database**. - - :::image type="content" source="media/db2-to-sql-on-azure-vm-guide/synchronize-with-database.png" alt-text="Screenshot that shows the option to synchronize with database."::: - -1. Migrate the data. Right-click the database or object you want to migrate in **Db2 Metadata Explorer**, and choose **Migrate data**. Alternatively, you can select **Migrate Data** from the navigation bar. To migrate data for an entire database, select the check box next to the database name. To migrate data from individual tables, expand the database, expand **Tables**, and then select the check box next to the table. To omit data from individual tables, clear the check box. - - :::image type="content" source="media/db2-to-sql-on-azure-vm-guide/migrate-data.png" alt-text="Screenshot that shows selecting the schema and choosing to migrate data."::: - -1. Provide connection details for both the Db2 and SQL Server instances. -1. After migration finishes, view the **Data Migration Report**: - - :::image type="content" source="media/db2-to-sql-on-azure-vm-guide/data-migration-report.png" alt-text="Screenshot that shows where to review the data migration report."::: - -1. Connect to your instance of SQL Server on Azure VM by using [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms). Validate the migration by reviewing the data and schema. 
- - :::image type="content" source="media/db2-to-sql-on-azure-vm-guide/compare-schema-in-ssms.png" alt-text="Screenshot that shows comparing the schema in SQL Server Management Studio."::: - -## Post-migration - -After the migration is complete, you need to go through a series of post-migration tasks to ensure that everything is functioning as smoothly and efficiently as possible. - -### Remediate applications - -After the data is migrated to the target environment, all the applications that formerly consumed the source need to start consuming the target. Accomplishing this will in some cases require changes to the applications. - -### Perform tests - -Testing consists of the following activities: - -1. **Develop validation tests**: To test database migration, you need to use SQL queries. You must create the validation queries to run against both the source and the target databases. Your validation queries should cover the scope you have defined. -1. **Set up the test environment**: The test environment should contain a copy of the source database and the target database. Be sure to isolate the test environment. -1. **Run validation tests**: Run the validation tests against the source and the target, and then analyze the results. -1. **Run performance tests**: Run performance tests against the source and the target, and then analyze and compare the results. - -## Migration assets - -For additional assistance, see the following resources, which were developed in support of a real-world migration project engagement: - -|Asset |Description | -|---------|---------| -|[Data workload assessment model and tool](https://www.microsoft.com/download/details.aspx?id=103130)| This tool provides suggested "best fit" target platforms, cloud readiness, and application/database remediation level for a given workload. 
It offers simple, one-click calculation and report generation that helps to accelerate large estate assessments by providing and automated and uniform target platform decision process.| -|[Db2 zOS data assets discovery and assessment package](https://www.microsoft.com/download/details.aspx?id=103108)|After running the SQL script on a database, you can export the results to a file on the file system. Several file formats are supported, including \*.csv, so that you can capture the results in external tools such as spreadsheets. This method can be useful if you want to easily share results with teams that do not have the workbench installed.| -|[IBM Db2 LUW inventory scripts and artifacts](https://www.microsoft.com/download/details.aspx?id=103109)|This asset includes a SQL query that hits IBM Db2 LUW version 11.1 system tables and provides a count of objects by schema and object type, a rough estimate of "raw data" in each schema, and the sizing of tables in each schema, with results stored in a CSV format.| -|[IBM Db2 to SQL Server - Database Compare utility](https://www.microsoft.com/download/details.aspx?id=103016)|The Database Compare utility is a Windows console application that you can use to verify that the data is identical both on source and target platforms. You can use the tool to efficiently compare data down to the row or column level in all or selected tables, rows, and columns.| - -The Data SQL Engineering team developed these resources. This team's core charter is to unblock and accelerate complex modernization for data platform migration projects to Microsoft's Azure data platform. - -## Next steps - -After migration, review the [Post-migration validation and optimization guide](/sql/relational-databases/post-migration-validation-and-optimization-guide). 
- -For Microsoft and third-party services and tools that are available to assist you with various database and data migration scenarios, see [Data migration services and tools](../../../dms/dms-tools-matrix.md). - -For video content, see [Overview of the migration journey](https://azure.microsoft.com/resources/videos/overview-of-migration-and-recommended-tools-services/). diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/compare-review-schema-structure.png b/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/compare-review-schema-structure.png deleted file mode 100644 index 7cd8218480011..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/compare-review-schema-structure.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/compare-schema-in-ssms.png b/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/compare-schema-in-ssms.png deleted file mode 100644 index b5ad69d0efdc7..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/compare-schema-in-ssms.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/connect-to-db2.png b/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/connect-to-db2.png deleted file mode 100644 index cac66fecdf781..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/connect-to-db2.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/connect-to-sql-server.png b/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/connect-to-sql-server.png deleted 
file mode 100644 index b666ffbd5c190..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/connect-to-sql-server.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/convert-schema.png b/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/convert-schema.png deleted file mode 100644 index f278d5f1ccb85..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/convert-schema.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/create-report.png b/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/create-report.png deleted file mode 100644 index ca4e6628ca497..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/create-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/data-migration-report.png b/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/data-migration-report.png deleted file mode 100644 index 9c55dcc698876..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/data-migration-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/migrate-data.png b/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/migrate-data.png deleted file mode 100644 index f70bb63330030..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/migrate-data.png and /dev/null differ diff --git 
a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/new-project.png b/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/new-project.png deleted file mode 100644 index 6e1ed15e84e6d..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/new-project.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/report.png b/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/report.png deleted file mode 100644 index bb362f6b6c795..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/ssma-db2-new-project.png b/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/ssma-db2-new-project.png deleted file mode 100644 index 302e9e37b1440..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/ssma-db2-new-project.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/synchronize-with-database.png b/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/synchronize-with-database.png deleted file mode 100644 index 434cfe0cd7f5b..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/synchronize-with-database.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/type-mapping.png b/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/type-mapping.png deleted file mode 100644 index 
88f4fcee4ee4f..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/db2-to-sql-on-azure-vm-guide/type-mapping.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/choose-credentials.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/choose-credentials.png deleted file mode 100644 index 49d3f9e9ba9c4..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/choose-credentials.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/choose-oracle.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/choose-oracle.png deleted file mode 100644 index 1dc90e2efac67..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/choose-oracle.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/choose-search-option.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/choose-search-option.png deleted file mode 100644 index c4a903d885f4f..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/choose-search-option.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/collect-inventory-data.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/collect-inventory-data.png deleted file mode 100644 index 61ccaa598c674..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/collect-inventory-data.png and /dev/null differ diff --git 
a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/collection-summary-report.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/collection-summary-report.png deleted file mode 100644 index a70445251d17f..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/collection-summary-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/compare-schema.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/compare-schema.png deleted file mode 100644 index b7b401c48fec8..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/compare-schema.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/connect-to-oracle.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/connect-to-oracle.png deleted file mode 100644 index 842024b51be51..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/connect-to-oracle.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/connect-to-sql-vm.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/connect-to-sql-vm.png deleted file mode 100644 index 534931f67d7dd..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/connect-to-sql-vm.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/conversion-report.png 
b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/conversion-report.png deleted file mode 100644 index ea37c13dc18ce..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/conversion-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/convert-schema.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/convert-schema.png deleted file mode 100644 index 4e5d838b9f989..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/convert-schema.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/create-inventory-database.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/create-inventory-database.png deleted file mode 100644 index 2c187495bd78d..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/create-inventory-database.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/create-report.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/create-report.png deleted file mode 100644 index 03d6ede77276e..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/create-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/data-migration-report.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/data-migration-report.png deleted file mode 100644 index 2282525b414d9..0000000000000 Binary files 
a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/data-migration-report.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/migrate-data.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/migrate-data.png deleted file mode 100644 index e07f32630304d..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/migrate-data.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/new-project.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/new-project.png deleted file mode 100644 index 96db049c28c47..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/new-project.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/procedure-comparison.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/procedure-comparison.png deleted file mode 100644 index e199395acd954..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/procedure-comparison.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/report-review.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/report-review.png deleted file mode 100644 index d763614ecac6d..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/report-review.png and /dev/null differ diff --git 
a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/review-summary.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/review-summary.png deleted file mode 100644 index c100eaeb69e65..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/review-summary.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/select-database.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/select-database.png deleted file mode 100644 index 6b2706d7cfeda..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/select-database.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/select-schema.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/select-schema.png deleted file mode 100644 index 3d72ae8427898..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/select-schema.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/set-credential-order.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/set-credential-order.png deleted file mode 100644 index e83ab61852ddb..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/set-credential-order.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/specify-credentials-for-each-computer.png 
b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/specify-credentials-for-each-computer.png deleted file mode 100644 index da756c88f4eaa..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/specify-credentials-for-each-computer.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/ssma-tester-new.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/ssma-tester-new.png deleted file mode 100644 index e1758e05b28de..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/ssma-tester-new.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/synchronize-database-review.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/synchronize-database-review.png deleted file mode 100644 index 309e861c3eeab..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/synchronize-database-review.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/synchronize-database.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/synchronize-database.png deleted file mode 100644 index 4c5eb334412c8..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/synchronize-database.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/table-mapping.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/table-mapping.png deleted file mode 
100644 index f67be7f9f7be7..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/table-mapping.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/test-call-ordering.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/test-call-ordering.png deleted file mode 100644 index c479a8c763962..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/test-call-ordering.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-finalize-case.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-finalize-case.png deleted file mode 100644 index de1eee246279e..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-finalize-case.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-init-test-case.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-init-test-case.png deleted file mode 100644 index 2e5a1df10f3b8..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-init-test-case.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-oracle-connect.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-oracle-connect.png deleted file mode 100644 index e190527dd6ecf..0000000000000 Binary files 
a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-oracle-connect.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-repo-run.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-repo-run.png deleted file mode 100644 index 05daa7a7f739e..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-repo-run.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-run-status.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-run-status.png deleted file mode 100644 index e5bb3046c70ba..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-run-status.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-run-test-case.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-run-test-case.png deleted file mode 100644 index a99512c2a3c80..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-run-test-case.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-select-configure-affected.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-select-configure-affected.png deleted file mode 100644 index f1a66ed10d5b1..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-select-configure-affected.png and /dev/null differ diff --git 
a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-select-configure-objects.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-select-configure-objects.png deleted file mode 100644 index 15d1b666152bf..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-select-configure-objects.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-sqlservervm-connect.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-sqlservervm-connect.png deleted file mode 100644 index 1e1a3c6512b14..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-sqlservervm-connect.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-test-failed.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-test-failed.png deleted file mode 100644 index dcb5be28184b3..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-test-failed.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-test-repo.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-test-repo.png deleted file mode 100644 index 9db1b8913da8a..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-test-repo.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-test-result.png 
b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-test-result.png deleted file mode 100644 index 84b9360dd793d..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-test-result.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-test-success.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-test-success.png deleted file mode 100644 index b9f09614c671e..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/tester-test-success.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/type-mappings.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/type-mappings.png deleted file mode 100644 index cfd3041e2cdd2..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/type-mappings.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/validate-in-ssms.png b/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/validate-in-ssms.png deleted file mode 100644 index 41bca417a4964..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/oracle-to-sql-on-azure-vm-guide/validate-in-ssms.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/sql-server-distributed-availability-group-migrate-ag/migrate-availability-group-with-dag.png b/articles/azure-sql/migration-guides/virtual-machines/media/sql-server-distributed-availability-group-migrate-ag/migrate-availability-group-with-dag.png deleted file 
mode 100644 index 4532eb508809c..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/sql-server-distributed-availability-group-migrate-ag/migrate-availability-group-with-dag.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/sql-server-distributed-availability-group-migrate-standalone-instance/migrate-single-instance-with-dag.png b/articles/azure-sql/migration-guides/virtual-machines/media/sql-server-distributed-availability-group-migrate-standalone-instance/migrate-single-instance-with-dag.png deleted file mode 100644 index b5b24ba1344ae..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/sql-server-distributed-availability-group-migrate-standalone-instance/migrate-single-instance-with-dag.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/sql-server-to-sql-on-azure-vm-individual-databases-guide/virtual-machine-migration-downtime.png b/articles/azure-sql/migration-guides/virtual-machines/media/sql-server-to-sql-on-azure-vm-individual-databases-guide/virtual-machine-migration-downtime.png deleted file mode 100644 index cfc200801d56f..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/sql-server-to-sql-on-azure-vm-individual-databases-guide/virtual-machine-migration-downtime.png and /dev/null differ diff --git a/articles/azure-sql/migration-guides/virtual-machines/media/sql-server-to-sql-on-azure-vm-migration-overview/migration-process-flow-small.png b/articles/azure-sql/migration-guides/virtual-machines/media/sql-server-to-sql-on-azure-vm-migration-overview/migration-process-flow-small.png deleted file mode 100644 index 1b04502dc966a..0000000000000 Binary files a/articles/azure-sql/migration-guides/virtual-machines/media/sql-server-to-sql-on-azure-vm-migration-overview/migration-process-flow-small.png and /dev/null differ diff --git 
a/articles/azure-sql/migration-guides/virtual-machines/oracle-to-sql-on-azure-vm-guide.md b/articles/azure-sql/migration-guides/virtual-machines/oracle-to-sql-on-azure-vm-guide.md deleted file mode 100644 index 6a47da506c534..0000000000000 --- a/articles/azure-sql/migration-guides/virtual-machines/oracle-to-sql-on-azure-vm-guide.md +++ /dev/null @@ -1,375 +0,0 @@ ---- -title: "Oracle to SQL Server on Azure Virtual Machines: Migration guide" -titleSuffix: SQL Server on Azure VMs -description: This guide teaches you to migrate your Oracle schemas to SQL Server on Azure Virtual Machines by using SQL Server Migration Assistant for Oracle. -ms.service: virtual-machines-sql -ms.subservice: migration-guide -ms.custom: -ms.devlang: -ms.topic: how-to -author: mokabiru -ms.author: mokabiru -ms.reviewer: mathoma -ms.date: 11/06/2020 ---- -# Migration guide: Oracle to SQL Server on Azure Virtual Machines -[!INCLUDE[appliesto-sqldb-sqlmi](../../includes/appliesto-sqldb.md)] - -This guide teaches you to migrate your Oracle schemas to SQL Server on Azure Virtual Machines by using SQL Server Migration Assistant for Oracle. - -For other migration guides, see [Database Migration](/data-migration). - -## Prerequisites - -To migrate your Oracle schema to SQL Server on Azure Virtual Machines, you need: - -- A supported source environment. -- [SQL Server Migration Assistant (SSMA) for Oracle](https://www.microsoft.com/download/details.aspx?id=54258). -- A target [SQL Server VM](../../virtual-machines/windows/sql-vm-create-portal-quickstart.md). -- The [necessary permissions for SSMA for Oracle](/sql/ssma/oracle/connecting-to-oracle-database-oracletosql) and the [provider](/sql/ssma/oracle/connect-to-oracle-oracletosql). -- Connectivity and sufficient permissions to access the source and the target. - - -## Pre-migration - -To prepare to migrate to the cloud, verify that your source environment is supported and that you've addressed any prerequisites. 
Doing so will help to ensure an efficient and successful migration. - -This part of the process involves: -- Conducting an inventory of the databases that you need to migrate. -- Assessing those databases for potential migration problems or blockers. -- Resolving any problems that you uncover. - -### Discover - -Use [MAP Toolkit](https://go.microsoft.com/fwlink/?LinkID=316883) to identify existing data sources and details about the features your business is using. Doing so will give you a better understanding of the migration and help you plan for it. This process involves scanning the network to identify your organization's Oracle instances and the versions and features you're using. - -To use MAP Toolkit to do an inventory scan, follow these steps: - - -1. Open [MAP Toolkit](https://go.microsoft.com/fwlink/?LinkID=316883). - - -1. Select **Create/Select database**: - - ![Screenshot that shows the Create/Select database option.](./media/oracle-to-sql-on-azure-vm-guide/select-database.png) - -1. Select **Create an inventory database**. Enter the name for the new inventory database and a brief description, and then select **OK** - - :::image type="content" source="media/oracle-to-sql-on-azure-vm-guide/create-inventory-database.png" alt-text="Screenshot that shows the interface for creating an inventory database."::: - -1. Select **Collect inventory data** to open the **Inventory and Assessment Wizard**: - - :::image type="content" source="media/oracle-to-sql-on-azure-vm-guide/collect-inventory-data.png" alt-text="Screenshot that shows the Collect inventory data link."::: - - -1. In the **Inventory and Assessment Wizard**, select **Oracle**, and then select **Next**: - - ![Screenshot that shows the Inventory Scenarios page of the Inventory and Assessment Wizard.](./media/oracle-to-sql-on-azure-vm-guide/choose-oracle.png) - -1. 
Select the computer search option that best suits your business needs and environment, and then select **Next**: - - ![Screenshot that shows the Discovery Methods page of the Inventory and Assessment Wizard.](./media/oracle-to-sql-on-azure-vm-guide/choose-search-option.png) - -1. Either enter credentials or create new credentials for the systems that you want to explore, and then select **Next**: - - ![Screenshot that shows the All Computers Credentials page of the Inventory and Assessment Wizard.](./media/oracle-to-sql-on-azure-vm-guide/choose-credentials.png) - - -1. Set the order of the credentials, and then select **Next**: - - ![Screenshot that shows the Credentials Order page of the Inventory and Assessment Wizard.](./media/oracle-to-sql-on-azure-vm-guide/set-credential-order.png) - - -1. Enter the credentials for each computer you want to discover. You can use unique credentials for every computer/machine, or you can use the All Computers credential list. - - ![Screenshot that shows the Specify Computers and Credentials page of the Inventory and Assessment Wizard.](./media/oracle-to-sql-on-azure-vm-guide/specify-credentials-for-each-computer.png) - - -1. Verify your selections, and then select **Finish**: - - ![Screenshot that shows the Summary page of the Inventory and Assessment Wizard.](./media/oracle-to-sql-on-azure-vm-guide/review-summary.png) - - -1. After the scan finishes, view the **Data Collection** summary. The scan might take a few minutes, depending on the number of databases. Select **Close** when you're done: - - ![Screenshot that shows the Data Collection summary.](./media/oracle-to-sql-on-azure-vm-guide/collection-summary-report.png) - - -1. Select **Options** to generate a report about the Oracle assessment and database details. Select both options, one at a time, to generate the report. 
- - -### Assess - -After you identify the data sources, use [SQL Server Migration Assistant for Oracle](https://www.microsoft.com/download/details.aspx?id=54258) to assess the Oracle instances migrating to the SQL Server VM. The assistant will help you understand the gaps between the source and destination databases. You can review database objects and data, assess databases for migration, migrate database objects to SQL Server, and then migrate data to SQL Server. - -To create an assessment, follow these steps: - - -1. Open [SQL Server Migration Assistant for Oracle](https://www.microsoft.com/download/details.aspx?id=54258). -1. On the **File** menu, select **New Project**. -1. Provide a project name and a location for your project, and then select a SQL Server migration target from the list. Select **OK**: - - ![Screenshot that shows the New Project dialog box.](./media/oracle-to-sql-on-azure-vm-guide/new-project.png) - - -1. Select **Connect to Oracle**. Enter values for the Oracle connection in the **Connect to Oracle** dialog box: - - ![Screenshot that shows the Connect to Oracle dialog box.](./media/oracle-to-sql-on-azure-vm-guide/connect-to-oracle.png) - - Select the Oracle schemas that you want to migrate: - - ![Screenshot that shows the list of Oracle schemas that can be migrated.](./media/oracle-to-sql-on-azure-vm-guide/select-schema.png) - - -1. In **Oracle Metadata Explorer**, right-click the Oracle schema that you want to migrate, and then select **Create Report**. Doing so will generate an HTML report. Or, you can select the database and then select **Create report** in the top menu. - - ![Screenshot that shows how to create a report.](./media/oracle-to-sql-on-azure-vm-guide/create-report.png) - -1. Review the HTML report for conversion statistics, errors, and warnings. Analyze it to understand conversion problems and resolutions. 
- - You can also open the report in Excel to get an inventory of Oracle objects and the effort required to complete schema conversions. The default location for the report is the report folder in SSMAProjects. - - For example: `drive:\\Documents\SSMAProjects\MyOracleMigration\report\report_2016_11_12T02_47_55\` - - - ![Screenshot that shows a conversion report.](./media/oracle-to-sql-on-azure-vm-guide/conversion-report.png) - - -### Validate data types - -Validate the default data type mappings and change them based on requirements, if necessary. To do so, follow these steps: - - -1. On the **Tools** menu, select **Project Settings**. -1. Select the **Type Mappings** tab. - - ![Screenshot that shows the Type Mappings tab.](./media/oracle-to-sql-on-azure-vm-guide/type-mappings.png) - -1. You can change the type mapping for each table by selecting the table in **Oracle Metadata Explorer**. - -### Convert the schema - -To convert the schema, follow these steps: - -1. (Optional) To convert dynamic or ad-hoc queries, right-click the node and select **Add statement**. - -1. Select **Connect to SQL Server** in the top menu. - 1. Enter connection details for your SQL Server on Azure VM. - 1. Select your target database from the list, or provide a new name. If you provide a new name, a database will be created on the target server. - 1. Provide authentication details. - 1. Select **Connect**. - - - ![Screenshot that shows how to connect to SQL Server.](./media/oracle-to-sql-on-azure-vm-guide/connect-to-sql-vm.png) - -1. Right-click the Oracle schema in **Oracle Metadata Explorer** and select **Convert Schema**. Or, you can select **Convert schema** in the top menu: - - ![Screenshot that shows how to convert the schema.](./media/oracle-to-sql-on-azure-vm-guide/convert-schema.png) - - -1. After the schema conversion is complete, review the converted objects and compare them to the original objects to identify potential problems. 
Use the recommendations to address any problems: - - ![Screenshot that shows a comparison of two schemas.](./media/oracle-to-sql-on-azure-vm-guide/table-mapping.png) - - Compare the converted Transact-SQL text to the original stored procedures and review the recommendations: - - ![Screenshot that shows Transact-SQL, stored procedures, and a warning.](./media/oracle-to-sql-on-azure-vm-guide/procedure-comparison.png) - - You can save the project locally for an offline schema remediation exercise. To do so, select **Save Project** on the **File** menu. Saving the project locally lets you evaluate the source and target schemas offline and perform remediation before you publish the schema to SQL Server. - -1. Select **Review results** in the **Output** pane, and then review errors in the **Error list** pane. -1. Save the project locally for an offline schema remediation exercise. Select **Save Project** on the **File** menu. This gives you an opportunity to evaluate the source and target schemas offline and perform remediation before you publish the schema to SQL Server on Azure Virtual Machines. - - -## Migrate - -After you have the necessary prerequisites in place and have completed the tasks associated with the pre-migration stage, you're ready to start the schema and data migration. Migration involves two steps: publishing the schema and migrating the data. - - -To publish your schema and migrate the data, follow these steps: - -1. Publish the schema: right-click the database in **SQL Server Metadata Explorer** and select **Synchronize with Database**. Doing so publishes the Oracle schema to SQL Server on Azure Virtual Machines. - - ![Screenshot that shows the Synchronize with Database command.](./media/oracle-to-sql-on-azure-vm-guide/synchronize-database.png) - - Review the mapping between your source project and your target: - - ![Screenshot that shows the synchronization status.](./media/oracle-to-sql-on-azure-vm-guide/synchronize-database-review.png) - - - -1. 
Migrate the data: right-click the database or object that you want to migrate in **Oracle Metadata Explorer** and select **Migrate Data**. Or, you can select the **Migrate Data** tab. To migrate data for an entire database, select the check box next to the database name. To migrate data from individual tables, expand the database, expand **Tables**, and then select the checkboxes next to the tables. To omit data from individual tables, clear the checkboxes. - - ![Screenshot that shows the Migrate Data command.](./media/oracle-to-sql-on-azure-vm-guide/migrate-data.png) - -1. Provide connection details for Oracle and SQL Server on Azure Virtual Machines in the dialog box. -1. After the migration finishes, view the **Data Migration Report**: - - ![Screenshot that shows the Data Migration Report.](./media/oracle-to-sql-on-azure-vm-guide/data-migration-report.png) - -1. Connect to your SQL Server on Azure Virtual Machines instance by using [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms). Validate the migration by reviewing the data and schema: - - - ![Screenshot that shows a SQL Server instance in SSMA.](./media/oracle-to-sql-on-azure-vm-guide/validate-in-ssms.png) - -Instead of using SSMA, you could use SQL Server Integration Services (SSIS) to migrate the data. To learn more, see: -- The article [SQL Server Integration Services](/sql/integration-services/sql-server-integration-services). -- The white paper [SSIS for Azure and Hybrid Data Movement](https://download.microsoft.com/download/D/2/0/D20E1C5F-72EA-4505-9F26-FEF9550EFD44/SSIS%20Hybrid%20and%20Azure.docx). - - -## Post-migration - -After you complete the migration stage, you need to complete a series of post-migration tasks to ensure that everything is running as smoothly and efficiently as possible. 
- -### Remediate applications - -After the data is migrated to the target environment, all the applications that previously consumed the source need to start consuming the target. Making those changes might require changes to the applications. - -[Data Access Migration Toolkit](https://marketplace.visualstudio.com/items?itemName=ms-databasemigration.data-access-migration-toolkit) is an extension for Visual Studio Code. It allows you to analyze your Java source code and detect data access API calls and queries. The toolkit provides a single-pane view of what needs to be addressed to support the new database back end. To learn more, see [Migrate your Java application from Oracle](https://techcommunity.microsoft.com/t5/microsoft-data-migration/migrate-your-java-applications-from-oracle-to-sql-server-with/ba-p/368727). - -### Perform tests - -To test your database migration, complete these activities: - -1. **Develop validation tests**. To test database migration, you need to use SQL queries. Create the validation queries to run against both the source and target databases. Your validation queries should cover the scope that you've defined. - -2. **Set up a test environment**. The test environment should contain a copy of the source database and the target database. Be sure to isolate the test environment. - -3. **Run validation tests**. Run the validation tests against the source and the target, and then analyze the results. - -4. **Run performance tests**. Run performance test against the source and the target, and then analyze and compare the results. - -### Validate migrated objects - -Microsoft SQL Server Migration Assistant for Oracle Tester (SSMA Tester) allows you to test migrated database objects. The SSMA Tester is used to verify that converted objects behave in the same way. - -#### Create test case - -1. Open SSMA for Oracle, select **Tester** followed by **New Test Case**. 
- - :::image type="content" source="./media/oracle-to-sql-on-azure-vm-guide/ssma-tester-new.png" alt-text="Screenshot that shows new test case."::: - -1. On the Test Case wizard, provide the following information: - - **Name**: Enter the name to identify the test case. - - **Creation date**: Today's current date, defined automatically. - - **Last Modified date**: filled in automatically, should not be changed. - - **Description**: Enter any additional information to identify the purpose of the test case. - - :::image type="content" source="./media/oracle-to-sql-on-azure-vm-guide/tester-init-test-case.png" alt-text="Screenshot that shows step to initialize a test case."::: - -1. Select the objects that are part of the test case from the Oracle object tree located on the left side. - - :::image type="content" source="./media/oracle-to-sql-on-azure-vm-guide/tester-select-configure-objects.png" alt-text="Screenshot that shows step to select and configure object."::: - - In this example, stored procedure `ADD_REGION` and table `REGION` are selected. - - To learn more, see [Selecting and configuring objects to test.](/sql/ssma/oracle/selecting-and-configuring-objects-to-test-oracletosql) - -1. Next, select the tables, foreign keys and other dependent objects from the Oracle object tree in the left window. - - :::image type="content" source="./media/oracle-to-sql-on-azure-vm-guide/tester-select-configure-affected.png" alt-text="Screenshot that shows step to select and configure affected object."::: - - To learn more, see [Selecting and configuring affected objects.](/sql/ssma/oracle/selecting-and-configuring-affected-objects-oracletosql) - -1. Review the evaluation sequence of objects. Change the order by clicking the buttons in the grid.. - - :::image type="content" source="./media/oracle-to-sql-on-azure-vm-guide/test-call-ordering.png" alt-text="Screenshot that shows step to sequence test object execution."::: - -1. 
Finalize the test case by reviewing the information provided in the previous steps. Configure the test execution options based on the test scenario. - - :::image type="content" source="./media/oracle-to-sql-on-azure-vm-guide/tester-finalize-case.png" alt-text="Screenshot that shows step to finalize object."::: - - For more information on test case settings,[Finishing test case preparation](/sql/ssma/oracle/finishing-test-case-preparation-oracletosql) - -1. Click on finish to create the test case. - - :::image type="content" source="./media/oracle-to-sql-on-azure-vm-guide/tester-test-repo.png" alt-text="Screenshot that shows step to test repo."::: - -#### Run test case - -When SSMA Tester runs a test case, the test engine executes the objects selected for testing and generates a verification report. - -1. Select the test case from test repository and then click run. - :::image type="content" source="./media/oracle-to-sql-on-azure-vm-guide/tester-repo-run.png" alt-text="Screenshot that shows to review test repo."::: - -1. Review the launch test case and click run. - - :::image type="content" source="./media/oracle-to-sql-on-azure-vm-guide/tester-run-test-case.png" alt-text="Screenshot that shows step to launch test case."::: - -1. Next, provide Oracle source credentials. Click connect after entering the credentials. - - :::image type="content" source="./media/oracle-to-sql-on-azure-vm-guide/tester-oracle-connect.png" alt-text="Screenshot that shows step to connect to oracle source."::: - -1. Provide target SQL Server credentials and click connect. - - :::image type="content" source="./media/oracle-to-sql-on-azure-vm-guide/tester-sqlservervm-connect.png" alt-text="Screenshot that shows step to connect to sql target."::: - - On success, the test case moves to initialization stage. - -1. A real-time progress bar shows the execution status of the test run. 
- - :::image type="content" source="./media/oracle-to-sql-on-azure-vm-guide/tester-run-status.png" alt-text="Screenshot that shows tester test progress."::: - -1. Review the report after the test is completed. The report provides the statistics, any errors during the test run and a detail report. - - :::image type="content" source="./media/oracle-to-sql-on-azure-vm-guide/tester-test-result.png" alt-text="Screenshot that shows a sample tester test report"::: - -7.Click details to get more information. - - Example of positive data validation. - :::image type="content" source="./media/oracle-to-sql-on-azure-vm-guide/tester-test-success.png" alt-text="Screenshot that shows a sample tester success report."::: - - Example of failed data validation. - :::image type="content" source="./media/oracle-to-sql-on-azure-vm-guide/tester-test-failed.png" alt-text="Screenshot that shows tester failure report."::: - -### Optimize - -The post-migration phase is crucial for reconciling any data accuracy problems and verifying completeness. It's also critical for addressing performance issues with the workload. - -> [!Note] -> For more information about these problems and specific steps to mitigate them, see the [Post-migration validation and optimization guide](/sql/relational-databases/post-migration-validation-and-optimization-guide). - - -## Migration resources - -For more help with completing this migration scenario, see the following resources, which were developed to support a real-world migration project. 
- -| **Title/Link** | **Description** | -| ------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [Data Workload Assessment Model and Tool](https://www.microsoft.com/download/details.aspx?id=103130) | This tool provides suggested best-fit target platforms, cloud readiness, and application/database remediation levels for a given workload. It offers simple one-click calculation and report generation that helps to accelerate large estate assessments by providing an automated and uniform target-platform decision process. | -| [Oracle Inventory Script Artifacts](https://www.microsoft.com/download/details.aspx?id=103121) | This asset includes a PL/SQL query that targets Oracle system tables and provides a count of objects by schema type, object type, and status. It also provides a rough estimate of raw data in each schema and the sizing of tables in each schema, with results stored in a CSV format. | -| [Automate SSMA Oracle Assessment Collection & Consolidation](https://www.microsoft.com/download/details.aspx?id=103120) | This set of resources uses a .csv file as entry (sources.csv in the project folders) to produce the XML files that you need to run an SSMA assessment in console mode. You provide the source.csv file by taking an inventory of existing Oracle instances. 
The output files are AssessmentReportGeneration_source_1.xml, ServersConnectionFile.xml, and VariableValueFile.xml.| -| [SSMA issues and possible remedies when migrating Oracle databases](https://aka.ms/dmj-wp-ssma-oracle-errors) | With Oracle, you can assign a non-scalar condition in a WHERE clause. SQL Server doesn't support this type of condition. So SSMA for Oracle doesn't convert queries that have a non-scalar condition in the WHERE clause. Instead, it generates an error: O2SS0001. This white paper provides details on the problem and ways to resolve it. | -| [Oracle to SQL Server Migration Handbook](https://github.com/microsoft/DataMigrationTeam/blob/master/Whitepapers/Oracle%20to%20SQL%20Server%20Migration%20Handbook.pdf) | This document focuses on the tasks associated with migrating an Oracle schema to the latest version of SQL Server. If the migration requires changes to features/functionality, you need to carefully consider the possible effect of each change on the applications that use the database. | -|[Oracle to SQL Server - Database Compare utility](https://www.microsoft.com/download/details.aspx?id=103016)|SSMA for Oracle Tester is the recommended tool to automatically validate the database object conversion and data migration, and it's a superset of Database Compare functionality.

    If you're looking for an alternative data validation option, you can use the Database Compare utility to compare data down to the row or column level in all or selected tables, rows, and columns.| - - -The Data SQL Engineering team developed these resources. This team's core charter is to unblock and accelerate complex modernization for data-platform migration projects to the Microsoft Azure data platform. - - -## Next steps - -- To check the availability of services applicable to SQL Server, see the [Azure Global infrastructure center](https://azure.microsoft.com/global-infrastructure/services/?regions=all&products=synapse-analytics,virtual-machines,sql-database). - -- For a matrix of the Microsoft and third-party services and tools that are available to help you with various database and data migration scenarios and specialized tasks, see [Services and tools for data migration](../../../dms/dms-tools-matrix.md). - -- To learn more about Azure SQL, see: - - [Deployment options](../../azure-sql-iaas-vs-paas-what-is-overview.md) - - [SQL Server on Azure Virtual Machines](../../virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md) - - [Azure total Cost of Ownership Calculator](https://azure.microsoft.com/pricing/tco/calculator/) - - -- To learn more about the framework and adoption cycle for cloud migrations, see: - - [Cloud Adoption Framework for Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/contoso-migration-scale) - - [Best practices to cost and size workloads migrated to Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/migrate-best-practices-costs) - -- For information about licensing, see: - - [Bring your own license with the Azure Hybrid Benefit](../../virtual-machines/windows/licensing-model-azure-hybrid-benefit-ahb-change.md) - - [Get free extended support for SQL Server 2008 and SQL Server 2008 R2](../../virtual-machines/windows/sql-server-2008-extend-end-of-support.md) - -- To assess the 
application access layer, use [Data Access Migration Toolkit Preview](https://marketplace.visualstudio.com/items?itemName=ms-databasemigration.data-access-migration-toolkit). -- For details on how to do data access layer A/B testing, see [Overview of Database Experimentation Assistant](/sql/dea/database-experimentation-assistant-overview). diff --git a/articles/azure-sql/migration-guides/virtual-machines/sql-server-availability-group-to-sql-on-azure-vm.md b/articles/azure-sql/migration-guides/virtual-machines/sql-server-availability-group-to-sql-on-azure-vm.md deleted file mode 100644 index 6c6864a346b0f..0000000000000 --- a/articles/azure-sql/migration-guides/virtual-machines/sql-server-availability-group-to-sql-on-azure-vm.md +++ /dev/null @@ -1,355 +0,0 @@ ---- -title: Migrate availability group -titleSuffix: SQL Server on Azure VMs -description: Learn how to lift and shift your Always On availability group high availability solution to SQL Server on Azure VMs using Azure Migrate. -ms.service: virtual-machines-sql -ms.subservice: migration-guide -author: rahugup -manager: bsiva -ms.topic: how-to -ms.date: 4/25/2021 -ms.author: rahugup -ms.reviewer: mathoma ---- -# Migrate availability group to SQL Server on Azure VM - -This article teaches you to migrate your SQL Server Always On availability group to SQL Server on Azure VMs using the [Azure Migrate: Server Migration tool](../../../migrate/migrate-services-overview.md#azure-migrate-server-migration-tool). Using the migration tool, you will be able to migrate each replica in the availability group to an Azure VM hosting SQL Server, as well as the cluster metadata, availability group metadata and other necessary high availability components. - -In this article, you learn how to: - -> [!div class="checklist"] -> * Prepare Azure and source environment for migration. -> * Start replicating servers. -> * Monitor replication. -> * Run a full server migration. -> * Reconfigure Always On availability group. 
- - -This guide uses the agent-based migration approach of Azure Migrate, which treats any server or virtual machine as a physical server. When migrating physical machines, Azure Migrate: Server Migration uses the same replication architecture as the agent-based disaster recovery in the Azure Site Recovery service, and some components share the same code base. Some content might link to Site Recovery documentation. - - -## Prerequisites - - -Before you begin this tutorial, you should complete the following prerequisites: - -1. An Azure subscription. Create a [free account](https://azure.microsoft.com/pricing/free-trial/), if necessary. -1. Install the [Azure PowerShell `Az` module](/powershell/azure/install-az-ps). -1. Download the [PowerShell samples scripts](https://github.com/Azure/azure-docs-powershell-samples/tree/master/azure-migrate/SQL%20Migration) from the GitHub repository. - -## Prepare Azure - -Prepare Azure for migration with the [Server Migration tool](../../../migrate/migrate-services-overview.md#azure-migrate-server-migration-tool). - -|**Task** | **Details**| -|--- | --- -|**Create an Azure Migrate project** | Your Azure account needs Contributor or Owner permissions to [create a new project](../../../migrate/create-manage-projects.md).| -|**Verify permissions for your Azure account** | Your Azure account needs Contributor or Owner permissions on the Azure subscription, permissions to register Azure Active Directory (Azure AD) apps, and User Access Administrator permissions on the Azure subscription to create a Key Vault, to create a VM, and to write to an Azure managed disk. | -|**Set up an Azure virtual network** | [Setup](../../../virtual-network/virtual-networks-overview.md) an Azure virtual network (VNet). When you replicate to Azure, Azure VMs are created and joined to the Azure VNet that you specify when you set up migration.| - - -To check you have proper permissions, follow these steps: - -1. 
In the Azure portal, open the subscription, and select **Access control (IAM)**. -2. In **Check access**, find the relevant account, and select it to view permissions. -3. You should have **Contributor** or **Owner** permissions. - - If you just created a free Azure account, you're the owner of your subscription. - - If you're not the subscription owner, work with the owner to assign the role. - -If you need to assign permissions, follow the steps in [Prepare for an Azure user account](../../../migrate/tutorial-discover-vmware.md#prepare-an-azure-user-account). - - -## Prepare for migration - -To prepare for server migration, verify the physical server settings, and prepare to deploy a replication appliance. - -### Check machine requirements - - -Ensure source machines comply with requirements to migrate to Azure. Follow these steps: - -1. [Verify](../../../migrate/migrate-support-matrix-physical-migration.md#physical-server-requirements) server requirements. -1. Verify that source machines that you replicate to Azure comply with [Azure VM requirements](../../../migrate/migrate-support-matrix-physical-migration.md#azure-vm-requirements). -1. Some [Windows](../../../migrate/prepare-for-migration.md#windows-machines) sources require a few additional changes. Migrating the source before making these changes could prevent the VM from booting in Azure. For some operating systems, Azure Migrate makes these changes automatically. - - -### Prepare for replication - -Azure Migrate: Server Migration uses a replication appliance to replicate machines to Azure. The replication appliance runs the following components: - -- **Configuration server**: The configuration server coordinates communications between on-premises and Azure, and manages data replication. -- **Process server**: The process server acts as a replication gateway. It receives replication data; optimizes it with caching, compression, and encryption, and sends it to a cache storage account in Azure. 
- -Prepare for appliance deployment as follows: - -- Create a Windows Server 2016 machine to host the replication appliance. Review the [machine requirements](../../../migrate/migrate-replication-appliance.md#appliance-requirements). -- The replication appliance uses MySQL. Review the [options](../../../migrate/migrate-replication-appliance.md#mysql-installation) for installing MySQL on the appliance. -- Review the Azure URLs required for the replication appliance to access [public](../../../migrate/migrate-replication-appliance.md#url-access) and [government](../../../migrate/migrate-replication-appliance.md#azure-government-url-access) clouds. -- Review [port](../../../migrate/migrate-replication-appliance.md#port-access) access requirements for the replication appliance. - -> [!NOTE] -> The replication appliance should be installed on a machine other than the source machine you are replicating or migrating, and not on any machine that has had the Azure Migrate discovery and assessment appliance installed before. - -### Download replication appliance installer - -To download the replication appliance installer, follow these steps: - -1. In the Azure Migrate project > **Servers**, in **Azure Migrate: Server Migration**, select **Discover**. - - ![Discover VMs](../../../migrate/media/tutorial-migrate-physical-virtual-machines/migrate-discover.png) - -1. In **Discover machines** > **Are your machines virtualized?**, select **Physical or other (AWS, GCP, Xen, etc.)**. -1. In **Target region**, select the Azure region to which you want to migrate the machines. -1. Select **Confirm that the target region for migration is region-name**. -1. Select **Create resources**. This creates an Azure Site Recovery vault in the background. - - If you've already set up migration with Azure Migrate: Server Migration, the target option can't be configured, since resources were set up previously. - - You can't change the target region for this project after selecting this button. 
- - All subsequent migrations are to this region. - -1. In **Do you want to install a new replication appliance?**, select **Install a replication appliance**. -1. In **Download and install the replication appliance software**, download the appliance installer, and the registration key. You need to the key in order to register the appliance. The key is valid for five days after it's downloaded. - - ![Download provider](../../../migrate/media/tutorial-migrate-physical-virtual-machines/download-provider.png) - -1. Copy the appliance setup file and key file to the Windows Server 2016 machine you created for the appliance. -1. After the installation completes, the Appliance configuration wizard will launch automatically (You can also launch the wizard manually by using the cspsconfigtool shortcut that is created on the desktop of the appliance machine). Use the **Manage Accounts** tab of the wizard to create a dummy account with the following details: - - - "guest" as the friendly name - - "username" as the username - - "password" as the password for the account. - - You will use this dummy account in the Enable Replication stage. - -1. After setup completes, and the appliance restarts, in **Discover machines**, select the new appliance in **Select Configuration Server**, and select **Finalize registration**. Finalize registration performs a couple of final tasks to prepare the replication appliance. - - ![Finalize registration](../../../migrate/media/tutorial-migrate-physical-virtual-machines/finalize-registration.png) - - -## Install Mobility service - -Install the Mobility service agent on the servers you want to migrate. The agent installers are available on the replication appliance. Find the right installer, and install the agent on each machine you want to migrate. - - -To install the Mobility service, follow these steps: - -1. Sign in to the replication appliance. -1. Navigate to **%ProgramData%\ASR\home\svsystems\pushinstallsvc\repository**. -1. 
Find the installer for the machine operating system and version. Review [supported operating systems](../../../site-recovery/vmware-physical-azure-support-matrix.md#replicated-machines). -1. Copy the installer file to the machine you want to migrate. -1. Make sure that you have the passphrase that was generated when you deployed the appliance. - - Store the file in a temporary text file on the machine. - - You can obtain the passphrase on the replication appliance. From the command line, run **C:\ProgramData\ASR\home\svsystems\bin\genpassphrase.exe -v** to view the current passphrase. - - Don't regenerate the passphrase. This will break connectivity and you will have to reregister the replication appliance. - - In the */Platform* parameter, specify *VMware* for both VMware machines and physical machines. - -1. Connect to the machine and extract the contents of the installer file to a local folder (such as c:\temp). Run this in an admin command prompt: - - ```cmd - ren Microsoft-ASR_UA*Windows*release.exe MobilityServiceInstaller.exe - MobilityServiceInstaller.exe /q /x:C:\Temp\Extracted - cd C:\Temp\Extracted - ``` - -2. Run the Mobility Service Installer: - - ```cmd - UnifiedAgent.exe /Role "MS" /Platform "VmWare" /Silent - ``` - -3. Register the agent with the replication appliance: - - ```cmd - cd C:\Program Files (x86)\Microsoft Azure Site Recovery\agent - UnifiedAgentConfigurator.exe /CSEndPoint /PassphraseFilePath - ``` - -It may take some time after installation for discovered machines to appear in Azure Migrate: Server Migration. As VMs are discovered, the **Discovered servers** count rises. - -![Discovered servers](../../../migrate/media/tutorial-migrate-physical-virtual-machines/discovered-servers.png) - -## Prepare source machines - -To prepare source machines, run the `Get-ClusterInfo.ps1` script on a cluster node to retrieve information on the cluster resources. 
The script will output the role name, resource name, IP, and probe port in the `Cluster-Config.csv` file. - -```powershell -./Get-ClusterInfo.ps1 -``` - -## Create load balancer - -For the cluster and cluster roles to respond properly to requests, an Azure Load balancer is required. Without a load balancer, the other VMs are unable to reach the cluster IP address as it's not recognized as belonging to the network or the cluster. - -To create the load balancer, follow these steps: - -1. Fill out the columns in the `Cluster-Config.csv` file: - -**Column Header** | **Description** ---- | --- -NewIP | Specify the IP address in the Azure virtual network (or subnet) for each resource in the CSV file. -ServicePort | Specify the service port to be used by each resource in the CSV file. For the SQL clustered resource, use the same value for service port as the probe port in the CSV. For other cluster roles, the default values used are 1433 but you can continue to use the port numbers that are configured in your current setup. - - -2. Run the `Create-ClusterLoadBalancer.ps1` script to create the load balancer using the following parameters: - -**Parameter** | **Type** | **Description** ---- | --- | --- -ConfigFilePath | Mandatory| Specify the path for the `Cluster-Config.csv` file that you have filled out in the previous step. -ResourceGroupName | Mandatory|Specify the name of the resource group in which the load balancer is to be created. -VNetName | Mandatory|Specify the name of the Azure virtual network that the load balancer will be associated to. -SubnetName | Mandatory|Specify the name of the subnet in the Azure virtual network that the load balancer will be associated to. -VNetResourceGroupName | Mandatory|Specify the name of the resource group for the Azure virtual network that the load balancer will be associated to. -Location | Mandatory|Specify the location in which the load balancer should be created. 
-LoadBalancerName | Mandatory|Specify the name of the load balancer to be created. - - -```powershell -./Create-ClusterLoadBalancer.ps1 -ConfigFilePath ./clusterinfo.csv -ResourceGroupName $resourcegroupname -VNetName $vnetname -subnetName $subnetname -VnetResourceGroupName $vnetresourcegroupname -Location "eastus" -LoadBalancerName $loadbalancername -``` - -## Replicate machines - -Now, select machines for migration. You can replicate up to 10 machines together. If you need to replicate more, then replicate them simultaneously in batches of 10. - -To replicate machines, follow these steps: - -1. In the Azure Migrate project > **Servers**, **Azure Migrate: Server Migration**, select **Replicate**. - - ![Screenshot of the Azure Migrate - Servers screen showing the Replicate button selected in Azure Migrate: Server Migration under Migration tools](../../../migrate/media/tutorial-migrate-physical-virtual-machines/select-replicate.png) - -1. In **Replicate**, > **Source settings** > **Are your machines virtualized?**, select **Physical or other (AWS, GCP, Xen, etc.)**. -1. In **On-premises appliance**, select the name of the Azure Migrate appliance that you set up. -1. In **Process Server**, select the name of the replication appliance. -1. In **Guest credentials**, select the dummy account created during the [replication installer setup](#download-replication-appliance-installer) previously in this article. Then select **Next: Virtual machines**. - - ![Screenshot of the Source settings tab in the Replicate screen with the Guest credentials field highlighted.](../../../migrate/media/tutorial-migrate-physical-virtual-machines/source-settings.png) - -1. In **Virtual Machines**, in **Import migration settings from an assessment?**, leave the default setting **No, I'll specify the migration settings manually**. -1. Check each VM you want to migrate. Then select **Next: Target settings**. 
- - ![Select VMs](../../../migrate/media/tutorial-migrate-physical-virtual-machines/select-vms.png) - - -1. In **Target settings**, select the subscription, and target region to which you'll migrate, and specify the resource group in which the Azure VMs will reside after migration. -1. In **Virtual Network**, select the Azure VNet/subnet to which the Azure VMs will be joined after migration. -1. In **Availability options**, select: - - Availability Zone to pin the migrated machine to a specific Availability Zone in the region. Use this option to distribute servers that form a multi-node application tier across Availability Zones. If you select this option, you'll need to specify the Availability Zone to use for each of the selected machines in the Compute tab. This option is only available if the target region selected for the migration supports Availability Zones. - - Availability Set to place the migrated machine in an Availability Set. The target resource group that was selected must have one or more availability sets in order to use this option. - - No infrastructure redundancy required option if you don't need either of these availability configurations for the migrated machines. - -1. In **Disk encryption type**, select: - - Encryption-at-rest with platform-managed key - - Encryption-at-rest with customer-managed key - - Double encryption with platform-managed and customer-managed keys - - > [!NOTE] - > To replicate VMs with CMK, you'll need to [create a disk encryption set](../../../virtual-machines/disks-enable-customer-managed-keys-portal.md#set-up-your-disk-encryption-set) under the target Resource Group. A disk encryption set object maps Managed Disks to a Key Vault that contains the CMK to use for SSE. - -1. In **Azure Hybrid Benefit**: - - - Select **No** if you don't want to apply Azure Hybrid Benefit. Then select **Next**. 
- - Select **Yes** if you have Windows Server machines that are covered with active Software Assurance or Windows Server subscriptions, and you want to apply the benefit to the machines you're migrating. Then select **Next**. - - :::image type="content" source="../../../migrate/media/tutorial-migrate-vmware/target-settings.png" alt-text="Target settings"::: - -1. In **Compute**, review the VM name, size, OS disk type, and availability configuration (if selected in the previous step). VMs must conform with [Azure requirements](../../../migrate/migrate-support-matrix-physical-migration.md#azure-vm-requirements). - - - **VM size**: If you're using assessment recommendations, the VM size dropdown shows the recommended size. Otherwise Azure Migrate picks a size based on the closest match in the Azure subscription. Alternatively, pick a manual size in **Azure VM size**. - - **OS disk**: Specify the OS (boot) disk for the VM. The OS disk is the disk that has the operating system bootloader and installer. - - **Availability Zone**: Specify the Availability Zone to use. - - **Availability Set**: Specify the Availability Set to use. - - ![Compute settings](../../../migrate/media/tutorial-migrate-physical-virtual-machines/compute-settings.png) - -1. In **Disks**, specify whether the VM disks should be replicated to Azure, and select the disk type (standard SSD/HDD or premium managed disks) in Azure. Then select **Next**. - - ![Disk settings](../../../migrate/media/tutorial-migrate-physical-virtual-machines/disks.png) - -1. In **Review and start replication**, review the settings, and select **Replicate** to start the initial replication for the servers. - -> [!NOTE] -> You can update replication settings any time before replication starts, **Manage** > **Replicating machines**. Settings can't be changed after replication starts. - -## Track and monitor - -Replication proceeds in the following sequence: - -- When you select **Replicate**, a _Start Replication_ job begins. 
-- When the _Start Replication_ job finishes successfully, the machines begin their initial replication to Azure. -- After initial replication finishes, delta replication begins. Incremental changes to on-premises disks are periodically replicated to the replica disks in Azure. - - -You can track job status in the portal notifications. - -You can monitor replication status by selecting on **Replicating servers** in **Azure Migrate: Server Migration**. -![Monitor replication](../../../migrate/media/tutorial-migrate-physical-virtual-machines/replicating-servers.png) - - -## Migrate VMs - -After machines are replicated, they are ready for migration. To migrate your servers, follow these steps: - - -1. In the Azure Migrate project > **Servers** > **Azure Migrate: Server Migration**, select **Replicating servers**. - - ![Replicating servers](../../../migrate/media/tutorial-migrate-physical-virtual-machines/replicate-servers.png) - -2. To ensure the migrated server is synchronized with the source server, stop the SQL Server service on every replica in the availability group, starting with secondary replicas (in **SQL Server Configuration Manager** > **Services**) while ensuring the disks hosting SQL data are online. -3. In **Replicating machines** > select server name > **Overview**, ensure that the last synchronized timestamp is after you have stopped the SQL Server service on the servers to be migrated before you move onto the next step. This should only take a few minutes. -2. In **Replicating machines**, right-click the VM > **Migrate**. -3. In **Migrate** > **Shut down virtual machines and perform a planned migration with no data loss**, select **No** > **OK**. - - > [!NOTE] - > For physical server migration, shut down of source machine is not supported automatically. 
The recommendation is to bring the application down as part of the migration window (don't let the applications accept any connections) and then initiate the migration (the server needs to be kept running, so remaining changes can be synchronized) before the migration is completed. - -4. A migration job starts for the VM. Track the job in Azure notifications. -5. After the job finishes, you can view and manage the VM from the **Virtual Machines** page. - -## Reconfigure cluster - -After your VMs have migrated, reconfigure the cluster. Follow these steps: - -1. Shut down the migrated servers in Azure. -1. Add the migrated machines to the backend pool of the load balancer. Navigate to **Load Balancer** > **Backend pools** > select backend pool > **add migrated machines**. -1. Start the migrated servers in Azure and log in to any node. -1. Copy the `ClusterConfig.csv` file and run the `Update-ClusterConfig.ps1` script passing the CSV as a parameter. This ensures the cluster resources are updated with the new configuration for the cluster to work in Azure. - - ```powershell - ./Update-ClusterConfig.ps1 -ConfigFilePath $filepath - ``` - -Your Always On availability group is ready. - -## Complete the migration - -1. After the migration is done, right-click the VM > **Stop migration**. This does the following: - - Stops replication for the on-premises machine. - - Removes the machine from the **Replicating servers** count in Azure Migrate: Server Migration. - - Cleans up replication state information for the machine. -2. Install the Azure VM [Windows](../../../virtual-machines/extensions/agent-windows.md) agent on the migrated machines. -3. Perform any post-migration app tweaks, such as updating database connection strings, and web server configurations. -4. Perform final application and migration acceptance testing on the migrated application now running in Azure. -5. Cut over traffic to the migrated Azure VM instance. -6. 
Remove the on-premises VMs from your local VM inventory. -7. Remove the on-premises VMs from local backups. -8. Update any internal documentation to show the new location and IP address of the Azure VMs. - -## Post-migration best practices - -- For SQL Server: - - Install [SQL Server IaaS Agent extension](../../virtual-machines/windows/sql-server-iaas-agent-extension-automate-management.md) to automate management and administration tasks. - - [Optimize](../../virtual-machines/windows/performance-guidelines-best-practices-checklist.md) SQL Server performance on Azure VMs. - - Understand [pricing](../../virtual-machines/windows/pricing-guidance.md#free-licensed-sql-server-editions) for SQL Server on Azure. -- For increased resilience: - - Keep data secure by backing up Azure VMs using the [Azure Backup service](../../../backup/quick-backup-vm-portal.md). - - Keep workloads running and continuously available by replicating Azure VMs to a secondary region with [Site Recovery](../../../site-recovery/azure-to-azure-tutorial-enable-replication.md). -- For increased security: - - Lock down and limit inbound traffic access with [Microsoft Defender for Cloud - Just in time administration](../../../security-center/security-center-just-in-time.md). - - Restrict network traffic to management endpoints with [Network Security Groups](../../../virtual-network/network-security-groups-overview.md). - - Deploy [Azure Disk Encryption](../../../security/fundamentals/azure-disk-encryption-vms-vmss.md) to help secure disks, and keep data safe from theft and unauthorized access. - - Read more about [securing IaaS resources](https://azure.microsoft.com/services/virtual-machines/secure-well-managed-iaas/), and visit the [Microsoft Defender for Cloud](https://azure.microsoft.com/services/security-center/). 
-- For monitoring and management: - - Consider deploying [Azure Cost Management](../../../cost-management-billing/cost-management-billing-overview.md) to monitor resource usage and spending. - - -## Next steps - -Investigate the [cloud migration journey](/azure/architecture/cloud-adoption/getting-started/migrate) in the Azure Cloud Adoption Framework. diff --git a/articles/azure-sql/migration-guides/virtual-machines/sql-server-distributed-availability-group-complete-migration.md b/articles/azure-sql/migration-guides/virtual-machines/sql-server-distributed-availability-group-complete-migration.md deleted file mode 100644 index 7075aa380c0c0..0000000000000 --- a/articles/azure-sql/migration-guides/virtual-machines/sql-server-distributed-availability-group-complete-migration.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: Complete migration using a distributed availability group -titleSuffix: SQL Server on Azure VMs -description: Use a distributed availability group to complete the migration of your SQL Server databases to SQL Server on Azure VMs. -ms.service: virtual-machines-sql -ms.subservice: migration-guide -author: MashaMSFT -ms.topic: how-to -ms.date: 12/15/2021 -ms.author: mathoma ---- -# Complete migration using a distributed AG - -Use a [distributed availability group (AG)](/sql/database-engine/availability-groups/windows/distributed-availability-groups) to migrate your databases from SQL Server to SQL Server on Azure Virtual Machines (VMs). - -This article assumes you've already configured your distributed ag for either your [standalone databases](sql-server-distributed-availability-group-migrate-standalone-instance.md) or your [availability group databases](sql-server-distributed-availability-group-migrate-ag.md) and now you're ready to finalize the migration to SQL Server on Azure VMs. - -## Monitor migration - -Use Transact-SQL (T-SQL) to monitor the progress of your migration. 
- -Run the following script on the global primary and the forwarder and validate that the state for `synchronization_state_desc` for the primary availability group (**OnPremAG**) and the secondary availability group (**AzureAG**) is `SYNCHRONIZED`. Confirm that the `synchronization_state_desc` for the distributed AG (**DAG**) is synchronizing and the `last_hardened_lsn` is the same per database on both the global primary and the forwarder. - -If not, rerun the query on both sides every 5 seconds or so until it is the case. - -Use the following script to monitor the migration: - -```sql -SELECT ag.name - , drs.database_id - , db_name(drs.database_id) as database_name - , drs.group_id - , drs.replica_id - , drs.synchronization_state_desc - , drs.last_hardened_lsn -FROM sys.dm_hadr_database_replica_states drs -INNER JOIN sys.availability_groups ag on drs.group_id = ag.group_id; -``` - -## Complete migration - -Once you've validated the states of the availability group and the distributed ag, you're ready to complete the migration. This consists of failing over the distributed ag to the forwarder (the target SQL Server in Azure), and then cutting over the application to the new primary on the Azure side. - -To failover your distributed availability group, review [failover to secondary availability group](/sql/database-engine/availability-groups/windows/configure-distributed-availability-groups#failover). - -After the failover, update the connection string of your application to connect to the new primary replica in Azure. At this point, you can choose to maintain the distributed availability group, or use `DROP AVAILABILITY GROUP [DAG]` on both the source and target SQL Server instances to drop it. - -If your domain controller is on the source side, validate that your target SQL Server VMs in Azure have joined the domain before abandoning the source SQL Server instances. 
Do not delete the domain controller on the source side until you [create a domain](../../virtual-machines/windows/availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md#create-domain-controllers) on the source side in Azure and add your SQL Server VMs to this new domain. - - -## Next steps - -For a tutorial showing you how to migrate a database to SQL Server on Azure Virtual Machines using the T-SQL RESTORE command, see [Migrate a SQL Server database to SQL Server on a virtual machine](../../virtual-machines/windows/migrate-to-vm-from-sql-server.md). - -For information about SQL Server on Azure Virtual Machines, see the [Overview](../../virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md). - -For information about connecting apps to SQL Server on Azure Virtual Machines, see [Connect applications](../../virtual-machines/windows/ways-to-connect-to-sql.md). - - - diff --git a/articles/azure-sql/migration-guides/virtual-machines/sql-server-distributed-availability-group-migrate-ag.md b/articles/azure-sql/migration-guides/virtual-machines/sql-server-distributed-availability-group-migrate-ag.md deleted file mode 100644 index 055e8d780d635..0000000000000 --- a/articles/azure-sql/migration-guides/virtual-machines/sql-server-distributed-availability-group-migrate-ag.md +++ /dev/null @@ -1,255 +0,0 @@ ---- -title: Use distributed AG to migrate availability group -titleSuffix: SQL Server on Azure VMs -description: Learn to use a distributed availability group (AG) to migrate a database (or multiple databases) from a source SQL Server Always On availability group to a target SQL Server on Azure VM. 
-ms.service: virtual-machines-sql -ms.subservice: migration-guide -author: MashaMSFT -ms.topic: how-to -ms.date: 12/15/2021 -ms.author: mathoma ---- -# Use distributed AG to migrate availability group - -Use a [distributed availability group (AG)](/sql/database-engine/availability-groups/windows/distributed-availability-groups) to migrate databases in an Always On availability group while maintaining high availability and disaster recovery (HADR) support post migration on your SQL Server on Azure Virtual Machines (VMs). - -Once you've validated your source SQL Server instances meet the [prerequisites](sql-server-distributed-availability-group-migrate-prerequisites.md), follow the steps in this article to create a distributed availability between your existing availability group, and your target availability group on your SQL Server on Azure VMs. - -This article is intended for databases participating in an availability group, and requires a Windows Server Failover Cluster (WSFC) and an availability group listener. It's also possible to [migrate databases from a standalone SQL Server instance](sql-server-distributed-availability-group-migrate-standalone-instance.md). - -:::image type="content" source="media/sql-server-distributed-availability-group-migrate-ag/migrate-availability-group-with-dag.png" alt-text="Diagram explaining availability group migration using a distributed availability group"::: - -## Initial setup - -The first step is to create your SQL Server VMs in Azure. You can do so by using the [Azure portal](../../virtual-machines/windows/sql-vm-create-portal-quickstart.md), [Azure PowerShell](../../virtual-machines/windows/sql-vm-create-powershell-quickstart.md), or an [ARM template](../../virtual-machines/windows/create-sql-vm-resource-manager-template.md). - -Be sure to configure your SQL Server VMs according to the [prerequisites](sql-server-distributed-availability-group-migrate-prerequisites.md). 
Choose between a single subnet deployment, which relies on an Azure Load Balancer or distributed network name to route traffic to your availability group listener, or a multi-subnet deployment which does not have such a requirement. The multi-subnet deployment is recommended. To learn more, see [connectivity](../../virtual-machines/windows/availability-group-overview.md#connectivity). - -For simplicity, join your target SQL Server VMs to the same domain as your source SQL Server instances. Otherwise, join your target SQL Server VM to a domain that's federated with the domain of your source SQL Server instances. - -To use automatic seeding to create your distributed availability group (DAG), the instance name for the global primary (source) of the DAG must match the instance name of the forwarder (target) of the DAG. If there is an instance name mismatch between the global primary and forwarder, then you must use manual seeding to create the DAG, and manually add any additional database files in the future. - -This article uses the following example parameters: - -- Database name: **Adventureworks** -- Source machine names : **OnPremNode1** (global primary in DAG), **OnPremNode2** -- Source SQL Server instance names: **MSSQLSERVER**, **MSSQLSERVER** -- Source availability group name : **OnPremAg** -- Source availability group listener name: **OnPremAG_LST** -- Target SQL Server VM names: **SQLVM1** (forwarder in DAG), **SQLVM2** -- Target SQL Server on Azure VM instance names: **MSSQLSERVER**, **MSSQLSERVER** -- Target availability group name: **AzureAG** -- Source availability group listener name: **AzureAG_LST** -- Endpoint name: **Hadr_endpoint** -- Distributed availability group name: **DAG** -- Domain name: **Contoso** - -## Create endpoints - -Use Transact-SQL (T-SQL) to create endpoints on both your two source instances (**OnPremNode1**, **OnPremNode2**) and target SQL Server instances (**SQLVM1**, **SQLVM2**). 
- -If you already have an availability group configured on the source instances, only run this script on the two target instances. - -To create your endpoints, run this T-SQL script on both source and target servers: - -```sql -CREATE ENDPOINT [Hadr_endpoint] - STATE=STARTED - AS TCP (LISTENER_PORT = 5022, LISTENER_IP = ALL) -FOR DATABASE_MIRRORING ( - ROLE = ALL, - AUTHENTICATION = WINDOWS NEGOTIATE, - ENCRYPTION = REQUIRED ALGORITHM AES -) -GO -``` - -Domain accounts automatically have access to endpoints, but service accounts may not automatically be part of the sysadmin group and may not have connect permission. To manually grant the SQL Server service account connect permission to the endpoint, run the following T-SQL script on both servers: - -```sql -GRANT CONNECT ON ENDPOINT::[Hadr_endpoint] TO [<service account>] -``` - -## Create source AG - -Since a distributed availability group is a special availability group that spans across two individual availability groups, you first need to create an availability group on the two source SQL Server instances. - -If you already have an availability group on your source instances, skip this section. - -Use Transact-SQL (T-SQL) to create an availability group (**OnPremAG**) between your two source instances (**OnPremNode1**, **OnPremNode2**) for the example **Adventureworks** database. 
- -To create the availability group on the source instances, run this script on the source primary replica (**OnPremNode1**): - -```sql -CREATE AVAILABILITY GROUP [OnPremAG] -WITH ( AUTOMATED_BACKUP_PREFERENCE = PRIMARY, - DB_FAILOVER = OFF, - DTC_SUPPORT = NONE ) -FOR DATABASE [Adventureworks] -REPLICA ON - N'OnPremNode1' WITH (ENDPOINT_URL = N'TCP://OnPremNode1.contoso.com:5022', - FAILOVER_MODE = AUTOMATIC, - AVAILABILITY_MODE = SYNCHRONOUS_COMMIT, - SEEDING_MODE = AUTOMATIC, - SECONDARY_ROLE(ALLOW_CONNECTIONS = NO)), - N'OnPremNode2' WITH (ENDPOINT_URL = N'TCP://OnPremNode2.contoso.com:5022', - FAILOVER_MODE = AUTOMATIC, - AVAILABILITY_MODE = SYNCHRONOUS_COMMIT, - SEEDING_MODE = AUTOMATIC, - SECONDARY_ROLE(ALLOW_CONNECTIONS = NO)); -``` - -Next, join the secondary replica (**OnPremNode2**) to the availability group (**OnPremAG**). - -To join the availability group, run this script on the source secondary replica: - -```sql -ALTER AVAILABILITY GROUP [OnPremAG] JOIN; -GO -ALTER AVAILABILITY GROUP [OnPremAG] GRANT CREATE ANY DATABASE; -GO -``` - -Finally, create the listener for your global primary availability group (**OnPremAG**). - -To create the listener, run this script on the source primary replica: - -```sql -USE [master] -GO -ALTER AVAILABILITY GROUP [OnPremAG] -ADD LISTENER N'OnPremAG_LST' ( -WITH IP ((N'<ip-address>', N'<subnet-mask>')) -, PORT=60173); -GO - -``` - - -## Create target AG - -You also need to create an availability group on the target SQL Server VMs. - -If you already have an availability group configured between your SQL Server instances in Azure, skip this section. - -Use Transact-SQL (T-SQL) to create an availability group (**AzureAG**) on the target SQL Server instances (**SQLVM1** and **SQLVM2**). 
- -To create the availability group on the target, run this script on the target primary replica: - -```sql -CREATE AVAILABILITY GROUP [AzureAG] -FOR - REPLICA ON N'SQLVM1' WITH (ENDPOINT_URL = N'TCP://SQLVM1.contoso.com:5022', - FAILOVER_MODE = MANUAL, - AVAILABILITY_MODE = SYNCHRONOUS_COMMIT, - BACKUP_PRIORITY = 50, - SECONDARY_ROLE(ALLOW_CONNECTIONS = NO), - SEEDING_MODE = AUTOMATIC), -N'SQLVM2' WITH (ENDPOINT_URL = N'TCP://SQLVM2.contoso.com:5022', - FAILOVER_MODE = MANUAL, - AVAILABILITY_MODE = SYNCHRONOUS_COMMIT, - BACKUP_PRIORITY = 50, - SECONDARY_ROLE(ALLOW_CONNECTIONS = NO), - SEEDING_MODE = AUTOMATIC); -GO -``` - -Next, join the target secondary replica (**SQLVM2**) to the availability group (**AzureAG**). - -Run this script on the target secondary replica: - -```sql -ALTER AVAILABILITY GROUP [AzureAG] JOIN; -GO -ALTER AVAILABILITY GROUP [AzureAG] GRANT CREATE ANY DATABASE; -GO -``` - -Finally, create a listener (**AzureAG_LST**) for your target availability group (**AzureAG**). If you deployed your SQL Server VMs to multiple subnets, create your listener using Transact-SQL. If you deployed your SQL Server VMs to a single subnet, configure either an [Azure Load Balancer](../../virtual-machines/windows/availability-group-vnn-azure-load-balancer-configure.md), or a [distributed network name](../../virtual-machines/windows/availability-group-distributed-network-name-dnn-listener-configure.md) for your listener. - -To create your listener, run this script on the primary replica of the availability group in Azure. - -```sql -ALTER AVAILABILITY GROUP [AzureAG] -ADD LISTENER N'AzureAG_LST' ( -WITH IP -( (N'', N''), (N'', N'') ) -, PORT=); -GO -``` - - - -## Create distributed AG - -After you have your source (**OnPremAG**) and target (**AzureAG**) availability groups configured, create your distributed availability group to span both individual availability groups. 
- -Use Transact-SQL on the source SQL Server global primary (**OnPremNode1**) and AG (**OnPremAG**) to create the distributed availability group (**DAG**). - -To create the distributed AG on the source, run this script on the source global primary: - -```sql -CREATE AVAILABILITY GROUP [DAG] - WITH (DISTRIBUTED) - AVAILABILITY GROUP ON - 'OnPremAG' WITH - ( - LISTENER_URL = 'tcp://OnPremAG_LST.contoso.com:5022', - AVAILABILITY_MODE = ASYNCHRONOUS_COMMIT, - FAILOVER_MODE = MANUAL, - SEEDING_MODE = AUTOMATIC - ), - 'AzureAG' WITH - ( - LISTENER_URL = 'tcp://AzureAG_LST.contoso.com:5022', - AVAILABILITY_MODE = ASYNCHRONOUS_COMMIT, - FAILOVER_MODE = MANUAL, - SEEDING_MODE = AUTOMATIC - ); -GO - -``` - ->[!NOTE] -> The seeding mode is set to `AUTOMATIC` as the version of SQL Server on the target and source is the same. If your SQL Server target is a higher version, or if your global primary and forwarder have different instance names, then create the distributed ag, and join the secondary AG to the distributed ag with **SEEDING_MODE** set to `MANUAL`. Then manually restore your databases from the source to the target SQL Server instance. Review [upgrading versions during migration](/sql/database-engine/availability-groups/windows/distributed-availability-groups#cautions-when-using-distributed-availability-groups-to-migrate-to-higher-sql-server-versions) to learn more. - -After your distributed AG is created, join the target AG (**AzureAG**) on the target forwarder instance (**SQLVM1**) to the distributed AG (**DAG**). 
- -To join the target AG to the distributed AG, run this script on the target forwarder: - -```sql -ALTER AVAILABILITY GROUP [DAG] - JOIN - AVAILABILITY GROUP ON - 'OnPremAG' WITH - ( - LISTENER_URL = 'tcp://OnPremAG_LST.contoso.com:5022', - AVAILABILITY_MODE = ASYNCHRONOUS_COMMIT, - FAILOVER_MODE = MANUAL, - SEEDING_MODE = AUTOMATIC - ), - 'AzureAG' WITH - ( - LISTENER_URL = 'tcp://AzureAG_LST.contoso.com:5022', - AVAILABILITY_MODE = ASYNCHRONOUS_COMMIT, - FAILOVER_MODE = MANUAL, - SEEDING_MODE = AUTOMATIC - ); -GO -``` - -If you need to cancel, pause, or delay synchronization between the source and target availability groups (such as, for example, performance issues), run this script on the source global primary instance (**OnPremNode1**): - -```sql -ALTER AVAILABILITY GROUP [DAG] - MODIFY - AVAILABILITY GROUP ON - 'AzureAG' WITH - ( SEEDING_MODE = MANUAL ); -``` - -To learn more, review [cancel automatic seeding to forwarder](/sql/database-engine/availability-groups/windows/configure-distributed-availability-groups#cancel-automatic-seeding-to-forwarder). - -## Next steps - -After your distributed availability group is created, you are ready to [complete the migration](sql-server-distributed-availability-group-complete-migration.md). diff --git a/articles/azure-sql/migration-guides/virtual-machines/sql-server-distributed-availability-group-migrate-prerequisites.md b/articles/azure-sql/migration-guides/virtual-machines/sql-server-distributed-availability-group-migrate-prerequisites.md deleted file mode 100644 index 1f36abcb20fe7..0000000000000 --- a/articles/azure-sql/migration-guides/virtual-machines/sql-server-distributed-availability-group-migrate-prerequisites.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: "Prerequisites: Migrate to SQL Server VM using distributed AG" -titleSuffix: SQL Server on Azure VMs -description: Review the prerequisites to migrate your SQL Server to SQL Server on Azure VMs using a distributed availability group. 
-ms.service: virtual-machines-sql -ms.subservice: migration-guide -author: MashaMSFT -ms.topic: how-to -ms.date: 12/15/2021 -ms.author: mathoma ---- -# Prerequisites: Migrate to SQL Server VM using distributed AG - -Use a [distributed availability group (AG)](/sql/database-engine/availability-groups/windows/distributed-availability-groups) to migrate either a [standalone instance](sql-server-distributed-availability-group-migrate-standalone-instance.md) of SQL Server or an [Always On availability group](sql-server-distributed-availability-group-migrate-ag.md) to SQL Server on Azure Virtual Machines (VMs). - -This article describes the prerequisites to prepare your source and target environments to migrate your SQL Server instance or availability group to SQL Server VMs using a distributed ag. - -Migrating a database (or multiple databases) from a standalone instance using a distributed availability group is a simple solution that does not require a Windows Server Failover Cluster, or an availability group listener on either the source or the target. Migrating an availability group requires a cluster, and a listener on both source and target. - -## Source SQL Server - -To migrate your instance or availability group, your source SQL Server should meet the following prerequisites: - -- For a standalone instance migration, the minimum supported version is SQL Server 2017. For an availability group migration, SQL Server 2016 or later is supported. -- Your SQL Server edition should be enterprise. -- You must enable the [Always On feature](/sql/database-engine/availability-groups/windows/enable-and-disable-always-on-availability-groups-sql-server). -- The databases you intend to migrate have been backed up in full mode. -- If you already have an availability group, it must be in a healthy state. If you create an availability group as part of this process, it must be in a healthy state before you start the migration. 
-- Ports used by the SQL Server instance (1433 by default) and the database mirroring endpoint (5022 by default) must be open in the firewall. To migrate databases in an availability group, make sure the port used by the listener is also open in the firewall. - -## Target SQL Server VM - -Before your target SQL Server VMs are ready for migration, make sure they meet the following prerequisites: - -- The Azure account performing the migration is assigned as the owner or contributor to the resource group that contains the target SQL Server VMs. -- To use automatic seeding to create your distributed availability group (DAG), the instance name for the global primary (source) of the DAG must match the instance name of the forwarder (target) of the DAG. If there is an instance name mismatch between the global primary and forwarder, then you must use manual seeding to create the DAG, and manually add any additional database files in the future. -- For simplicity, the target SQL Server instance should match the version of the source SQL Server instance. If you choose to upgrade during the migration process by using a higher version of SQL Server on the target, then you will need to manually seed your database rather than relying on autoseeding as is provided in this series of articles. Review [Migrate to higher SQL Server versions](/sql/database-engine/availability-groups/windows/distributed-availability-groups#cautions-when-using-distributed-availability-groups-to-migrate-to-higher-sql-server-versions) for more details. -- The SQL Server edition should be enterprise. -- You must enable the [Always On feature](/sql/database-engine/availability-groups/windows/enable-and-disable-always-on-availability-groups-sql-server). -- Ports used by the SQL Server instance (1433 by default) and the database mirroring endpoint (5022 by default) must be open in the firewall.
To migrate databases in an availability group, make sure the port used by the listener is also open in the firewall. - -## Connectivity - -The source and target SQL Server instance must have an established network connection. - -If the source SQL Server instance is located on an on-premises network, configure a [Site-to-site VPN connection](/microsoft-365/enterprise/connect-an-on-premises-network-to-a-microsoft-azure-virtual-network) or an [Azure ExpressRoute connection](../../../expressroute/expressroute-introduction.md) between the on-premises network and the virtual network where your target SQL Server VM resides. - -If your source SQL Server instance is located on an Azure virtual network that is different than the target SQL Server VM, then configure [virtual network peering](../../../virtual-network/virtual-network-peering-overview.md). - -## Authentication - -To simplify authentication between your source and target SQL Server instance, join both servers to the same domain, preferably with the domain being on the source side and apply domain-based authentication. Since this is the recommended approach, the steps in this tutorial series assume both source and target SQL Server instance are part of the same domain. - -If the source and target servers are part of different domains, configure [federation](../../../active-directory/hybrid/whatis-fed.md) between the two domains, or configure a [domain-independent availability group](../../virtual-machines/windows/availability-group-clusterless-workgroup-configure.md). - - -## Next steps - -Once you have configured both source and target environment to meet the prerequisites, you're ready to migrate either your [standalone instance](sql-server-distributed-availability-group-migrate-standalone-instance.md) of SQL Server or an [Always On availability group](sql-server-distributed-availability-group-migrate-ag.md) to your target SQL Server VM(s). 
- - - diff --git a/articles/azure-sql/migration-guides/virtual-machines/sql-server-distributed-availability-group-migrate-standalone-instance.md b/articles/azure-sql/migration-guides/virtual-machines/sql-server-distributed-availability-group-migrate-standalone-instance.md deleted file mode 100644 index 68fe63d5814d0..0000000000000 --- a/articles/azure-sql/migration-guides/virtual-machines/sql-server-distributed-availability-group-migrate-standalone-instance.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Use distributed AG to migrate databases from a standalone instance -titleSuffix: SQL Server on Azure VMs -description: Learn to use a distributed availability group (AG) to migrate a database (or multiple databases) from a standalone instance of SQL Server to a target SQL Server on Azure VM. -ms.service: virtual-machines-sql -ms.subservice: migration-guide -author: MashaMSFT -ms.topic: how-to -ms.date: 12/15/2021 -ms.author: mathoma ---- -# Use distributed AG to migrate databases from a standalone instance - -Use a [distributed availability group (AG)](/sql/database-engine/availability-groups/windows/distributed-availability-groups) to migrate a database (or multiple databases) from a standalone instance of SQL Server to SQL Server on Azure Virtual Machines (VMs). - -Once you've validated your source SQL Server instance meets the [prerequisites](sql-server-distributed-availability-group-migrate-prerequisites.md), follow the steps in this article to create an availability group on your standalone SQL Server instance and migrate your database (or group of databases) to your SQL Server VM in Azure. - -This article is intended for databases on a standalone instance of SQL Server. This solution does not require a Windows Server Failover Cluster (WSFC) or an availability group listener. It's also possible to [migrate databases in an availability group](sql-server-distributed-availability-group-migrate-ag.md). 
- -:::image type="content" source="media/sql-server-distributed-availability-group-migrate-standalone-instance/migrate-single-instance-with-dag.png" alt-text="Diagram explaining single instance migration using a distributed availability group"::: - -## Initial setup - -The first step is to create your SQL Server VM in Azure. You can do so by using the [Azure portal](../../virtual-machines/windows/sql-vm-create-portal-quickstart.md), [Azure PowerShell](../../virtual-machines/windows/sql-vm-create-powershell-quickstart.md), or an [ARM template](../../virtual-machines/windows/create-sql-vm-resource-manager-template.md). - -Be sure to configure your SQL Server VM according to the [prerequisites](sql-server-distributed-availability-group-migrate-prerequisites.md). - -For simplicity, join your target SQL Server VM to the same domain as your source SQL Server. Otherwise, join your target SQL Server VM to a domain that's federated with the domain of your source SQL Server. - -To use automatic seeding to create your distributed availability group (DAG), the instance name for the global primary (source) of the DAG must match the instance name of the forwarder (target) of the DAG. If there is an instance name mismatch between the global primary and forwarder, then you must use manual seeding to create the DAG, and manually add any additional database files in the future. 
- -This article uses the following example parameters: - -- Database name: **Adventureworks** -- Source machine name (global primary in DAG): **OnPremNode** -- Source SQL Server instance name: **MSSQLSERVER** -- Source availability group name: **OnPremAg** -- Target SQL Server VM name (forwarder in DAG): **SQLVM** -- Target SQL Server on Azure VM instance name: **MSSQLSERVER** -- Target availability group name: **AzureAG** -- Endpoint name: **Hadr_endpoint** -- Distributed availability group name: **DAG** -- Domain name: **Contoso** - -## Create endpoints - -Use Transact-SQL (T-SQL) to create endpoints on both your source (**OnPremNode**) and target (**SQLVM**) SQL Server instances. - -To create your endpoints, run this T-SQL script on both source and target servers: - -```sql -CREATE ENDPOINT [Hadr_endpoint] - STATE=STARTED - AS TCP (LISTENER_PORT = 5022, LISTENER_IP = ALL) -FOR DATA_MIRRORING ( - ROLE = ALL, - AUTHENTICATION = WINDOWS NEGOTIATE, - ENCRYPTION = REQUIRED ALGORITHM AES -) -GO -``` - -Domain accounts automatically have access to endpoints, but service accounts may not automatically be part of the sysadmin group and may not have connect permission. To manually grant the SQL Server service account connect permission to the endpoint, run the following T-SQL script on both servers: - -```sql -GRANT CONNECT ON ENDPOINT::[Hadr_endpoint] TO [] -``` - -## Create source AG - -Since a distributed availability group is a special availability group that spans across two individual availability groups, you first need to create an availability group on the source SQL Server instance. If you already have an availability group that you would like to maintain in Azure, then [migrate your availability group](sql-server-distributed-availability-group-migrate-ag.md) instead. - -Use Transact-SQL (T-SQL) to create an availability group (**OnPremAg**) on the source (**OnPremNode**) instance for the example **Adventureworks** database. 
- -To create the availability group, run this script on the source: - -```sql -CREATE AVAILABILITY GROUP [OnPremAG] - WITH (AUTOMATED_BACKUP_PREFERENCE = PRIMARY, - DB_FAILOVER = OFF, - DTC_SUPPORT = NONE, - CLUSTER_TYPE=NONE ) - FOR DATABASE [Adventureworks] - -REPLICA ON N'OnPremNode' -WITH (ENDPOINT_URL = N'TCP://OnPremNode.contoso.com:5022', FAILOVER_MODE = MANUAL, -AVAILABILITY_MODE = SYNCHRONOUS_COMMIT, -SEEDING_MODE = AUTOMATIC, SECONDARY_ROLE(ALLOW_CONNECTIONS = NO)); - -GO -``` - -## Create target AG - -You also need to create an availability group on the target SQL Server VM as well. - -Use Transact-SQL (T-SQL) to create an availability group (**AzureAG**) on the target (**SQLVM**) instance. - -To create the availability group, run this script on the target: - -```sql -CREATE AVAILABILITY GROUP [AzureAG] - WITH (AUTOMATED_BACKUP_PREFERENCE = PRIMARY, - DB_FAILOVER = OFF, - DTC_SUPPORT = NONE, - CLUSTER_TYPE=NONE, - REQUIRED_SYNCHRONIZED_SECONDARIES_TO_COMMIT = 0) -FOR REPLICA ON N'SQLVM' -WITH (ENDPOINT_URL = N'TCP://SQLVM.contoso.com:5022', FAILOVER_MODE = MANUAL, -AVAILABILITY_MODE = SYNCHRONOUS_COMMIT, -SEEDING_MODE = AUTOMATIC,SECONDARY_ROLE(ALLOW_CONNECTIONS = NO)); -GO - -``` - -## Create distributed AG - -After you have your source (**OnPremAG**) and target (**AzureAG**) availability groups configured, create your distributed availability group to span both individual availability groups. - -Use Transact-SQL on the source SQL Server instance (**OnPremNode**) and AG (**OnPremAG**) to create the distributed availability group (**DAG**). 
- -To create the distributed AG, run this script on the source: - -```sql -CREATE AVAILABILITY GROUP [DAG] - WITH (DISTRIBUTED) - AVAILABILITY GROUP ON - 'OnPremAG' WITH - ( - LISTENER_URL = 'tcp://OnPremNode.contoso.com:5022', - AVAILABILITY_MODE = ASYNCHRONOUS_COMMIT, - FAILOVER_MODE = MANUAL, - SEEDING_MODE = AUTOMATIC - ), - 'AzureAG' WITH - ( - LISTENER_URL = 'tcp://SQLVM.contoso.com:5022', - AVAILABILITY_MODE = ASYNCHRONOUS_COMMIT, - FAILOVER_MODE = MANUAL, - SEEDING_MODE = AUTOMATIC - ); -GO - -``` - ->[!NOTE] -> The seeding mode is set to `AUTOMATIC` as the version of SQL Server on the target and source is the same. If your SQL Server target is a higher version, or if your global primary and forwarder have different instance names, then create the distributed ag, and join the secondary AG to the distributed ag with **SEEDING_MODE** set to `MANUAL`. Then manually restore your databases from the source to the target SQL Server instance. Review [upgrading versions during migration](/sql/database-engine/availability-groups/windows/distributed-availability-groups#cautions-when-using-distributed-availability-groups-to-migrate-to-higher-sql-server-versions) to learn more. - -After your distributed AG is created, join the target AG (**AzureAG**) on the target instance (**SQLVM**) to the distributed AG (**DAG**). 
- -To join the target AG to the distributed AG, run this script on the target: - -```sql -ALTER AVAILABILITY GROUP [DAG] -JOIN -AVAILABILITY GROUP ON - 'OnPremAG' WITH - (LISTENER_URL = 'tcp://OnPremNode.contoso.com:5022', - AVAILABILITY_MODE = ASYNCHRONOUS_COMMIT, - FAILOVER_MODE = MANUAL, - SEEDING_MODE = AUTOMATIC - ), - 'AzureAG' WITH - (LISTENER_URL = 'tcp://SQLVM.contoso.com:5022', - AVAILABILITY_MODE = ASYNCHRONOUS_COMMIT, - FAILOVER_MODE = MANUAL, - SEEDING_MODE = AUTOMATIC - ); -GO -``` - -If you need to cancel, pause, or delay synchronization between the source and target availability groups (such as, for example, performance issues), run this script on the source global primary instance (**OnPremNode**): - -```sql -ALTER AVAILABILITY GROUP [DAG] - MODIFY - AVAILABILITY GROUP ON - 'AzureAG' WITH - ( SEEDING_MODE = MANUAL ); -``` - -To learn more, review [cancel automatic seeding to forwarder](/sql/database-engine/availability-groups/windows/configure-distributed-availability-groups#cancel-automatic-seeding-to-forwarder). - -## Next steps - -After your distributed availability group is created, you are ready to [complete the migration](sql-server-distributed-availability-group-complete-migration.md). diff --git a/articles/azure-sql/migration-guides/virtual-machines/sql-server-failover-cluster-instance-to-sql-on-azure-vm.md b/articles/azure-sql/migration-guides/virtual-machines/sql-server-failover-cluster-instance-to-sql-on-azure-vm.md deleted file mode 100644 index 171a4ff754254..0000000000000 --- a/articles/azure-sql/migration-guides/virtual-machines/sql-server-failover-cluster-instance-to-sql-on-azure-vm.md +++ /dev/null @@ -1,383 +0,0 @@ ---- -title: Migrate failover cluster instance -titleSuffix: SQL Server on Azure VMs -description: Learn how to lift and shift your Always On failover cluster instance high availability solution to SQL Server on Azure VMs using Azure Migrate. 
-ms.service: virtual-machines-sql -ms.subservice: migration-guide -author: rahugup -manager: bsiva -ms.topic: how-to -ms.date: 4/25/2021 -ms.author: rahugup -ms.reviewer: mathoma ---- -# Migrate failover cluster instance to SQL Server on Azure VMs - -This article teaches you to migrate your Always On failover cluster instance (FCI) to SQL Server on Azure VMs using the [Azure Migrate: Server Migration tool](../../../migrate/migrate-services-overview.md#azure-migrate-server-migration-tool). Using the migration tool, you will be able to migrate each node in the failover cluster instance to an Azure VM hosting SQL Server, as well as the cluster and FCI metadata. - -In this article, you learn how to: - -> [!div class="checklist"] -> * Prepare Azure and source environment for migration. -> * Start replicating VMs. -> * Monitor replication. -> * Run a full VM migration. -> * Reconfigure SQL failover cluster with Azure shared disks. - - -This guide uses the agent-based migration approach of Azure Migrate, which treats any server or virtual machine as a physical server. When migrating physical machines, Azure Migrate: Server Migration uses the same replication architecture as the agent-based disaster recovery in the Azure Site Recovery service, and some components share the same code base. Some content might link to Site Recovery documentation. - - -## Prerequisites - -Before you begin this tutorial, you should: - -1. An Azure subscription. Create a [free account](https://azure.microsoft.com/pricing/free-trial/), if necessary. -1. Install the [Azure PowerShell `Az` module](/powershell/azure/install-az-ps). -1. Download the [PowerShell samples scripts](https://github.com/Azure/azure-docs-powershell-samples/tree/master/azure-migrate/SQL%20Migration) from the GitHub repository. - -## Prepare Azure - -Prepare Azure for migration with Server Migration. 
- -**Task** | **Details** ---- | --- -**Create an Azure Migrate project** | Your Azure account needs Contributor or Owner permissions to [create a new project](../../../migrate/create-manage-projects.md). -**Verify permissions for your Azure account** | Your Azure account needs Contributor or Owner permissions on the Azure subscription, permissions to register Azure Active Directory (Azure AD) apps, and User Access Administrator permissions on the Azure subscription to create a Key Vault, to create a VM, and to write to an Azure managed disk. -**Set up an Azure virtual network** | [Setup](../../../virtual-network/manage-virtual-network.md#create-a-virtual-network) an Azure virtual network (VNet). When you replicate to Azure, Azure VMs are created and joined to the Azure VNet that you specify when you set up migration. - - -To check you have proper permissions, follow these steps: - -1. In the Azure portal, open the subscription, and select **Access control (IAM)**. -2. In **Check access**, find the relevant account, and select it to view permissions. -3. You should have **Contributor** or **Owner** permissions. - - If you just created a free Azure account, you're the owner of your subscription. - - If you're not the subscription owner, work with the owner to assign the role. - -If you need to assign permissions, follow the steps in [Prepare for an Azure user account](../../../migrate/tutorial-discover-vmware.md#prepare-an-azure-user-account). - - -## Prepare for migration - -To prepare for server migration, you need to verify the server settings, and prepare to deploy a replication appliance. - -### Check machine requirements - -Make sure machines comply with requirements for migration to Azure. - -1. [Verify](../../../migrate/migrate-support-matrix-physical-migration.md#physical-server-requirements) server requirements. -2. 
Verify that source machines that you replicate to Azure comply with [Azure VM requirements](../../../migrate/migrate-support-matrix-physical-migration.md#azure-vm-requirements). -1. Some [Windows](../../../migrate/prepare-for-migration.md#windows-machines) sources require a few additional changes. Migrating the source before making these changes could prevent the VM from booting in Azure. For some operating systems, Azure Migrate makes these changes automatically. - -### Prepare for replication - -Azure Migrate: Server Migration uses a replication appliance to replicate machines to Azure. The replication appliance runs the following components: - -- **Configuration server**: The configuration server coordinates communications between on-premises and Azure, and manages data replication. -- **Process server**: The process server acts as a replication gateway. It receives replication data; optimizes it with caching, compression, and encryption, and sends it to a cache storage account in Azure. - -Prepare for appliance deployment as follows: - -- Create a Windows Server 2016 machine to host the replication appliance. Review the [machine requirements](../../../migrate/migrate-replication-appliance.md#appliance-requirements). -- The replication appliance uses MySQL. Review the [options](../../../migrate/migrate-replication-appliance.md#mysql-installation) for installing MySQL on the appliance. -- Review the Azure URLs required for the replication appliance to access [public](../../../migrate/migrate-replication-appliance.md#url-access) and [government](../../../migrate/migrate-replication-appliance.md#azure-government-url-access) clouds. -- Review [port](../../../migrate/migrate-replication-appliance.md#port-access) access requirements for the replication appliance. 
- -> [!NOTE] -> The replication appliance should be installed on a machine other than the source machine you are replicating or migrating, and not on any machine that has had the Azure Migrate discovery and assessment appliance installed before. - -### Download replication appliance installer - -To download the replication appliance installer, follow these steps: - -1. In the Azure Migrate project > **Servers**, in **Azure Migrate: Server Migration**, select **Discover**. - - ![Discover VMs](../../../migrate/media/tutorial-migrate-physical-virtual-machines/migrate-discover.png) - -1. In **Discover machines** > **Are your machines virtualized?**, select **Physical or other (AWS, GCP, Xen, etc.)**. -1. In **Target region**, select the Azure region to which you want to migrate the machines. -1. Select **Confirm that the target region for migration is region-name**. -1. Select **Create resources**. This creates an Azure Site Recovery vault in the background. - - If you've already set up migration with Azure Migrate Server Migration, the target option can't be configured, since resources were set up previously. - - You can't change the target region for this project after selecting this button. - - All subsequent migrations are to this region. - -1. In **Do you want to install a new replication appliance?**, select **Install a replication appliance**. -1. In **Download and install the replication appliance software**, download the appliance installer, and the registration key. You need the key in order to register the appliance. The key is valid for five days after it's downloaded. - - ![Download provider](../../../migrate/media/tutorial-migrate-physical-virtual-machines/download-provider.png) - -1. Copy the appliance setup file and key file to the Windows Server 2016 machine you created for the appliance. -1.
After the installation completes, the Appliance configuration wizard will launch automatically (You can also launch the wizard manually by using the cspsconfigtool shortcut that is created on the desktop of the appliance machine). Use the **Manage Accounts** tab of the wizard to create a dummy account with the following details: - - - "guest" as the friendly name - - "username" as the username - - "password" as the password for the account. - - You will use this dummy account in the Enable Replication stage. - -1. After setup completes, and the appliance restarts, in **Discover machines**, select the new appliance in **Select Configuration Server**, and select **Finalize registration**. Finalize registration performs a couple of final tasks to prepare the replication appliance. - - ![Finalize registration](../../../migrate/media/tutorial-migrate-physical-virtual-machines/finalize-registration.png) - - -## Install the Mobility service - -Install the Mobility service agent on the servers you want to migrate. The agent installers are available on the replication appliance. Find the right installer, and install the agent on each machine you want to migrate. - -To install the Mobility service, follow these steps: - -1. Sign in to the replication appliance. -2. Navigate to **%ProgramData%\ASR\home\svsystems\pushinstallsvc\repository**. -3. Find the installer for the machine operating system and version. Review [supported operating systems](../../../site-recovery/vmware-physical-azure-support-matrix.md#replicated-machines). -4. Copy the installer file to the machine you want to migrate. -5. Make sure that you have the passphrase that was generated when you deployed the appliance. - - Store the file in a temporary text file on the machine. - - You can obtain the passphrase on the replication appliance. From the command line, run **C:\ProgramData\ASR\home\svsystems\bin\genpassphrase.exe -v** to view the current passphrase. - - Don't regenerate the passphrase. 
This will break connectivity and you will have to reregister the replication appliance. - - In the */Platform* parameter, specify *VMware* for both VMware machines and physical machines. - -1. Connect to the machine and extract the contents of the installer file to a local folder (such as c:\temp). Run this in an admin command prompt: - - ``` - ren Microsoft-ASR_UA*Windows*release.exe MobilityServiceInstaller.exe - MobilityServiceInstaller.exe /q /x:C:\Temp\Extracted - cd C:\Temp\Extracted - ``` - -2. Run the Mobility Service Installer: - - ``` - UnifiedAgent.exe /Role "MS" /Platform "VmWare" /Silent - ``` - -3. Register the agent with the replication appliance: - - ``` - cd C:\Program Files (x86)\Microsoft Azure Site Recovery\agent - UnifiedAgentConfigurator.exe /CSEndPoint /PassphraseFilePath - ``` - -It may take some time after installation for discovered machines to appear in Azure Migrate: Server Migration. As VMs are discovered, the **Discovered servers** count rises. - -![Discovered servers](../../../migrate/media/tutorial-migrate-physical-virtual-machines/discovered-servers.png) - -## Prepare source machines - -To prepare source machines, you'll need information from the cluster. - -> [!CAUTION] -> - Maintain disk ownership throughout the replication process until the final cutover. If there is a change in disk ownership, there is a chance that the volumes could be corrupted and replication would need to be retriggered. Set the preferred owner for each disk to avoid transfer of ownership during the replication process. -> - Avoid patching activities and system reboots during the replication process to avoid transfer of disk ownership. - -To prepare source machines, do the following: - -1. **Identify disk ownership:** Log in to one of the cluster nodes and open Failover Cluster Manager. Identify the owner node for the disks to determine the disks that need to be migrated with each server. -2.
**Retrieve cluster information:** Run the `Get-ClusterInfo.ps1` script on a cluster node to retrieve information on the cluster resources. The script will output the role name, resource name, IP, and probe port in the `Cluster-Config.csv` file. Use this CSV file to create and assign resource in Azure later in this article. - - ```powershell - ./Get-ClusterInfo.ps1 - ``` - -## Create load balancer - -For the cluster and cluster roles to respond properly to requests, an Azure Load balancer is required. Without a load balancer, the other VMs are unable to reach the cluster IP address as it's not recognized as belonging to the network or the cluster. - -1. Fill out the columns in the `Cluster-Config.csv` file: - - **Column Header** | **Description** - --- | --- - NewIP | Specify the IP address in the Azure virtual network (or subnet) for each resource in the CSV file. - ServicePort | Specify the service port to be used by each resource in the CSV file. For SQL cluster resource, use the same value for service port as the probe port in the CSV. For other cluster roles, the default values used are 1433 but you can continue to use the port numbers that are configured in your current setup. - - -1. Run the `Create-ClusterLoadBalancer.ps1` script to create the load balancer using the following mandatory parameters: - - **Parameter** | **Type** | **Description** - --- | --- | --- - ConfigFilePath | Mandatory | Specify the path for the `Cluster-Config.csv` file that you have filled out in the previous step. - ResourceGroupName | Mandatory| Specify the name of the resource Group in which the load balancer is to be created. - VNetName | Mandatory| Specify the name of the Azure virtual network that the load balancer will be associated to. - SubnetName | Mandatory| Specify the name of the subnet in the Azure virtual network that the load balancer will be associated to. 
- VNetResourceGroupName | Mandatory| Specify the name of the resource group for the Azure virtual network that the load balancer will be associated to. - Location | Mandatory| Specify the location in which the load balancer should be created. - LoadBalancerName | Mandatory| Specify the name of the load balancer to be created. - - - ```powershell - ./Create-ClusterLoadBalancer.ps1 -ConfigFilePath ./cluster-config.csv -ResourceGroupName $resoucegroupname -VNetName $vnetname -subnetName $subnetname -VnetResourceGroupName $vnetresourcegroupname -Location “eastus” -LoadBalancerName $loadbalancername - ``` - -## Replicate machines - -Now, select machines for migration. You can replicate up to 10 machines together. If you need to replicate more, then replicate them simultaneously in batches of 10. - -1. In the Azure Migrate project > **Servers**, **Azure Migrate: Server Migration**, select **Replicate**. - - ![Screenshot of the Azure Migrate - Servers screen showing the Replicate button selected in Azure Migrate: Server Migration under Migration tools.](../../../migrate/media/tutorial-migrate-physical-virtual-machines/select-replicate.png) - -1. In **Replicate**, > **Source settings** > **Are your machines virtualized?**, select **Physical or other (AWS, GCP, Xen, etc.)**. -1. In **On-premises appliance**, select the name of the Azure Migrate appliance that you set up. -1. In **Process Server**, select the name of the replication appliance. -1. In **Guest credentials**, select the dummy account created previously during the [replication installer setup](#download-replication-appliance-installer). Then select **Next: Virtual machines**. - - ![Screenshot of the Source settings tab in the Replicate screen with the Guest credentials field highlighted.](../../../migrate/media/tutorial-migrate-physical-virtual-machines/source-settings.png) - -1. 
In **Virtual Machines**, in **Import migration settings from an assessment?**, leave the default setting **No, I'll specify the migration settings manually**. -1. Check each VM you want to migrate. Then select **Next: Target settings**. - - ![Select VMs](../../../migrate/media/tutorial-migrate-physical-virtual-machines/select-vms.png) - - -1. In **Target settings**, select the subscription, and target region to which you'll migrate, and specify the resource group in which the Azure VMs will reside after migration. -1. In **Virtual Network**, select the Azure VNet/subnet to which the Azure VMs will be joined after migration. -1. In **Availability options**, select: - - Availability Zone to pin the migrated machine to a specific Availability Zone in the region. Use this option to distribute servers that form a multi-node application tier across Availability Zones. If you select this option, you'll need to specify the Availability Zone to use for each of the selected machine in the Compute tab. This option is only available if the target region selected for the migration supports Availability Zones - - Availability Set to place the migrated machine in an Availability Set. The target Resource Group that was selected must have one or more availability sets in order to use this option. - - No infrastructure redundancy required option if you don't need either of these availability configurations for the migrated machines. - -1. In **Disk encryption type**, select: - - Encryption-at-rest with platform-managed key - - Encryption-at-rest with customer-managed key - - Double encryption with platform-managed and customer-managed keys - - > [!NOTE] - > To replicate VMs with CMK, you'll need to [create a disk encryption set](../../../virtual-machines/disks-enable-customer-managed-keys-portal.md#set-up-your-disk-encryption-set) under the target Resource Group. A disk encryption set object maps Managed Disks to a Key Vault that contains the CMK to use for SSE. - -1. 
In **Azure Hybrid Benefit**: - - - Select **No** if you don't want to apply Azure Hybrid Benefit. Then select **Next**. - - Select **Yes** if you have Windows Server machines that are covered with active Software Assurance or Windows Server subscriptions, and you want to apply the benefit to the machines you're migrating. Then select **Next**. - - :::image type="content" source="../../../migrate/media/tutorial-migrate-vmware/target-settings.png" alt-text="Target settings"::: - -1. In **Compute**, review the VM name, size, OS disk type, and availability configuration (if selected in the previous step). VMs must conform with [Azure requirements](../../../migrate/migrate-support-matrix-physical-migration.md#azure-vm-requirements). - - - **VM size**: If you're using assessment recommendations, the VM size dropdown shows the recommended size. Otherwise Azure Migrate picks a size based on the closest match in the Azure subscription. Alternatively, pick a manual size in **Azure VM size**. - - **OS disk**: Specify the OS (boot) disk for the VM. The OS disk is the disk that has the operating system bootloader and installer. - - **Availability Zone**: Specify the Availability Zone to use. - - **Availability Set**: Specify the Availability Set to use. - - ![Compute settings](../../../migrate/media/tutorial-migrate-physical-virtual-machines/compute-settings.png) - -1. In **Disks**, specify whether the VM disks should be replicated to Azure, and select the disk type (standard SSD/HDD or premium managed disks) in Azure. Then select **Next**. - - Use the list that you had made earlier to select the disks to be replicated with each server. Exclude other disks from replication. - - - ![Disk settings](../../../migrate/media/tutorial-migrate-physical-virtual-machines/disks.png) - -1. In **Review and start replication**, review the settings, and select **Replicate** to start the initial replication for the servers. 
- -> [!NOTE] -> You can update replication settings any time before replication starts, **Manage** > **Replicating machines**. Settings can't be changed after replication starts. - -## Track and monitor - -Replication proceeds in the following sequence: - -- When you select **Replicate** a _Start Replication_ job begins. -- When the _Start Replication_ job finishes successfully, the machines begin their initial replication to Azure. -- After initial replication finishes, delta replication begins. Incremental changes to on-premises disks are periodically replicated to the replica disks in Azure. -- After the initial replication is completed, configure the Compute and Network items for each VM. Clusters typically have multiple NICs but only one NIC is required for the migration (set the others as do not create). - -You can track job status in the portal notifications. - -You can monitor replication status by selecting **Replicating servers** in **Azure Migrate: Server Migration**. -![Monitor replication](../../../migrate/media/tutorial-migrate-physical-virtual-machines/replicating-servers.png) - - -## Migrate VMs - -After machines are replicated, they are ready for migration. To migrate your servers, follow these steps: - -1. In the Azure Migrate project > **Servers** > **Azure Migrate: Server Migration**, select **Replicating servers**. - - ![Replicating servers](../../../migrate/media/tutorial-migrate-physical-virtual-machines/replicate-servers.png) - -1. To ensure that the migrated server is synchronized with the source server, stop the SQL Server resource (in **Failover Cluster Manager** > **Roles** > **Other resources**) while ensuring that the cluster disks are online. -1. In **Replicating machines** > select the server name > **Overview**, ensure that the last synchronized timestamp is after you have stopped the SQL Server resource on the servers to be migrated before you move onto the next step. This should only take a few minutes. -1. 
In **Replicating machines**, right-click the VM > **Migrate**. -1. In **Migrate** > **Shut down virtual machines and perform a planned migration with no data loss**, select **No** > **OK**. - - > [!NOTE] - > For Physical Server Migration, shut down of source machine is not supported automatically. The recommendation is to bring the application down as part of the migration window (don't let the applications accept any connections) and then initiate the migration (the server needs to be kept running, so remaining changes can be synchronized) before the migration is completed. - -1. A migration job starts for the VM. Track the job in Azure notifications. -1. After the job finishes, you can view and manage the VM from the **Virtual Machines** page. - -## Reconfigure cluster - -After your VMs have migrated, reconfigure the cluster. Follow these steps: - -1. Shut down the migrated servers in Azure. -1. Add the migrated machines to the backend pool of the load balancer. Navigate to **Load Balancer** > **Backend pools** > select backend pool > **add migrated machines**. - -1. Reconfigure the migrated disks of the servers as shared disks by running the `Create-SharedDisks.ps1` script. The script is interactive and will prompt for a list of machines and then show available disks to be extracted (only data disks). You will be prompted once to select which machines contain the drives to be turned into shared disks. Once selected, you will be prompted again, once per machine, to pick the specific disks. - - **Parameter** | **Type** | **Description** - --- | --- | --- - ResourceGroupName | Mandatory | Specify the name of the resource group containing the migrated servers. - NumberofNodes | Optional | Specify the number of nodes in your failover cluster instance. This parameter is used to identify the right SKU for the shared disks to be created. By default, the script assumes the number of nodes in the cluster to be 2. 
- DiskNamePrefix | Optional | Specify the prefix that you'd want to add to the names of your shared disks. - - ```powershell - ./Create-SharedDisks.ps1 -ResourceGroupName $resourcegroupname -NumberofNodes $nodesincluster -DiskNamePrefix $disknameprefix - ``` - -1. Attach the shared disks to the migrated servers by running the `Attach-SharedDisks.ps1` script. - - **Parameter** | **Type** |**Description** - --- | --- | --- - ResourceGroupName | Mandatory | Specify the name of the resource group containing the migrated servers. - StartingLunNumber | Optional |Specify the starting LUN number that is available for the shared disks to be attached to. By default, the script tries to attach shared disks to LUN starting 0. - - ```powershell - ./Attach-SharedDisks.ps1 -ResourceGroupName $resourcegroupname - ``` - -1. Start the migrated servers in Azure and log in to any node. - -1. Copy the `ClusterConfig.csv` file and run the `Update-ClusterConfig.ps1` script passing the CSV as a parameter. This will ensure the cluster resources are updated with the new configuration for the cluster to work in Azure. - - ```powershell - ./Update-ClusterConfig.ps1 -ConfigFilePath $filepath - ``` - -Your SQL Server failover cluster instance is ready. - -## Complete the migration - -1. After the migration is done, right-click the VM > **Stop migration**. This does the following: - - Stops replication for the on-premises machine. - - Removes the machine from the **Replicating servers** count in Azure Migrate: Server Migration. - - Cleans up replication state information for the machine. -1. Install the Azure VM [Windows](../../../virtual-machines/extensions/agent-windows.md) agent on the migrated machines. -1. Perform any post-migration app tweaks, such as updating database connection strings, and web server configurations. -1. Perform final application and migration acceptance testing on the migrated application now running in Azure. -1. Cut over traffic to the migrated Azure VM instance. -1. 
Remove the on-premises VMs from your local VM inventory. -1. Remove the on-premises VMs from local backups. -1. Update any internal documentation to show the new location and IP address of the Azure VMs. - -## Post-migration best practices - -- For SQL Server: - - Install [SQL Server IaaS Agent extension](../../virtual-machines/windows/sql-server-iaas-agent-extension-automate-management.md) to automate management and administration tasks. - - [Optimize](../../virtual-machines/windows/performance-guidelines-best-practices-checklist.md) SQL Server performance on Azure VMs. - - Understand [pricing](../../virtual-machines/windows/pricing-guidance.md#free-licensed-sql-server-editions) for SQL Server on Azure. -- For increased resilience: - - Keep data secure by backing up Azure VMs using the [Azure Backup service](../../../backup/quick-backup-vm-portal.md). - - Keep workloads running and continuously available by replicating Azure VMs to a secondary region with [Site Recovery](../../../site-recovery/azure-to-azure-tutorial-enable-replication.md). -- For increased security: - - Lock down and limit inbound traffic access with [Microsoft Defender for Cloud - Just in time administration](../../../security-center/security-center-just-in-time.md). - - Restrict network traffic to management endpoints with [Network Security Groups](../../../virtual-network/network-security-groups-overview.md). - - Deploy [Azure Disk Encryption](../../../security/fundamentals/azure-disk-encryption-vms-vmss.md) to help secure disks, and keep data safe from theft and unauthorized access. - - Read more about [securing IaaS resources](https://azure.microsoft.com/services/virtual-machines/secure-well-managed-iaas/), and visit the [Microsoft Defender for Cloud](https://azure.microsoft.com/services/security-center/). 
-- For monitoring and management: - - Consider deploying [Azure Cost Management](../../../cost-management-billing/cost-management-billing-overview.md) to monitor resource usage and spending. - - -## Next steps - -Investigate the [cloud migration journey](/azure/architecture/cloud-adoption/getting-started/migrate) in the Azure Cloud Adoption Framework. diff --git a/articles/azure-sql/migration-guides/virtual-machines/sql-server-to-sql-on-azure-vm-individual-databases-guide.md b/articles/azure-sql/migration-guides/virtual-machines/sql-server-to-sql-on-azure-vm-individual-databases-guide.md deleted file mode 100644 index 40a2c8f24b291..0000000000000 --- a/articles/azure-sql/migration-guides/virtual-machines/sql-server-to-sql-on-azure-vm-individual-databases-guide.md +++ /dev/null @@ -1,216 +0,0 @@ ---- -title: "SQL Server to SQL Server on Azure Virtual Machines: Migration guide" -titleSuffix: SQL Server on Azure VMs -description: In this guide, you learn how to migrate your individual SQL Server databases to SQL Server on Azure Virtual Machines. -ms.custom: "" -ms.service: virtual-machines-sql -ms.subservice: migration-guide -ms.devlang: -ms.topic: how-to -author: markjones-msft -ms.author: markjon -ms.reviewer: chadam, mathoma, randolphwest -ms.date: 04/11/2022 ---- - -# Migration guide: SQL Server to SQL Server on Azure Virtual Machines - -[!INCLUDE[appliesto--sqlmi](../../includes/appliesto-sqlvm.md)] - -In this guide, you learn how to *discover*, *assess*, and *migrate* your user databases from SQL Server to an instance of SQL Server on Azure Virtual Machines by tools and techniques based on your requirements. - -You can migrate SQL Server running on-premises or on: - -- SQL Server on virtual machines (VMs). -- Amazon Web Services (AWS) EC2. -- Amazon Relational Database Service (AWS RDS). -- Compute Engine (Google Cloud Platform [GCP]). 
- -For information about extra migration strategies, see the [SQL Server VM migration overview](sql-server-to-sql-on-azure-vm-migration-overview.md). For other migration guides, see [Azure Database Migration Guides](/data-migration). - -:::image type="content" source="media/sql-server-to-sql-on-azure-vm-migration-overview/migration-process-flow-small.png" alt-text="Diagram that shows a migration process flow."::: - -## Prerequisites - -Migrating to SQL Server on Azure Virtual Machines requires the following resources: - -- [Azure SQL migration extension for Azure Data Studio](/sql/azure-data-studio/extensions/azure-sql-migration-extension). -- An [Azure Migrate project](../../../migrate/create-manage-projects.md) (only required for SQL Server discovery in your data estate). -- A prepared target [SQL Server on Azure Virtual Machines](../../virtual-machines/windows/create-sql-vm-portal.md) instance that's the same or greater version than the SQL Server source. -- [Connectivity between Azure and on-premises](/azure/architecture/reference-architectures/hybrid-networking). -- [Choosing an appropriate migration strategy](sql-server-to-sql-on-azure-vm-migration-overview.md#migrate). - -## Pre-migration - -Before you begin your migration, you need to discover the topology of your SQL environment and assess the feasibility of your intended migration. - -### Discover - -Azure Migrate assesses migration suitability of on-premises computers, performs performance-based sizing, and provides cost estimations for running on-premises. To plan for the migration, use Azure Migrate to [identify existing data sources and details about the features](../../../migrate/concepts-assessment-calculation.md) your SQL Server instances use. This process involves scanning the network to identify all of your SQL Server instances in your organization with the version and features in use. 
- -> [!IMPORTANT] -> When you choose a target Azure virtual machine for your SQL Server instance, be sure to consider the [Performance guidelines for SQL Server on Azure Virtual Machines](../../virtual-machines/windows/performance-guidelines-best-practices-checklist.md). - -For more discovery tools, see the [services and tools](../../../dms/dms-tools-matrix.md#business-justification-phase) available for data migration scenarios. - -### Assess - -When migrating from SQL Server on-premises to SQL Server on Azure Virtual Machines, it is unlikely that you'll have any compatibility or feature parity issues if the source and target SQL Server versions are the same. If you're *not* upgrading the version of SQL Server, skip this step and move to the [Migrate](#migrate) section. - -Before migration, it's still a good practice to run an assessment of your SQL Server databases to identify migration blockers (if any) and the [Azure SQL migration extension for Azure Data Studio](../../../dms/migration-using-azure-data-studio.md) does that before migration. - -[!INCLUDE [assess-estate-with-azure-migrate](../../../../includes/azure-migrate-to-assess-sql-data-estate.md)] - -#### Assess user databases - -The [Azure SQL migration extension for Azure Data Studio](../../../dms/migration-using-azure-data-studio.md) provides a seamless wizard based experience to assess, get Azure recommendations and migrate your SQL Server databases on-premises to SQL Server on Azure Virtual Machines. Besides, highlighting any migration blockers or warnings, the extension also includes an option for Azure recommendations to collect your databases' performance data to recommend a right-sized SQL Server on Azure Virtual Machines to meet the performance needs of your workload (with the least price). - -To learn more about Azure recommendations, see [Get right-sized Azure recommendation for your on-premises SQL Server database(s)](../../../dms/ads-sku-recommend.md). 
- -> [!IMPORTANT] ->To assess databases using the Azure SQL migration extension, ensure that the logins used to connect the source SQL Server are members of the sysadmin server role or have CONTROL SERVER permission. - -For a version upgrade, use [Data Migration Assistant](/sql/dma/dma-overview) to assess on-premises SQL Server instances if you are upgrading to an instance of SQL Server on Azure Virtual Machines with a higher version to understand the gaps between the source and target versions. - -#### Assess the applications - -Typically, an application layer accesses user databases to persist and modify data. Data Migration Assistant can assess the data access layer of an application in two ways: - -- By using captured [extended events](/sql/relational-databases/extended-events/extended-events) or [SQL Server Profiler traces](/sql/tools/sql-server-profiler/create-a-trace-sql-server-profiler) of your user databases. You can also use the [Database Experimentation Assistant](/sql/dea/database-experimentation-assistant-capture-trace) to create a trace log that can also be used for A/B testing. -- By using the [Data Access Migration Toolkit (preview)](https://marketplace.visualstudio.com/items?itemName=ms-databasemigration.data-access-migration-toolkit), which provides discovery and assessment of SQL queries within the code and is used to migrate application source code from one database platform to another. This tool supports popular file types like C#, Java, XML, and plain text. For a guide on how to perform a Data Access Migration Toolkit assessment, see the [Use Data Migration Assistant](https://techcommunity.microsoft.com/t5/microsoft-data-migration/using-data-migration-assistant-to-assess-an-application-s-data/ba-p/990430) blog post. - -During the assessment of user databases, use Data Migration Assistant to [import](/sql/dma/dma-assesssqlonprem#add-databases-and-extended-events-trace-to-assess) captured trace files or Data Access Migration Toolkit files. 
- -#### Assessments at scale - -If you have multiple servers that require Azure readiness assessment, you can automate the process by using scripts using one of the following options. To learn more about using scripting see [Migrate databases at scale using automation](../../../dms/migration-dms-powershell-cli.md). -- [Az.DataMigration PowerShell module](/powershell/module/az.datamigration) -- [az datamigration CLI extension](/cli/azure/datamigration) -- [Data Migration Assistant command-line interface](/sql/dma/dma-commandline) - -For summary reporting across large estates, Data Migration Assistant assessments can also be [consolidated into Azure Migrate](/sql/dma/dma-assess-sql-data-estate-to-sqldb). - -#### Upgrade databases with Data Migration Assistant - -For upgrade scenario, you might have a series of recommendations to ensure your user databases perform and function correctly after upgrade. Data Migration Assistant provides details on the impacted objects and resources for how to resolve each issue. Make sure to resolve all breaking changes and behavior changes before you start production upgrade. - -For deprecated features, you can choose to run your user databases in their original [compatibility](/sql/t-sql/statements/alter-database-transact-sql-compatibility-level) mode if you want to avoid making these changes and speed up migration. This action will prevent [upgrading your database compatibility](/sql/database-engine/install-windows/compatibility-certification#compatibility-levels-and-database-engine-upgrades) until the deprecated items have been resolved. - -> [!CAUTION] -> Not all SQL Server versions support all compatibility modes. Check that your [target SQL Server version](/sql/t-sql/statements/alter-database-transact-sql-compatibility-level) supports your chosen database compatibility. For example, SQL Server 2019 doesn't support databases with level 90 compatibility (which is SQL Server 2005). 
These databases would require, at least, an upgrade to compatibility level 100. -> - -## Migrate - -After you've completed the pre-migration steps, you're ready to migrate the user databases and components. Migrate your databases by using your preferred [migration method](sql-server-to-sql-on-azure-vm-migration-overview.md#migrate). - -The following sections provide steps for performing either a migration by using backup and restore or a minimal downtime migration by using backup and restore along with log shipping. - -### Migrate using the Azure SQL migration extension for Azure Data Studio (minimal downtime) - -To perform a minimal downtime migration using Azure Data Studio, follow the high-level steps below. For a detailed step-by-step tutorial, see [Migrate SQL Server to SQL Server on Azure Virtual Machine online using Azure Data Studio](../../../dms/tutorial-sql-server-to-virtual-machine-online-ads.md): - -1. Download and install [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio) and the [Azure SQL migration extension](/sql/azure-data-studio/extensions/azure-sql-migration-extension). -1. Launch the Migrate to Azure SQL wizard in the extension in Azure Data Studio. -1. Select databases for assessment and view migration readiness or issues (if any). Additionally, collect performance data and get right-sized Azure recommendation. -1. Select your Azure account and your target SQL Server on Azure Machine from your subscription. -1. Select the location of your database backups. Your database backups can either be located on an on-premises network share or in an Azure storage blob container. -1. Create a new Azure Database Migration Service using the wizard in Azure Data Studio. If you have previously created an Azure Database Migration Service using Azure Data Studio, you can reuse it if desired. -1. 
*Optional*: If your backups are on an on-premises network share, download and install [self-hosted integration runtime](https://www.microsoft.com/download/details.aspx?id=39717) on a machine that can connect to source SQL Server and the location containing the backup files. -1. Start the database migration and monitor the progress in Azure Data Studio. You can also monitor the progress under the Azure Database Migration Service resource in Azure portal. -1. Complete the cutover. - 1. Stop all incoming transactions to the source database. - 1. Make application configuration changes to point to the target database in SQL Server on Azure Virtual Machine. - 1. Take any tail log backups for the source database in the backup location specified. - 1. Ensure all database backups have the status Restored in the monitoring details page. - 1. Select Complete cutover in the monitoring details page. - -### Backup and restore - -To perform a standard migration by using backup and restore: - -1. Set up connectivity to SQL Server on Azure Virtual Machines based on your requirements. For more information, see [Connect to a SQL Server virtual machine on Azure (Resource Manager)](../../virtual-machines/windows/ways-to-connect-to-sql.md). -1. Pause or stop any applications that are using databases intended for migration. -1. Ensure user databases are inactive by using [single user mode](/sql/relational-databases/databases/set-a-database-to-single-user-mode). -1. Perform a full database backup to an on-premises location. -1. Copy your on-premises backup files to your VM by using a remote desktop, [Azure Data Explorer](/azure/data-explorer/data-explorer-overview), or the [AzCopy command-line utility](../../../storage/common/storage-use-azcopy-v10.md). (AzCopy is recommended for backups greater than 2 TB.) -1. Restore full database backups to the SQL Server on Azure Virtual Machines. 
- -### Migrate objects outside user databases - -More SQL Server objects might be required for the seamless operation of your user databases post migration. - -The following table provides a list of components and recommended migration methods that can be completed before or after migration of your user databases. - -| **Feature** | **Component** | **Migration methods** | -| --- | --- | --- | -| **Databases** | Model | Script with SQL Server Management Studio. | -|| TempDB | Plan to move tempDB onto [Azure VM temporary disk (SSD)](../../virtual-machines/windows/performance-guidelines-best-practices-checklist.md#storage) for best performance. Be sure to pick a VM size that has a sufficient local SSD to accommodate your tempDB. | -|| User databases with FileStream | Use the [Backup and restore](../../virtual-machines/windows/migrate-to-vm-from-sql-server.md#back-up-and-restore) methods for migration. Data Migration Assistant doesn't support databases with FileStream. | -| **Security** | SQL Server and Windows logins | Use Data Migration Assistant to [migrate user logins](/sql/dma/dma-migrateserverlogins). | -|| SQL Server roles | Script with SQL Server Management Studio. | -|| Cryptographic providers | Recommend [converting to use Azure Key Vault](../../virtual-machines/windows/azure-key-vault-integration-configure.md). This procedure uses the [SQL VM resource provider](../../virtual-machines/windows/sql-agent-extension-manually-register-single-vm.md). | -| **Server objects** | Backup devices | Replace with database backup by using [Azure Backup](../../../backup/backup-sql-server-database-azure-vms.md), or write backups to [Azure Storage](../../virtual-machines/windows/azure-storage-sql-server-backup-restore-use.md) (SQL Server 2012 SP1 CU2 +). This procedure uses the [SQL VM resource provider](../../virtual-machines/windows/sql-agent-extension-manually-register-single-vm.md).| -|| Linked servers | Script with SQL Server Management Studio. 
| -|| Server triggers | Script with SQL Server Management Studio. | -| **Replication** | Local publications | Script with SQL Server Management Studio. | -|| Local subscribers | Script with SQL Server Management Studio. | -| **PolyBase** | PolyBase | Script with SQL Server Management Studio. | -| **Management** | Database mail | Script with SQL Server Management Studio. | -| **SQL Server Agent** | Jobs | Script with SQL Server Management Studio. | -|| Alerts | Script with SQL Server Management Studio. | -|| Operators | Script with SQL Server Management Studio. | -|| Proxies | Script with SQL Server Management Studio. | -| **Operating system** | Files, file shares | Make a note of any other files or file shares that are used by your SQL servers and replicate on the Azure Virtual Machines target. | - -## Post-migration - -After you've successfully completed the migration stage, you need to complete a series of post-migration tasks to ensure that everything is functioning as smoothly and efficiently as possible. - -### Remediate applications - -After the data is migrated to the target environment, all the applications that formerly consumed the source need to start consuming the target. Accomplishing this task might require changes to the applications in some cases. - -Apply any fixes recommended by Data Migration Assistant to user databases. You need to script these fixes to ensure consistency and allow for automation. - -### Perform tests - -The test approach to database migration consists of the following activities: - -1. **Develop validation tests**: To test the database migration, you need to use SQL queries. Create validation queries to run against both the source and target databases. Your validation queries should cover the scope you've defined. -1. **Set up a test environment**: The test environment should contain a copy of the source database and the target database. Be sure to isolate the test environment. -1. 
**Run validation tests**: Run validation tests against the source and the target, and then analyze the results. -1. **Run performance tests**: Run performance tests against the source and target, and then analyze and compare the results. - -> [!TIP] -> Use the [Database Experimentation Assistant](/sql/dea/database-experimentation-assistant-overview) to assist with evaluating the target SQL Server performance. - -### Optimize - -The post-migration phase is crucial for reconciling any data accuracy issues, verifying completeness, and addressing potential performance issues with the workload. - -For more information about these issues and the steps to mitigate them, see: - -- [Post-migration validation and optimization guide](/sql/relational-databases/post-migration-validation-and-optimization-guide) -- [Tuning performance in Azure SQL virtual machines](../../virtual-machines/windows/performance-guidelines-best-practices-checklist.md) -- [Azure cost optimization center](https://azure.microsoft.com/overview/cost-optimization/) - -## Next steps - -- To check the availability of services that apply to SQL Server, see the [Azure global infrastructure center](https://azure.microsoft.com/global-infrastructure/services/?regions=all&products=synapse-analytics,virtual-machines,sql-database). -- For a matrix of Microsoft and third-party services and tools that are available to assist you with various database and data migration scenarios and specialty tasks, see [Services and tools for data migration](../../../dms/dms-tools-matrix.md). 
-- To learn more about Azure SQL, see: - - [Deployment options](../../azure-sql-iaas-vs-paas-what-is-overview.md) - - [SQL Server on Azure Virtual Machines](../../virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md) - - [Azure Total Cost of Ownership (TCO) Calculator](https://azure.microsoft.com/pricing/tco/calculator/) - -- To learn more about the framework and adoption cycle for cloud migrations, see: - - [Cloud Adoption Framework for Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/contoso-migration-scale) - - [Best practices for costing and sizing workloads for migration to Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/migrate-best-practices-costs) - -- For information about licensing, see: - - [Bring your own license with the Azure Hybrid Benefit](../../virtual-machines/windows/licensing-model-azure-hybrid-benefit-ahb-change.md) - - [Get free extended support for SQL Server 2008 and SQL Server 2008 R2](../../virtual-machines/windows/sql-server-2008-extend-end-of-support.md) - -- To assess the application access layer, see [Data Access Migration Toolkit (preview)](https://marketplace.visualstudio.com/items?itemName=ms-databasemigration.data-access-migration-toolkit). -- For information about how to perform A/B testing for the data access layer, see [Overview of Database Experimentation Assistant](/sql/dea/database-experimentation-assistant-overview). 
diff --git a/articles/azure-sql/migration-guides/virtual-machines/sql-server-to-sql-on-azure-vm-migration-overview.md b/articles/azure-sql/migration-guides/virtual-machines/sql-server-to-sql-on-azure-vm-migration-overview.md deleted file mode 100644 index 08ea8fe599cde..0000000000000 --- a/articles/azure-sql/migration-guides/virtual-machines/sql-server-to-sql-on-azure-vm-migration-overview.md +++ /dev/null @@ -1,202 +0,0 @@ ---- -title: SQL Server to SQL Server on Azure VM (Migration overview) -titleSuffix: SQL Server on Azure VMs -description: Learn about the different migration strategies when you want to migrate your SQL Server to SQL Server on Azure VMs. -ms.service: virtual-machines-sql -ms.subservice: migration-guide -ms.topic: how-to -author: markjones-msft -ms.author: markjon -ms.reviewer: chadam, mathoma -ms.date: 09/07/2021 ---- - -# Migration overview: SQL Server to SQL Server on Azure VMs -[!INCLUDE[appliesto--sqlmi](../../includes/appliesto-sqlvm.md)] - -Learn about the different migration strategies to migrate your SQL Server to SQL Server on Azure Virtual Machines (VMs). - -You can migrate SQL Server running on-premises or on: - -- SQL Server on Virtual Machines -- Amazon Web Services (AWS) EC2 -- Amazon Relational Database Service (AWS RDS) -- Compute Engine (Google Cloud Platform - GCP) - -For other migration guides, see [Database Migration](/data-migration). - -## Overview - -Migrate to [SQL Server on Azure Virtual Machines (VMs)](../../virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md) when you want to use the familiar SQL Server environment with OS control, and want to take advantage of cloud-provided features such as built-in VM high availability, [automated backups](../../virtual-machines/windows/automated-backup.md), and [automated patching](../../virtual-machines/windows/automated-patching.md). 
- -Save on costs by bringing your own license with the [Azure Hybrid Benefit licensing model](../../virtual-machines/windows/licensing-model-azure-hybrid-benefit-ahb-change.md) or extend support for SQL Server 2008 and SQL Server 2008 R2 by getting [free security updates](../../virtual-machines/windows/sql-server-2008-extend-end-of-support.md). - - -## Choose appropriate target - -Azure Virtual Machines run in many different regions of Azure and also offer a variety of [machine sizes](../../../virtual-machines/sizes.md) and [Storage options](../../../virtual-machines/disks-types.md). -When determining the correct size of VM and Storage for your SQL Server workload, refer to the [Performance Guidelines for SQL Server on Azure Virtual Machines.](../../virtual-machines/windows/performance-guidelines-best-practices-checklist.md#vm-size). - -You can use the [Azure SQL migration extension for Azure Data Studio](/sql/azure-data-studio/extensions/azure-sql-migration-extension) to get right-sized SQL Server on Azure Virtual Machines recommendation. The extension collects performance data from your source SQL Server instance to provide right-sized Azure recommendation that meets your workload's performance needs with minimal cost. To learn more, see [Get right-sized Azure recommendation for your on-premises SQL Server database(s)](../../../dms/ads-sku-recommend.md) - -To determine the VM size and storage requirements for all your workloads in your data estate, it is recommended that these are sized through a Performance-Based [Azure Migrate Assessment](../../../migrate/concepts-assessment-calculation.md#types-of-assessments). If this is not an available option, see the following article on creating your own [baseline for performance](https://azure.microsoft.com/services/virtual-machines/sql-server/). - -Consideration should also be made on the correct installation and configuration of SQL Server on a VM. 
It is recommended to use the [Azure SQL virtual machine image gallery](../../virtual-machines/windows/create-sql-vm-portal.md) as this allows you to create a SQL Server VM with the right version, edition, and operating system. This will also register the Azure VM with the SQL Server [Resource Provider](../../virtual-machines/windows/create-sql-vm-portal.md) automatically, enabling features such as Automated Backups and Automated Patching. - -## Migration strategies - -There are two migration strategies to migrate your user databases to an instance of SQL Server on Azure VMs: -**migrate**, and **lift and shift**. - -The appropriate approach for your business typically depends on the following factors: - -- Size and scale of migration -- Speed of migration -- Application support for code change -- Need to change SQL Server Version, Operating System, or both. -- Supportability life cycle of your existing products -- Window for application downtime during migration - -The following table describes differences in the two migration strategies: -
    - -| **Migration strategy** | **Description** | **When to use** | -| --- | --- | --- | -| **Lift & shift** | Use the lift and shift migration strategy to move the entire physical or virtual SQL Server from its current location onto an instance of SQL Server on Azure VM without any changes to the operating system, or SQL Server version. To complete a lift and shift migration, see [Azure Migrate](../../../migrate/migrate-services-overview.md).

    The source server remains online and services requests while the source and destination server synchronize data allowing for an almost seamless migration. | Use for single to very large-scale migrations, even applicable to scenarios such as data center exit.

    Minimal to no code changes required to user SQL databases or applications, allowing for faster overall migrations.

    No additional steps required for migrating the Business Intelligence services such as [SSIS](/sql/integration-services/sql-server-integration-services), [SSRS](/sql/reporting-services/create-deploy-and-manage-mobile-and-paginated-reports), and [SSAS](/analysis-services/analysis-services-overview). | -|**Migrate** | Use a migration strategy when you want to upgrade the target SQL Server and/or operating system version.

    Select an Azure VM from Azure Marketplace or a prepared SQL Server image that matches the source SQL Server version.

    Use the [Azure SQL migration extension for Azure Data Studio](../../../dms/migration-using-azure-data-studio.md) to assess, get recommendations for right-sized Azure configuration (VM series, compute and storage) and migrate SQL Server database(s) to SQL Server on Azure virtual machines with minimal downtime. | Use when there is a requirement or desire to migrate to SQL Server on Azure Virtual Machines, or if there is a requirement to upgrade legacy SQL Server and/or OS versions that are no longer in support.

    May require some application or user database changes to support the SQL Server upgrade.

    There may be additional considerations for migrating [Business Intelligence](#business-intelligence) services if in the scope of migration. | - - -## Lift and shift - -The following table details the available method for the **lift and shift** migration strategy to migrate your SQL Server database to SQL Server on Azure VMs: -
    - -|**Method** | **Minimum source version** | **Minimum target version** | **Source backup size constraint** | **Notes** | -| --- | --- | --- | --- | --- | -| [Azure Migrate](../../../migrate/index.yml) | SQL Server 2008 SP4| SQL Server 2008 SP4| [Azure VM storage limit](../../../index.yml) | Existing SQL Server to be moved as-is to instance of SQL Server on an Azure VM. Can scale migration workloads of up to 35,000 VMs.

    Source server(s) remain online and servicing requests during synchronization of server data, minimizing downtime.

    **Automation & scripting**: [Azure Site Recovery Scripts](../../../migrate/how-to-migrate-at-scale.md) and [Example of scaled migration and planning for Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/contoso-migration-scale)| - -> [!NOTE] -> It's now possible to lift and shift both your [failover cluster instance](sql-server-failover-cluster-instance-to-sql-on-azure-vm.md) and [availability group](sql-server-availability-group-to-sql-on-azure-vm.md) solution to SQL Server on Azure VMs using Azure Migrate. - -## Migrate - -Due to the ease of setup, the recommended migration approach is to take a native SQL Server [backup](/sql/t-sql/statements/backup-transact-sql) locally and then copy the file to Azure. This method supports larger databases (>1 TB) for all versions of SQL Server starting from 2008 and larger database backups (>1 TB). However, for databases starting in SQL Server 2014, that are smaller than 1 TB, and that have good connectivity to Azure, then [SQL Server backup to URL](/sql/relational-databases/backup-restore/sql-server-backup-to-url) is the better approach. - -When migrating SQL Server databases to an instance of SQL Server on Azure VMs, it is important to choose an approach that suits when you need to cutover to the target server as this affects the application downtime window. - -The following table details all available methods to migrate your SQL Server database to SQL Server on Azure VMs: -
    - -|**Method** | **Minimum source version** | **Minimum target version** | **Source backup size constraint** | **Notes** | -| --- | --- | --- | --- | --- | -| **[Azure SQL migration extension for Azure Data Studio](../../../dms/migration-using-azure-data-studio.md)** | SQL Server 2008 | SQL Server 2008 | [Azure VM storage limit](../../../index.yml) | This is an easy to use wizard based extension in Azure Data Studio for migrating SQL Server database(s) to SQL Server on Azure virtual machines. Use compression to minimize backup size for transfer.

    The Azure SQL migration extension for Azure Data Studio provides assessment, Azure recommendation and migration capabilities in a simple user interface and supports minimal downtime migrations. | -| **[Distributed availability group](sql-server-distributed-availability-group-migrate-prerequisites.md)** | SQL Server 2016| SQL Server 2016 | [Azure VM storage limit](../../../index.yml) | A [distributed availability group](/sql/database-engine/availability-groups/windows/distributed-availability-groups) is a special type of availability group that spans two separate availability groups. The availability groups that participate in a distributed availability group do not need to be in the same location and include cross-domain support.

    This method minimizes downtime, use when you have an availability group configured on-premises.

    **Automation & scripting**: [T-SQL](/sql/t-sql/statements/alter-availability-group-transact-sql) | -| **[Backup to a file](sql-server-to-sql-on-azure-vm-individual-databases-guide.md#migrate)** | SQL Server 2008 SP4 | SQL Server 2008 SP4| [Azure VM storage limit](../../../index.yml) | This is a simple and well-tested technique for moving databases across machines. Use compression to minimize backup size for transfer.

    **Automation & scripting**: [Transact-SQL (T-SQL)](/sql/t-sql/statements/backup-transact-sql) and [AzCopy to Blob storage](../../../storage/common/storage-use-azcopy-v10.md) | -| **[Backup to URL](/sql/relational-databases/backup-restore/sql-server-backup-to-url)** | SQL Server 2012 SP1 CU2 | SQL Server 2012 SP1 CU2| 12.8 TB for SQL Server 2016, otherwise 1 TB | An alternative way to move the backup file to the VM using Azure storage. Use compression to minimize backup size for transfer.

    **Automation & scripting**: [T-SQL or maintenance plan](/sql/relational-databases/backup-restore/sql-server-backup-to-url) | -| **[Database Migration Assistant (DMA)](/sql/dma/dma-overview)** | SQL Server 2005| SQL Server 2008 SP4| [Azure VM storage limit](../../../index.yml) | The [DMA](/sql/dma/dma-overview) assesses SQL Server on-premises and then seamlessly upgrades to later versions of SQL Server or migrates to SQL Server on Azure VMs, Azure SQL Database or Azure SQL Managed Instance.

    Should not be used on Filestream-enabled user databases.

    DMA also includes capability to migrate [SQL and Windows logins](/sql/dma/dma-migrateserverlogins) and assess [SSIS Packages](/sql/dma/dma-assess-ssis).

    **Automation & scripting**: [Command line interface](/sql/dma/dma-commandline) | -| **[Detach and attach](../../virtual-machines/windows/migrate-to-vm-from-sql-server.md#detach-and-attach-from-a-url)** | SQL Server 2008 SP4 | SQL Server 2014 | [Azure VM storage limit](../../../index.yml) | Use this method when you plan to [store these files using the Azure Blob storage service](/sql/relational-databases/databases/sql-server-data-files-in-microsoft-azure) and attach them to an instance of SQL Server on an Azure VM, particularly useful with very large databases or when the time to backup and restore is too long.

    **Automation & scripting**: [T-SQL](/sql/relational-databases/databases/detach-a-database#TsqlProcedure) and [AzCopy to Blob storage](../../../storage/common/storage-use-azcopy-v10.md)| -|**[Log shipping](sql-server-to-sql-on-azure-vm-individual-databases-guide.md#migrate)** | SQL Server 2008 SP4 (Windows Only) | SQL Server 2008 SP4 (Windows Only) | [Azure VM storage limit](../../../index.yml) | Log shipping replicates transactional log files from on-premises on to an instance of SQL Server on an Azure VM.

    This provides minimal downtime during failover and has less configuration overhead than setting up an Always On availability group.

    **Automation & scripting**: [T-SQL](/sql/database-engine/log-shipping/log-shipping-tables-and-stored-procedures) | - - -  -  -   -  -  - -> [!TIP] -> - For large data transfers with limited to no network options, see [Large data transfers with limited connectivity](../../../storage/common/storage-solution-large-dataset-low-network.md). -> - It's now possible to lift and shift both your [failover cluster instance](sql-server-failover-cluster-instance-to-sql-on-azure-vm.md) and [availability group](sql-server-availability-group-to-sql-on-azure-vm.md) solution to SQL Server on Azure VMs using Azure Migrate. - -### Considerations - -The following is a list of key points to consider when reviewing migration methods: - -- For optimum data transfer performance, migrate databases and files onto an instance of SQL Server on Azure VM using a compressed backup file. For larger databases, in addition to compression, [split the backup file into smaller files](/sql/relational-databases/backup-restore/back-up-files-and-filegroups-sql-server) for increased performance during backup and transfer. -- If migrating from SQL Server 2014 or higher, consider [encrypting the backups](/sql/relational-databases/backup-restore/backup-encryption) to protect data during network transfer. -- To minimize downtime during database migration, use the Azure SQL migration extension in Azure Data Studio or Always On availability group option. -- For limited to no network options, use offline migration methods such as backup and restore, or [disk transfer services](../../../storage/common/storage-solution-large-dataset-low-network.md) available in Azure. -- To also change the version of SQL Server on a SQL Server on Azure VM, see [change SQL Server edition](../../virtual-machines/windows/change-sql-server-edition.md). - -## Business Intelligence - -There may be additional considerations when migrating SQL Server Business Intelligence services outside the scope of database migrations. 
- -### SQL Server Integration Services - -You can migrate SQL Server Integration Services (SSIS) packages and projects in SSISDB to SQL Server on Azure VM using one of the two methods below. - -- Backup and restore the SSISDB from the source SQL Server instance to SQL Server on Azure VM. This will restore your packages in the SSISDB to the [Integration Services Catalog on your target SQL Server on Azure VM](/sql/integration-services/catalog/ssis-catalog). -- Re-deploy your SSIS packages on your target SQL Server on Azure VM using one of the [deployment options](/sql/integration-services/packages/deploy-integration-services-ssis-projects-and-packages). - -If you have SSIS packages deployed as package deployment model, you can convert them before migration. See the [project conversion tutorial](/sql/integration-services/lesson-6-2-converting-the-project-to-the-project-deployment-model) to learn more. - - -### SQL Server Reporting Services -To migrate your SQL Server Reporting Services (SSRS) reports to your target SQL Server on Azure VM, see [Migrate a Reporting Services Installation (Native Mode)](/sql/reporting-services/install-windows/migrate-a-reporting-services-installation-native-mode) - -Alternatively, you can also migrate SSRS reports to paginated reports in Power BI. Use the [RDL Migration Tool](https://github.com/microsoft/RdlMigration) to help prepare and migrate your reports. Microsoft developed this tool to help customers migrate Report Definition Language (RDL) reports from their SSRS servers to Power BI. It's available on GitHub, and it documents an end-to-end walkthrough of the migration scenario. 
- -### SQL Server Analysis Services -SQL Server Analysis Services databases (multidimensional or tabular models) can be migrated from your source SQL Server to SQL Server on Azure VM using one of the following options: - -- Interactively using SSMS -- Programmatically using Analysis Management Objects (AMO) -- By script using XMLA (XML for Analysis) - -See [Move an Analysis Services Database](/analysis-services/multidimensional-models/move-an-analysis-services-database?view=asallproducts-allversions&preserve-view=true) to learn more. - -Alternatively, you can consider migrating your on-premises Analysis Services tabular models to [Azure Analysis Services](https://azure.microsoft.com/resources/videos/azure-analysis-services-moving-models/) or to [Power BI Premium by using the new XMLA read/write endpoints](/power-bi/admin/service-premium-connect-tools). - -## Server objects - -Depending on the setup in your source SQL Server, there may be additional SQL Server features that will require manual intervention to migrate them to SQL Server on Azure VM by generating scripts in Transact-SQL (T-SQL) using SQL Server Management Studio and then running the scripts on the target SQL Server on Azure VM. Some of the commonly used features are: - -- Logins and roles -- Linked server(s) -- External Data Sources -- Agent jobs -- Alerts -- Database Mail -- Replication - -## Supported versions - -As you prepare for migrating SQL Server databases to SQL Server on Azure VMs, be sure to consider the versions of SQL Server that are supported. For a list of current supported SQL Server versions on Azure VMs, please see [SQL Server on Azure VMs](../../virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md#getting-started). - -## Migration assets - -For additional assistance, see the following resources that were developed for real world migration projects. 
- -|Asset |Description | -|---------|---------| -|[Data workload assessment model and tool](https://www.microsoft.com/download/details.aspx?id=103130)| This tool provides suggested "best fit" target platforms, cloud readiness, and application/database remediation level for a given workload. It offers simple, one-click calculation and report generation that helps to accelerate large estate assessments by providing and automated and uniform target platform decision process.| -|[Perfmon data collection automation using Logman](https://www.microsoft.com/download/details.aspx?id=103114)|A tool that collects Perform data to understand baseline performance that assists in the migration target recommendation. This tool that uses logman.exe to create the command that will create, start, stop, and delete performance counters set on a remote SQL Server.| -|[Multiple-SQL-VM-VNet-ILB](https://www.microsoft.com/download/details.aspx?id=103104)|This whitepaper outlines the steps to setup multiple Azure virtual machines in a SQL Server Always On Availability Group configuration.| -|[Azure virtual machines supporting Ultra SSD per Region](https://www.microsoft.com/download/details.aspx?id=103105)|These PowerShell scripts provide a programmatic option to retrieve the list of regions that support Azure virtual machines supporting Ultra SSDs.| - -The Data SQL Engineering team developed these resources. This team's core charter is to unblock and accelerate complex modernization for data platform migration projects to Microsoft's Azure data platform. - -## Next steps - -To start migrating your SQL Server databases to SQL Server on Azure VMs, see the [Individual database migration guide](sql-server-to-sql-on-azure-vm-individual-databases-guide.md). 
- -- For a matrix of the Microsoft and third-party services and tools that are available to assist you with various database and data migration scenarios as well as specialty tasks, see the article [Service and tools for data migration.](../../../dms/dms-tools-matrix.md) - -- To learn more about Azure SQL see: - - [Deployment options](../../azure-sql-iaas-vs-paas-what-is-overview.md) - - [SQL Server on Azure VMs](../../virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md) - - [Azure total Cost of Ownership Calculator](https://azure.microsoft.com/pricing/tco/calculator/) - - -- To learn more about the framework and adoption cycle for Cloud migrations, see - - [Cloud Adoption Framework for Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/contoso-migration-scale) - - [Best practices for costing and sizing workloads migrate to Azure](/azure/cloud-adoption-framework/migrate/azure-best-practices/migrate-best-practices-costs) - -- For information about licensing, see - - [Bring your own license with the Azure Hybrid Benefit](../../virtual-machines/windows/licensing-model-azure-hybrid-benefit-ahb-change.md) - - [Get free extended support for SQL Server 2008 and SQL Server 2008 R2](../../virtual-machines/windows/sql-server-2008-extend-end-of-support.md) - - -- To assess the Application access layer, see [Data Access Migration Toolkit (Preview)](https://marketplace.visualstudio.com/items?itemName=ms-databasemigration.data-access-migration-toolkit) -- For details on how to perform Data Access Layer A/B testing see [Database Experimentation Assistant](/sql/dea/database-experimentation-assistant-overview). 
diff --git a/articles/azure-sql/multi-model-features.md b/articles/azure-sql/multi-model-features.md deleted file mode 100644 index 4ce86d8c6e62d..0000000000000 --- a/articles/azure-sql/multi-model-features.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -title: Multi-model capabilities -description: Microsoft Azure SQL enables you to work with multiple data models in the same database. -services: sql-database -ms.service: sql-db-mi -ms.subservice: service-overview -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: conceptual -author: yorek -ms.author: damauri -ms.reviewer: mathoma, urmilano, kendralittle -ms.date: 12/17/2018 ---- -# Multi-model capabilities of Azure SQL Database and SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](includes/appliesto-sqldb-sqlmi.md)] - -Multi-model databases enable you to store and work with data in multiple formats, such as relational data, graph, JSON or XML documents, spatial data, and key-value pairs. - -The [Azure SQL family of products](azure-sql-iaas-vs-paas-what-is-overview.md) uses a relational model that provides the best performance for a variety of general-purpose applications. However, Azure SQL products like Azure SQL Database and SQL Managed Instance are not limited to relational data. They enable you to use non-relational formats that are tightly integrated into the relational model. - -Consider using the multi-model capabilities of Azure SQL in the following cases: - -- You have some information or structures that are a better fit for NoSQL models, and you don't want to use a separate NoSQL database. -- A majority of your data is suitable for a relational model, and you need to model some parts of your data in a NoSQL style. -- You want to use the Transact-SQL language to query and analyze both relational and NoSQL data, and then integrate that data with tools and applications that can use the SQL language. 
-- You want to apply database features such as [in-memory technologies](in-memory-oltp-overview.md) to improve the performance of your analytics or the processing of your NoSQL data structures. You can use [transactional replication](managed-instance/replication-transactional-overview.md) or [readable replicas](database/read-scale-out.md) to create copies of your data and offload some analytic workloads from the primary database. - -The following sections describe the most important multi-model capabilities of Azure SQL. - -> [!Note] -> You can use JSONPath expressions, XQuery/XPath expressions, spatial functions, and graph query expressions in the same Transact-SQL query to access any data that you stored in the database. Any tool or programming language that can execute Transact-SQL queries can also use that query interface to access multi-model data. This is the key difference from multi-model databases such as [Azure Cosmos DB](../cosmos-db/index.yml), which provide specialized APIs for data models. - -## Graph features - -Azure SQL products offer graph database capabilities to model many-to-many relationships in a database. A graph is a collection of nodes (or vertices) and edges (or relationships). A node represents an entity (for example, a person or an organization). An edge represents a relationship between the two nodes that it connects (for example, likes or friends). - -Here are some features that make a graph database unique: - -- Edges are first-class entities in a graph database. They can have attributes or properties associated with them. -- A single edge can flexibly connect multiple nodes in a graph database. -- You can express pattern matching and multi-hop navigation queries easily. -- You can express transitive closure and polymorphic queries easily. 
- -[Graph relationships and graph query capabilities](/sql/relational-databases/graphs/sql-graph-overview) are integrated into Transact-SQL and receive the benefits of using the SQL Server database engine as the foundational database management system. Graph features use standard Transact-SQL queries enhanced with the graph `MATCH` operator to query the graph data. - -A relational database can achieve anything that a graph database can. However, a graph database can make it easier to express certain queries. Your decision to choose one over the other can be based on the following factors: - -- You need to model hierarchical data where one node can have multiple parents, so you can't use [the hierarchyId data type](/sql/t-sql/data-types/hierarchyid-data-type-method-reference). -- Your application has complex many-to-many relationships. As the application evolves, new relationships are added. -- You need to analyze interconnected data and relationships. -- You want to use graph-specific T-SQL search conditions such as [SHORTEST_PATH](/sql/relational-databases/graphs/sql-graph-shortest-path). - -## JSON features - -In Azure SQL products, you can parse and query data represented in [JavaScript Object Notation (JSON)](https://www.json.org/) format, and export your relational data as JSON text. [JSON](/sql/relational-databases/json/json-data-sql-server) is a core feature of the SQL Server database engine. - -JSON features enable you to put JSON documents in tables, transform relational data into JSON documents, and transform JSON documents into relational data. You can use the standard Transact-SQL language enhanced with JSON functions for parsing documents. You can also use non-clustered indexes, columnstore indexes, or memory-optimized tables to optimize your queries. - -JSON is a popular data format for exchanging data in modern web and mobile applications. JSON is also used for storing semistructured data in log files or in NoSQL databases. 
Many REST web services return results formatted as JSON text or accept data formatted as JSON. - -Most Azure services have REST endpoints that return or consume JSON. These services include [Azure Cognitive Search](https://azure.microsoft.com/services/search/), [Azure Storage](https://azure.microsoft.com/services/storage/), and [Azure Cosmos DB](https://azure.microsoft.com/services/cosmos-db/). - -If you have JSON text, you can extract data from JSON or verify that JSON is properly formatted by using the built-in functions [JSON_VALUE](/sql/t-sql/functions/json-value-transact-sql), [JSON_QUERY](/sql/t-sql/functions/json-query-transact-sql), and [ISJSON](/sql/t-sql/functions/isjson-transact-sql). The other functions are: - -- [JSON_MODIFY](/sql/t-sql/functions/json-modify-transact-sql): Lets you update values inside JSON text. -- [OPENJSON](/sql/t-sql/functions/openjson-transact-sql): Can transform an array of JSON objects into a set of rows, for more advanced querying and analysis. Any SQL query can be executed on the returned result set. -- [FOR JSON](/sql/relational-databases/json/format-query-results-as-json-with-for-json-sql-server): Lets you format data stored in your relational tables as JSON text. - -![Diagram that illustrates JSON functions.](./media/multi-model-features/image_1.png) - -For more information, see [How to work with JSON data](database/json-features.md). - -You can use document models instead of the relational models in some specific scenarios: - -- High normalization of the schema doesn't bring significant benefits because you access all the fields of the objects at once, or you never update normalized parts of the objects. However, the normalized model increases the complexity of your queries because you need to join a large number of tables to get the data. 
-- You're working with applications that natively use JSON documents for communication or data models, and you don't want to introduce more layers that transform relational data into JSON and vice versa. -- You need to simplify your data model by denormalizing child tables or Entity-Object-Value patterns. -- You need to load or export data stored in JSON format without an additional tool that parses the data. - -## XML features - -XML features enable you to store and index XML data in your database and use native XQuery/XPath operations to work with XML data. Azure SQL products have a specialized, built-in XML data type and query functions that process XML data. - -The SQL Server database engine provides a powerful platform for developing applications to manage semistructured data. [Support for XML](/sql/relational-databases/xml/xml-data-sql-server) is integrated into all the components of the database engine and includes: - -- The ability to store XML values natively in an XML data-type column that can be typed according to a collection of XML schemas or left untyped. You can index the XML column. -- The ability to specify an XQuery query against XML data stored in columns and variables of the XML type. You can use XQuery functionalities in any Transact-SQL query that accesses a data model that you use in your database. -- Automatic indexing of all elements in XML documents by using the [primary XML index](/sql/relational-databases/xml/xml-indexes-sql-server#primary-xml-index). Or you can specify the exact paths that should be indexed by using the [secondary XML index](/sql/relational-databases/xml/xml-indexes-sql-server#secondary-xml-indexes). -- `OPENROWSET`, which allows the bulk loading of XML data. -- The ability to transform relational data into XML format. 
- -You can use document models instead of the relational models in some specific scenarios: - -- High normalization of the schema doesn't bring significant benefits because you access all the fields of the objects at once, or you never update normalized parts of the objects. However, the normalized model increases the complexity of your queries because you need to join a large number of tables to get the data. -- You're working with applications that natively use XML documents for communication or data models, and you don't want to introduce more layers that transform relational data into JSON and vice versa. -- You need to simplify your data model by denormalizing child tables or Entity-Object-Value patterns. -- You need to load or export data stored in XML format without an additional tool that parses the data. - -## Spatial features - -Spatial data represents information about the physical location and shape of objects. These objects can be point locations or more complex objects such as countries/regions, roads, or lakes. - -Azure SQL supports two spatial data types: - -- The geometry type represents data in a Euclidean (flat) coordinate system. -- The geography type represents data in a round-earth coordinate system. - -Spatial features in Azure SQL enable you to store geometrical and geographical data. You can use spatial objects in Azure SQL to parse and query data represented in JSON format, and export your relational data as JSON text. These spatial objects include [Point](/sql/relational-databases/spatial/point), [LineString](/sql/relational-databases/spatial/linestring), and [Polygon](/sql/relational-databases/spatial/polygon). Azure SQL also provides specialized [spatial indexes](/sql/relational-databases/spatial/spatial-indexes-overview) that you can use to improve the performance of your spatial queries. - -[Spatial support](/sql/relational-databases/spatial/spatial-data-sql-server) is a core feature of the SQL Server database engine. 
- -## Key-value pairs - -Azure SQL products don't have specialized types or structures that support key-value pairs, because key-value structures can be natively represented as standard relational tables: - -```sql -CREATE TABLE Collection ( - Id int identity primary key, - Data nvarchar(max) -) -``` - -You can customize this key-value structure to fit your needs without any constraints. As an example, the value can be an XML document instead of the `nvarchar(max)` type. If the value is a JSON document, you can use a `CHECK` constraint that verifies the validity of JSON content. You can put any number of values related to one key in the additional columns. For example: - -- Add computed columns and indexes to simplify and optimize data access. -- Define the table as a memory-optimized, schema-only table to get better performance. - -For an example of how a relational model can be effectively used as a key-value pair solution in practice, see [How bwin is using SQL Server 2016 In-Memory OLTP to achieve unprecedented performance and scale](/archive/blogs/sqlcat/how-bwin-is-using-sql-server-2016-in-memory-oltp-to-achieve-unprecedented-performance-and-scale). In this case study, bwin used a relational model for its ASP.NET caching solution to achieve 1.2 million batches per second. - -## Next steps - -Multi-model capabilities are core SQL Server database engine features that are shared among Azure SQL products. 
To learn more about these features, see these articles: - -- [Graph processing with SQL Server and Azure SQL Database](/sql/relational-databases/graphs/sql-graph-overview) -- [JSON data in SQL Server](/sql/relational-databases/json/json-data-sql-server) -- [Spatial data in SQL Server](/sql/relational-databases/spatial/spatial-data-sql-server) -- [XML data in SQL Server](/sql/relational-databases/xml/xml-data-sql-server) -- [Key-value store performance in Azure SQL Database](https://devblogs.microsoft.com/azure-sql/azure-sql-database-as-a-key-value-store/) diff --git a/articles/azure-sql/performance-improve-use-batching.md b/articles/azure-sql/performance-improve-use-batching.md deleted file mode 100644 index 351dd26ac0464..0000000000000 --- a/articles/azure-sql/performance-improve-use-batching.md +++ /dev/null @@ -1,665 +0,0 @@ ---- -title: How to use batching to improve application performance -description: The topic provides evidence that batching database operations greatly improves the speed and scalability of your Azure SQL Database and Azure SQL Managed Instance applications. Although these batching techniques work for any SQL database, the focus of the article is on Azure. -services: sql-database -ms.service: sql-database -ms.subservice: performance -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: how-to -author: WilliamDAssafMSFT -ms.author: wiassaf -ms.reviewer: mathoma, kendralittle -ms.date: 06/22/2021 ---- -# How to use batching to improve Azure SQL Database and Azure SQL Managed Instance application performance -[!INCLUDE[appliesto-sqldb-sqlmi](includes/appliesto-sqldb-sqlmi.md)] - -Batching operations to Azure SQL Database and Azure SQL Managed Instance significantly improves the performance and scalability of your applications. In order to understand the benefits, the first part of this article covers some sample test results that compare sequential and batched requests to a database in Azure SQL Database or Azure SQL Managed Instance. 
The remainder of the article shows the techniques, scenarios, and considerations to help you to use batching successfully in your Azure applications. - -## Why is batching important for Azure SQL Database and Azure SQL Managed Instance? - -Batching calls to a remote service is a well-known strategy for increasing performance and scalability. There are fixed processing costs to any interactions with a remote service, such as serialization, network transfer, and deserialization. Packaging many separate transactions into a single batch minimizes these costs. - -In this article, we want to examine various batching strategies and scenarios. Although these strategies are also important for on-premises applications that use SQL Server, there are several reasons for highlighting the use of batching for Azure SQL Database and Azure SQL Managed Instance: - -* There is potentially greater network latency in accessing Azure SQL Database and Azure SQL Managed Instance, especially if you are accessing Azure SQL Database or Azure SQL Managed Instance from outside the same Microsoft Azure datacenter. -* The multitenant characteristics of Azure SQL Database and Azure SQL Managed Instance means that the efficiency of the data access layer correlates to the overall scalability of the database. In response to usage in excess of predefined quotas, Azure SQL Database and Azure SQL Managed Instance can reduce throughput or respond with throttling exceptions. Efficiencies, such as batching, enable you to do more work before reaching these limits. -* Batching is also effective for architectures that use multiple databases (sharding). The efficiency of your interaction with each database unit is still a key factor in your overall scalability. - -One of the benefits of using Azure SQL Database or Azure SQL Managed Instance is that you don't have to manage the servers that host the database. 
However, this managed infrastructure also means that you have to think differently about database optimizations. You can no longer look to improve the database hardware or network infrastructure. Microsoft Azure controls those environments. The main area that you can control is how your application interacts with Azure SQL Database and Azure SQL Managed Instance. Batching is one of these optimizations. - -The first part of this article examines various batching techniques for .NET applications that use Azure SQL Database or Azure SQL Managed Instance. The last two sections cover batching guidelines and scenarios. - -## Batching strategies - -### Note about timing results in this article - -> [!NOTE] -> Results are not benchmarks but are meant to show **relative performance**. Timings are based on an average of at least 10 test runs. Operations are inserts into an empty table. These tests were measured pre-V12, and they do not necessarily correspond to throughput that you might experience in a V12 database using the new [DTU service tiers](database/service-tiers-dtu.md) or [vCore service tiers](database/service-tiers-vcore.md). The relative benefit of the batching technique should be similar. - -### Transactions - -It seems strange to begin a review of batching by discussing transactions. But the use of client-side transactions has a subtle server-side batching effect that improves performance. And transactions can be added with only a few lines of code, so they provide a fast way to improve performance of sequential operations. - -Consider the following C# code that contains a sequence of insert and update operations on a simple table. 
- -```csharp -List<string> dbOperations = new List<string>(); -dbOperations.Add("update MyTable set mytext = 'updated text' where id = 1"); -dbOperations.Add("update MyTable set mytext = 'updated text' where id = 2"); -dbOperations.Add("update MyTable set mytext = 'updated text' where id = 3"); -dbOperations.Add("insert MyTable values ('new value',1)"); -dbOperations.Add("insert MyTable values ('new value',2)"); -dbOperations.Add("insert MyTable values ('new value',3)"); -``` - -The following ADO.NET code sequentially performs these operations. - -```csharp -using (SqlConnection conn = new SqlConnection(CloudConfigurationManager.GetSetting("Sql.ConnectionString"))) -{ - conn.Open(); - - foreach(string commandString in dbOperations) - { - SqlCommand cmd = new SqlCommand(commandString, conn); - cmd.ExecuteNonQuery(); - } -} -``` - -The best way to optimize this code is to implement some form of client-side batching of these calls. But there is a simple way to increase the performance of this code by simply wrapping the sequence of calls in a transaction. Here is the same code that uses a transaction. - -```csharp -using (SqlConnection conn = new SqlConnection(CloudConfigurationManager.GetSetting("Sql.ConnectionString"))) -{ - conn.Open(); - SqlTransaction transaction = conn.BeginTransaction(); - - foreach (string commandString in dbOperations) - { - SqlCommand cmd = new SqlCommand(commandString, conn, transaction); - cmd.ExecuteNonQuery(); - } - - transaction.Commit(); -} -``` - -Transactions are actually being used in both of these examples. In the first example, each individual call is an implicit transaction. In the second example, an explicit transaction wraps all of the calls. Per the documentation for the [write-ahead transaction log](/sql/relational-databases/sql-server-transaction-log-architecture-and-management-guide?view=sql-server-ver15&preserve-view=true#WAL), log records are flushed to the disk when the transaction commits. 
So by including more calls in a transaction, the write to the transaction log can delay until the transaction is committed. In effect, you are enabling batching for the writes to the server's transaction log. - -The following table shows some ad hoc testing results. The tests performed the same sequential inserts with and without transactions. For more perspective, the first set of tests ran remotely from a laptop to the database in Microsoft Azure. The second set of tests ran from a cloud service and database that both resided within the same Microsoft Azure datacenter (West US). The following table shows the duration in milliseconds of sequential inserts with and without transactions. - -**On-premises to Azure**: - -| Operations | No transaction (ms) | Transaction (ms) | -| --- | --- | --- | -| 1 |130 |402 | -| 10 |1208 |1226 | -| 100 |12662 |10395 | -| 1000 |128852 |102917 | - -**Azure to Azure (same datacenter)**: - -| Operations | No transaction (ms) | Transaction (ms) | -| --- | --- | --- | -| 1 |21 |26 | -| 10 |220 |56 | -| 100 |2145 |341 | -| 1000 |21479 |2756 | - -> [!NOTE] -> Results are not benchmarks. See the [note about timing results in this article](#note-about-timing-results-in-this-article). - -Based on the previous test results, wrapping a single operation in a transaction actually decreases performance. But as you increase the number of operations within a single transaction, the performance improvement becomes more marked. The performance difference is also more noticeable when all operations occur within the Microsoft Azure datacenter. The increased latency of using Azure SQL Database or Azure SQL Managed Instance from outside the Microsoft Azure datacenter overshadows the performance gain of using transactions. - -Although the use of transactions can increase performance, continue to [observe best practices for transactions and connections](/previous-versions/sql/sql-server-2008-r2/ms187484(v=sql.105)). 
Keep the transaction as short as possible, and close the database connection after the work completes. The using statement in the previous example assures that the connection is closed when the subsequent code block completes. - -The previous example demonstrates that you can add a local transaction to any ADO.NET code with two lines. Transactions offer a quick way to improve the performance of code that makes sequential insert, update, and delete operations. However, for the fastest performance, consider changing the code further to take advantage of client-side batching, such as table-valued parameters. - -For more information about transactions in ADO.NET, see [Local Transactions in ADO.NET](/dotnet/framework/data/adonet/local-transactions). - -### Table-valued parameters - -Table-valued parameters support user-defined table types as parameters in Transact-SQL statements, stored procedures, and functions. This client-side batching technique allows you to send multiple rows of data within the table-valued parameter. To use table-valued parameters, first define a table type. The following Transact-SQL statement creates a table type named **MyTableType**. - -```sql - CREATE TYPE MyTableType AS TABLE - ( mytext TEXT, - num INT ); -``` - -In code, you create a **DataTable** with the exact same names and types of the table type. Pass this **DataTable** in a parameter in a text query or stored procedure call. The following example shows this technique: - -```csharp -using (SqlConnection connection = new SqlConnection(CloudConfigurationManager.GetSetting("Sql.ConnectionString"))) -{ - connection.Open(); - - DataTable table = new DataTable(); - // Add columns and rows. The following is a simple example. 
- table.Columns.Add("mytext", typeof(string)); - table.Columns.Add("num", typeof(int)); - for (var i = 0; i < 10; i++) - { - table.Rows.Add(DateTime.Now.ToString(), DateTime.Now.Millisecond); - } - - SqlCommand cmd = new SqlCommand( - "INSERT INTO MyTable(mytext, num) SELECT mytext, num FROM @TestTvp", - connection); - - cmd.Parameters.Add( - new SqlParameter() - { - ParameterName = "@TestTvp", - SqlDbType = SqlDbType.Structured, - TypeName = "MyTableType", - Value = table, - }); - - cmd.ExecuteNonQuery(); -} -``` - -In the previous example, the **SqlCommand** object inserts rows from a table-valued parameter, **\@TestTvp**. The previously created **DataTable** object is assigned to this parameter with the **SqlCommand.Parameters.Add** method. Batching the inserts in one call significantly increases the performance over sequential inserts. - -To improve the previous example further, use a stored procedure instead of a text-based command. The following Transact-SQL command creates a stored procedure that takes the **MyTableType** table-valued parameter. - -```sql -CREATE PROCEDURE [dbo].[sp_InsertRows] -@TestTvp as MyTableType READONLY -AS -BEGIN -INSERT INTO MyTable(mytext, num) -SELECT mytext, num FROM @TestTvp -END -GO -``` - -Then change the **SqlCommand** object declaration in the previous code example to the following. - -```csharp -SqlCommand cmd = new SqlCommand("sp_InsertRows", connection); -cmd.CommandType = CommandType.StoredProcedure; -``` - -In most cases, table-valued parameters have equivalent or better performance than other batching techniques. Table-valued parameters are often preferable, because they are more flexible than other options. For example, other techniques, such as SQL bulk copy, only permit the insertion of new rows. But with table-valued parameters, you can use logic in the stored procedure to determine which rows are updates and which are inserts. 
The table type can also be modified to contain an "Operation" column that indicates whether the specified row should be inserted, updated, or deleted. - -The following table shows ad hoc test results for the use of table-valued parameters in milliseconds. - -| Operations | On-premises to Azure (ms) | Azure same datacenter (ms) | -| --- | --- | --- | -| 1 |124 |32 | -| 10 |131 |25 | -| 100 |338 |51 | -| 1000 |2615 |382 | -| 10000 |23830 |3586 | - -> [!NOTE] -> Results are not benchmarks. See the [note about timing results in this article](#note-about-timing-results-in-this-article). - -The performance gain from batching is immediately apparent. In the previous sequential test, 1000 operations took 129 seconds outside the datacenter and 21 seconds from within the datacenter. But with table-valued parameters, 1000 operations take only 2.6 seconds outside the datacenter and 0.4 seconds within the datacenter. - -For more information on table-valued parameters, see [Table-Valued Parameters](/sql/relational-databases/tables/use-table-valued-parameters-database-engine). - -### SQL bulk copy - -SQL bulk copy is another way to insert large amounts of data into a target database. .NET applications can use the **SqlBulkCopy** class to perform bulk insert operations. **SqlBulkCopy** is similar in function to the command-line tool, **Bcp.exe**, or the Transact-SQL statement, **BULK INSERT**. The following code example shows how to bulk copy the rows in the source **DataTable**, table, to the destination table, MyTable. 
- -```csharp -using (SqlConnection connection = new SqlConnection(CloudConfigurationManager.GetSetting("Sql.ConnectionString"))) -{ - connection.Open(); - - using (SqlBulkCopy bulkCopy = new SqlBulkCopy(connection)) - { - bulkCopy.DestinationTableName = "MyTable"; - bulkCopy.ColumnMappings.Add("mytext", "mytext"); - bulkCopy.ColumnMappings.Add("num", "num"); - bulkCopy.WriteToServer(table); - } -} -``` - -There are some cases where bulk copy is preferred over table-valued parameters. See the comparison table of Table-Valued parameters versus BULK INSERT operations in the article [Table-Valued Parameters](/sql/relational-databases/tables/use-table-valued-parameters-database-engine). - -The following ad hoc test results show the performance of batching with **SqlBulkCopy** in milliseconds. - -| Operations | On-premises to Azure (ms) | Azure same datacenter (ms) | -| --- | --- | --- | -| 1 |433 |57 | -| 10 |441 |32 | -| 100 |636 |53 | -| 1000 |2535 |341 | -| 10000 |21605 |2737 | - -> [!NOTE] -> Results are not benchmarks. See the [note about timing results in this article](#note-about-timing-results-in-this-article). - -In smaller batch sizes, the use table-valued parameters outperformed the **SqlBulkCopy** class. However, **SqlBulkCopy** performed 12-31% faster than table-valued parameters for the tests of 1,000 and 10,000 rows. Like table-valued parameters, **SqlBulkCopy** is a good option for batched inserts, especially when compared to the performance of non-batched operations. - -For more information on bulk copy in ADO.NET, see [Bulk Copy Operations](/dotnet/framework/data/adonet/sql/bulk-copy-operations-in-sql-server). - -### Multiple-row parameterized INSERT statements - -One alternative for small batches is to construct a large parameterized INSERT statement that inserts multiple rows. The following code example demonstrates this technique. 
- -```csharp -using (SqlConnection connection = new SqlConnection(CloudConfigurationManager.GetSetting("Sql.ConnectionString"))) -{ - connection.Open(); - - string insertCommand = "INSERT INTO [MyTable] ( mytext, num ) " + - "VALUES (@p1, @p2), (@p3, @p4), (@p5, @p6), (@p7, @p8), (@p9, @p10)"; - - SqlCommand cmd = new SqlCommand(insertCommand, connection); - - for (int i = 1; i <= 10; i += 2) - { - cmd.Parameters.Add(new SqlParameter("@p" + i.ToString(), "test")); - cmd.Parameters.Add(new SqlParameter("@p" + (i+1).ToString(), i)); - } - - cmd.ExecuteNonQuery(); -} -``` - -This example is meant to show the basic concept. A more realistic scenario would loop through the required entities to construct the query string and the command parameters simultaneously. You are limited to a total of 2100 query parameters, so this limits the total number of rows that can be processed in this manner. - -The following ad hoc test results show the performance of this type of insert statement in milliseconds. - -| Operations | Table-valued parameters (ms) | Single-statement INSERT (ms) | -| --- | --- | --- | -| 1 |32 |20 | -| 10 |30 |25 | -| 100 |33 |51 | - -> [!NOTE] -> Results are not benchmarks. See the [note about timing results in this article](#note-about-timing-results-in-this-article). - -This approach can be slightly faster for batches that are less than 100 rows. Although the improvement is small, this technique is another option that might work well in your specific application scenario. - -### DataAdapter - -The **DataAdapter** class allows you to modify a **DataSet** object and then submit the changes as INSERT, UPDATE, and DELETE operations. If you are using the **DataAdapter** in this manner, it is important to note that separate calls are made for each distinct operation. To improve performance, use the **UpdateBatchSize** property to set the number of operations that should be batched at a time. 
For more information, see [Performing Batch Operations Using DataAdapters](/dotnet/framework/data/adonet/performing-batch-operations-using-dataadapters). - -### Entity Framework - -[Entity Framework Core](/ef/efcore-and-ef6/#saving-data) supports batching. - -### XML - -For completeness, we feel that it is important to talk about XML as a batching strategy. However, the use of XML has no advantages over other methods and several disadvantages. The approach is similar to table-valued parameters, but an XML file or string is passed to a stored procedure instead of a user-defined table. The stored procedure parses the commands in the stored procedure. - -There are several disadvantages to this approach: - -* Working with XML can be cumbersome and error prone. -* Parsing the XML on the database can be CPU-intensive. -* In most cases, this method is slower than table-valued parameters. - -For these reasons, the use of XML for batch queries is not recommended. - -## Batching considerations - -The following sections provide more guidance for the use of batching in Azure SQL Database and Azure SQL Managed Instance applications. - -### Tradeoffs - -Depending on your architecture, batching can involve a tradeoff between performance and resiliency. For example, consider the scenario where your role unexpectedly goes down. If you lose one row of data, the impact is smaller than the impact of losing a large batch of unsubmitted rows. There is a greater risk when you buffer rows before sending them to the database in a specified time window. - -Because of this tradeoff, evaluate the type of operations that you batch. Batch more aggressively (larger batches and longer time windows) with data that is less critical. - -### Batch size - -In our tests, there was typically no advantage to breaking large batches into smaller chunks. In fact, this subdivision often resulted in slower performance than submitting a single large batch. 
For example, consider a scenario where you want to insert 1000 rows. The following table shows how long it takes to use table-valued parameters to insert 1000 rows when divided into smaller batches. - -| Batch size | Iterations | Table-valued parameters (ms) | -| --- | --- | --- | -| 1000 |1 |347 | -| 500 |2 |355 | -| 100 |10 |465 | -| 50 |20 |630 | - -> [!NOTE] -> Results are not benchmarks. See the [note about timing results in this article](#note-about-timing-results-in-this-article). - -You can see that the best performance for 1000 rows is to submit them all at once. In other tests (not shown here), there was a small performance gain to break a 10000-row batch into two batches of 5000. But the table schema for these tests is relatively simple, so you should perform tests on your specific data and batch sizes to verify these findings. - -Another factor to consider is that if the total batch becomes too large, Azure SQL Database or Azure SQL Managed Instance might throttle and refuse to commit the batch. For the best results, test your specific scenario to determine if there is an ideal batch size. Make the batch size configurable at runtime to enable quick adjustments based on performance or errors. - -Finally, balance the size of the batch with the risks associated with batching. If there are transient errors or the role fails, consider the consequences of retrying the operation or of losing the data in the batch. - -### Parallel processing - -What if you took the approach of reducing the batch size but used multiple threads to execute the work? Again, our tests showed that several smaller multithreaded batches typically performed worse than a single larger batch. The following test attempts to insert 1000 rows in one or more parallel batches. This test shows how more simultaneous batches actually decreased performance. 
- -| Batch size [Iterations] | Two threads (ms) | Four threads (ms) | Six threads (ms) | -| --- | --- | --- | --- | -| 1000 [1] |277 |315 |266 | -| 500 [2] |548 |278 |256 | -| 250 [4] |405 |329 |265 | -| 100 [10] |488 |439 |391 | - -> [!NOTE] -> Results are not benchmarks. See the [note about timing results in this article](#note-about-timing-results-in-this-article). - -There are several potential reasons for the degradation in performance due to parallelism: - -* There are multiple simultaneous network calls instead of one. -* Multiple operations against a single table can result in contention and blocking. -* There are overheads associated with multithreading. -* The expense of opening multiple connections outweighs the benefit of parallel processing. - -If you target different tables or databases, it is possible to see some performance gain with this strategy. Database sharding or federations would be a scenario for this approach. Sharding uses multiple databases and routes different data to each database. If each small batch is going to a different database, then performing the operations in parallel can be more efficient. However, the performance gain is not significant enough to use as the basis for a decision to use database sharding in your solution. - -In some designs, parallel execution of smaller batches can result in improved throughput of requests in a system under load. In this case, even though it is quicker to process a single larger batch, processing multiple batches in parallel might be more efficient. - -If you do use parallel execution, consider controlling the maximum number of worker threads. A smaller number might result in less contention and a faster execution time. Also, consider the additional load that this places on the target database both in connections and transactions. - -### Related performance factors - -Typical guidance on database performance also affects batching. 
For example, insert performance is reduced for tables that have a large primary key or many nonclustered indexes. - -If table-valued parameters use a stored procedure, you can use the command **SET NOCOUNT ON** at the beginning of the procedure. This statement suppresses the return of the count of the affected rows in the procedure. However, in our tests, the use of **SET NOCOUNT ON** either had no effect or decreased performance. The test stored procedure was simple with a single **INSERT** command from the table-valued parameter. It is possible that more complex stored procedures would benefit from this statement. But don't assume that adding **SET NOCOUNT ON** to your stored procedure automatically improves performance. To understand the effect, test your stored procedure with and without the **SET NOCOUNT ON** statement. - -## Batching scenarios - -The following sections describe how to use table-valued parameters in three application scenarios. The first scenario shows how buffering and batching can work together. The second scenario improves performance by performing master-detail operations in a single stored procedure call. The final scenario shows how to use table-valued parameters in an "UPSERT" operation. - -### Buffering - -Although there are some scenarios that are obvious candidates for batching, there are many scenarios that could take advantage of batching by delayed processing. However, delayed processing also carries a greater risk that the data is lost in the event of an unexpected failure. It is important to understand this risk and consider the consequences. - -For example, consider a web application that tracks the navigation history of each user. On each page request, the application could make a database call to record the user's page view. But higher performance and scalability can be achieved by buffering the users' navigation activities and then sending this data to the database in batches. 
You can trigger the database update by elapsed time and/or buffer size. For example, a rule could specify that the batch should be processed after 20 seconds or when the buffer reaches 1000 items. - -The following code example uses [Reactive Extensions - Rx](/previous-versions/dotnet/reactive-extensions/hh242985(v=vs.103)) to process buffered events raised by a monitoring class. When the buffer fills or a timeout is reached, the batch of user data is sent to the database with a table-valued parameter. - -The following NavHistoryData class models the user navigation details. It contains basic information such as the user identifier, the URL accessed, and the access time. - -```csharp -public class NavHistoryData -{ - public NavHistoryData(int userId, string url, DateTime accessTime) - { UserId = userId; URL = url; AccessTime = accessTime; } - public int UserId { get; set; } - public string URL { get; set; } - public DateTime AccessTime { get; set; } -} -``` - -The NavHistoryDataMonitor class is responsible for buffering the user navigation data to the database. It contains a method, RecordUserNavigationEntry, which responds by raising an **OnAdded** event. The following code shows the constructor logic that uses Rx to create an observable collection based on the event. It then subscribes to this observable collection with the Buffer method. The overload specifies that the buffer should be sent every 20 seconds or 1000 entries. - -```csharp -public NavHistoryDataMonitor() -{ - var observableData = - Observable.FromEventPattern(this, "OnAdded"); - - observableData.Buffer(TimeSpan.FromSeconds(20), 1000).Subscribe(Handler); -} -``` - -The handler converts all of the buffered items into a table-valued type and then passes this type to a stored procedure that processes the batch. The following code shows the complete definition for both the NavHistoryDataEventArgs and the NavHistoryDataMonitor classes. 
- -```csharp -public class NavHistoryDataEventArgs : System.EventArgs -{ - public NavHistoryDataEventArgs(NavHistoryData data) { Data = data; } - public NavHistoryData Data { get; set; } -} - -public class NavHistoryDataMonitor -{ - public event EventHandler<NavHistoryDataEventArgs> OnAdded; - - public NavHistoryDataMonitor() - { - var observableData = - Observable.FromEventPattern<NavHistoryDataEventArgs>(this, "OnAdded"); - - observableData.Buffer(TimeSpan.FromSeconds(20), 1000).Subscribe(Handler); - } -``` - -The handler converts all of the buffered items into a table-valued type and then passes this type to a stored procedure that processes the batch. The following code shows the complete definition for both the NavHistoryDataEventArgs and the NavHistoryDataMonitor classes. - -```csharp - public void RecordUserNavigationEntry(NavHistoryData data) - { - if (OnAdded != null) - OnAdded(this, new NavHistoryDataEventArgs(data)); - } - - protected void Handler(IList<EventPattern<NavHistoryDataEventArgs>> items) - { - DataTable navHistoryBatch = new DataTable("NavigationHistoryBatch"); - navHistoryBatch.Columns.Add("UserId", typeof(int)); - navHistoryBatch.Columns.Add("URL", typeof(string)); - navHistoryBatch.Columns.Add("AccessTime", typeof(DateTime)); - foreach (EventPattern<NavHistoryDataEventArgs> item in items) - { - NavHistoryData data = item.EventArgs.Data; - navHistoryBatch.Rows.Add(data.UserId, data.URL, data.AccessTime); - } - - using (SqlConnection connection = new SqlConnection(CloudConfigurationManager.GetSetting("Sql.ConnectionString"))) - { - connection.Open(); - - SqlCommand cmd = new SqlCommand("sp_RecordUserNavigation", connection); - cmd.CommandType = CommandType.StoredProcedure; - - cmd.Parameters.Add( - new SqlParameter() - { - ParameterName = "@NavHistoryBatch", - SqlDbType = SqlDbType.Structured, - TypeName = "NavigationHistoryTableType", - Value = navHistoryBatch, - }); - - cmd.ExecuteNonQuery(); - } - } -} -``` - -To use this buffering class, the application creates a static NavHistoryDataMonitor object. 
Each time a user accesses a page, the application calls the NavHistoryDataMonitor.RecordUserNavigationEntry method. The buffering logic proceeds to take care of sending these entries to the database in batches. - -### Master detail - -Table-valued parameters are useful for simple INSERT scenarios. However, it can be more challenging to batch inserts that involve more than one table. The "master/detail" scenario is a good example. The master table identifies the primary entity. One or more detail tables store more data about the entity. In this scenario, foreign key relationships enforce the relationship of details to a unique master entity. Consider a simplified version of a PurchaseOrder table and its associated OrderDetail table. The following Transact-SQL creates the PurchaseOrder table with four columns: OrderID, OrderDate, CustomerID, and Status. - -```sql -CREATE TABLE [dbo].[PurchaseOrder]( -[OrderID] [int] IDENTITY(1,1) NOT NULL, -[OrderDate] [datetime] NOT NULL, -[CustomerID] [int] NOT NULL, -[Status] [nvarchar](50) NOT NULL, -CONSTRAINT [PrimaryKey_PurchaseOrder] -PRIMARY KEY CLUSTERED ( [OrderID] ASC )) -``` - -Each order contains one or more product purchases. This information is captured in the PurchaseOrderDetail table. The following Transact-SQL creates the PurchaseOrderDetail table with five columns: OrderID, OrderDetailID, ProductID, UnitPrice, and OrderQty. - -```sql -CREATE TABLE [dbo].[PurchaseOrderDetail]( -[OrderID] [int] NOT NULL, -[OrderDetailID] [int] IDENTITY(1,1) NOT NULL, -[ProductID] [int] NOT NULL, -[UnitPrice] [money] NULL, -[OrderQty] [smallint] NULL, -CONSTRAINT [PrimaryKey_PurchaseOrderDetail] PRIMARY KEY CLUSTERED -( [OrderID] ASC, [OrderDetailID] ASC )) -``` - -The OrderID column in the PurchaseOrderDetail table must reference an order from the PurchaseOrder table. The following definition of a foreign key enforces this constraint. 
- -```sql -ALTER TABLE [dbo].[PurchaseOrderDetail] WITH CHECK ADD -CONSTRAINT [FK_OrderID_PurchaseOrder] FOREIGN KEY([OrderID]) -REFERENCES [dbo].[PurchaseOrder] ([OrderID]) -``` - -In order to use table-valued parameters, you must have one user-defined table type for each target table. - -```sql -CREATE TYPE PurchaseOrderTableType AS TABLE -( OrderID INT, - OrderDate DATETIME, - CustomerID INT, - Status NVARCHAR(50) ); -GO - -CREATE TYPE PurchaseOrderDetailTableType AS TABLE -( OrderID INT, - ProductID INT, - UnitPrice MONEY, - OrderQty SMALLINT ); -GO -``` - -Then define a stored procedure that accepts tables of these types. This procedure allows an application to locally batch a set of orders and order details in a single call. The following Transact-SQL provides the complete stored procedure declaration for this purchase order example. - -```sql -CREATE PROCEDURE sp_InsertOrdersBatch ( -@orders as PurchaseOrderTableType READONLY, -@details as PurchaseOrderDetailTableType READONLY ) -AS -SET NOCOUNT ON; - --- Table that connects the order identifiers in the @orders --- table with the actual order identifiers in the PurchaseOrder table -DECLARE @IdentityLink AS TABLE ( -SubmittedKey int, -ActualKey int, -RowNumber int identity(1,1) -); - --- Add new orders to the PurchaseOrder table, storing the actual --- order identifiers in the @IdentityLink table -INSERT INTO PurchaseOrder ([OrderDate], [CustomerID], [Status]) -OUTPUT inserted.OrderID INTO @IdentityLink (ActualKey) -SELECT [OrderDate], [CustomerID], [Status] FROM @orders ORDER BY OrderID; - --- Match the passed-in order identifiers with the actual identifiers --- and complete the @IdentityLink table for use with inserting the details -WITH OrderedRows As ( -SELECT OrderID, ROW_NUMBER () OVER (ORDER BY OrderID) As RowNumber -FROM @orders -) -UPDATE @IdentityLink SET SubmittedKey = M.OrderID -FROM @IdentityLink L JOIN OrderedRows M ON L.RowNumber = M.RowNumber; - --- Insert the order details into the 
PurchaseOrderDetail table, --- using the actual order identifiers of the master table, PurchaseOrder -INSERT INTO PurchaseOrderDetail ( -[OrderID], -[ProductID], -[UnitPrice], -[OrderQty] ) -SELECT L.ActualKey, D.ProductID, D.UnitPrice, D.OrderQty -FROM @details D -JOIN @IdentityLink L ON L.SubmittedKey = D.OrderID; -GO -``` - -In this example, the locally defined @IdentityLink table stores the actual OrderID values from the newly inserted rows. These order identifiers are different from the temporary OrderID values in the @orders and @details table-valued parameters. For this reason, the @IdentityLink table then connects the OrderID values from the @orders parameter to the real OrderID values for the new rows in the PurchaseOrder table. After this step, the @IdentityLink table can facilitate inserting the order details with the actual OrderID that satisfies the foreign key constraint. - -This stored procedure can be used from code or from other Transact-SQL calls. See the table-valued parameters section of this paper for a code example. The following Transact-SQL shows how to call the sp_InsertOrdersBatch. - -```sql -declare @orders as PurchaseOrderTableType -declare @details as PurchaseOrderDetailTableType - -INSERT @orders -([OrderID], [OrderDate], [CustomerID], [Status]) -VALUES(1, '1/1/2013', 1125, 'Complete'), -(2, '1/13/2013', 348, 'Processing'), -(3, '1/12/2013', 2504, 'Shipped') - -INSERT @details -([OrderID], [ProductID], [UnitPrice], [OrderQty]) -VALUES(1, 10, $11.50, 1), -(1, 12, $1.58, 1), -(2, 23, $2.57, 2), -(3, 4, $10.00, 1) - -exec sp_InsertOrdersBatch @orders, @details -``` - -This solution allows each batch to use a set of OrderID values that begin at 1. These temporary OrderID values describe the relationships in the batch, but the actual OrderID values are determined at the time of the insert operation. You can run the same statements in the previous example repeatedly and generate unique orders in the database. 
For this reason, consider adding more code or database logic that prevents duplicate orders when using this batching technique. - -This example demonstrates that even more complex database operations, such as master-detail operations, can be batched using table-valued parameters. - -### UPSERT - -Another batching scenario involves simultaneously updating existing rows and inserting new rows. This operation is sometimes referred to as an "UPSERT" (update + insert) operation. Rather than making separate calls to INSERT and UPDATE, the MERGE statement can be a suitable replacement. The MERGE statement can perform both insert and update operations in a single call. The MERGE statement locking mechanics work differently from separate INSERT and UPDATE statements. Test your specific workloads before deploying to production. - -Table-valued parameters can be used with the MERGE statement to perform updates and inserts. For example, consider a simplified Employee table that contains the following columns: EmployeeID, FirstName, LastName, SocialSecurityNumber: - -```sql -CREATE TABLE [dbo].[Employee]( -[EmployeeID] [int] IDENTITY(1,1) NOT NULL, -[FirstName] [nvarchar](50) NOT NULL, -[LastName] [nvarchar](50) NOT NULL, -[SocialSecurityNumber] [nvarchar](50) NOT NULL, -CONSTRAINT [PrimaryKey_Employee] PRIMARY KEY CLUSTERED -([EmployeeID] ASC )) -``` - -In this example, you can use the fact that the SocialSecurityNumber is unique to perform a MERGE of multiple employees. First, create the user-defined table type: - -```sql -CREATE TYPE EmployeeTableType AS TABLE -( Employee_ID INT, - FirstName NVARCHAR(50), - LastName NVARCHAR(50), - SocialSecurityNumber NVARCHAR(50) ); -GO -``` - -Next, create a stored procedure or write code that uses the MERGE statement to perform the update and insert. The following example uses the MERGE statement on a table-valued parameter, @employees, of type EmployeeTableType. The contents of the @employees table are not shown here. 
- -```sql -MERGE Employee AS target -USING (SELECT [FirstName], [LastName], [SocialSecurityNumber] FROM @employees) -AS source ([FirstName], [LastName], [SocialSecurityNumber]) -ON (target.[SocialSecurityNumber] = source.[SocialSecurityNumber]) -WHEN MATCHED THEN -UPDATE SET -target.FirstName = source.FirstName, -target.LastName = source.LastName -WHEN NOT MATCHED THEN - INSERT ([FirstName], [LastName], [SocialSecurityNumber]) - VALUES (source.[FirstName], source.[LastName], source.[SocialSecurityNumber]); -``` - -For more information, see the documentation and examples for the MERGE statement. Although the same work could be performed in a multiple-step stored procedure call with separate INSERT and UPDATE operations, the MERGE statement is more efficient. Database code can also construct Transact-SQL calls that use the MERGE statement directly without requiring two database calls for INSERT and UPDATE. - -## Recommendation summary - -The following list provides a summary of the batching recommendations discussed in this article: - -* Use buffering and batching to increase the performance and scalability of Azure SQL Database and Azure SQL Managed Instance applications. -* Understand the tradeoffs between batching/buffering and resiliency. During a role failure, the risk of losing an unprocessed batch of business-critical data might outweigh the performance benefit of batching. -* Attempt to keep all calls to the database within a single datacenter to reduce latency. -* If you choose a single batching technique, table-valued parameters offer the best performance and flexibility. -* For the fastest insert performance, follow these general guidelines but test your scenario: - * For < 100 rows, use a single parameterized INSERT command. - * For < 1000 rows, use table-valued parameters. - * For >= 1000 rows, use SqlBulkCopy. 
-* For update and delete operations, use table-valued parameters with stored procedure logic that determines the correct operation on each row in the table parameter. -* Batch size guidelines: - * Use the largest batch sizes that make sense for your application and business requirements. - * Balance the performance gain of large batches with the risks of temporary or catastrophic failures. What is the consequence of retries or loss of the data in the batch? - * Test the largest batch size to verify that Azure SQL Database or Azure SQL Managed Instance does not reject it. - * Create configuration settings that control batching, such as the batch size or the buffering time window. These settings provide flexibility. You can change the batching behavior in production without redeploying the cloud service. -* Avoid parallel execution of batches that operate on a single table in one database. If you do choose to divide a single batch across multiple worker threads, run tests to determine the ideal number of threads. After an unspecified threshold, more threads will decrease performance rather than increase it. -* Consider buffering on size and time as a way of implementing batching for more scenarios. - -## Next steps - -This article focused on how database design and coding techniques related to batching can improve your application performance and scalability. But this is just one factor in your overall strategy. For more ways to improve performance and scalability, see [Database performance guidance](database/performance-guidance.md) and [Price and performance considerations for an elastic pool](database/elastic-pool-overview.md). 
\ No newline at end of file diff --git a/articles/azure-sql/public-data-sets.md b/articles/azure-sql/public-data-sets.md deleted file mode 100644 index e3cd208444fe2..0000000000000 --- a/articles/azure-sql/public-data-sets.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Public data sets for Azure analytics -description: Learn about public data sets that you can use to prototype and test Azure analytics services and solutions. -services: sql-database -ms.service: sql-database -ms.subservice: development -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: reference -author: VanMSFT -ms.author: vanto -ms.reviewer: mathoma, kendralittle -ms.date: 10/01/2018 ---- -# Public data sets for testing and prototyping -[!INCLUDE[appliesto-asf](includes/appliesto-asf.md)] - -Browse this list of public data sets for data that you can use to prototype and test storage and analytics services and solutions. - -## U.S. Government and agency data - -| Data source | About the data | About the files | -|---|---|---| -| [US Government data](https://catalog.data.gov/dataset) | Over 250,000 data sets covering agriculture, climate, consumer, ecosystems, education, energy, finance, health, local government, manufacturing, maritime, ocean, public safety, and science and research in the U.S. | Files of various sizes in various formats including HTML, XML, CSV, JSON, Excel, and many others. You can filter available data sets by file format. | -| [US Census data](https://www.census.gov/data.html) | Statistical data about the population of the U.S. | Data sets are in various formats. | -| [Earth science data from NASA](https://earthdata.nasa.gov/) | Over 32,000 data collections covering agriculture, atmosphere, biosphere, climate, cryosphere, human dimensions, hydrosphere, land surface, oceans, sun-earth interactions, and more. | Data sets are in various formats. | -| [Airline flight delays and other transportation data](https://www.transtats.bts.gov/OT_Delay/OT_DelayCause1.asp) | "The U.S. 
Department of Transportation's (DOT) Bureau of Transportation Statistics (BTS) tracks the on-time performance of domestic flights operated by large air carriers. Summary information on the number of on-time, delayed, canceled, and diverted flights appears ... in summary tables posted on this website." | Files are in CSV format. | -| [Traffic fatalities - US Fatality Analysis Reporting System (FARS)](https://www.nhtsa.gov/FARS) | "FARS is a nationwide census providing NHTSA, Congress, and the American public yearly data regarding fatal injuries suffered in motor vehicle traffic crashes." | "Create your own fatality data run online by using the FARS Query System. Or download all FARS data from 1975 to present from the FTP Site." | -| [Toxic chemical data - EPA Toxicity ForeCaster (ToxCast™) data](https://www.epa.gov/chemical-research/toxicity-forecaster-toxcasttm-data) | "EPA's most updated, publicly available high-throughput toxicity data on thousands of chemicals. This data is generated through the EPA's ToxCast research effort." | Data sets are available in various formats including spreadsheets, R packages, and MySQL database files. | -| [Toxic chemical data - NIH Tox21 Data Challenge 2014](https://tripod.nih.gov/tox21) | "The 2014 Tox21 data challenge is designed to help scientists understand the potential of the chemicals and compounds being tested through the Toxicology in the 21st Century initiative to disrupt biological pathways in ways that may result in toxic effects." | Data sets are available in SMILES and SDF formats. The data provides "assay activity data and chemical structures on the Tox21 collection of ~10,000 compounds (Tox21 10K)." | -| [Biotechnology and genome data from the NCBI](https://www.ncbi.nlm.nih.gov/guide/data-software/) | Multiple data sets covering genes, genomes, and proteins. | Data sets are in text, XML, BLAST, and other formats. A BLAST app is available. 
| - -## Other statistical and scientific data - -| Data source | About the data | About the files | -|---|---|---| -| [New York City taxi data](http://www.nyc.gov/html/tlc/html/about/trip_record_data.shtml) | "Taxi trip records include fields capturing pick-up and dropoff dates/times, pick-up and dropoff locations, trip distances, itemized fares, rate types, payment types, and driver-reported passenger counts." | Data sets are in CSV files by month. | -| [Microsoft Research data sets - "Data Science for Research"](https://www.microsoft.com/research/academic-program/data-science-microsoft-research/) | Multiple data sets covering human-computer interaction, audio/video, data mining/information retrieval, geospatial/location, natural language processing, and robotics/computer vision. | Data sets are in various formats, zipped for download. | -| [Open Science Data Cloud data](https://www.opensciencedatacloud.org/projects/) | "The Open Science Data Cloud provides the scientific community with resources for storing, sharing, and analyzing terabyte and petabyte-scale scientific datasets."| Data sets are in various formats. | -| [Global climate data - WorldClim](https://worldclim.org/) | "WorldClim is a set of global climate layers (gridded climate data) with a spatial resolution of about 1 km2. These data can be used for mapping and spatial modeling." | These files contain geospatial data. For more info, see [Data format](https://worldclim.org/formats1). | -| [Data about human society - The GDELT Project](https://www.gdeltproject.org/data.html) | "The GDELT Project is the largest, most comprehensive, and highest resolution open database of human society ever created." | The raw data files are in CSV format. | -| [Advertising click prediction data for machine learning from Criteo](https://labs.criteo.com/2013/12/download-terabyte-click-logs/) | "The largest ever publicly released ML dataset." 
For more info, see [Criteo's 1 TB Click Prediction Dataset](/archive/blogs/machinelearning/now-available-on-azure-ml-criteos-1tb-click-prediction-dataset). | | -| [ClueWeb09 text mining data set from The Lemur Project](https://www.lemurproject.org/clueweb09.php/) | "The ClueWeb09 dataset was created to support research on information retrieval and related human language technologies. It consists of about 1 billion web pages in 10 languages that were collected in January and February 2009." | See [Dataset Information](https://www.lemurproject.org/clueweb09/datasetInformation.php).| - -## Online service data - -| Data source | About the data | About the files | -|---|---|---| -| [GitHub archive](https://www.githubarchive.org/) | "GitHub Archive is a project to record the public GitHub timeline [of events], archive it, and make it easily accessible for further analysis." | Download JSON-encoded event archives in .gz (Gzip) format from a web client. | -| [GitHub activity data from The GHTorrent project](http://ghtorrent.org/) | "The GHTorrent project [is] an effort to create a scalable, queryable, offline mirror of data offered through the GitHub REST API. GHTorrent monitors the GitHub public event time line. For each event, it retrieves its contents and their dependencies, exhaustively." | MySQL database dumps are in CSV format. | -| [Stack Overflow data dump](https://archive.org/details/stackexchange) | "This is an anonymized dump of all user-contributed content on the Stack Exchange network [including Stack Overflow]." | "Each site [such as Stack Overflow] is formatted as a separate archive consisting of XML files zipped via 7-zip using bzip2 compression. Each site archive includes Posts, Users, Votes, Comments, PostHistory, and PostLinks." 
| \ No newline at end of file diff --git a/articles/azure-sql/temporal-tables.md b/articles/azure-sql/temporal-tables.md deleted file mode 100644 index 97eb092d6f2e7..0000000000000 --- a/articles/azure-sql/temporal-tables.md +++ /dev/null @@ -1,197 +0,0 @@ ---- -title: Getting started with temporal tables -description: Learn how to get started with using temporal tables in Azure SQL Database and Azure SQL Managed Instance. -services: sql-database -ms.service: sql-db-mi -ms.subservice: performance -ms.custom: sqldbrb=2 -ms.devlang: -ms.topic: how-to -author: MladjoA -ms.author: mlandzic -ms.reviewer: mathoma, kendralittle -ms.date: 10/18/2021 ---- -# Getting started with temporal tables in Azure SQL Database and Azure SQL Managed Instance -[!INCLUDE[appliesto-sqldb-sqlmi](includes/appliesto-sqldb-sqlmi.md)] - -Temporal tables are a programmability feature of Azure SQL Database and Azure SQL Managed Instance that allows you to track and analyze the full history of changes in your data, without the need for custom coding. Temporal tables keep data closely related to time context so that stored facts can be interpreted as valid only within the specific period. This property of temporal tables allows for efficient time-based analysis and getting insights from data evolution. - -## Temporal scenario - -This article illustrates the steps to utilize temporal tables in an application scenario. Suppose that you want to track user activity on a new website that is being developed from scratch or on an existing website that you want to extend with user activity analytics. In this simplified example, we assume that the number of visited web pages during a period of time is an indicator that needs to be captured and monitored in the website database that is hosted on Azure SQL Database or Azure SQL Managed Instance. The goal of the historical analysis of user activity is to get inputs to redesign website and provide better experience for the visitors. 
- -The database model for this scenario is very simple - user activity metric is represented with a single integer field, **PageVisited**, and is captured along with basic information on the user profile. Additionally, for time-based analysis, you would keep a series of rows for each user, where every row represents the number of pages a particular user visited within a specific period of time. - -![Schema](./media/temporal-tables/AzureTemporal1.png) - -Fortunately, you do not need to put any effort in your app to maintain this activity information. With temporal tables, this process is automated - giving you full flexibility during website design and more time to focus on the data analysis itself. The only thing you have to do is to ensure that `WebSiteInfo` table is configured as [temporal system-versioned](/sql/relational-databases/tables/temporal-tables#what-is-a-system-versioned-temporal-table). The exact steps to utilize temporal tables in this scenario are described below. - -## Step 1: Configure tables as temporal - -Depending on whether you are starting new development or upgrading existing application, you will either create temporal tables or modify existing ones by adding temporal attributes. In general case, your scenario can be a mix of these two options. Perform these action using [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms) (SSMS), [SQL Server Data Tools](/sql/ssdt/download-sql-server-data-tools-ssdt) (SSDT), [Azure Data Studio](/sql/azure-data-studio/download-azure-data-studio), or any other Transact-SQL development tool. - -> [!IMPORTANT] -> It is recommended that you always use the latest version of Management Studio to remain synchronized with updates to Azure SQL Database and Azure SQL Managed Instance. [Update SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms). 
- -### Create new table - -Use context menu item "New System-Versioned Table" in SSMS Object Explorer to open the query editor with a temporal table template script and then use "Specify Values for Template Parameters" (Ctrl+Shift+M) to populate the template: - -![SSMSNewTable](./media/temporal-tables/AzureTemporal2.png) - -In SSDT, choose "Temporal Table (System-Versioned)" template when adding new items to the database project. That will open table designer and enable you to easily specify the table layout: - -![SSDTNewTable](./media/temporal-tables/AzureTemporal3.png) - -You can also create temporal table by specifying the Transact-SQL statements directly, as shown in the example below. Note that the mandatory elements of every temporal table are the PERIOD definition and the SYSTEM_VERSIONING clause with a reference to another user table that will store historical row versions: - -```sql -CREATE TABLE WebsiteUserInfo -( - [UserID] int NOT NULL PRIMARY KEY CLUSTERED - , [UserName] nvarchar(100) NOT NULL - , [PagesVisited] int NOT NULL - , [ValidFrom] datetime2 (0) GENERATED ALWAYS AS ROW START - , [ValidTo] datetime2 (0) GENERATED ALWAYS AS ROW END - , PERIOD FOR SYSTEM_TIME (ValidFrom, ValidTo) - ) - WITH (SYSTEM_VERSIONING = ON (HISTORY_TABLE = dbo.WebsiteUserInfoHistory)); -``` - -When you create system-versioned temporal table, the accompanying history table with the default configuration is automatically created. The default history table contains a clustered B-tree index on the period columns (end, start) with page compression enabled. This configuration is optimal for the majority of scenarios in which temporal tables are used, especially for [data auditing](/sql/relational-databases/tables/temporal-table-usage-scenarios#enabling-system-versioning-on-a-new-table-for-data-audit). 
- -In this particular case, we aim to perform time-based trend analysis over a longer data history and with bigger data sets, so the storage choice for the history table is a clustered columnstore index. A clustered columnstore provides very good compression and performance for analytical queries. Temporal tables give you the flexibility to configure indexes on the current and temporal tables completely independently. - -> [!NOTE] -> Columnstore indexes are available in the Business Critical, General Purpose, and Premium tiers and in the Standard tier, S3 and above. - -The following script shows how default index on history table can be changed to the clustered columnstore: - -```sql -CREATE CLUSTERED COLUMNSTORE INDEX IX_WebsiteUserInfoHistory -ON dbo.WebsiteUserInfoHistory -WITH (DROP_EXISTING = ON); -``` - -Temporal tables are represented in the Object Explorer with the specific icon for easier identification, while its history table is displayed as a child node. - -![AlterTable](./media/temporal-tables/AzureTemporal4.png) - -### Alter existing table to temporal - -Let's cover the alternative scenario in which the WebsiteUserInfo table already exists, but was not designed to keep a history of changes. 
In this case, you can simply extend the existing table to become temporal, as shown in the following example: - -```sql -ALTER TABLE WebsiteUserInfo -ADD - ValidFrom datetime2 (0) GENERATED ALWAYS AS ROW START HIDDEN - constraint DF_ValidFrom DEFAULT DATEADD(SECOND, -1, SYSUTCDATETIME()) - , ValidTo datetime2 (0) GENERATED ALWAYS AS ROW END HIDDEN - constraint DF_ValidTo DEFAULT '9999.12.31 23:59:59.99' - , PERIOD FOR SYSTEM_TIME (ValidFrom, ValidTo); - -ALTER TABLE WebsiteUserInfo -SET (SYSTEM_VERSIONING = ON (HISTORY_TABLE = dbo.WebsiteUserInfoHistory)); -GO - -CREATE CLUSTERED COLUMNSTORE INDEX IX_WebsiteUserInfoHistory -ON dbo.WebsiteUserInfoHistory -WITH (DROP_EXISTING = ON); -``` - -## Step 2: Run your workload regularly - -The main advantage of temporal tables is that you do not need to change or adjust your website in any way to perform change tracking. Once created, temporal tables transparently persist previous row versions every time you perform modifications on your data. - -In order to leverage automatic change tracking for this particular scenario, let's just update column **PagesVisited** every time a user ends their session on the website: - -```sql -UPDATE WebsiteUserInfo SET [PagesVisited] = 5 -WHERE [UserID] = 1; -``` - -It is important to notice that the update query doesn't need to know the exact time when the actual operation occurred nor how historical data will be preserved for future analysis. Both aspects are automatically handled by Azure SQL Database and Azure SQL Managed Instance. The following diagram illustrates how history data is being generated on every update. - -![TemporalArchitecture](./media/temporal-tables/AzureTemporal5.png) - -## Step 3: Perform historical data analysis - -Now when temporal system-versioning is enabled, historical data analysis is just one query away from you. 
In this article, we will provide a few examples that address common analysis scenarios - to learn all details, explore various options introduced with the [FOR SYSTEM_TIME](/sql/relational-databases/tables/temporal-tables#how-do-i-query-temporal-data) clause. - -To see the top 10 users ordered by the number of visited web pages as of an hour ago, run this query: - -```sql -DECLARE @hourAgo datetime2 = DATEADD(HOUR, -1, SYSUTCDATETIME()); -SELECT TOP 10 * FROM dbo.WebsiteUserInfo FOR SYSTEM_TIME AS OF @hourAgo -ORDER BY PagesVisited DESC -``` - -You can easily modify this query to analyze the site visits as of a day ago, a month ago or at any point in the past you wish. - -To perform basic statistical analysis for the previous day, use the following example: - -```sql -DECLARE @twoDaysAgo datetime2 = DATEADD(DAY, -2, SYSUTCDATETIME()); -DECLARE @aDayAgo datetime2 = DATEADD(DAY, -1, SYSUTCDATETIME()); - -SELECT UserID, SUM (PagesVisited) as TotalVisitedPages, AVG (PagesVisited) as AverageVisitedPages, -MAX (PagesVisited) AS MaxVisitedPages, MIN (PagesVisited) AS MinVisitedPages, -STDEV (PagesVisited) as StDevViistedPages -FROM dbo.WebsiteUserInfo -FOR SYSTEM_TIME BETWEEN @twoDaysAgo AND @aDayAgo -GROUP BY UserId -``` - -To search for activities of a specific user, within a period of time, use the CONTAINED IN clause: - -```sql -DECLARE @hourAgo datetime2 = DATEADD(HOUR, -1, SYSUTCDATETIME()); -DECLARE @twoHoursAgo datetime2 = DATEADD(HOUR, -2, SYSUTCDATETIME()); -SELECT * FROM dbo.WebsiteUserInfo -FOR SYSTEM_TIME CONTAINED IN (@twoHoursAgo, @hourAgo) -WHERE [UserID] = 1; -``` - -Graphic visualization is especially convenient for temporal queries as you can show trends and usage patterns in an intuitive way very easily: - -![TemporalGraph](./media/temporal-tables/AzureTemporal6.png) - -## Evolving table schema - -Typically, you will need to change the temporal table schema while you are doing app development. 
For that, simply run regular ALTER TABLE statements and Azure SQL Database or Azure SQL Managed Instance appropriately propagates changes to the history table. The following script shows how you can add additional attribute for tracking: - -```sql -/*Add new column for tracking source IP address*/ -ALTER TABLE dbo.WebsiteUserInfo -ADD [IPAddress] varchar(128) NOT NULL CONSTRAINT DF_Address DEFAULT 'N/A'; -``` - -Similarly, you can change column definition while your workload is active: - -```sql -/*Increase the length of name column*/ -ALTER TABLE dbo.WebsiteUserInfo - ALTER COLUMN UserName nvarchar(256) NOT NULL; -``` - -Finally, you can remove a column that you do not need anymore. - -```sql -/*Drop unnecessary column */ -ALTER TABLE dbo.WebsiteUserInfo - DROP COLUMN TemporaryColumn; -``` - -Alternatively, use latest [SSDT](/sql/ssdt/download-sql-server-data-tools-ssdt) to change temporal table schema while you are connected to the database (online mode) or as part of the database project (offline mode). - -## Controlling retention of historical data - -With system-versioned temporal tables, the history table may increase the database size more than regular tables. A large and ever-growing history table can become an issue both due to pure storage costs as well as imposing a performance tax on temporal querying. Hence, developing a data retention policy for managing data in the history table is an important aspect of planning and managing the lifecycle of every temporal table. 
With Azure SQL Database and Azure SQL Managed Instance, you have the following approaches for managing historical data in the temporal table: - -- [Table Partitioning](/sql/relational-databases/tables/manage-retention-of-historical-data-in-system-versioned-temporal-tables#using-table-partitioning-approach) -- [Custom Cleanup Script](/sql/relational-databases/tables/manage-retention-of-historical-data-in-system-versioned-temporal-tables#using-custom-cleanup-script-approach) - -## Next steps - -- For more information on temporal tables, see check out [Temporal Tables](/sql/relational-databases/tables/temporal-tables). diff --git a/articles/azure-sql/toc.yml b/articles/azure-sql/toc.yml deleted file mode 100644 index 4a18a67c6fe9e..0000000000000 --- a/articles/azure-sql/toc.yml +++ /dev/null @@ -1,1718 +0,0 @@ -- name: Azure SQL Documentation - href: index.yml -- name: Azure SQL - items: - - name: What is Azure SQL? - href: azure-sql-iaas-vs-paas-what-is-overview.md - - name: Migrate to Azure SQL - href: migration-guides/index.yml - -- name: Shared SQL DB & SQL MI docs - items: - - name: Billing options - items: - - name: vCore purchasing model - href: database/service-tiers-vcore.md - - name: Azure Hybrid Benefit - href: azure-hybrid-benefit.md - - name: Reserved capacity - href: database/reserved-capacity-overview.md - - name: Service tiers - items: - - name: General Purpose - href: database/service-tier-general-purpose.md - - name: Business Critical - href: database/service-tier-business-critical.md - - - name: Shared concepts - items: - - name: Feature comparison - href: database/features-comparison.md - - name: Multi-model features - href: multi-model-features.md - - name: In-memory OLTP - href: in-memory-oltp-overview.md - - name: Temporal tables - href: temporal-tables.md - - name: Scale up / down - href: database/scale-resources.md - - name: Read Scale-Out - href: database/read-scale-out.md - - name: Distributed transactions - href: 
database/elastic-transactions-overview.md - - name: Scheduled maintenance - items: - - name: Maintenance window - href: database/maintenance-window.md - - name: Configure maintenance window - href: database/maintenance-window-configure.md - - name: Maintenance window FAQ - href: database/maintenance-window-faq.yml - - name: Advance notifications - href: database/advance-notifications.md - - name: Security - items: - - name: Overview - href: database/security-overview.md - displayName: compliance, fedramp, soc, fact, fisc, hitrust, pci, iso - - name: Best practices - href: database/security-best-practice.md - - name: Security controls by Azure Policy - displayName: regulatory, compliance, standards, domains - href: ./database/security-controls-policy.md - - name: Security baseline - href: /security/benchmark/azure/baselines/sql-database-security-baseline?toc=/azure/azure-sql/toc.json - - name: Always Encrypted - href: database/always-encrypted-landing.yml - - name: Microsoft Defender for SQL - href: database/azure-defender-for-sql.md - - name: Advanced Threat Protection - href: database/threat-detection-overview.md - - name: Data discovery and classification - href: database/data-discovery-and-classification-overview.md - - name: Dynamic data masking - href: database/dynamic-data-masking-overview.md - - name: SQL Vulnerability Assessment - items: - - name: SQL Vulnerability Assessment - href: database/sql-vulnerability-assessment.md - - name: Vulnerability Assessment rules - href: database/sql-database-vulnerability-assessment-rules.md - - name: Vulnerability Assessment rules changelog - href: database/sql-database-vulnerability-assessment-rules-changelog.md - - name: Store vulnerability scans in storage - href: database/sql-database-vulnerability-assessment-storage.md - - name: Logins, user accounts, roles, and permissions - href: database/logins-create-manage.md - - name: Azure AD Authentication - href: database/authentication-aad-overview.md - items: - - name: 
Configure Azure AD auth - href: database/authentication-aad-configure.md - - name: Multi-factor Azure AD auth - href: database/authentication-mfa-ssms-overview.md - - name: Configure multi-factor auth - href: database/authentication-mfa-ssms-configure.md - - name: Conditional Access - href: database/conditional-access-configure.md - - name: Server principals (logins) - href: database/authentication-azure-ad-logins.md - - name: Service principals (Applications) - href: database/authentication-aad-service-principal.md - - name: Directory Readers role - href: database/authentication-aad-directory-readers-role.md - - name: Azure AD-only authentication - href: database/authentication-azure-ad-only-authentication.md - - name: Azure Policy for Azure AD-only authentication - href: database/authentication-azure-ad-only-authentication-policy.md - - name: User-assigned managed identity - href: database/authentication-azure-ad-user-assigned-managed-identity.md - - name: Transparent Data Encryption (TDE) - items: - - name: Overview - href: database/transparent-data-encryption-tde-overview.md - - name: Bring Your Own Key (BYOK) - href: database/transparent-data-encryption-byok-overview.md - - name: Managed identities with BYOK - href: database/transparent-data-encryption-byok-identity.md - - - name: Business continuity - displayName: Backup, back up, restore, recovery - items: - - name: Overview - href: database/business-continuity-high-availability-disaster-recover-hadr-overview.md - - name: High availability - href: database/high-availability-sla.md - - name: Backup and recovery - items: - - name: Automated backups - href: database/automated-backups-overview.md - - name: Accelerated database recovery - href: accelerated-database-recovery.md - - name: Recovery using backups - href: database/recovery-using-backups.md - - name: Long-term backup retention - href: database/long-term-retention-overview.md - - - name: Monitor and tune - items: - - name: Documentation - href: 
database/monitoring-tuning-index.yml - - name: Overview - href: database/monitor-tune-overview.md - - name: Intelligent Insights - href: database/intelligent-insights-overview.md - - name: SQL Analytics - href: ../azure-monitor/insights/azure-sql.md?toc=%2fazure%2fazure-sql%2ftoc.json - - name: Automatic tuning - href: database/automatic-tuning-overview.md - - name: In-memory OLTP - href: in-memory-oltp-overview.md - - name: Extended events - items: - - name: Extended events - href: database/xevent-db-diff-from-svr.md - - name: Extended events - event file - href: database/xevent-code-event-file.md - - name: Extended events - ring buffer - href: database/xevent-code-ring-buffer.md - - - name: Shared how-to's - items: - - name: Connect and query from apps - items: - - name: .NET with Visual Studio - href: database/connect-query-dotnet-visual-studio.md - - name: .NET Core - href: database/connect-query-dotnet-core.md - - name: Go - href: database/connect-query-go.md - - name: Node.js - href: database/connect-query-nodejs.md - - name: PHP - href: database/connect-query-php.md - - name: Python - href: database/connect-query-python.md - - name: Ruby - href: database/connect-query-ruby.md - - - - name: Business continuity - items: - - name: Configure temporal retention policy - href: database/temporal-tables-retention-policy.md - - name: Configure backup retention using Azure Blob storage - href: database/long-term-backup-retention-configure.md - - - name: Security - items: - - name: Azure AD Authentication - items: - - name: Create Azure AD guest users and set as an Azure AD admin - href: database/authentication-aad-guest-users.md - - name: Assign Directory Readers role to groups - href: database/authentication-aad-directory-readers-role-tutorial.md - - name: Enable Azure AD-only authentication - href: database/authentication-azure-ad-only-authentication-tutorial.md - - name: Enforce Azure AD-only authentication using Azure Policy - href: 
database/authentication-azure-ad-only-authentication-policy-how-to.md - - name: Create server with Azure AD-only authentication enabled - href: database/authentication-azure-ad-only-authentication-create-server.md - - name: Create and utilize Azure AD server logins - href: database/authentication-azure-ad-logins-tutorial.md - - name: Configure TDE with BYOK - href: database/transparent-data-encryption-byok-configure.md - - name: Always Encrypted - items: - - name: Use the Azure key vault - href: database/always-encrypted-azure-key-vault-configure.md - - name: Use the certificate store - href: database/always-encrypted-certificate-store-configure.md - - - - name: Monitor & tune - items: - - name: Identify query performance issues - href: identify-query-performance-issues.md - - name: Troubleshoot performance issues - href: database/intelligent-insights-troubleshoot-performance.md - - name: Batching for performance - href: performance-improve-use-batching.md - - name: Load data with BCP - href: load-from-csv-with-bcp.md - - name: Application and database tuning guidance - href: database/performance-guidance.md - - name: Use DMVs to monitor performance - href: database/monitoring-with-dmvs.md - - name: Log diagnostic telemetry - href: database/metrics-diagnostic-telemetry-logging-streaming-export-configure.md - - name: Azure Monitor for SQL Database - href: database/monitoring-sql-database-azure-monitor.md - - name: Azure Monitor for SQL Database reference - href: database/monitoring-sql-database-azure-monitor-reference.md - - name: In-memory OLTP - items: - - name: Configure In-Memory OLTP - href: in-memory-oltp-configure.md - - name: Try in-memory features - href: in-memory-sample.md - - name: Monitor In-memory OLTP space - href: in-memory-oltp-monitor-space.md - - - name: Load and move data - items: - - name: Import a database from a BACPAC file - href: database/database-import.md - - name: Export a database to a BACPAC file - href: database/database-export.md - - 
name: Move resources to a new region - href: database/move-resources-across-regions.md - - name: Load data with ADF - href: ../data-factory/connector-azure-sql-database.md?toc=/azure/azure-sql/toc.json - - name: Develop data applications - items: - - name: Overview - href: database/develop-overview.md - - name: Working with JSON data - href: database/json-features.md - - name: Use Spark Connector - href: database/spark-connector.md - - name: Use ASP.NET App Service - href: ../app-service/app-service-web-tutorial-dotnet-sqldatabase.md?toc=%2fazure%2fazure-sql%2ftoc.json - - name: Use Azure Functions - href: ../azure-functions/functions-scenario-database-table-cleanup.md?toc=%2fazure%2fazure-sql%2ftoc.json - - name: Use Azure Logic Apps - href: ../connectors/connectors-create-api-sqlazure.md?toc=%2fazure%2fazure-sql%2ftoc.json - - name: Index with Azure Cognitive Search - href: ../search/search-howto-connecting-azure-sql-database-to-azure-search-using-indexers.md?toc=%2fazure%2fazure-sql%2ftoc.json - - name: Server-side CLR/.NET integration - href: /sql/relational-databases/clr-integration/common-language-runtime-integration-overview?toc=/azure/azure-sql/toc.json - - name: Java - items: - - name: Use Java and JDBC - href: database/connect-query-java.md - - name: Use Spring Data JDBC - href: /azure/developer/java/spring-framework/configure-spring-data-jdbc-with-azure-sql-server?toc=/azure/azure-sql/toc.json&bc=/azure/bread/toc.json - - name: Use Spring Data JPA - href: /azure/developer/java/spring-framework/configure-spring-data-jpa-with-azure-sql-server?toc=/azure/azure-sql/toc.json&bc=/azure/bread/toc.json - - name: Use Spring Data R2DBC - href: /azure/developer/java/spring-framework/configure-spring-data-r2dbc-with-azure-sql-server?toc=/azure/azure-sql/toc.json&bc=/azure/bread/toc.json - -- name: SQL Database (SQL DB) - items: - - name: Documentation - href: database/index.yml - - name: Overview - items: - - name: What is SQL Database? 
- href: database/sql-database-paas-overview.md - - name: What's new? - href: database/doc-changes-updates-release-notes-whats-new.md - - name: Try for free - href: database/free-sql-db-free-account-how-to-deploy.md - - - name: Quickstarts - href: database/quickstart-content-reference-guide.md - items: - - name: Create database - items: - - name: Azure portal, PowerShell, Az CLI - href: database/single-database-create-quickstart.md - - name: Hyperscale - displayName: Create Hyperscale database - href: database/hyperscale-database-create-quickstart.md - - name: ARM template - displayName: Resource Manager - href: database/single-database-create-arm-template-quickstart.md - - name: With ledger and digest storage - displayName: Create Azure SQL Database with ledger - href: database/ledger-create-a-single-database-with-ledger-enabled.md - - name: With user-assigned managed identity - displayName: Create Azure SQL Database with user-assigned managed identity - href: database/authentication-azure-ad-user-assigned-managed-identity-create-server.md - - name: Configure - items: - - name: Server-level IP firewall rules - href: database/firewall-create-server-level-portal-quickstart.md - - name: GitHub Actions - href: database/connect-github-actions-sql-db.md - - - name: Tutorials - items: - - name: Design a database - items: - - name: Design database using SSMS - href: database/design-first-database-tutorial.md - - name: Design database using .NET - href: database/design-first-database-csharp-tutorial.md - - name: Business continuity - items: - - name: Add db to failover group - href: database/failover-group-add-single-database-tutorial.md - - name: Add pool to failover group - href: database/failover-group-add-elastic-pool-tutorial.md - - name: Configure security for replicas - href: database/active-geo-replication-security-configure.md - - name: Geo-distributed application - href: database/geo-distributed-application-configure-tutorial.md - - name: Active geo-replication - 
href: database/active-geo-replication-configure-portal.md - - name: Security - items: - - name: Always Encrypted with secure enclaves - href: database/always-encrypted-enclaves-getting-started.md - - name: Configure security - href: database/secure-database-tutorial.md - - name: Create users using service principals - href: database/authentication-aad-service-principal-tutorial.md - - name: Rotate TDE BYOK keys - href: database/transparent-data-encryption-byok-key-rotation.md - - name: Remove TDE protector - href: database/transparent-data-encryption-byok-remove-tde-protector.md - - name: Move data - items: - - name: Migrate using DMS - href: ../dms/tutorial-sql-server-to-azure-sql.md?toc=/azure/azure-sql/toc.json - - name: Set up SQL Data Sync - href: database/sql-data-sync-sql-server-configure.md - - name: Migrate SQLite to serverless - href: database/migrate-sqlite-db-to-azure-sql-serverless-offline-tutorial.md - - name: Scale out - items: - - name: Configure security for named replicas - displayName: Configure named replicas for Hyperscale - href: database/hyperscale-named-replica-security-configure.md - - - name: Concepts - items: - - name: Single databases - href: database/single-database-overview.md - - name: Elastic pools - href: database/elastic-pool-overview.md - - name: Logical servers - href: database/logical-servers.md - - name: Serverless - href: database/serverless-tier-overview.md - - name: Hyperscale - items: - - name: Overview - displayName: Hyperscale service tier - href: database/service-tier-hyperscale.md - - name: Hyperscale architecture - href: database/hyperscale-architecture.md - - name: FAQ - displayName: Hyperscale frequently asked questions - href: database/service-tier-hyperscale-frequently-asked-questions-faq.yml - - name: Replicas - displayName: Hyperscale replicas - href: database/service-tier-hyperscale-replicas.md - - name: Replicas FAQ - displayName: Hyperscale replicas frequently asked questions - href: 
database/service-tier-hyperscale-named-replicas-faq.yml - - - name: Purchasing models - items: - - name: Overview - href: database/purchasing-models.md - - name: vCore model - displayName: provisioned compute tier - href: database/service-tiers-sql-database-vcore.md - - name: DTU model - href: database/service-tiers-dtu.md - - - name: Connectivity - items: - - name: Connectivity architecture - href: database/connectivity-architecture.md - - name: Connectivity settings - href: database/connectivity-settings.md - - - name: Security - items: - - name: Always Encrypted - href: database/always-encrypted-landing.yml - - name: Always Encrypted with secure enclaves - href: database/always-encrypted-with-secure-enclaves-landing.yml - items: - - name: Configure and use Always Encrypted with secure enclaves > - href: /sql/relational-databases/security/encryption/configure-always-encrypted-enclaves - - name: Plan for Intel SGX enclaves and attestation - href: database/always-encrypted-enclaves-plan.md - - name: Enable Intel SGX - href: database/always-encrypted-enclaves-enable-sgx.md - - name: Configure Azure Attestation - href: database/always-encrypted-enclaves-configure-attestation.md - - name: Azure SQL Auditing - href: database/auditing-overview.md - - name: Audit log format - href: database/audit-log-format.md - - name: DNS aliases - href: database/dns-alias-overview.md - - name: Ledger - items: - - name: Ledger - href: database/ledger-landing.yml - - name: Ledger overview - href: database/ledger-overview.md - - name: Database ledger - href: database/ledger-database-ledger.md - - name: Updatable ledger tables - href: database/ledger-updatable-ledger-tables.md - - name: Append-only ledger tables - href: database/ledger-append-only-ledger-tables.md - - name: Digest management and database verification - href: database/ledger-digest-management-and-database-verification.md - - name: Ledger auditing - href: database/ledger-audit.md - - name: Ledger limitations - href: 
database/ledger-limits.md - - name: Network access controls - href: database/network-access-controls-overview.md - - name: Outbound firewall rules - href: database/outbound-firewall-rule-overview.md - - name: Private Link - href: database/private-endpoint-overview.md - - name: VNet endpoints - href: database/vnet-service-endpoint-rule-overview.md - - name: Server roles - href: database/security-server-roles.md - - - - name: Business continuity - items: - - name: Active geo-replication - href: database/active-geo-replication-overview.md - - name: Auto-failover groups - href: database/auto-failover-group-sql-db.md - - name: Outage recovery guidance - href: database/disaster-recovery-guidance.md - - name: Recovery drills - href: database/disaster-recovery-drills.md - - - name: SQL Data Sync - items: - - name: Overview - href: database/sql-data-sync-data-sql-server-sql-database.md - - name: Data Sync Agent - href: database/sql-data-sync-agent-overview.md - - name: Best practices for Data Sync - href: database/sql-data-sync-best-practices.md - - name: Troubleshoot Data Sync - href: database/sql-data-sync-troubleshoot.md - - - - name: Database sharding - items: - - name: Database sharding - href: database/elastic-scale-introduction.md - - name: Elastic transactions - href: database/elastic-transactions-overview.md - - name: Elastic queries - href: database/elastic-query-overview.md - - name: Elastic client library - href: database/elastic-database-client-library.md - - name: Shard maps - href: database/elastic-scale-shard-map-management.md - - name: Query routing - href: database/elastic-scale-data-dependent-routing.md - - name: Manage credentials - href: database/elastic-scale-manage-credentials.md - - name: Move sharded data - href: database/elastic-scale-overview-split-and-merge.md - - name: Elastic tools FAQ - href: database/elastic-scale-faq.yml - - name: Glossary - href: database/elastic-scale-glossary.md - - - name: Resource limits - items: - - name: Logical 
server limits - href: database/resource-limits-logical-server.md - - name: Single database resources - items: - - name: vCore resource limits - href: database/resource-limits-vcore-single-databases.md - - name: DTU resource limits - href: database/resource-limits-dtu-single-databases.md - - name: Elastic pool resources - items: - - name: vCore resource limits - href: database/resource-limits-vcore-elastic-pools.md - - name: DTU resource limits - href: database/resource-limits-dtu-elastic-pools.md - - - name: Migration guides - items: - - name: From Access - href: migration-guides/database/access-to-sql-database-guide.md - - name: From Db2 - href: migration-guides/database/db2-to-sql-database-guide.md - - name: From Oracle - href: migration-guides/database/oracle-to-sql-database-guide.md - - name: From MySQL - href: migration-guides/database/mysql-to-sql-database-guide.md - - name: From SAP ASE - href: migration-guides/database/sap-ase-to-sql-database.md - - name: From SQL Server - items: - - name: Overview - href: migration-guides/database/sql-server-to-sql-database-overview.md - - name: Migrate - href: migration-guides/database/sql-server-to-sql-database-guide.md - - name: Assessment rules - href: migration-guides/database/sql-server-to-sql-database-assessment-rules.md - - - name: How to - href: database/how-to-content-reference-guide.md - items: - - name: T-SQL differences - href: database/transact-sql-tsql-differences-sql-server.md - - - name: Plan and manage costs - href: database/cost-management.md - - name: Connect and query - href: database/connect-query-content-reference-guide.md - items: - - name: Connect and run ad-hoc queries - items: - - name: Azure Data Studio - href: /sql/azure-data-studio/quickstart-sql-database?toc=/azure/azure-sql/toc.json - - name: SSMS - href: database/connect-query-ssms.md - - name: Azure portal - href: database/connect-query-portal.md - - name: VS Code - href: database/connect-query-vscode.md - - name: Connect and query from 
apps - items: - - name: .NET with Active Directory MFA - href: database/active-directory-interactive-connect-azure-sql-db.md - - name: Java - href: database/connect-query-java.md - - name: Java with Spring Data JDBC - href: /azure/developer/java/spring-framework/configure-spring-data-jdbc-with-azure-sql-server?toc=/azure/azure-sql/toc.json&bc=/azure/bread/toc.json - - name: Java with Spring Data JPA - href: /azure/developer/java/spring-framework/configure-spring-data-jpa-with-azure-sql-server?toc=/azure/azure-sql/toc.json&bc=/azure/bread/toc.json - - name: Java with Spring Data R2DBC - href: /azure/developer/java/spring-framework/configure-spring-data-r2dbc-with-azure-sql-server?toc=/azure/azure-sql/toc.json&bc=/azure/bread/toc.json - - - name: Manage - items: - - name: Management API reference - href: database/single-database-manage.md - - name: DNS alias PowerShell - href: database/dns-alias-powershell-create.md - - name: Manage file space - href: database/file-space-manage.md - - name: Use Resource Health for connectivity issues - href: database/resource-health-to-troubleshoot-connectivity.md - - name: Migrate DTU to vCore - href: database/migrate-dtu-to-vcore.md - - name: Scale database resources - href: database/single-database-scale.md - - name: Scale pool resources - href: database/elastic-pool-scale.md - - name: Manage pool resources - href: database/elastic-pool-manage.md - - name: Resource management in dense elastic pools - href: database/elastic-pool-resource-management.md - - name: Manage Hyperscale databases - href: database/manage-hyperscale-database.md - - name: Hyperscale performance diagnostics - href: database/hyperscale-performance-diagnostics.md - - name: Block T-SQL CRUD - href: database/block-crud-tsql.md - - name: Azure Automation - href: database/automation-manage.md - - name: Elastic jobs (preview) - items: - - name: Job automation with elastic jobs - href: database/job-automation-overview.md - - name: Configure jobs - href: 
database/elastic-jobs-overview.md - - name: Create and manage (PowerShell) - href: database/elastic-jobs-powershell-create.md - - name: Create and manage (T-SQL) - href: database/elastic-jobs-tsql-create-manage.md - - name: Migrate (from old Elastic jobs) - href: database/elastic-jobs-migrate.md - - - - name: Secure - items: - - name: Audit to storage account behind VNet or firewall - href: database/audit-write-storage-account-behind-vnet-firewall.md - - name: Configure threat detection - href: database/threat-detection-configure.md - - name: Configure dynamic data masking - href: database/dynamic-data-masking-configure-portal.md - - name: Create server configured with UMI and customer-managed TDE - href: database/transparent-data-encryption-byok-create-server.md - - name: IP-based firewall - href: database/firewall-configure.md - - name: Ledger - items: - - name: Create append-only ledger tables - href: database/ledger-how-to-append-only-ledger-tables.md - - name: Create updatable ledger tables - href: database/ledger-how-to-updatable-ledger-tables.md - - name: Access Azure Confidential Ledger digest - displayName: Access Azure Confidential Ledger digest - href: database/ledger-how-to-access-acl-digest.md - - name: Verify ledger database for tampering - href: database/ledger-verify-database.md - - name: vNet endpoints - PowerShell - href: database/scripts/vnet-service-endpoint-rule-powershell-create.md - - - name: Business continuity - items: - - name: Configure backup retention using Azure Blob storage - href: database/long-term-backup-retention-configure.md - - name: Create auto-failover group - href: database/auto-failover-group-configure-sql-db.md - - name: Configure geo-replication - Portal - href: database/active-geo-replication-configure-portal.md - - name: Configure security for geo-replicas - href: database/active-geo-replication-security-configure.md - - - name: Performance - items: - - name: Use Query Performance Insights - href: 
database/query-performance-insight-use.md - - name: Enable automatic tuning - href: database/automatic-tuning-enable.md - - name: Enable e-mail notifications for automatic tuning - href: database/automatic-tuning-email-notifications-configure.md - - name: Apply performance recommendations - href: database/database-advisor-find-recommendations-portal.md - - name: Create alerts - href: database/alerts-insights-configure-portal.md - - name: Implement database advisor recommendations - href: database/database-advisor-implement-performance-recommendations.md - - name: Stream data with Stream Analytics - href: database/stream-data-stream-analytics-integration.md - - name: Diagnose and troubleshoot high CPU - href: database/high-cpu-diagnose-troubleshoot.md - - name: Understand and resolve blocking - href: database/understand-resolve-blocking.md - - name: Analyze and prevent deadlocks - href: database/analyze-prevent-deadlocks.md - - name: Configure the max degree of parallelism (MAXDOP) - href: database/configure-max-degree-of-parallelism.md - - - name: Load and move data - items: - - name: Migrate to SQL Database - href: database/migrate-to-database-from-sql-server.md - - name: Manage SQL Database after migration - href: database/manage-data-after-migrating-to-database.md - - name: Import/export (allow Azure services disabled) - href: database/database-import-export-azure-services-off.md - - name: Import/export using Private endpoints - href: database/database-import-export-private-link.md - - name: Import a database from a BACPAC file - href: database/database-import.md - - name: Copy a database within Azure - href: database/database-copy.md - - name: Replicate to SQL Database - href: database/replication-to-sql-database.md - - name: Replicate schema changes (Data sync) - href: database/sql-data-sync-update-sync-schema.md - - - - name: Database sharding - items: - - name: Upgrade client library - href: database/elastic-scale-upgrade-client-library.md - - name: Create 
sharded app - href: database/elastic-scale-get-started.md - - name: Query horizontally-sharded data - href: database/elastic-query-getting-started.md - - name: Multi-shard queries - href: database/elastic-scale-multishard-querying.md - - name: Move sharded data - href: database/elastic-scale-configure-deploy-split-and-merge.md - - name: Security configuration - href: database/elastic-scale-split-merge-security-configuration.md - - name: Add a shard - href: database/elastic-scale-add-a-shard.md - - name: Fix shard map problems - href: database/elastic-database-recovery-manager.md - - name: Migrate sharded database - href: database/elastic-convert-to-use-elastic-tools.md - - name: Create counters - href: database/elastic-database-perf-counters.md - - name: Use entity framework - href: database/elastic-scale-use-entity-framework-applications-visual-studio.md - - name: Use Dapper framework - href: database/elastic-scale-working-with-dapper.md - - name: Query distributed data - items: - - name: Query vertically partitioned data - href: database/elastic-query-getting-started-vertical.md - - name: Report across scaled-out data tier - href: database/elastic-query-horizontal-partitioning.md - - name: Query across tables with different schemas - href: database/elastic-query-vertical-partitioning.md - - - - name: Design data applications - items: - - name: Authenticate app - href: database/application-authentication-get-client-id-keys.md - - name: Design for disaster recovery - href: database/designing-cloud-solutions-for-disaster-recovery.md - - name: Design for elastic pools - href: database/disaster-recovery-strategies-for-applications-with-elastic-pool.md - - name: Design for app upgrades - href: database/manage-application-rolling-upgrade.md - - name: C and C++ - href: database/develop-cplusplus-simple.md - - name: Excel - href: database/connect-excel.md - - name: Ports - ADO.NET - href: database/adonet-v12-develop-direct-route-ports.md - - name: Multi-tenant SaaS - 
items: - - name: SaaS design patterns - href: database/saas-tenancy-app-design-patterns.md - - name: SaaS video indexer - href: database/saas-tenancy-video-index-wingtip-brk3120-20171011.md - - name: SaaS app security - href: database/saas-tenancy-elastic-tools-multi-tenant-row-level-security.md - - name: Multi-tenant SaaS sample application - items: - - name: Wingtip Tickets sample - href: database/saas-tenancy-welcome-wingtip-tickets-app.md - - name: General guidance - href: database/saas-tenancy-wingtip-app-guidance-tips.md - - name: Single application - items: - - name: Deploy example app - href: database/saas-standaloneapp-get-started-deploy.md - - name: Provision tenants - href: database/saas-standaloneapp-provision-and-catalog.md - - name: Database per tenant - items: - - name: Tutorial intro - href: database/saas-dbpertenant-wingtip-app-overview.md - - name: Deploy example app - href: database/saas-dbpertenant-get-started-deploy.md - - name: Provision tenants - href: database/saas-dbpertenant-provision-and-catalog.md - - name: Monitor database performance - href: database/saas-dbpertenant-performance-monitoring.md - - name: Monitor with Azure Monitor logs - href: database/saas-dbpertenant-log-analytics.md - - name: Restore one tenant - href: database/saas-dbpertenant-restore-single-tenant.md - - name: Manage tenant schema - href: database/saas-tenancy-schema-management.md - - name: Cross-tenant reporting - href: database/saas-tenancy-cross-tenant-reporting.md - - name: Tenant analytics - items: - - name: With SQL Database - href: database/saas-tenancy-tenant-analytics.md - - name: With Azure Synapse Analytics - href: database/saas-tenancy-tenant-analytics-adf.md - - name: Disaster recovery using geo-restore - href: database/saas-dbpertenant-dr-geo-restore.md - - name: Disaster recovery using database geo-replication - href: database/saas-dbpertenant-dr-geo-replication.md - - name: Multi-tenant database - items: - - name: Deploy example app - href: 
database/saas-multitenantdb-get-started-deploy.md - - name: Provision tenants - href: database/saas-multitenantdb-provision-and-catalog.md - - name: Monitor database performance - href: database/saas-multitenantdb-performance-monitoring.md - - name: Run ad-hoc queries - href: database/saas-multitenantdb-adhoc-reporting.md - - name: Manage tenant schema - href: database/saas-multitenantdb-schema-management.md - - name: ETL for analytics - href: database/saas-multitenantdb-tenant-analytics.md - - - name: Samples - items: - - name: Azure CLI - items: - - name: Samples overview - href: database/az-cli-script-samples-content-guide.md?tabs=single-database - - name: Create databases - items: - - name: Create single database - href: database/scripts/create-and-configure-database-cli.md - - name: Create pooled database - href: database/scripts/move-database-between-elastic-pools-cli.md - - name: Scale databases - items: - - name: Scale single database - href: database/scripts/monitor-and-scale-database-cli.md - - name: Scale pooled database - href: database/scripts/scale-pool-cli.md - - name: Configure geo-replication - items: - - name: Single database - href: database/scripts/setup-geodr-failover-database-cli.md - - name: Pooled database - href: database/scripts/setup-geodr-failover-pool-cli.md - - name: Configure failover group - items: - - name: Failover group - href: database/scripts/setup-geodr-failover-group-cli.md - - name: Single database - href: database/scripts/add-database-to-failover-group-cli.md - - name: Pooled database - href: database/scripts/add-elastic-pool-to-failover-group-cli.md - - name: Auditing and threat detection - items: - - name: Configure auditing and threat-detection - href: database/scripts/auditing-threat-detection-cli.md - - name: Database back up, restore, copy, and import - items: - - name: Back up a database - href: database/scripts/backup-database-cli.md - - name: Restore a database - href: database/scripts/restore-database-cli.md - - 
name: Copy a database to a new server - href: database/scripts/copy-database-to-new-server-cli.md - - name: Import a database from a BACPAC file - href: database/scripts/import-from-bacpac-cli.md - - name: Azure PowerShell - href: database/powershell-script-content-guide.md?tabs=single-database - - name: Azure Resource Manager - href: database/arm-templates-content-guide.md?tabs=single-database - - name: Code samples - href: https://azure.microsoft.com/resources/samples/?service=sql-database - - name: Azure Resource Graph queries - href: database/resource-graph-samples.md - -- name: SQL Managed Instance (SQL MI) - items: - - name: Documentation - href: managed-instance/index.yml - - name: Overview - items: - - name: What is SQL Managed Instance? - href: managed-instance/sql-managed-instance-paas-overview.md - - name: What's new? - href: managed-instance/doc-changes-updates-release-notes-whats-new.md - - name: Resource limits - href: managed-instance/resource-limits.md - - name: vCore purchasing model - href: managed-instance/service-tiers-managed-instance-vcore.md - - name: Frequently asked questions - displayName: faq - href: managed-instance/frequently-asked-questions-faq.yml - - - name: Quickstarts - href: managed-instance/quickstart-content-reference-guide.md - items: - - name: Create SQL Managed Instance - items: - - name: Azure portal - href: managed-instance/instance-create-quickstart.md - - name: PowerShell - href: managed-instance/create-configure-managed-instance-powershell-quickstart.md - - name: ARM template - displayName: Resource Manager - href: managed-instance/create-template-quickstart.md - - name: Create instance pools - href: managed-instance/instance-pools-configure.md - - name: With user-assigned managed identity - displayName: Create Azure SQL Managed Instance with user-assigned managed identity - href: managed-instance/authentication-azure-ad-user-assigned-managed-identity-create-managed-instance.md - - name: Configure - items: - - name: 
Service-aided subnet configuration - href: managed-instance/subnet-service-aided-configuration-enable.md - - name: Public endpoint - href: managed-instance/public-endpoint-configure.md - - name: Minimal TLS version - href: managed-instance/minimal-tls-version-configure.md - - name: Client VM connection - href: managed-instance/connect-vm-instance-configure.md - - name: Point-to-site connection - href: managed-instance/point-to-site-p2s-configure.md - - name: Long-term backup retention - href: managed-instance/long-term-backup-retention-configure.md - - name: Load data - items: - - name: Restore sample database - href: managed-instance/restore-sample-database-quickstart.md - - - name: Tutorials - items: - - name: Migrate using DMS - href: ../dms/tutorial-sql-server-to-managed-instance.md?toc=/azure/azure-sql/toc.json - - name: Configure security - href: managed-instance/aad-security-configure-tutorial.md - - name: Add instance to failover group - href: managed-instance/failover-group-add-instance-tutorial.md - - name: Migrate on-premises users and groups - href: managed-instance/migrate-sql-server-users-to-instance-transact-sql-tsql-tutorial.md - - name: Transactional replication - items: - - name: MI pub to MI sub - href: managed-instance/replication-between-two-instances-configure-tutorial.md - - name: MI pub, MI dist, SQL sub - href: managed-instance/replication-two-instances-and-sql-server-configure-tutorial.md - - - name: Migration guides - items: - - name: From Db2 - href: migration-guides/managed-instance/db2-to-managed-instance-guide.md - - name: From Oracle - href: migration-guides/managed-instance/oracle-to-managed-instance-guide.md - - name: From SQL Server - items: - - name: Overview - href: migration-guides/managed-instance/sql-server-to-managed-instance-overview.md - - name: Migrate - href: migration-guides/managed-instance/sql-server-to-managed-instance-guide.md - - name: Performance baseline - href: 
migration-guides/managed-instance/sql-server-to-managed-instance-performance-baseline.md - - name: Assessment rules - href: migration-guides/managed-instance/sql-server-to-sql-managed-instance-assessment-rules.md - - - - name: Concepts - items: - - name: Connectivity architecture - href: managed-instance/connectivity-architecture-overview.md - - name: Auto-failover groups - href: managed-instance/auto-failover-group-sql-mi.md - - name: T-SQL differences - href: managed-instance/transact-sql-tsql-differences-sql-server.md - - name: Transactional replication - href: managed-instance/replication-transactional-overview.md - - name: Managed Instance link - href: managed-instance/managed-instance-link-feature-overview.md - - name: Instance pools - href: managed-instance/instance-pools-overview.md - - name: Data virtualization - href: managed-instance/data-virtualization-overview.md - - name: Management operations - items: - - name: Overview - href: managed-instance/management-operations-overview.md - - name: Monitor operations - href: managed-instance/management-operations-monitor.md - - name: Cancel operations - href: managed-instance/management-operations-cancel.md - - name: API reference - href: managed-instance/api-references-create-manage-instance.md - - - name: Machine Learning Services - items: - - name: Overview - href: managed-instance/machine-learning-services-overview.md - - name: Key differences - href: managed-instance/machine-learning-services-differences.md - - name: Quickstarts - items: - - name: Python - items: - - name: Run Python scripts - href: /sql/machine-learning/tutorials/quickstart-python-create-script?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Data structures and objects - href: /sql/machine-learning/tutorials/quickstart-python-data-structures?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - 
name: Python functions - href: /sql/machine-learning/tutorials/quickstart-python-functions?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Train and score a model - href: /sql/machine-learning/tutorials/quickstart-python-train-score-model?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Deploy ONNX models - href: ../azure-sql-edge/deploy-onnx.md?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: R - items: - - name: Run R scripts - href: /sql/machine-learning/tutorials/quickstart-r-create-script?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Data types and objects - href: /sql/machine-learning/tutorials/quickstart-r-data-types-and-objects?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: R functions - href: /sql/machine-learning/tutorials/quickstart-r-functions?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Train and score a model - href: /sql/machine-learning/tutorials/quickstart-r-train-score-model?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Tutorials - items: - - name: Python - items: - - name: Ski rental (linear regression) - items: - - name: 1 - Introduction - href: /sql/machine-learning/tutorials/python-ski-rental-linear-regression?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 2 - Prepare data - href: 
/sql/machine-learning/tutorials/python-ski-rental-linear-regression-prepare-data?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 3 - Train model - href: /sql/machine-learning/tutorials/python-ski-rental-linear-regression-train-model?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 4 - Deploy model - href: /sql/machine-learning/tutorials/python-ski-rental-linear-regression-deploy-model?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Categorize customers (k-means clustering) - items: - - name: 1 - Introduction - href: /sql/machine-learning/tutorials/python-clustering-model?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 2 - Prepare the data - href: /sql/machine-learning/tutorials/python-clustering-model-prepare-data?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 3 - Create the model - href: /sql/machine-learning/tutorials/python-clustering-model-build?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 4 - Deploy the model - href: /sql/machine-learning/tutorials/python-clustering-model-deploy?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: NYC taxi tips (classification) - items: - - name: 1 - Introduction - href: /sql/machine-learning/tutorials/python-taxi-classification-introduction?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 2 - Data exploration - href: 
/sql/machine-learning/tutorials/python-taxi-classification-explore-data?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 3 - Feature engineering - href: /sql/machine-learning/tutorials/python-taxi-classification-create-features?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 4 - Train and deploy - href: /sql/machine-learning/tutorials/python-taxi-classification-train-model?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 5 - Predictions - href: /sql/machine-learning/tutorials/python-taxi-classification-deploy-model?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: R - items: - - name: Ski rental (decision tree) - items: - - name: 1 - Introduction - href: /sql/machine-learning/tutorials/r-predictive-model-introduction?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 2 - Prepare data - href: /sql/machine-learning/tutorials/r-predictive-model-prepare-data?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 3 - Train model - href: /sql/machine-learning/tutorials/r-predictive-model-train?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 4 - Deploy model - href: /sql/machine-learning/tutorials/r-predictive-model-deploy?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Categorize customers (k-means clustering) - items: - - name: 1 - Introduction - href: 
/sql/machine-learning/tutorials/r-clustering-model-introduction?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 2 - Prepare the data - href: /sql/machine-learning/tutorials/r-clustering-model-prepare-data?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 3 - Create the model - href: /sql/machine-learning/tutorials/r-clustering-model-build?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 4 - Deploy the model - href: /sql/machine-learning/tutorials/r-clustering-model-deploy?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: NYC taxi tips (classification) - items: - - name: 1 - Introduction - href: /sql/machine-learning/tutorials/r-taxi-classification-introduction?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 2 - Data exploration - href: /sql/machine-learning/tutorials/r-taxi-classification-explore-data?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 3 - Feature engineering - href: /sql/machine-learning/tutorials/r-taxi-classification-create-features?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 4 - Train and deploy - href: /sql/machine-learning/tutorials/r-taxi-classification-train-model?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 5 - Predictions - href: /sql/machine-learning/tutorials/r-taxi-classification-deploy-model?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: 
How to - items: - - name: Data exploration and modeling - items: - - name: Python - items: - - name: Plot Histogram in Python - href: /sql/machine-learning/data-exploration/python-plot-histogram?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Import data into pandas dataframe - href: /sql/machine-learning/data-exploration/python-dataframe-pandas?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Insert dataframe into SQL - href: /sql/machine-learning/data-exploration/python-dataframe-sql-server?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Data type conversions - items: - - name: Python to SQL - href: /sql/machine-learning/python/python-libraries-and-data-types?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: R to SQL - href: /sql/machine-learning/r/r-libraries-and-data-types?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Deploy - items: - - name: Operationalize using stored procedures - href: /sql/machine-learning/tutorials/python-ski-rental-linear-regression-deploy-model?view=azuresqldb-mi-current&preserve-view=true - - name: Convert R code for SQL Server - href: /sql/machine-learning/deploy/modify-r-python-code-to-run-in-sql-server?view=azuresqldb-mi-current&preserve-view=true - - name: Create a stored procedure using sqlrutils - href: /sql/machine-learning/r/reference/sqlrutils/how-to-create-stored-procedure-from-r - - name: Predictions - items: - - name: Native scoring with PREDICT T-SQL - href: 
/sql/machine-learning/predictions/native-scoring-predict-transact-sql?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Package management - items: - - name: Install new Python packages - items: - - name: Get Python package information - href: /sql/machine-learning/package-management/python-package-information?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Install with sqlmlutils - href: /sql/machine-learning/package-management/install-additional-python-packages-on-sql-server?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Install new R packages - items: - - name: Get R package information - href: /sql/machine-learning/package-management/r-package-information?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Install with sqlmlutils - href: /sql/machine-learning/package-management/install-additional-r-packages-on-sql-server?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Create a miniCRAN repo - href: /sql/machine-learning/package-management/create-a-local-package-repository-using-minicran?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Tips for using R packages - href: /sql/machine-learning/package-management/tips-for-using-r-packages?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Administration - items: - - name: Monitor - items: - - name: Monitor using SSMS reports - href: 
/sql/machine-learning/administration/monitor-sql-server-machine-learning-services-using-custom-reports-management-studio?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Monitor using DMVs - href: /sql/machine-learning/administration/monitor-sql-server-machine-learning-services-using-dynamic-management-views?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Extended events - items: - - name: Monitor using extended events - href: /sql/machine-learning/administration/extended-events?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: Monitor PREDICT T-SQL - href: /sql/machine-learning/administration/extended-events-predict-tsql?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - name: SQL Insights (preview) - items: - - name: Overview - href: ../azure-monitor/insights/sql-insights-overview.md - - name: FAQ - href: ../azure-monitor/faq.yml - - name: Enable - href: ../azure-monitor/insights/sql-insights-enable.md - - name: Alerts - href: ../azure-monitor/insights/sql-insights-alerts.md - - name: Troubleshoot - href: ../azure-monitor/insights/sql-insights-troubleshoot.md - - name: Security - items: - - name: Give users permission - href: /sql/machine-learning/security/user-permission?context=%2fazure%2fazure-sql%2fmanaged-instance%2fcontext%2fml-context&preserve-view=true&view=azuresqldb-mi-current - - - name: Features - items: - - name: Linked servers - href: /sql/relational-databases/linked-servers/linked-servers-database-engine?toc=/azure/azure-sql/toc.json - - name: Service Broker - href: /sql/database-engine/configure-windows/sql-server-service-broker?toc=/azure/azure-sql/toc.json - - name: Database mail - href: 
/sql/relational-databases/database-mail/database-mail?toc=/azure/azure-sql/toc.json - - name: Security - items: - - name: Always Encrypted - href: database/always-encrypted-landing.yml - - name: Auditing - href: managed-instance/auditing-configure.md - - name: Secure public endpoints - href: managed-instance/public-endpoint-overview.md - - name: Server trust groups - href: managed-instance/server-trust-group-overview.md - - name: Windows Auth for Azure AD Principals - items: - - name: Overview - displayName: Windows Authentication for Azure AD Principals with Kerberos Overview - href: managed-instance/winauth-azuread-overview.md - - name: Implementation with Kerberos - displayName: Windows Authentication for Azure AD Principals Implementation with Kerberos - href: managed-instance\winauth-implementation-aad-kerberos.md - - name: Setup summary - displayName: Windows Authentication for Azure AD Principals with Kerberos Setup - href: managed-instance\winauth-azuread-setup.md - - name: Set up the modern interactive flow - displayName: Windows authentication for Azure AD with the modern interactive flow - href: managed-instance\winauth-azuread-setup-modern-interactive-flow.md - - name: Set up the incoming trust-based flow - displayName: Windows authentication for Azure AD with the incoming trust-based flow - href: managed-instance\winauth-azuread-setup-incoming-trust-based-flow.md - - name: Set up managed instances - displayName: Set up managed instances for Windows authentication for Azure AD using Kerberos - href: managed-instance\winauth-azuread-kerberos-managed-instance.md - - name: Run a trace using Windows Auth - displayName: Run a trace using Windows authentication for Azure AD with Kerberos - href: managed-instance\winauth-azuread-run-trace-managed-instance.md - - name: Troubleshoot - displayName: Troubleshoot Windows authentication for Azure AD with Kerberos - href: managed-instance\winauth-azuread-troubleshoot.md - - name: How to - items: - - name: How-to 
documentation - href: managed-instance/how-to-content-reference-guide.md - - name: Connect applications - href: managed-instance/connect-application-instance.md - - name: Job automation with SQL Agent - href: managed-instance/job-automation-managed-instance.md - - name: Configure settings - items: - - name: Customize time zone - href: managed-instance/timezones-overview.md - - name: Configure connection types - href: managed-instance/connection-types-overview.md - - name: Create alerts on SQL MI - href: managed-instance/alerts-create.md - - name: Configure threat detection - href: managed-instance/threat-detection-configure.md - - - name: Configure networking - items: - - name: Determine size of SQL MI subnet - href: managed-instance/vnet-subnet-determine-size.md - - name: Create new VNet and subnet for SQL MI - href: managed-instance/virtual-network-subnet-create-arm-template.md - - name: Configure existing VNet and subnet for SQL MI - href: managed-instance/vnet-existing-add-subnet.md - - name: Configure service endpoint policies for SQL MI - href: managed-instance/service-endpoint-policies-configure.md - - name: Move SQL MI to another subnet - href: managed-instance/vnet-subnet-move-instance.md - - name: Delete subnet after deleting SQL MI - href: managed-instance/virtual-cluster-delete.md - - name: Configure custom DNS - href: managed-instance/custom-dns-configure.md - - name: Sync DNS configuration - href: managed-instance/synchronize-vnet-dns-servers-setting-on-virtual-cluster.md - - name: Find management endpoint IP address - href: managed-instance/management-endpoint-find-ip-address.md - - name: Verify built-in firewall protection - href: managed-instance/management-endpoint-verify-built-in-firewall.md - - - name: Migrate - items: - - name: Database using Log Replay Service - href: managed-instance/log-replay-service-migrate.md - - name: TDE certificate - href: managed-instance/tde-certificate-migrate.md - - - name: Managed Instance link - items: - - name: 
Prepare environment for link - href: managed-instance/managed-instance-link-preparation.md - - name: Replicate databases in SSMS - href: managed-instance/managed-instance-link-use-ssms-to-replicate-database.md - - name: Fail over databases in SSMS - href: managed-instance/managed-instance-link-use-ssms-to-failover-database.md - - name: Replicate databases with scripts - href: managed-instance/managed-instance-link-use-scripts-to-replicate-database.md - - name: Fail over databases with scripts - href: managed-instance/managed-instance-link-use-scripts-to-failover-database.md - - name: Best practices - href: managed-instance/managed-instance-link-best-practices.md - - - name: Configure business continuity - items: - - name: Restore to a point in time - href: managed-instance/point-in-time-restore.md - - name: Monitor back up - href: managed-instance/backup-activity-monitor.md - - name: Auto-failover groups - items: - - name: Create failover group - href: managed-instance/auto-failover-group-configure-sql-mi.md - - name: Manually initiate a failover - href: managed-instance/user-initiated-failover.md - - name: Samples - items: - - name: Azure CLI - items: - - name: Samples overview - href: database/az-cli-script-samples-content-guide.md?tabs=managed-instance - - name: Create SQL Managed Instance - href: managed-instance/scripts/create-configure-managed-instance-cli.md - - name: Configure Transparent Data Encryption (TDE) - href: managed-instance/scripts/transparent-data-encryption-byok-sql-managed-instance-cli.md - - name: Restore geo-backup - href: managed-instance/scripts/restore-geo-backup-cli.md - - name: Azure PowerShell - href: database/powershell-script-content-guide.md?tabs=managed-instance - - name: Azure Resource Manager - href: database/arm-templates-content-guide.md?tabs=managed-instance - - name: Code samples - href: https://azure.microsoft.com/resources/samples/?service=sql-database - -- name: SQL Server on Azure VMs - items: - - name: Documentation - 
href: virtual-machines/index.yml - - name: What's new? - href: virtual-machines/windows/doc-changes-updates-release-notes-whats-new.md - - name: Windows - items: - - name: Overview - items: - - name: What is a SQL Server VM? - href: virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md - - name: SQL IaaS Agent extension - href: virtual-machines/windows/sql-server-iaas-agent-extension-automate-management.md - - name: Quickstarts - items: - - name: Portal - href: virtual-machines/windows/sql-vm-create-portal-quickstart.md - - name: PowerShell - href: virtual-machines/windows/sql-vm-create-powershell-quickstart.md - - name: ARM template - displayName: Resource Manager - href: virtual-machines/windows/create-sql-vm-resource-manager-template.md - - name: Concepts - items: - - name: Business continuity - items: - - name: Overview - href: virtual-machines/windows/business-continuity-high-availability-disaster-recovery-hadr-overview.md - displayName: failover cluster instance, fci, availability group, ag, always on - - name: Backup and restore - href: virtual-machines/windows/backup-restore.md - - name: Azure Storage for backup - href: virtual-machines/windows/azure-storage-sql-server-backup-restore-use.md - - name: Availability group (AG) - href: virtual-machines/windows/availability-group-overview.md - - name: Failover cluster instance (FCI) - href: virtual-machines/windows/failover-cluster-instance-overview.md - - name: Windows Server Failover Cluster - href: virtual-machines/windows/hadr-windows-server-failover-cluster-overview.md - displayName: failover cluster instance, fci, availability group, ag, always on - - name: Best practices - items: - - name: Quick checklist - href: virtual-machines/windows/performance-guidelines-best-practices-checklist.md - - name: VM size - href: virtual-machines/windows/performance-guidelines-best-practices-vm-size.md - - name: Storage - href: virtual-machines/windows/performance-guidelines-best-practices-storage.md - 
- name: Security - href: virtual-machines/windows/security-considerations-best-practices.md - - name: HADR configuration - href: virtual-machines/windows/hadr-cluster-best-practices.md - displayName: failover cluster instance, fci, availability group, ag, always on - - name: Application patterns - href: virtual-machines/windows/application-patterns-development-strategies.md - - name: Collect baseline - href: virtual-machines/windows/performance-guidelines-best-practices-collect-baseline.md - - name: Management - items: - - name: Dedicated host - href: virtual-machines/windows/dedicated-host.md - - name: Extend support for SQL 2008 & R2 - href: virtual-machines/windows/sql-server-2008-extend-end-of-support.md - - name: How-to guides - items: - - name: Connect to SQL Server VM - href: virtual-machines/windows/ways-to-connect-to-sql.md - - name: Create SQL Server VM - items: - - name: Use the portal - href: virtual-machines/windows/create-sql-vm-portal.md - - name: Use Azure PowerShell - href: virtual-machines/windows/create-sql-vm-powershell.md - - - name: Manage - items: - - name: With the Azure portal - href: virtual-machines/windows/manage-sql-vm-portal.md - - name: License model - displayName: ahb, ahub, payg, change, modify, update - href: virtual-machines/windows/licensing-model-azure-hybrid-benefit-ahb-change.md - - name: Change edition - displayName: change, modify, update - href: virtual-machines/windows/change-sql-server-edition.md - - name: Change version - href: virtual-machines/windows/change-sql-server-version.md - - name: Storage - href: virtual-machines/windows/storage-configuration.md - - name: Automated Patching - href: virtual-machines/windows/automated-patching.md - - name: SQL best practices assessment - href: virtual-machines/windows/sql-assessment-for-sql-vm.md - - name: Azure Key Vault Integration - href: virtual-machines/windows/azure-key-vault-integration-configure.md - - name: Migrate storage to UltraSSD - href: 
virtual-machines/windows/storage-migrate-to-ultradisk.md - - name: SQL IaaS Agent extension - displayName: resource provider, registration, sql vm rp - items: - - name: Automatic registration - href: virtual-machines/windows/sql-agent-extension-automatic-registration-all-vms.md - - name: Register single VM - href: virtual-machines/windows/sql-agent-extension-manually-register-single-vm.md - - name: Bulk register multiple VMs - href: virtual-machines/windows/sql-agent-extension-manually-register-vms-bulk.md - - name: Migrate - items: - - name: SQL Server database to VM - href: virtual-machines/windows/migrate-to-vm-from-sql-server.md - - name: VM to a new region - href: virtual-machines/windows/move-sql-vm-different-region.md - - name: Business continuity - items: - - name: Configure cluster quorum - href: virtual-machines/windows/hadr-cluster-quorum-configure-how-to.md - - name: Backup and restore - items: - - name: Automated backup (SQL 2016+) - href: virtual-machines/windows/automated-backup.md - - name: Automated backup (SQL 2014) - href: virtual-machines/windows/automated-backup-sql-2014.md - - name: Availability group (AG) - displayName: Always On, alwayson, availability group - items: - - name: Configure AG (multi-subnet) - displayName: Always On, alwayson, availability group - items: - - name: Prerequisites - href: virtual-machines/windows/availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md - displayName: Always On, alwayson, availability group - - name: Create availability group - href: virtual-machines/windows/availability-group-manually-configure-tutorial-multi-subnet.md - displayName: Always On, alwayson, availability group - - name: Configure AG (single subnet) - displayName: Always On, alwayson, availability group - items: - - name: Azure portal - href: virtual-machines/windows/availability-group-azure-portal-configure.md - displayName: Always On, alwayson, availability group - - name: PowerShell or Az CLI - href: 
virtual-machines/windows/availability-group-az-commandline-configure.md - displayName: Always On, alwayson, availability group - - name: Azure Quickstart templates - href: virtual-machines/windows/availability-group-quickstart-template-configure.md - displayName: Always On, alwayson, availability group - - name: Multiple regions - href: virtual-machines/windows/availability-group-manually-configure-multiple-regions.md - displayName: Always On, alwayson, availability group - - name: Domain-independent (workgroup) - href: virtual-machines/windows/availability-group-clusterless-workgroup-configure.md - displayName: Always On, alwayson, availability group - - name: Manually - items: - - name: Prerequisites - href: virtual-machines/windows/availability-group-manually-configure-prerequisites-tutorial-single-subnet.md - displayName: Always On, alwayson, availability group - - name: Create availability group - href: virtual-machines/windows/availability-group-manually-configure-tutorial-single-subnet.md - - name: Load balancer - Azure portal - href: virtual-machines/windows/availability-group-load-balancer-portal-configure.md - - name: Load balancer - Azure PowerShell - href: virtual-machines/windows/availability-group-listener-powershell-configure.md - - name: Configure connectivity - items: - - name: VNN listener - displayName: virtual network name - href: virtual-machines/windows/availability-group-vnn-azure-load-balancer-configure.md - - name: DNN listener - displayName: distributed network name - href: virtual-machines/windows/availability-group-distributed-network-name-dnn-listener-configure.md - - name: DNN interoperability - href: virtual-machines/windows/availability-group-dnn-interoperability.md - - - name: Failover cluster instance (FCI) - items: - - name: Prepare VM for FCI - href: virtual-machines/windows/failover-cluster-instance-prepare-vm.md - - name: Create FCI - items: - - name: Azure shared disks - href: 
virtual-machines/windows/failover-cluster-instance-azure-shared-disks-manually-configure.md - - name: Storage Spaces Direct (Win2016+) - href: virtual-machines/windows/failover-cluster-instance-storage-spaces-direct-manually-configure.md - - name: Premium file share (Win2012+) - href: virtual-machines/windows/failover-cluster-instance-premium-file-share-manually-configure.md - - name: Configure connectivity (Single subnet) - items: - - name: Virtual network name (VNN) - href: virtual-machines/windows/failover-cluster-instance-vnn-azure-load-balancer-configure.md - - name: Distributed network name (DNN) - href: virtual-machines/windows/failover-cluster-instance-distributed-network-name-dnn-configure.md - - name: DNN interoperability - href: virtual-machines/windows/failover-cluster-instance-dnn-interoperability.md - - - - name: Reference - items: - - name: Azure PowerShell - href: /powershell/azure/ - - name: Azure CLI - href: /cli/azure/azure-cli-reference-for-sql - - name: T-SQL - href: /sql/t-sql/language-reference - - name: SQL Server Drivers - href: /sql/connect/sql-connection-libraries - - name: REST - href: /rest/api/ - - name: Azure Policy built-ins - displayName: samples, policies, definitions - href: ./database/policy-reference.md - - name: Resources - items: - - name: FAQ - href: virtual-machines/windows/frequently-asked-questions-faq.yml - - name: Pricing - href: virtual-machines/windows/pricing-guidance.md - - name: Archived classic RM docs - href: /previous-versions/azure/virtual-machines/windows/sqlclassic/virtual-machines-windows-classic-ps-sql-create - - name: SQL Server Data Tools (SSDT) - href: /sql/ssdt/download-sql-server-data-tools-ssdt - - name: SQL Server Management Studio (SSMS) - href: /sql/ssms/download-sql-server-management-studio-ssms - - name: SQL Server Tools - href: /sql/tools/overview-sql-tools - - name: Azure Roadmap - href: https://azure.microsoft.com/roadmap/?category=compute - - name: MSDN forum - href: 
https://social.msdn.microsoft.com/Forums/en-US/home?forum=WAVirtualMachinesforWindows&filter=alltypes&brandIgnore=True&sort=relevancedesc&searchTerm=SQL+Server - - name: Stack Overflow - href: https://stackoverflow.com/search?q=%5Bazure-virtual-machine%5D+sql+server - - - name: Linux - items: - - name: Overview - items: - - name: About Linux SQL Server VMs - href: virtual-machines/linux/sql-server-on-linux-vm-what-is-iaas-overview.md - - name: Quickstarts - items: - - name: Create SQL VM - Portal - href: virtual-machines/linux/sql-vm-create-portal-quickstart.md - - - name: Concepts - items: - - name: SQL IaaS agent extension - href: virtual-machines/linux/sql-server-iaas-agent-extension-linux.md - - name: "How-to guides" - items: - - name: Register with SQL IaaS extension - href: virtual-machines/linux/sql-iaas-agent-extension-register-vm-linux.md - - name: Tutorials - items: - - name: Setting up Azure RHEL VM availability group with STONITH - href: virtual-machines/linux/rhel-high-availability-stonith-tutorial.md - - name: Configure availability group listener for SQL Server on RHEL virtual machines in Azure - href: virtual-machines/linux/rhel-high-availability-listener-tutorial.md - - name: Setup Always On availability group with DH2i DxEnterprise - href: virtual-machines/linux/dh2i-high-availability-tutorial.md - - name: Resources - items: - - name: FAQ - href: virtual-machines/linux/frequently-asked-questions-faq.yml - - name: SQL Server on Linux Documentation - href: /sql/linux/sql-server-linux-overview - - name: SQL Server Data Tools (SSDT) - href: /sql/ssdt/download-sql-server-data-tools-ssdt - - name: SQL Server Tools - href: /sql/tools/overview-sql-tools - - name: Azure Roadmap - href: https://azure.microsoft.com/roadmap/?category=compute - - name: Stack Overflow - href: https://stackoverflow.com/search?q=%5Bazure-virtual-machine%5D+sql+server - - - name: Migration guides - items: - - name: From Db2 - href: 
migration-guides/virtual-machines/db2-to-sql-on-azure-vm-guide.md - - name: From Oracle - href: migration-guides/virtual-machines/oracle-to-sql-on-azure-vm-guide.md - - name: From SQL Server - items: - - name: Overview - href: migration-guides/virtual-machines/sql-server-to-sql-on-azure-vm-migration-overview.md - - name: Migration guide - href: migration-guides/virtual-machines/sql-server-to-sql-on-azure-vm-individual-databases-guide.md - - name: Availability group (AG) - href: migration-guides/virtual-machines/sql-server-availability-group-to-sql-on-azure-vm.md - - name: Failover cluster instance (FCI) - href: migration-guides/virtual-machines/sql-server-failover-cluster-instance-to-sql-on-azure-vm.md - - name: Using distributed AG - items: - - name: Prerequisites - href: migration-guides/virtual-machines/sql-server-distributed-availability-group-migrate-prerequisites.md - - name: Standalone instance - href: migration-guides/virtual-machines/sql-server-distributed-availability-group-migrate-standalone-instance.md - - name: Availability group - href: migration-guides/virtual-machines/sql-server-distributed-availability-group-migrate-ag.md - - name: Complete migration - href: migration-guides/virtual-machines/sql-server-distributed-availability-group-complete-migration.md - - -- name: Reference - items: - - name: Azure SQL glossary of terms - href: glossary-terms.md - - name: T-SQL language reference - href: /sql/t-sql/language-reference - - name: Azure CLI - href: /cli/azure/azure-cli-reference-for-sql - - name: Azure PowerShell - href: /powershell/module/az.sql/ - - name: .NET - href: /dotnet/api/microsoft.azure.management.sql.models - - name: Java - href: /java/api/com.microsoft.azure.management.sql - - name: REST - href: /rest/api/sql/ - - name: Resource Manager templates for SQL - displayName: ARM - href: /azure/templates/microsoft.sql/allversions - - name: SQL tools - href: /sql/tools/overview-sql-tools - - name: SQL Server Management Studio (SSMS) - href: 
/sql/ssms/download-sql-server-management-studio-ssms - - name: SQL Server Data Tools (SSDT) - href: /sql/ssdt/download-sql-server-data-tools-ssdt - - name: BCP - href: /sql/tools/bcp-utility - - name: SQLCMD - href: /sql/tools/sqlcmd-utility - - name: SqlPackage - href: /sql/tools/sqlpackage/sqlpackage?toc=/azure/azure-sql/toc.json - - name: SQL Database Management Library package - href: https://www.nuget.org/packages/Microsoft.Azure.Management.Sql - - name: SQL connection drivers - href: /sql/connect/sql-connection-libraries - - name: SQL Server drivers - href: /sql/connect/sql-connection-libraries - items: - - name: ADO.NET - href: /sql/connect/ado-net/microsoft-ado-net-sql-server?view=sql-server-ver15&preserve-view=true - - name: JDBC - href: /sql/connect/jdbc/microsoft-jdbc-driver-for-sql-server - - name: Node.js - href: /sql/connect/node-js/node-js-driver-for-sql-server - - name: ODBC - href: /sql/connect/odbc/microsoft-odbc-driver-for-sql-server - - name: PHP - href: /sql/connect/php/microsoft-php-driver-for-sql-server - - name: Python - href: /sql/connect/python/python-driver-for-sql-server - - name: Ruby - href: /sql/connect/ruby/ruby-driver-for-sql-server - - name: Azure Policy built-ins - displayName: samples, policies, definitions - href: ./database/policy-reference.md - - name: DTU benchmark - href: database/dtu-benchmark.md - -- name: Resources - items: - - name: Build your skills with Microsoft Learn - href: /learn/browse/?products=azure-sql-database - - name: SQL Server Blog - href: https://cloudblogs.microsoft.com/sqlserver/?product=azure-sql-database - - name: Microsoft Azure Blog - href: https://azure.microsoft.com/blog/ - - name: Azure Roadmap - href: https://azure.microsoft.com/roadmap/ - - name: Public data sets - href: public-data-sets.md - - name: Pricing - href: https://azure.microsoft.com/pricing/details/azure-sql-database/single/ - - name: MSDN forum - href: https://social.msdn.microsoft.com/Forums/home - - name: Stack Overflow - href: 
https://stackoverflow.com/questions/tagged/azure-sql-database - - name: Troubleshoot - items: - - name: Known issues with SQL Managed Instance - href: managed-instance/doc-changes-updates-known-issues.md - - name: Capacity errors during deployment - href: capacity-errors-troubleshoot.md - - name: Connectivity errors - href: database/troubleshoot-common-errors-issues.md - - name: Common connection issues - href: database/troubleshoot-common-connectivity-issues.md - - name: Troubleshoot out of memory errors - href: database/troubleshoot-memory-errors-issues.md - - name: Import/Export service hangs - href: database/database-import-export-hang.md - - name: Transaction log errors - href: database/troubleshoot-transaction-log-errors-issues.md - - name: Request quota increases - href: database/quota-increase-request.md - - name: Service updates - items: - - name: SSL root certificate expiring - href: updates/ssl-root-certificate-expiring.md - - name: Gateway IP address updates - href: database/gateway-migration.md - - name: Periodic maintenance events - href: database/planned-maintenance.md - - name: Videos - href: https://azure.microsoft.com/documentation/videos/index/?services=sql-database - - name: Service updates - href: https://azure.microsoft.com/updates/?product=sql-database - - name: Architecture center - href: /azure/architecture/ - - name: Customer stories - href: https://customers.microsoft.com/en-us/search?sq=%22Azure%20SQL%20Database%22&ff=&p=0&so=story_publish_date%20desc diff --git a/articles/azure-sql/updates/ssl-root-certificate-expiring.md b/articles/azure-sql/updates/ssl-root-certificate-expiring.md deleted file mode 100644 index e4a238e7d78df..0000000000000 --- a/articles/azure-sql/updates/ssl-root-certificate-expiring.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: Certificate rotation for Azure SQL Database & SQL Managed Instance -description: Learn about the upcoming changes of root certificate changes that will affect Azure SQL Database and Azure 
SQL Managed Instance -author: srdan-bozovic-msft -ms.author: srbozovi -ms.reviewer: kendralittle, mathoma, vanto -ms.service: sql-db-mi -ms.subservice: security -ms.topic: conceptual -ms.date: 09/13/2020 ---- - -# Understanding the changes in the Root CA change for Azure SQL Database & SQL Managed Instance - -Azure SQL Database & SQL Managed Instance will be changing the root certificate for the client application/driver enabled with SSL, used to establish secure TDS connection. The [current root certificate](https://www.digicert.com/CACerts/BaltimoreCyberTrustRoot.crt.pem) is set to expire October 26, 2020 as part of standard maintenance and security best practices. This article gives you more details about the upcoming changes, the resources that will be affected, and the steps needed to ensure that your application maintains connectivity to your database server. - -## What update is going to happen? - -[Certificate Authority (CA) Browser forum](https://cabforum.org/) recently published reports of multiple certificates issued by CA vendors to be non-compliant. - -As per the industry’s compliance requirements, CA vendors began revoking CA certificates for non-compliant CAs, requiring servers to use certificates issued by compliant CAs, and signed by CA certificates from those compliant CAs. Since Azure SQL Database & SQL Managed Instance currently use one of these non-compliant certificates, which client applications use to validate their SSL connections, we need to ensure that appropriate actions are taken (described below) to minimize the potential impact to your Azure SQL servers. - -The new certificate will be used starting October 26, 2020. If you use full validation of the server certificate when connecting from a SQL client (TrustServerCertificate=true), you need to ensure that your SQL client would be able to validate new root certificate before October 26, 2020. - -## How do I know if my application might be affected? 
- -All applications that use SSL/TLS and verify the root certificate needs to update the root certificate in order to connect to Azure SQL Database & SQL Managed Instance. - -If you are not using SSL/TLS currently, there is no impact to your application availability. You can verify if your client application is trying to verify root certificate by looking at the connection string. If TrustServerCertificate is explicitly set to true then you are not affected. - -If your client driver utilizes OS certificate store, as majority of drivers do, and your OS is regularly maintained this change will likely not affect you, as the root certificate we are switching to should be already available in your Trusted Root Certificate Store. Check for Baltimore CyberDigiCert GlobalRoot G2 and validate it is present. - -If your client driver utilizes local file certificate store, to avoid your application’s availability being interrupted due to certificates being unexpectedly revoked, or to update a certificate, which has been revoked, refer to the [**What do I need to do to maintain connectivity**](./ssl-root-certificate-expiring.md#what-do-i-need-to-do-to-maintain-connectivity) section. - -## What do I need to do to maintain connectivity - -To avoid your application’s availability being interrupted due to certificates being unexpectedly revoked, or to update a certificate, which has been revoked, follow the steps below: - -* Download Baltimore CyberTrust Root & DigiCert GlobalRoot G2 Root CA from links below: - * https://www.digicert.com/CACerts/BaltimoreCyberTrustRoot.crt.pem - * https://cacerts.digicert.com/DigiCertGlobalRootG2.crt.pem - -* Generate a combined CA certificate store with both **BaltimoreCyberTrustRoot** and **DigiCertGlobalRootG2** certificates are included. - -## What can be the impact? -If you are validating server certificates as documented here, your application’s availability might be interrupted since the database will not be reachable. 
Depending on your application, you may receive a variety of error messages including but not limited to: -* Invalid certificate/revoked certificate -* Connection timed out -* Error if applicable - -## Frequently asked questions - -### If I am not using SSL/TLS, do I still need to update the root CA? -No actions regarding this change are required if you are not using SSL/TLS. Still you should set a plan for start using latest TLS version as we plan for TLS enforcement in near future. - -### What will happen if I do not update the root certificate before October 26, 2020? -If you do not update the root certificate before November 30, 2020, your applications that connect via SSL/TLS and does verification for the root certificate will be unable to communicate to the Azure SQL Database & SQL Managed Instance and application will experience connectivity issues to your Azure SQL Database & SQL Managed Instance. - -### Do I need to plan a maintenance downtime for this change?
    -No. Since the change here is only on the client side to connect to the server, there is no maintenance downtime needed here for this change. - -### What if I cannot get a scheduled downtime for this change before October 26, 2020? -Since the clients used for connecting to the server needs to be updating the certificate information as described in the fix section [here](./ssl-root-certificate-expiring.md#what-do-i-need-to-do-to-maintain-connectivity), we do not need to a downtime for the server in this case. - -### If I create a new server after November 30, 2020, will I be impacted? -For servers created after October 26, 2020, you can use the newly issued certificate for your applications to connect using SSL. - -### How often does Microsoft update their certificates or what is the expiry policy? -These certificates used by Azure SQL Database & SQL Managed Instance are provided by trusted Certificate Authorities (CA). So the support of these certificates on Azure SQL Database & SQL Managed Instance is tied to the support of these certificates by CA. However, as in this case, there can be unforeseen bugs in these predefined certificates, which need to be fixed at the earliest. - -### If I am using read replicas, do I need to perform this update only on primary server or all the read replicas? -Since this update is a client-side change, if the client used to read data from the replica server, we will need to apply the changes for those clients as well. - -### Do we have server-side query to verify if SSL is being used? -Since this configuration is client-side, information is not available on server side. - -### What if I have further questions? -If you have a support plan and you need technical help, create Azure support request, see [How to create Azure support request](../../azure-portal/supportability/how-to-create-azure-support-request.md). 
\ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/index.yml b/articles/azure-sql/virtual-machines/index.yml deleted file mode 100644 index d75479cc77367..0000000000000 --- a/articles/azure-sql/virtual-machines/index.yml +++ /dev/null @@ -1,157 +0,0 @@ -### YamlMime:Landing - -title: SQL Server on Azure VM documentation -summary: Find concepts, quickstarts, tutorials, and samples for SQL Server installed to Azure virtual machines, both Windows and Linux. - -metadata: - title: SQL Server on Azure VM documentation - description: Find documentation about SQL Server installed to Azure virtual machines, both Windows and Linux. - services: virtual-machines-sql - ms.service: virtual-machines-sql - ms.subservice: service-overview - ms.topic: landing-page - author: MashaMSFT - ms.author: mathoma - ms.date: 05/27/2020 - -# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new - -landingContent: -# Cards and links should be based on top customer tasks or top subjects -# Start card title with a verb - - # Card - - title: SQL Server on Azure VM - linkLists: - - linkListType: quickstart - links: - - text: Create Windows SQL VM (portal) - url: windows/sql-vm-create-portal-quickstart.md - - text: Create Windows SQL VM (PowerShell) - url: windows/sql-vm-create-powershell-quickstart.md - - text: Create Linux SQL VM (portal) - url: linux/sql-vm-create-portal-quickstart.md - - linkListType: video - links: - - text: SQL Server on Azure VM overview - url: /shows/Azure-SQL-for-Beginners/SQL-Server-on-Azure-VM-Overview-4-of-61 - - linkListType: overview - links: - - text: What's new? - url: windows/doc-changes-updates-release-notes-whats-new.md - - text: What is SQL Server on Windows VM? - url: windows/sql-server-on-azure-vm-iaas-what-is-overview.md - - text: What is SQL Server on Linux VM? 
- url: linux/sql-server-on-linux-vm-what-is-iaas-overview.md - - text: Migrate from SQL Server - url: windows/migrate-to-vm-from-sql-server.md - - text: Security considerations - url: windows/security-considerations-best-practices.md - - text: Performance guidelines - url: ./windows/performance-guidelines-best-practices-checklist.md - - text: Pricing guidance - url: windows/pricing-guidance.md - - # Card - - title: Manage - linkLists: - - linkListType: concept - links: - - text: SQL Server IaaS Agent extension - url: windows/sql-server-iaas-agent-extension-automate-management.md - - text: Manage with Azure portal - url: windows/manage-sql-vm-portal.md - - text: Register with SQL VM resource provider - url: windows/sql-agent-extension-manually-register-single-vm.md - - text: Automated patching - url: windows/automated-patching.md - - text: Change license type - url: windows/licensing-model-azure-hybrid-benefit-ahb-change.md - - text: Change edition of SQL Server - url: windows/change-sql-server-edition.md - - text: Move to new region - url: windows/move-sql-vm-different-region.md - - text: Integrate with Azure Key Vault - url: windows/azure-key-vault-integration-configure.md - - # Card - - title: Business continuity - linkLists: - - linkListType: overview - links: - - text: High availability & disaster recovery - url: windows/business-continuity-high-availability-disaster-recovery-hadr-overview.md - - text: Backup and restore - url: windows/backup-restore.md - - text: Availability groups - url: windows/availability-group-overview.md - - linkListType: how-to-guide - links: - - text: Availability group (Az CLI) - url: ./windows/availability-group-az-commandline-configure.md - - text: Clusterless availability group - url: windows/availability-group-clusterless-workgroup-configure.md - - text: FCI (Storage Spaces Direct) - url: windows/failover-cluster-instance-storage-spaces-direct-manually-configure.md - - text: FCI (Premium File Share) - url: 
windows/failover-cluster-instance-premium-file-share-manually-configure.md - - linkListType: tutorial - links: - - text: Availability group (manually) - url: windows/availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md - - text: Stonith availability group (RHEL) - url: linux/rhel-high-availability-stonith-tutorial.md - - text: Availability group listener (RHEL) - url: linux/rhel-high-availability-listener-tutorial.md - - # Card - - title: Learn Azure SQL - linkLists: - - linkListType: learn - links: - - text: Azure SQL for beginners - url: https://aka.ms/azuresql4beginners - - text: Azure SQL fundamentals - url: /learn/paths/azure-sql-fundamentals/ - - text: Azure SQL hands-on labs - url: https://aka.ms/asqlworkshop - - text: Azure SQL bootcamp - url: https://aka.ms/azuresqlbootcamp - - text: Educational SQL resources - url: /sql/sql-server/educational-sql-resources - - # Card - - title: Reference - linkLists: - - linkListType: deploy - links: - - text: Azure portal - url: windows/sql-vm-create-portal-quickstart.md - - text: Azure CLI - url: /cli/azure/azure-cli-reference-for-sql#sql-virtual-machines-references - - text: PowerShell samples - url: windows/sql-vm-create-powershell-quickstart.md - - text: ARM template samples - url: windows/create-sql-vm-resource-manager-template.md - - linkListType: download - links: - - text: SQL Server Management Studio (SSMS) - url: /sql/ssms/download-sql-server-management-studio-ssms - - text: Azure Data Studio - url: /sql/azure-data-studio/download-azure-data-studio - - text: SQL Server Data Tools - url: /sql/ssdt/download-sql-server-data-tools-ssdt - - text: Visual Studio 2019 - url: https://visualstudio.microsoft.com/downloads/ - - linkListType: reference - links: - - text: Migration guide - url: https://datamigration.microsoft.com/ - - text: Transact-SQL (T-SQL) - url: /sql/t-sql/language-reference - - text: Azure CLI - url: /cli/azure/azure-cli-reference-for-sql#sql-virtual-machines-references - - 
text: PowerShell - url: /powershell/module/az.sql - - text: REST API - url: /rest/api/sql/ diff --git a/articles/azure-sql/virtual-machines/linux/dh2i-high-availability-tutorial.md b/articles/azure-sql/virtual-machines/linux/dh2i-high-availability-tutorial.md deleted file mode 100644 index 2cb03ae35eacf..0000000000000 --- a/articles/azure-sql/virtual-machines/linux/dh2i-high-availability-tutorial.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: "Setup Always On availability group with DH2i DxEnterprise running on Linux-based Azure Virtual Machines" -description: Use DH2i DxEnterprise as the cluster manager to achieve high availability with an availability group on SQL Server on Linux Azure Virtual Machines -ms.date: 03/04/2021 -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.topic: tutorial -author: amvin87 -ms.author: amitkh -ms.reviewer: vanto ---- - -# Tutorial - Setup a three node Always On availability group with DH2i DxEnterprise running on Linux-based Azure Virtual Machines - -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This tutorial explains how to configure SQL Server Always On availability group with DH2i DxEnterprise running on Linux-based Azure Virtual Machines (VMs). - -For more information about DxEnterprise, see [DH2i DxEnterprise](https://dh2i.com/dxenterprise-availability-groups/). - -> [!NOTE] -> Microsoft supports data movement, the availability group, and the SQL Server components. Contact DH2i for support related to the documentation of DH2i DxEnterprise cluster, for the cluster and quorum management. -  - -This tutorial consists of the following steps: - -> [!div class="checklist"] -> * Install SQL Server on all the Azure virtual machines (VMs) that will be part of the availability group. -> * Install DxEnterprise on all the VMs and configure the DxEnterprise cluster. -> * Create the virtual hosts to provide failover support and high availability, add an availability group and database to the availability group. 
-> * Create the Internal Azure Load balancer for Availability group listener (optional). -> * Perform an manual or automatic failover. - -In this tutorial, we are going to set up a DxEnterprise cluster using [DxAdmin Client UI](https://dh2i.com/docs/20-0/dxenterprise/dh2i-dxenterprise-20-0-software-dxadmin-client-ui-quick-start-guide/). Optionally, you can also set up the cluster using the [DxCLI](https://dh2i.com/docs/20-0/dxenterprise/dh2i-dxenterprise-20-software-dxcli-guide/) command-line interface. For this example, we've used four VMs. Three of those VMs are running Ubuntu 18.04, and are part of the three node cluster. The fourth VM is running Windows 10 with the DxAdmin tool to manage and configure the cluster. - -## Prerequisites - -- Create four VMs in Azure. Follow the [Quickstart: Create Linux virtual machine in Azure portal](../../../virtual-machines/linux/quick-create-portal.md) article to create Linux based virtual machines. Similarly, for creating the Windows based virtual machine, follow the [Quickstart: Create a Windows virtual machine in the Azure portal](../../../virtual-machines/windows/quick-create-portal.md) article. -- Install .NET 3.1 on all the Linux-based VMs that are going to be part of the cluster. Follow the instructions documented [here](/dotnet/core/install/linux) based on the Linux operating system that you choose. -- A valid DxEnterprise license with availability group management features enabled will be required. For more information, see [DxEnterprise Free Trial](https://dh2i.com/trial/) about how you can obtain a free trial. - -## Install SQL Server on all the Azure VMs that will be part of the availability group - -In this tutorial, we are setting up a three node Linux-based cluster running the availability group. Follow the documentation for [SQL Server installation on Linux](/sql/linux/sql-server-linux-overview#install) based on the choice of your Linux platform. 
We also recommend you install the [SQL Server tools](/sql/linux/sql-server-linux-setup-tools) for this tutorial. -  -> [!NOTE] -> Ensure that the Linux OS that you choose is a common distribution that is supported by both [DH2i DxEnterprise ( See Minimal System Requirements Section)](https://dh2i.com/wp-content/uploads/DxEnterprise-v20-Admin-Guide.pdf) and [Microsoft SQL Server](/sql/linux/sql-server-linux-release-notes-2019#supported-platforms). -> -> In this example, we use Ubuntu 18.04, which is supported by both DH2i DxEnterprise and Microsoft SQL Server. - -For this tutorial, we are not going to install SQL Server on the Windows VM, as this node is not going to be part of the cluster, and is used only to manage the cluster using DxAdmin. - -After you complete this step, you should have SQL Server and [SQL Server tools](/sql/linux/sql-server-linux-setup-tools) (optionally) installed on all three Linux-based VMs that will participate in the availability group. -  -## Install DxEnterprise on all the VMs and Configure the cluster - -In this step, we are going to install DH2i DxEnterprise for Linux on the three Linux VMs. The following table describes the role each server plays in the cluster: - -| Number of VMs | DH2i DxEnterprise role | Microsoft SQL Server availability group replica role | -|--|--|--| -| 1 | Cluster node - Linux based | Primary | -| 1 | Cluster node - Linux based | Secondary - Synchronous commit | -| 1 | Cluster node - Linux based | Secondary - Synchronous commit | -| 1 | DxAdmin Client | NA | - - -To install DxEnterprise on the three Linux-based nodes, follow the DH2i DxEnterprise documentation based on the Linux operating system you choose. Install DxEnterprise using any one of the methods listed below. 
- -- **Ubuntu** - - [Repo Installation Quick Start Guide](https://dh2i.com/docs/20-0/dxenterprise/dh2i-dxenterprise-20-0-software-ubuntu-installation-quick-start-guide/) - - [Extension Quick Start Guide](https://dh2i.com/docs/20-0/dxenterprise/dh2i-dxenterprise-20-0-software-azure-extension-quick-start-guide/) - - [Marketplace Image Quick Start Guide](https://dh2i.com/docs/20-0/dxenterprise/dh2i-dxenterprise-20-0-software-azure-marketplace-image-for-linux-quick-start-guide/) -- **RHEL** - - [Repo Installation Quick Start Guide](https://dh2i.com/docs/20-0/dxenterprise/dh2i-dxenterprise-20-0-software-rhel-centos-installation-quick-start-guide/) - - [Extension Quick Start Guide](https://dh2i.com/docs/20-0/dxenterprise/dh2i-dxenterprise-20-0-software-azure-extension-quick-start-guide/) - - [Marketplace Image Quick Start Guide](https://dh2i.com/docs/20-0/dxenterprise/dh2i-dxenterprise-20-0-software-azure-marketplace-image-for-linux-quick-start-guide/) - -To install just the DxAdmin client tool on the Windows VM, follow [DxAdmin Client UI Quick Start Guide](https://dh2i.com/docs/20-0/dxenterprise/dh2i-dxenterprise-20-0-software-dxadmin-client-ui-quick-start-guide/). - -After this step, you should have the DxEnterprise cluster created on the Linux VMs, and DxAdmin client installed on the Windows Client machine. - -> [!NOTE] -> You can also create a three node cluster where one of the node is added as *configuration-only mode*, as described [here](/sql/database-engine/availability-groups/windows/availability-modes-always-on-availability-groups#SupportedAvModes) to enable automatic failover. - -## Create the virtual hosts to provide failover support and high availability - -In this step, we're going to create a virtual host, availability group, and then add a database, all using the DxAdmin UI. - -> [!NOTE] -> During this step, the SQL Server instances will be restarted to enable Always On. 
- -Connect to the Windows client machine running DxAdmin to connect to the cluster created in the step above. Follow the steps documented at [MSSQL Availability Groups with DxAdmin](https://dh2i.com/docs/20-0/dxenterprise/dh2i-dxenterprise-20-0-software-mssql-availability-groups-with-dxadmin-quick-start-guide/) to enable Always On and create the virtual host and availability group. - -> [!TIP] -> Before adding the databases, ensure the database is created and backed up on the primary instance of SQL Server. - -## Create the Internal Azure Load balancer for Listener (optional) - -In this optional step, you can create and configure the Azure Load balancer that holds the IP addresses for the availability group listeners. For more information on Azure Load Balancer, refer [Azure Load Balancer](../../../load-balancer/load-balancer-overview.md). To configure the Azure load balancer and availability group listener using DxAdmin, follow the DxEnterprise [Azure Load Balancer Quick Start Guide](https://dh2i.com/docs/20-0/dxenterprise/dh2i-dxenterprise-20-0-software-azure-load-balancer-quick-start-guide/). - -After this step, you should have an availability group listener created and mapped to the Internal Azure load balancer. - -## Test manual or automatic failover - -For the automatic failover test, you can go ahead and bring down the primary replica (power off the virtual machine from the Azure portal). This will replicate the sudden unavailability of the primary node. The expected behavior is: -- The cluster manager promotes one of the secondary replicas in the availability group to primary. -- The failed primary replica automatically joins the cluster after it is back up. The cluster manager promotes it to secondary replica. - - -You could also perform a manual failover by following the below mentioned steps: - -1. Connect to the cluster via DxAdmin -1. Expand the virtual host for the availability group -1. 
Right-click on the target node/secondary replica and select **Start Hosting on Member** to initiate the failover - -For more information on more operations within DxEnterprise, access the [DxEnterprise Admin Guide](https://dh2i.com/wp-content/uploads/DxEnterprise-v20-Admin-Guide.pdf) and [DxEnterprise DxCLI Guide](https://dh2i.com/docs/20-0/dxenterprise/dh2i-dxenterprise-20-software-dxcli-guide/) - -## Next Steps - -- Learn more about [Availability Groups on Linux](/sql/linux/sql-server-linux-availability-group-overview) -- [Quickstart: Create Linux virtual machine in Azure portal](../../../virtual-machines/linux/quick-create-portal.md) -- [Quickstart: Create a Windows virtual machine in the Azure portal](../../../virtual-machines/windows/quick-create-portal.md) -- [Supported platforms for SQL Server 2019 on Linux](/sql/linux/sql-server-linux-release-notes-2019#supported-platforms) \ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/linux/frequently-asked-questions-faq.yml b/articles/azure-sql/virtual-machines/linux/frequently-asked-questions-faq.yml deleted file mode 100644 index 86a5672c6fb43..0000000000000 --- a/articles/azure-sql/virtual-machines/linux/frequently-asked-questions-faq.yml +++ /dev/null @@ -1,115 +0,0 @@ -### YamlMime:FAQ -metadata: - title: SQL Server on Linux virtual machines FAQ | Microsoft Docs - description: This article provides answers to frequently asked questions about running SQL Server on Linux virtual machines. 
- services: virtual-machines-linux - documentationcenter: '' - author: MashaMSFT - tags: azure-service-management - ms.service: virtual-machines-sql - ms.subservice: service-overview - ms.topic: faq - ms.workload: iaas-sql-server - ms.date: 12/13/2017 - ms.author: mathoma -title: Frequently asked questions for SQL Server on Linux virtual machines -summary: | - [!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - - > [!div class="op_single_selector"] - > * [Windows](../windows/frequently-asked-questions-faq.yml) - > * [Linux](frequently-asked-questions-faq.yml) - - This article provides answers to some of the most common questions about running [SQL Server on Linux virtual machines](sql-server-on-linux-vm-what-is-iaas-overview.md). - - [!INCLUDE [support-disclaimer](../../../../includes/support-disclaimer.md)] - - -sections: - - name: Images - questions: - - question: | - What SQL Server virtual machine gallery images are available? - answer: | - Azure maintains virtual machine (VM) images for all supported major releases of SQL Server on all editions for both Linux and Windows. For more details, see the complete list of [Linux VM images](sql-server-on-linux-vm-what-is-iaas-overview.md#create) and [Windows VM images](../windows/sql-server-on-azure-vm-iaas-what-is-overview.md#payasyougo). - - - question: | - Are existing SQL Server virtual machine gallery images updated? - answer: Every two months, SQL Server images in the virtual machine gallery are updated with the latest Linux and Windows updates. For Linux images, this includes the latest system updates. For Windows images, this includes any updates that are marked as important in Windows Update, including important SQL Server security updates and service packs. SQL Server cumulative updates are handled differently for Linux and Windows. For Linux, SQL Server cumulative updates are also included in the refresh. 
But at this time, Windows VMs are not updated with SQL Server or Windows Server cumulative updates. - - - question: | - What related SQL Server packages are also installed? - answer: | - To see the SQL Server packages that are installed by default on SQL Server on Linux VMs, see [Installed packages](sql-server-on-linux-vm-what-is-iaas-overview.md#packages). - - - question: | - Can SQL Server virtual machine images get removed from the gallery? - answer: Yes. Azure only maintains one image per major version and edition. For example, when a new SQL Server service pack is released, Azure adds a new image to the gallery for that service pack. The SQL Server image for the previous service pack is immediately removed from the Azure portal. However, it is still available for provisioning from PowerShell for the next three months. After three months, the previous service pack image is no longer available. This removal policy would also apply if a SQL Server version becomes unsupported when it reaches the end of its lifecycle. - - - name: Creation - questions: - - question: | - How do I create a Linux virtual machine with SQL Server? - answer: | - The easiest solution is to create a Linux virtual machine that includes SQL Server. For a tutorial on signing up for Azure and creating a SQL Server VM from the portal, see [Provision a Linux virtual machine running SQL Server in the Azure portal](sql-vm-create-portal-quickstart.md). You also have the option of manually installing SQL Server on a VM with either a freely licensed edition (Developer or Express) or by reusing an on-premises license. If you bring your own license, you must have [License Mobility through Software Assurance on Azure](https://azure.microsoft.com/pricing/license-mobility). - - - question: | - Why can’t I provision an RHEL or SLES SQL Server VM with an Azure subscription that has a spending limit? 
- answer: | - RHEL and SLES virtual machines require a subscription with no spending limit and a verified payment method (usually a credit card) associated with the subscription. If you provision an RHEL or SLES VM without removing the spending limit, your subscription will get disabled and all VMs/services stopped. If you do run into this state, to re-enable the subscription [remove the spending limit](https://account.windowsazure.com/subscriptions). Your remaining credits will be restored for the current billing cycle but an RHEL or SLES VM image surcharge will go against your credit card if you choose to re-start and continue running it. - - - name: Licensing - questions: - - question: | - How can I install my licensed copy of SQL Server on an Azure VM? - answer: | - First, create a Linux OS-only virtual machine. Then run the [SQL Server installation steps](/sql/linux/sql-server-linux-setup#platforms) for your Linux distribution. Unless you are installing one of the freely licensed editions of SQL Server, you must also have a SQL Server license and [License Mobility through Software Assurance on Azure](https://azure.microsoft.com/pricing/license-mobility/). - - - question: | - Are there Bring-Your-Own-License (BYOL) Linux virtual machine images for SQL Server? - answer: At this time, there are no BYOL Linux virtual machine images for SQL Server. However, you can manually install SQL Server on a Linux-only VM as discussed in the previous questions. - - - question: | - Can I change a VM to use my own SQL Server license if it was created from one of the pay-as-you-go gallery images? - answer: No. You cannot switch from pay-per-second licensing to using your own license. You must create a new Linux VM, install SQL Server, and migrate your data. See the previous question for more details about bringing your own license. 
- - - name: Administration - questions: - - question: | - Can I manage a Linux virtual machine running SQL Server with SQL Server Management Studio (SSMS)? - answer: | - Yes, but SSMS is currently a Windows-only tool. You must connect remotely from a Windows machine to use SSMS with Linux VMs running SQL Server. Locally on Linux, the new [mssql-conf](/sql/linux/sql-server-linux-configure-mssql-conf) tool can perform many administrative tasks. For a cross-platform database management tool, see [Azure Data Studio](/sql/azure-data-studio/what-is). - - - question: | - Can I remove SQL Server completely from a SQL Server VM? - answer: | - Yes, but you will continue to be charged for your SQL Server VM as described in [Pricing guidance for SQL Server Azure VMs](../windows/pricing-guidance.md?toc=%2fazure%2fvirtual-machines%2flinux%2fsql%2ftoc.json). If you no longer need SQL Server, you can deploy a new virtual machine and migrate the data and applications to the new virtual machine. Then you can remove the SQL Server virtual machine. - - - name: Updating and patching - questions: - - question: | - How do I upgrade to a new version/edition of the SQL Server in an Azure VM? - answer: | - Currently, there is no in-place upgrade for SQL Server running in an Azure VM. Create a new Azure virtual machine with the desired SQL Server version/edition, and then migrate your databases to the new server using [standard data migration techniques](/sql/linux/sql-server-linux-migrate-overview). - - - name: General - questions: - - question: | - Are SQL Server high-availability solutions supported on Azure VMs? - answer: Not at this time. Always On availability groups and Failover Clustering both require a clustering solution in Linux, such as Pacemaker. The supported Linux distributions for SQL Server do not support their high availability add-ons in the cloud. 
- -additionalContent: | - - ## Resources - - **Linux VMs**: - - * [Overview of SQL Server on a Linux VM](sql-server-on-linux-vm-what-is-iaas-overview.md) - * [Provision SQL Server on a Linux VM](sql-vm-create-portal-quickstart.md) - * [SQL Server on Linux documentation](/sql/linux/sql-server-linux-overview) - - **Windows VMs**: - - * [Overview of SQL Server on a Windows VM](../windows/sql-server-on-azure-vm-iaas-what-is-overview.md) - * [Provision SQL Server on a Windows VM](../windows/sql-vm-create-portal-quickstart.md) - * [FAQ (Windows)](../windows/frequently-asked-questions-faq.yml) diff --git a/articles/azure-sql/virtual-machines/linux/media/rhel-high-availability-listener-tutorial/add-backend-pool.png b/articles/azure-sql/virtual-machines/linux/media/rhel-high-availability-listener-tutorial/add-backend-pool.png deleted file mode 100644 index f047a8ca144cb..0000000000000 Binary files a/articles/azure-sql/virtual-machines/linux/media/rhel-high-availability-listener-tutorial/add-backend-pool.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/linux/media/rhel-high-availability-listener-tutorial/add-load-balancing-rule.png b/articles/azure-sql/virtual-machines/linux/media/rhel-high-availability-listener-tutorial/add-load-balancing-rule.png deleted file mode 100644 index cbc7354853423..0000000000000 Binary files a/articles/azure-sql/virtual-machines/linux/media/rhel-high-availability-listener-tutorial/add-load-balancing-rule.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/linux/media/rhel-high-availability-stonith-tutorial/availability-group-joined.png b/articles/azure-sql/virtual-machines/linux/media/rhel-high-availability-stonith-tutorial/availability-group-joined.png deleted file mode 100644 index 7778be5ee0c4e..0000000000000 Binary files a/articles/azure-sql/virtual-machines/linux/media/rhel-high-availability-stonith-tutorial/availability-group-joined.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/linux/media/sql-server-on-linux-vm-what-is-iaas-overview/no.png b/articles/azure-sql/virtual-machines/linux/media/sql-server-on-linux-vm-what-is-iaas-overview/no.png deleted file mode 100644 index 1aa084e6a3326..0000000000000 Binary files a/articles/azure-sql/virtual-machines/linux/media/sql-server-on-linux-vm-what-is-iaas-overview/no.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/linux/media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png b/articles/azure-sql/virtual-machines/linux/media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png deleted file mode 100644 index dd2030fe2cb27..0000000000000 Binary files a/articles/azure-sql/virtual-machines/linux/media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/azure-compute-blade.png b/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/azure-compute-blade.png deleted file mode 100644 index 93849636bab5b..0000000000000 Binary files a/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/azure-compute-blade.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/basics.png b/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/basics.png deleted file mode 100644 index 9b6c4f0ef57d8..0000000000000 Binary files a/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/basics.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/networking.png b/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/networking.png deleted file mode 100644 index ec8fd021e4865..0000000000000 Binary files a/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/networking.png and 
/dev/null differ diff --git a/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/port-settings.png b/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/port-settings.png deleted file mode 100644 index c29999c084be3..0000000000000 Binary files a/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/port-settings.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/searchfilter.png b/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/searchfilter.png deleted file mode 100644 index 355d24e34770d..0000000000000 Binary files a/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/searchfilter.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/sqlnsgrule.png b/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/sqlnsgrule.png deleted file mode 100644 index 28d14c1754e7d..0000000000000 Binary files a/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/sqlnsgrule.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/vmproperties.png b/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/vmproperties.png deleted file mode 100644 index 6b1bc53afa0f1..0000000000000 Binary files a/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/vmproperties.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/vmsizes.png b/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/vmsizes.png deleted file mode 100644 index 224ffa12563cb..0000000000000 Binary files a/articles/azure-sql/virtual-machines/linux/media/sql-vm-create-portal-quickstart/vmsizes.png and 
/dev/null differ diff --git a/articles/azure-sql/virtual-machines/linux/rhel-high-availability-listener-tutorial.md b/articles/azure-sql/virtual-machines/linux/rhel-high-availability-listener-tutorial.md deleted file mode 100644 index b02c2e8804b84..0000000000000 --- a/articles/azure-sql/virtual-machines/linux/rhel-high-availability-listener-tutorial.md +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: Configure an availability group listener for SQL Server on RHEL virtual machines in Azure - Linux virtual machines | Microsoft Docs -description: Learn about setting up an availability group listener in SQL Server on RHEL virtual machines in Azure -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.topic: tutorial -author: VanMSFT -ms.author: vanto -ms.date: 03/11/2020 ---- -# Tutorial: Configure an availability group listener for SQL Server on RHEL virtual machines in Azure -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!NOTE] -> The tutorial presented is in **public preview**. -> -> We use SQL Server 2017 with RHEL 7.6 in this tutorial, but it is possible to use SQL Server 2019 in RHEL 7 or RHEL 8 to configure high availability. The commands to configure availability group resources has changed in RHEL 8, and you'll want to look at the article [Create availability group resource](/sql/linux/sql-server-linux-availability-group-cluster-rhel#create-availability-group-resource) and RHEL 8 resources for more information on the correct commands. - -This tutorial will go over steps on how to create an availability group listener for your SQL Servers on RHEL virtual machines (VMs) in Azure. 
You will learn how to: - -> [!div class="checklist"] -> - Create a load balancer in the Azure portal -> - Configure the back-end pool for the load balancer -> - Create a probe for the load balancer -> - Set the load balancing rules -> - Create the load balancer resource in the cluster -> - Create the availability group listener -> - Test connecting to the listener -> - Testing a failover - -## Prerequisite - -Completed [Tutorial: Configure availability groups for SQL Server on RHEL virtual machines in Azure](rhel-high-availability-stonith-tutorial.md) - -## Create the load balancer in the Azure portal - -The following instructions take you through steps 1 through 4 from the [Create and configure the load balancer in the Azure portal](../windows/availability-group-load-balancer-portal-configure.md#create--configure-load-balancer) section of the [Load balancer - Azure portal](../windows/availability-group-load-balancer-portal-configure.md) article. - -### Create the load balancer - -1. In the Azure portal, open the resource group that contains the SQL Server virtual machines. - -2. In the resource group, click **Add**. - -3. Search for **load balancer** and then, in the search results, select **Load Balancer**, which is published by **Microsoft**. - -4. On the **Load Balancer** blade, click **Create**. - -5. In the **Create load balancer** dialog box, configure the load balancer as follows: - - | Setting | Value | - | --- | --- | - | **Name** |A text name representing the load balancer. For example, **sqlLB**. | - | **Type** |**Internal** | - | **Virtual network** |The default virtual network that was created should be named **VM1VNET**. | - | **Subnet** |Select the subnet that the SQL Server instances are in. The default should be **VM1Subnet**.| - | **IP address assignment** |**Static** | - | **Private IP address** |Use the `virtualip` IP address that was created in the cluster. | - | **Subscription** |Use the subscription that was used for your resource group. 
| - | **Resource group** |Select the resource group that the SQL Server instances are in. | - | **Location** |Select the Azure location that the SQL Server instances are in. | - -### Configure the back-end pool -Azure calls the back-end address pool *backend pool*. In this case, the back-end pool is the addresses of the three SQL Server instances in your availability group. - -1. In your resource group, click the load balancer that you created. - -2. On **Settings**, click **Backend pools**. - -3. On **Backend pools**, click **Add** to create a back-end address pool. - -4. On **Add backend pool**, under **Name**, type a name for the back-end pool. - -5. Under **Associated to**, select **Virtual machine**. - -6. Select each virtual machine in the environment, and associate the appropriate IP address to each selection. - - :::image type="content" source="media/rhel-high-availability-listener-tutorial/add-backend-pool.png" alt-text="Add backend pool"::: - -7. Click **Add**. - -### Create a probe - -The probe defines how Azure verifies which of the SQL Server instances currently owns the availability group listener. Azure probes the service based on the IP address on a port that you define when you create the probe. - -1. On the load balancer **Settings** blade, click **Health probes**. - -2. On the **Health probes** blade, click **Add**. - -3. Configure the probe on the **Add probe** blade. Use the following values to configure the probe: - - | Setting | Value | - | --- | --- | - | **Name** |A text name representing the probe. For example, **SQLAlwaysOnEndPointProbe**. | - | **Protocol** |**TCP** | - | **Port** |You can use any available port. For example, *59999*. | - | **Interval** |*5* | - | **Unhealthy threshold** |*2* | - -4. Click **OK**. - -5. 
Log in to all your virtual machines, and open the probe port using the following commands: - - ```bash - sudo firewall-cmd --zone=public --add-port=59999/tcp --permanent - sudo firewall-cmd --reload - ``` - -Azure creates the probe and then uses it to test which SQL Server instance has the listener for the availability group. - -### Set the load-balancing rules - -The load-balancing rules configure how the load balancer routes traffic to the SQL Server instances. For this load balancer, you enable direct server return because only one of the three SQL Server instances owns the availability group listener resource at a time. - -1. On the load balancer **Settings** blade, click **Load balancing rules**. - -2. On the **Load balancing rules** blade, click **Add**. - -3. On the **Add load balancing rules** blade, configure the load-balancing rule. Use the following settings: - - | Setting | Value | - | --- | --- | - | **Name** |A text name representing the load-balancing rules. For example, **SQLAlwaysOnEndPointListener**. | - | **Protocol** |**TCP** | - | **Port** |*1433* | - | **Backend port** |*1433*. This value is ignored because this rule uses **Floating IP (direct server return)**. | - | **Probe** |Use the name of the probe that you created for this load balancer. | - | **Session persistence** |**None** | - | **Idle timeout (minutes)** |*4* | - | **Floating IP (direct server return)** |**Enabled** | - - :::image type="content" source="media/rhel-high-availability-listener-tutorial/add-load-balancing-rule.png" alt-text="Add load balancing rule"::: - -4. Click **OK**. -5. Azure configures the load-balancing rule. Now the load balancer is configured to route traffic to the SQL Server instance that hosts the listener for the availability group. - -At this point, the resource group has a load balancer that connects to all SQL Server machines. 
The load balancer also contains an IP address for the SQL Server Always On availability group listener, so that any machine can respond to requests for the availability groups. - -## Create the load balancer resource in the cluster - -1. Log in to the primary virtual machine. We need to create the resource to enable the Azure load balancer probe port (59999 is used in our example). Run the following command: - - ```bash - sudo pcs resource create azure_load_balancer azure-lb port=59999 - ``` - -1. Create a group that contains the `virtualip` and `azure_load_balancer` resource: - - ```bash - sudo pcs resource group add virtualip_group azure_load_balancer virtualip - ``` - -### Add constraints - -1. A colocation constraint must be configured to ensure the Azure load balancer IP address and the AG resource are running on the same node. Run the following command: - - ```bash - sudo pcs constraint colocation add azure_load_balancer ag_cluster-master INFINITY with-rsc-role=Master - ``` -1. Create an ordering constraint to ensure that the AG resource is up and running before the Azure load balancer IP address. While the colocation constraint implies an ordering constraint, this enforces it. - - ```bash - sudo pcs constraint order promote ag_cluster-master then start azure_load_balancer - ``` - -1. 
To verify the constraints, run the following command: - - ```bash - sudo pcs constraint list --full - ``` - - You should see the following output: - - ```output - Location Constraints: - Ordering Constraints: - promote ag_cluster-master then start virtualip (kind:Mandatory) (id:order-ag_cluster-master-virtualip-mandatory) - promote ag_cluster-master then start azure_load_balancer (kind:Mandatory) (id:order-ag_cluster-master-azure_load_balancer-mandatory) - Colocation Constraints: - virtualip with ag_cluster-master (score:INFINITY) (with-rsc-role:Master) (id:colocation-virtualip-ag_cluster-master-INFINITY) - azure_load_balancer with ag_cluster-master (score:INFINITY) (with-rsc-role:Master) (id:colocation-azure_load_balancer-ag_cluster-master-INFINITY) - Ticket Constraints: - ``` - -## Create the availability group listener - -1. On the primary node, run the following command in SQLCMD or SSMS: - - - Replace the IP address used below with the `virtualip` IP address. - - ```sql - ALTER AVAILABILITY - GROUP [ag1] ADD LISTENER 'ag1-listener' ( - WITH IP(('10.0.0.7' ,'255.255.255.0')) - ,PORT = 1433 - ); - GO - ``` - -1. Log in to each VM node. Use the following command to open the hosts file and set up host name resolution for the `ag1-listener` on each machine. - - ``` - sudo vi /etc/hosts - ``` - - In the **vi** editor, enter `i` to insert text, and on a blank line, add the IP of the `ag1-listener`. Then add `ag1-listener` after a space next to the IP. - - ```output - ag1-listener - ``` - - To exit the **vi** editor, first hit the **Esc** key, and then enter the command `:wq` to write the file and quit. Do this on each node. - -## Test the listener and a failover - -### Test logging in to SQL Server using the availability group listener - -1. Use SQLCMD to log in to the primary node of SQL Server using the availability group listener name: - - - Use a login that was previously created and replace `` with the correct password. 
The example below uses the `sa` login that was created with the SQL Server. - - ```bash - sqlcmd -S ag1-listener -U sa -P - ``` - -1. Check the name of the server that you are connected to. Run the following command in SQLCMD: - - ```sql - SELECT @@SERVERNAME - ``` - - Your output should show the current primary node. This should be `VM1` if you have never tested a failover. - - Exit the SQL Server session by typing the `exit` command. - -### Test a failover - -1. Run the following command to manually fail over the primary replica to `` or another replica. Replace `` with the value of your server name. - - ```bash - sudo pcs resource move ag_cluster-master --master - ``` - -1. If you check your constraints, you'll see that another constraint was added because of the manual failover: - - ```bash - sudo pcs constraint list --full - ``` - - You will see that a constraint with ID `cli-prefer-ag_cluster-master` was added. - -1. Remove the constraint with ID `cli-prefer-ag_cluster-master` using the following command: - - ```bash - sudo pcs constraint remove cli-prefer-ag_cluster-master - ``` - -1. Check your cluster resources using the command `sudo pcs resource`, and you should see that the primary instance is now ``. - - > [!NOTE] - > This article contains references to the term *slave*, a term that Microsoft no longer uses. When the term is removed from the software, we’ll remove it from this article. - - - ```output - [@ ~]$ sudo pcs resource - Master/Slave Set: ag_cluster-master [ag_cluster] - Masters: [ ] - Slaves: [ ] - Resource Group: virtualip_group - azure_load_balancer (ocf::heartbeat:azure-lb): Started - virtualip (ocf::heartbeat:IPaddr2): Started - ``` - -1. Use SQLCMD to log in to your primary replica using the listener name: - - - Use a login that was previously created and replace `` with the correct password. The example below uses the `sa` login that was created with the SQL Server. - - ```bash - sqlcmd -S ag1-listener -U sa -P - ``` - -1. 
Check the server that you are connected to. Run the following command in SQLCMD: - - ```sql - SELECT @@SERVERNAME - ``` - - You should see that you are now connected to the VM that you failed-over to. - -## Next steps - -For more information on load balancers in Azure, see: - -> [!div class="nextstepaction"] -> [Configure a load balance for an availability group on SQL Server on Azure VMs](../windows/availability-group-load-balancer-portal-configure.md) diff --git a/articles/azure-sql/virtual-machines/linux/rhel-high-availability-stonith-tutorial.md b/articles/azure-sql/virtual-machines/linux/rhel-high-availability-stonith-tutorial.md deleted file mode 100644 index 015b4de7b8b46..0000000000000 --- a/articles/azure-sql/virtual-machines/linux/rhel-high-availability-stonith-tutorial.md +++ /dev/null @@ -1,1248 +0,0 @@ ---- -title: Configure availability groups for SQL Server on RHEL virtual machines in Azure - Linux virtual machines | Microsoft Docs -description: Learn about setting up high availability in an RHEL cluster environment and set up STONITH -ms.service: virtual-machines-sql -ms.subservice: -ms.topic: tutorial -author: VanMSFT -ms.author: vanto -ms.date: 06/25/2020 ---- -# Tutorial: Configure availability groups for SQL Server on RHEL virtual machines in Azure -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!NOTE] -> We use SQL Server 2017 with RHEL 7.6 in this tutorial, but it is possible to use SQL Server 2019 in RHEL 7 or RHEL 8 to configure high availability. The commands to configure the pacemake cluster and availability group resources has changed in RHEL 8, and you'll want to look at the article [Create availability group resource](/sql/linux/sql-server-linux-availability-group-cluster-rhel#create-availability-group-resource) and RHEL 8 resources for more information on the correct commands. 
- -In this tutorial, you learn how to: - -> [!div class="checklist"] -> - Create a new resource group, availability set, and Linux virtual machines (VMs) -> - Enable high availability (HA) -> - Create a Pacemaker cluster -> - Configure a fencing agent by creating a STONITH device -> - Install SQL Server and mssql-tools on RHEL -> - Configure SQL Server Always On availability group -> - Configure availability group (AG) resources in the Pacemaker cluster -> - Test a failover and the fencing agent - -This tutorial will use the Azure CLI to deploy resources in Azure. - -If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. - -[!INCLUDE [azure-cli-prepare-your-environment.md](../../../../includes/azure-cli-prepare-your-environment.md)] - -- This article requires version 2.0.30 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. - -## Create a resource group - -If you have more than one subscription, [set the subscription](/cli/azure/manage-azure-subscriptions-azure-cli) that you want deploy these resources to. - -Use the following command to create a resource group `` in a region. Replace `` with a name of your choosing. We're using `East US 2` for this tutorial. For more information, see the following [Quickstart](../../../application-gateway/quick-create-cli.md). - -```azurecli-interactive -az group create --name --location eastus2 -``` - -## Create an availability set - -The next step is to create an availability set. Run the following command in Azure Cloud Shell, and replace `` with your resource group name. Choose a name for ``. 
- -```azurecli-interactive -az vm availability-set create \ - --resource-group \ - --name \ - --platform-fault-domain-count 2 \ - --platform-update-domain-count 2 -``` - -You should get the following results once the command completes: - -```output -{ - "id": "/subscriptions//resourceGroups//providers/Microsoft.Compute/availabilitySets/", - "location": "eastus2", - "name": "", - "platformFaultDomainCount": 2, - "platformUpdateDomainCount": 2, - "proximityPlacementGroup": null, - "resourceGroup": "", - "sku": { - "capacity": null, - "name": "Aligned", - "tier": null - }, - "statuses": null, - "tags": {}, - "type": "Microsoft.Compute/availabilitySets", - "virtualMachines": [] -} -``` - -## Create RHEL VMs inside the availability set - -> [!WARNING] -> If you choose a Pay-As-You-Go (PAYG) RHEL image, and configure high availability (HA), you may be required to register your subscription. This can cause you to pay twice for the subscription, as you will be charged for the Microsoft Azure RHEL subscription for the VM, and a subscription to Red Hat. For more information, see https://access.redhat.com/solutions/2458541. -> -> To avoid being "double billed", use a RHEL HA image when creating the Azure VM. Images offered as RHEL-HA images are also PAYG images with HA repo pre-enabled. - -1. 
Get a list of virtual machine images that offer RHEL with HA: - - ```azurecli-interactive - az vm image list --all --offer "RHEL-HA" - ``` - - You should see the following results: - - ```output - [ - { - "offer": "RHEL-HA", - "publisher": "RedHat", - "sku": "7.4", - "urn": "RedHat:RHEL-HA:7.4:7.4.2019062021", - "version": "7.4.2019062021" - }, - { - "offer": "RHEL-HA", - "publisher": "RedHat", - "sku": "7.5", - "urn": "RedHat:RHEL-HA:7.5:7.5.2019062021", - "version": "7.5.2019062021" - }, - { - "offer": "RHEL-HA", - "publisher": "RedHat", - "sku": "7.6", - "urn": "RedHat:RHEL-HA:7.6:7.6.2019062019", - "version": "7.6.2019062019" - }, - { - "offer": "RHEL-HA", - "publisher": "RedHat", - "sku": "8.0", - "urn": "RedHat:RHEL-HA:8.0:8.0.2020021914", - "version": "8.0.2020021914" - }, - { - "offer": "RHEL-HA", - "publisher": "RedHat", - "sku": "8.1", - "urn": "RedHat:RHEL-HA:8.1:8.1.2020021914", - "version": "8.1.2020021914" - }, - { - "offer": "RHEL-HA", - "publisher": "RedHat", - "sku": "80-gen2", - "urn": "RedHat:RHEL-HA:80-gen2:8.0.2020021915", - "version": "8.0.2020021915" - }, - { - "offer": "RHEL-HA", - "publisher": "RedHat", - "sku": "81_gen2", - "urn": "RedHat:RHEL-HA:81_gen2:8.1.2020021915", - "version": "8.1.2020021915" - } - ] - ``` - - For this tutorial, we're choosing the image `RedHat:RHEL-HA:7.6:7.6.2019062019` for the RHEL 7 example and choosing `RedHat:RHEL-HA:8.1:8.1.2020021914` for the RHEL 8 example. - - You can also choose SQL Server 2019 pre-installed on RHEL8-HA images. 
To get the list of these images, run the following command: - - ```azurecli-interactive - az vm image list --all --offer "sql2019-rhel8" - ``` - - You should see the following results: - - ```output - [ - { - "offer": "sql2019-rhel8", - "publisher": "MicrosoftSQLServer", - "sku": "enterprise", - "urn": "MicrosoftSQLServer:sql2019-rhel8:enterprise:15.0.200317", - "version": "15.0.200317" - }, - } - "offer": "sql2019-rhel8", - "publisher": "MicrosoftSQLServer", - "sku": "enterprise", - "urn": "MicrosoftSQLServer:sql2019-rhel8:enterprise:15.0.200512", - "version": "15.0.200512" - }, - { - "offer": "sql2019-rhel8", - "publisher": "MicrosoftSQLServer", - "sku": "sqldev", - "urn": "MicrosoftSQLServer:sql2019-rhel8:sqldev:15.0.200317", - "version": "15.0.200317" - }, - { - "offer": "sql2019-rhel8", - "publisher": "MicrosoftSQLServer", - "sku": "sqldev", - "urn": "MicrosoftSQLServer:sql2019-rhel8:sqldev:15.0.200512", - "version": "15.0.200512" - }, - { - "offer": "sql2019-rhel8", - "publisher": "MicrosoftSQLServer", - "sku": "standard", - "urn": "MicrosoftSQLServer:sql2019-rhel8:standard:15.0.200317", - "version": "15.0.200317" - }, - { - "offer": "sql2019-rhel8", - "publisher": "MicrosoftSQLServer", - "sku": "standard", - "urn": "MicrosoftSQLServer:sql2019-rhel8:standard:15.0.200512", - "version": "15.0.200512" - } - ] - ``` - - If you do use one of the above images to create the virtual machines, it has SQL Server 2019 pre-installed. Skip the [Install SQL Server and mssql-tools](#install-sql-server-and-mssql-tools) section as described in this article. - - - > [!IMPORTANT] - > Machine names must be less than 15 characters to set up availability group. Username cannot contain upper case characters, and passwords must have more than 12 characters. - -1. We want to create 3 VMs in the availability set. 
Replace the following in the command below: - - - `` - - `` - - `` - - `` - An example would be "Standard_D16_v3" - - `` - - `` - - ```azurecli-interactive - for i in `seq 1 3`; do - az vm create \ - --resource-group \ - --name $i \ - --availability-set \ - --size "" \ - --image "RedHat:RHEL-HA:7.6:7.6.2019062019" \ - --admin-username "" \ - --admin-password "" \ - --authentication-type all \ - --generate-ssh-keys - done - ``` - -The above command creates the VMs, and creates a default VNet for those VMs. For more information on the different configurations, see the [az vm create](/cli/azure/vm) article. - -You should get results similar to the following once the command completes for each VM: - -```output -{ - "fqdns": "", - "id": "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/", - "location": "eastus2", - "macAddress": "", - "powerState": "VM running", - "privateIpAddress": "", - "publicIpAddress": "", - "resourceGroup": "", - "zones": "" -} -``` - -> [!IMPORTANT] -> The default image that is created with the above command creates a 32GB OS disk by default. You could potentially run out of space with this default installation. You can use the following parameter added to the above `az vm create` command to create an OS disk with 128GB as an example: `--os-disk-size-gb 128`. -> -> You can then [configure Logical Volume Manager (LVM)](/previous-versions/azure/virtual-machines/linux/configure-lvm) if you need to expand appropriate folder volumes to accomodate your installation. - -### Test connection to the created VMs - -Connect to VM1 or the other VMs using the following command in Azure Cloud Shell. If you are unable to find your VM IPs, follow this [Quickstart on Azure Cloud Shell](../../../cloud-shell/quickstart.md#ssh-into-your-linux-vm). 
- -```azurecli-interactive -ssh @publicipaddress -``` - -If the connection is successful, you should see the following output representing the Linux terminal: - -```output -[@ ~]$ -``` - -Type `exit` to leave the SSH session. - -## Enable high availability - -> [!IMPORTANT] -> In order to complete this portion of the tutorial, you must have a subscription for RHEL and the High Availability Add-on. If you are using an image recommended in the previous section, you do not have to register another subscription. - -Connect to each VM node and follow the guide below to enable HA. For more information, see [enable high availability subscription for RHEL](/sql/linux/sql-server-linux-availability-group-cluster-rhel#enable-the-high-availability-subscription-for-rhel). - -> [!TIP] -> It will be easier if you open an SSH session to each of the VMs simultaneously as the same commands will need to be run on each VM throughout the article. -> -> If you are copying and pasting multiple `sudo` commands, and are prompted for a password, the additional commands will not run. Run each command separately. - - -1. Run the following commands on each VM to open the Pacemaker firewall ports: - - ```bash - sudo firewall-cmd --permanent --add-service=high-availability - sudo firewall-cmd --reload - ``` - -1. Update and install Pacemaker packages on all nodes using the following commands: - - > [!NOTE] - > **nmap** is installed as part of this command block as a tool to find available IP addresses in your network. You do not have to install **nmap**, but it will be useful later in this tutorial. - - ```bash - sudo yum update -y - sudo yum install -y pacemaker pcs fence-agents-all resource-agents fence-agents-azure-arm nmap - sudo reboot - ``` - -1. Set the password for the default user that is created when installing Pacemaker packages. Use the same password on all nodes. - - ```bash - sudo passwd hacluster - ``` - -1. 
Use the following command to open the hosts file and set up host name resolution. For more information, see [Configure AG](/sql/linux/sql-server-linux-availability-group-configure-ha#prerequisites) on configuring the hosts file. - - ``` - sudo vi /etc/hosts - ``` - - In the **vi** editor, enter `i` to insert text, and on a blank line, add the **Private IP** of the corresponding VM. Then add the VM name after a space next to the IP. Each line should have a separate entry. - - ```output - - - - ``` - - > [!IMPORTANT] - > We recommend that you use your **Private IP** address above. Using the Public IP address in this configuration will cause the setup to fail and we don't recommend exposing your VM to external networks. - - To exit the **vi** editor, first hit the **Esc** key, and then enter the command `:wq` to write the file and quit. - -## Create the Pacemaker cluster - -In this section, we will enable and start the pcsd service, and then configure the cluster. For SQL Server on Linux, the cluster resources are not created automatically. We'll need to enable and create the pacemaker resources manually. For more information, see the article on [configuring a failover cluster instance for RHEL](/sql/linux/sql-server-linux-shared-disk-cluster-red-hat-7-configure#install-and-configure-pacemaker-on-each-cluster-node) - -### Enable and start pcsd service and Pacemaker - -1. Run the commands on all nodes. These commands allow the nodes to rejoin the cluster after reboot. - - ```bash - sudo systemctl enable pcsd - sudo systemctl start pcsd - sudo systemctl enable pacemaker - ``` - -1. Remove any existing cluster configuration from all nodes. Run the following command: - - ```bash - sudo pcs cluster destroy - sudo systemctl enable pacemaker - ``` - -1. On the primary node, run the following commands to set up the cluster. - - - When running the `pcs cluster auth` command to authenticate the cluster nodes, you will be prompted for a password. 
Enter the password for the **hacluster** user created earlier. - - **RHEL7** - - ```bash - sudo pcs cluster auth -u hacluster - sudo pcs cluster setup --name az-hacluster --token 30000 - sudo pcs cluster start --all - sudo pcs cluster enable --all - ``` - - **RHEL8** - - For RHEL 8, you will need to authenticate the nodes separately. Manually enter in the username and password for **hacluster** when prompted. - - ```bash - sudo pcs host auth - sudo pcs cluster setup - sudo pcs cluster start --all - sudo pcs cluster enable --all - ``` - -1. Run the following command to check that all nodes are online. - - ```bash - sudo pcs status - ``` - - **RHEL 7** - - If all nodes are online, you will see an output similar to the following: - - ```output - Cluster name: az-hacluster - - WARNINGS: - No stonith devices and stonith-enabled is not false - - Stack: corosync - Current DC: (version 1.1.19-8.el7_6.5-c3c624ea3d) - partition with quorum - Last updated: Fri Aug 23 18:27:57 2019 - Last change: Fri Aug 23 18:27:56 2019 by hacluster via crmd on - - 3 nodes configured - 0 resources configured - - Online: [ ] - - No resources - - - Daemon Status: - corosync: active/enabled - pacemaker: active/enabled - pcsd: active/enabled - ``` - - **RHEL 8** - - ```output - Cluster name: az-hacluster - - WARNINGS: - No stonith devices and stonith-enabled is not false - - Cluster Summary: - * Stack: corosync - * Current DC: (version 1.1.19-8.el7_6.5-c3c624ea3d) - partition with quorum - * Last updated: Fri Aug 23 18:27:57 2019 - * Last change: Fri Aug 23 18:27:56 2019 by hacluster via crmd on - * 3 nodes configured - * 0 resource instances configured - - Node List: - * Online: [ ] - - Full List of Resources: - * No resources - - Daemon Status: - corosync: active/enabled - pacemaker: active/enabled - pcsd: active/enabled - - ``` - -1. Set expected votes in the live cluster to 3. This command only affects the live cluster, and does not change the configuration files. 
- - On all nodes, set the expected votes with the following command: - - ```bash - sudo pcs quorum expected-votes 3 - ``` - -## Configure the fencing agent - -A STONITH device provides a fencing agent. The below instructions are modified for this tutorial. For more information, see [create a STONITH device](../../../virtual-machines/workloads/sap/high-availability-guide-rhel-pacemaker.md#create-stonith-device). - -[Check the version of the Azure Fence Agent to ensure that it's updated](../../../virtual-machines/workloads/sap/high-availability-guide-rhel-pacemaker.md#cluster-installation). Use the following command: - -```bash -sudo yum info fence-agents-azure-arm -``` - -You should see a similar output to the below example. - -```output -Loaded plugins: langpacks, product-id, search-disabled-repos, subscription-manager -Installed Packages -Name : fence-agents-azure-arm -Arch : x86_64 -Version : 4.2.1 -Release : 11.el7_6.8 -Size : 28 k -Repo : installed -From repo : rhel-ha-for-rhel-7-server-eus-rhui-rpms -Summary : Fence agent for Azure Resource Manager -URL : https://github.com/ClusterLabs/fence-agents -License : GPLv2+ and LGPLv2+ -Description : The fence-agents-azure-arm package contains a fence agent for Azure instances. -``` - -### Register a new application in Azure Active Directory - - 1. Go to https://portal.azure.com - 2. Open the [Azure Active Directory blade](https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/Properties). Go to Properties and write down the Directory ID. This is the `tenant ID` - 3. Click [**App registrations**](https://portal.azure.com/#blade/Microsoft_AAD_RegisteredApps/ApplicationsListBlade) - 4. Click **New registration** - 5. Enter a **Name** like `-app`, select **Accounts in this organization directory only** - 6. Select Application Type **Web**, enter a sign-on URL (for example http://localhost) and click Add. The sign-on URL is not used and can be any valid URL. Once done, Click **Register** - 7. 
Select **Certificates and secrets** for your new App registration, then click **New client secret** - 8. Enter a description for a new key (client secret), select **Never expires** and click **Add** - 9. Write down the value of the secret. It is used as the password for the Service Principal -10. Select **Overview**. Write down the Application ID. It is used as the username (login ID in the steps below) of the Service Principal - -### Create a custom role for the fence agent - -Follow the tutorial to [Create an Azure custom role using Azure CLI](../../../role-based-access-control/tutorial-custom-role-cli.md#create-a-custom-role). - -Your json file should look similar to the following: - -- Replace `` with a name of your choice. This is to avoid any duplication when creating this role definition. -- Replace `` with your Azure Subscription ID. - -```json -{ - "Name": "Linux Fence Agent Role-", - "Id": null, - "IsCustom": true, - "Description": "Allows to power-off and start virtual machines", - "Actions": [ - "Microsoft.Compute/*/read", - "Microsoft.Compute/virtualMachines/powerOff/action", - "Microsoft.Compute/virtualMachines/start/action" - ], - "NotActions": [ - ], - "AssignableScopes": [ - "/subscriptions/" - ] -} -``` - -To add the role, run the following command: - -- Replace `` with the name of the file. -- If you are executing the command from a path other than the folder that the file is saved to, include the folder path of the file in the command. 
- -```azurecli-interactive -az role definition create --role-definition ".json" -``` - -You should see the following output: - -```output -{ - "assignableScopes": [ - "/subscriptions/" - ], - "description": "Allows to power-off and start virtual machines", - "id": "/subscriptions//providers/Microsoft.Authorization/roleDefinitions/", - "name": "", - "permissions": [ - { - "actions": [ - "Microsoft.Compute/*/read", - "Microsoft.Compute/virtualMachines/powerOff/action", - "Microsoft.Compute/virtualMachines/start/action" - ], - "dataActions": [], - "notActions": [], - "notDataActions": [] - } - ], - "roleName": "Linux Fence Agent Role-", - "roleType": "CustomRole", - "type": "Microsoft.Authorization/roleDefinitions" -} -``` - -### Assign the custom role to the Service Principal - -Assign the custom role `Linux Fence Agent Role-` that was created in the last step to the Service Principal. Do not use the Owner role anymore! - -1. Go to https://portal.azure.com -2. Open the [All resources blade](https://portal.azure.com/#blade/HubsExtension/BrowseAll) -3. Select the virtual machine of the first cluster node -4. Click **Access control (IAM)** -5. Click **Add a role assignment** -6. Select the role `Linux Fence Agent Role-` from the **Role** list -7. In the **Select** list, enter the name of the application you created above, `-app` -8. Click **Save** -9. Repeat the steps above for all cluster nodes. - -### Create the STONITH devices - -Run the following commands on node 1: - -- Replace the `` with the ID value from your application registration. -- Replace the `` with the value from the client secret. -- Replace the `` with the resource group from your subscription used for this tutorial. -- Replace the `` and the `` from your Azure Subscription. 
- -```bash -sudo pcs property set stonith-timeout=900 -sudo pcs stonith create rsc_st_azure fence_azure_arm login="" passwd="" resourceGroup="" tenantId="" subscriptionId="" power_timeout=240 pcmk_reboot_timeout=900 -``` - -Since we already added a rule to our firewall to allow the HA service (`--add-service=high-availability`), there's no need to open the following firewall ports on all nodes: 2224, 3121, 21064, 5405. However, if you are experiencing any type of connection issues with HA, use the following command to open these ports that are associated with HA. - -> [!TIP] -> You can optionally add all ports in this tutorial at once to save some time. The ports that need to be opened are explained in their relative sections below. If you would like to add all ports now, add the additional ports: 1433 and 5022. - -```bash -sudo firewall-cmd --zone=public --add-port=2224/tcp --add-port=3121/tcp --add-port=21064/tcp --add-port=5405/tcp --permanent -sudo firewall-cmd --reload -``` - -## Install SQL Server and mssql-tools - -> [!NOTE] -> If you have created the VMs with the SQL Server 2019 pre-installed on RHEL8-HA then you can skip the below steps to install SQL Server and mssql-tools and start the **Configure an Availability Group** section after you setup the sa password on all the VMs by running the command `sudo /opt/mssql/bin/mssql-conf set-sa-password` on all VMs. - -Use the below section to install SQL Server and mssql-tools on the VMs. You can choose one of the below samples to install SQL Server 2017 on RHEL 7 or SQL Server 2019 on RHEL 8. Perform each of these actions on all nodes. For more information, see [Install SQL Server on a Red Hat VM](/sql/linux/quickstart-install-connect-red-hat). 
- - -### Installing SQL Server on the VMs - -The following commands are used to install SQL Server: - -**RHEL 7 with SQL Server 2017** - -```bash -sudo curl -o /etc/yum.repos.d/mssql-server.repo https://packages.microsoft.com/config/rhel/7/mssql-server-2017.repo -sudo yum install -y mssql-server -sudo /opt/mssql/bin/mssql-conf setup -sudo yum install mssql-server-ha -``` - -**RHEL 8 with SQL Server 2019** - -```bash -sudo curl -o /etc/yum.repos.d/mssql-server.repo https://packages.microsoft.com/config/rhel/8/mssql-server-2019.repo -sudo yum install -y mssql-server -sudo /opt/mssql/bin/mssql-conf setup -sudo yum install mssql-server-ha -``` -### Open firewall port 1433 for remote connections - -You'll need to open port 1433 on the VM in order to connect remotely. Use the following commands to open port 1433 in the firewall of each VM: - -```bash -sudo firewall-cmd --zone=public --add-port=1433/tcp --permanent -sudo firewall-cmd --reload -``` - -### Installing SQL Server command-line tools - -The following commands are used to install SQL Server command-line tools. For more information, see [install the SQL Server command-line tools](/sql/linux/quickstart-install-connect-red-hat#tools). - -**RHEL 7** - -```bash -sudo curl -o /etc/yum.repos.d/msprod.repo https://packages.microsoft.com/config/rhel/7/prod.repo -sudo yum install -y mssql-tools unixODBC-devel -``` - -**RHEL 8** - -```bash -sudo curl -o /etc/yum.repos.d/msprod.repo https://packages.microsoft.com/config/rhel/8/prod.repo -sudo yum install -y mssql-tools unixODBC-devel -``` - -> [!NOTE] -> For convenience, add /opt/mssql-tools/bin/ to your PATH environment variable. This enables you to run the tools without specifying the full path. Run the following commands to modify the PATH for both login sessions and interactive/non-login sessions:

    - `echo 'export PATH="$PATH:/opt/mssql-tools/bin"' >> ~/.bash_profile`
    - `echo 'export PATH="$PATH:/opt/mssql-tools/bin"' >> ~/.bashrc`
    -`source ~/.bashrc` - - -### Check the status of the SQL Server - -Once you are done with the configuration, you can check the status of SQL Server and verify that it is running: - -```bash -systemctl status mssql-server --no-pager -``` - -You should see the following output: - -```output -● mssql-server.service - Microsoft SQL Server Database Engine - Loaded: loaded (/usr/lib/systemd/system/mssql-server.service; enabled; vendor preset: disabled) - Active: active (running) since Thu 2019-12-05 17:30:55 UTC; 20min ago - Docs: https://docs.microsoft.com/en-us/sql/linux - Main PID: 11612 (sqlservr) - CGroup: /system.slice/mssql-server.service - ├─11612 /opt/mssql/bin/sqlservr - └─11640 /opt/mssql/bin/sqlservr -``` - -## Configure an availability group - -Use the following steps to configure a SQL Server Always On availability group for your VMs. For more information, see [Configure SQL Server Always On availability groups for high availability on Linux](/sql/linux/sql-server-linux-availability-group-configure-ha) - -### Enable Always On availability groups and restart mssql-server - -Enable Always On availability groups on each node that hosts a SQL Server instance. Then restart mssql-server. Run the following script: - -``` -sudo /opt/mssql/bin/mssql-conf set hadr.hadrenabled 1 -sudo systemctl restart mssql-server -``` - -### Create a certificate - -We currently don't support AD authentication to the AG endpoint. Therefore, we must use a certificate for AG endpoint encryption. - -1. Connect to **all nodes** using SQL Server Management Studio (SSMS) or SQL CMD. Run the following commands to enable an AlwaysOn_health session and create a master key: - - > [!IMPORTANT] - > If you are connecting remotely to your SQL Server instance, you will need to have port 1433 open on your firewall. You'll also need to allow inbound connections to port 1433 in your NSG for each VM. 
For more information, see [Create a security rule](../../../virtual-network/manage-network-security-group.md#create-a-security-rule) for creating an inbound security rule. - - - Replace the `` with your own password. - - - ```sql - ALTER EVENT SESSION AlwaysOn_health ON SERVER WITH (STARTUP_STATE=ON); - GO - CREATE MASTER KEY ENCRYPTION BY PASSWORD = ''; - ``` - - -1. Connect to the primary replica using SSMS or SQL CMD. The below commands will create a certificate at `/var/opt/mssql/data/dbm_certificate.cer` and a private key at `/var/opt/mssql/data/dbm_certificate.pvk` on your primary SQL Server replica: - - - Replace the `` with your own password. - - ```sql - CREATE CERTIFICATE dbm_certificate WITH SUBJECT = 'dbm'; - GO - - BACKUP CERTIFICATE dbm_certificate - TO FILE = '/var/opt/mssql/data/dbm_certificate.cer' - WITH PRIVATE KEY ( - FILE = '/var/opt/mssql/data/dbm_certificate.pvk', - ENCRYPTION BY PASSWORD = '' - ); - GO - ``` - -Exit the SQL CMD session by running the `exit` command, and return back to your SSH session. - -### Copy the certificate to the secondary replicas and create the certificates on the server - -1. Copy the two files that were created to the same location on all servers that will host availability replicas. - - On the primary server, run the following `scp` command to copy the certificate to the target servers: - - - Replace `` and `` with the user name and target VM name that you are using. - - Run this command for all secondary replicas. - - > [!NOTE] - > You don't have to run `sudo -i`, which gives you the root environment. You could just run the `sudo` command in front of each command as we previously did in this tutorial. - - ```bash - # The below command allows you to run commands in the root environment - sudo -i - ``` - - ```bash - scp /var/opt/mssql/data/dbm_certificate.* @:/home/ - ``` - -1. On the target server, run the following command: - - - Replace `` with your user name. 
- - The `mv` command moves the files or directory from one place to another. - - The `chown` command is used to change the owner and group of files, directories, or links. - - Run these commands for all secondary replicas. - - ```bash - sudo -i - mv /home//dbm_certificate.* /var/opt/mssql/data/ - cd /var/opt/mssql/data - chown mssql:mssql dbm_certificate.* - ``` - -1. The following Transact-SQL script creates a certificate from the backup that you created on the primary SQL Server replica. Update the script with strong passwords. The decryption password is the same password that you used to create the .pvk file in the previous step. To create the certificate, run the following script using SQL CMD or SSMS on all secondary servers: - - ```sql - CREATE CERTIFICATE dbm_certificate - FROM FILE = '/var/opt/mssql/data/dbm_certificate.cer' - WITH PRIVATE KEY ( - FILE = '/var/opt/mssql/data/dbm_certificate.pvk', - DECRYPTION BY PASSWORD = '' - ); - GO - ``` - -### Create the database mirroring endpoints on all replicas - -Run the following script on all SQL Server instances using SQL CMD or SSMS: - -```sql -CREATE ENDPOINT [Hadr_endpoint] - AS TCP (LISTENER_PORT = 5022) - FOR DATABASE_MIRRORING ( - ROLE = ALL, - AUTHENTICATION = CERTIFICATE dbm_certificate, -ENCRYPTION = REQUIRED ALGORITHM AES -); -GO - -ALTER ENDPOINT [Hadr_endpoint] STATE = STARTED; -GO -``` - -### Create the availability group - -Connect to the SQL Server instance that hosts the primary replica using SQL CMD or SSMS. Run the following command to create the availability group: - -- Replace `ag1` with your desired Availability Group name. -- Replace the ``, ``, and `` values with the names of the SQL Server instances that host the replicas. 
- -```sql -CREATE AVAILABILITY GROUP [ag1] - WITH (DB_FAILOVER = ON, CLUSTER_TYPE = EXTERNAL) - FOR REPLICA ON - N'' - WITH ( - ENDPOINT_URL = N'tcp://:5022', - AVAILABILITY_MODE = SYNCHRONOUS_COMMIT, - FAILOVER_MODE = EXTERNAL, - SEEDING_MODE = AUTOMATIC - ), - N'' - WITH ( - ENDPOINT_URL = N'tcp://:5022', - AVAILABILITY_MODE = SYNCHRONOUS_COMMIT, - FAILOVER_MODE = EXTERNAL, - SEEDING_MODE = AUTOMATIC - ), - N'' - WITH( - ENDPOINT_URL = N'tcp://:5022', - AVAILABILITY_MODE = SYNCHRONOUS_COMMIT, - FAILOVER_MODE = EXTERNAL, - SEEDING_MODE = AUTOMATIC - ); -GO - -ALTER AVAILABILITY GROUP [ag1] GRANT CREATE ANY DATABASE; -GO -``` - -### Create a SQL Server login for Pacemaker - -On all SQL Server instances, create a SQL Server login for Pacemaker. The following Transact-SQL creates a login. - -- Replace `` with your own complex password. - -```sql -USE [master] -GO - -CREATE LOGIN [pacemakerLogin] with PASSWORD= N''; -GO - -ALTER SERVER ROLE [sysadmin] ADD MEMBER [pacemakerLogin]; -GO -``` - -On all SQL Server instances, save the credentials used for the SQL Server login. - -1. Create the file: - - ```bash - sudo vi /var/opt/mssql/secrets/passwd - ``` - -1. Add the following 2 lines to the file: - - ```bash - pacemakerLogin - - ``` - - To exit the **vi** editor, first hit the **Esc** key, and then enter the command `:wq` to write the file and quit. - -1. Make the file only readable by root: - - ```bash - sudo chown root:root /var/opt/mssql/secrets/passwd - sudo chmod 400 /var/opt/mssql/secrets/passwd - ``` - -### Join secondary replicas to the availability group - -1. In order to join the secondary replicas to the AG, you'll need to open port 5022 on the firewall for all servers. Run the following command in your SSH session: - - ```bash - sudo firewall-cmd --zone=public --add-port=5022/tcp --permanent - sudo firewall-cmd --reload - ``` - -1. 
On your secondary replicas, run the following commands to join them to the AG: - - ```sql - ALTER AVAILABILITY GROUP [ag1] JOIN WITH (CLUSTER_TYPE = EXTERNAL); - GO - - ALTER AVAILABILITY GROUP [ag1] GRANT CREATE ANY DATABASE; - GO - ``` - -1. Run the following Transact-SQL script on the primary replica and each secondary replica: - - ```sql - GRANT ALTER, CONTROL, VIEW DEFINITION ON AVAILABILITY GROUP::ag1 TO pacemakerLogin; - GO - - GRANT VIEW SERVER STATE TO pacemakerLogin; - GO - ``` - -1. Once the secondary replicas are joined, you can see them in SSMS Object Explorer by expanding the **Always On High Availability** node: - - ![Screenshot shows the primary and secondary availability replicas.](./media/rhel-high-availability-stonith-tutorial/availability-group-joined.png) - -### Add a database to the availability group - -We will follow the [configure availability group article on adding a database](/sql/linux/sql-server-linux-availability-group-configure-ha#add-a-database-to-the-availability-group). - -The following Transact-SQL commands are used in this step. Run these commands on the primary replica: - -```sql -CREATE DATABASE [db1]; -- creates a database named db1 -GO - -ALTER DATABASE [db1] SET RECOVERY FULL; -- set the database in full recovery mode -GO - -BACKUP DATABASE [db1] -- backs up the database to disk - TO DISK = N'/var/opt/mssql/data/db1.bak'; -GO - -ALTER AVAILABILITY GROUP [ag1] ADD DATABASE [db1]; -- adds the database db1 to the AG -GO -``` - -### Verify that the database is created on the secondary servers - -On each secondary SQL Server replica, run the following query to see if the db1 database was created and is in a SYNCHRONIZED state: - -``` -SELECT * FROM sys.databases WHERE name = 'db1'; -GO -SELECT DB_NAME(database_id) AS 'database', synchronization_state_desc FROM sys.dm_hadr_database_replica_states; -``` - -If the `synchronization_state_desc` lists SYNCHRONIZED for `db1`, this means the replicas are synchronized. 
The secondaries are showing `db1` in the primary replica. - -## Create availability group resources in the Pacemaker cluster - -We will be following the guide to [create the availability group resources in the Pacemaker cluster](/sql/linux/sql-server-linux-create-availability-group#create-the-availability-group-resources-in-the-pacemaker-cluster-external-only). - -> [!NOTE] -> This article contains references to the term *slave*, a term that Microsoft no longer uses. When the term is removed from the software, we’ll remove it from this article. - -### Create the AG cluster resource - -1. Use one of the following commands based on the environment chosen earlier to create the resource `ag_cluster` in the availability group `ag1`. - - **RHEL 7** - - ```bash - sudo pcs resource create ag_cluster ocf:mssql:ag ag_name=ag1 meta failure-timeout=30s master notify=true - ``` - - **RHEL 8** - - ```bash - sudo pcs resource create ag_cluster ocf:mssql:ag ag_name=ag1 meta failure-timeout=30s promotable notify=true - ``` - -2. Check your resource and ensure that they are online before proceeding using the following command: - - ```bash - sudo pcs resource - ``` - - You should see the following output: - - **RHEL 7** - - ```output - [@VM1 ~]$ sudo pcs resource - Master/Slave Set: ag_cluster-master [ag_cluster] - Masters: [ ] - Slaves: [ ] - ``` - - **RHEL 8** - - ```output - [@VM1 ~]$ sudo pcs resource - * Clone Set: ag_cluster-clone [ag_cluster] (promotable): - * ag_cluster (ocf::mssql:ag) : Slave VMrhel3 (Monitoring) - * ag_cluster (ocf::mssql:ag) : Master VMrhel1 (Monitoring) - * ag_cluster (ocf::mssql:ag) : Slave VMrhel2 (Monitoring) - ``` - - -### Create a virtual IP resource - -1. Use an available static IP address from your network to create a virtual IP resource. You can find one using the command tool `nmap`. - - ```bash - nmap -sP - # For example: nmap -sP 10.0.0.* - # The above will scan for all IP addresses that are already occupied in the 10.0.0.x space. - ``` - -2. 
Set the **stonith-enabled** property to false - - ```bash - sudo pcs property set stonith-enabled=false - ``` - -3. Create the virtual IP resource by using the following command: - - - Replace the `` value below with an unused IP address. - - ```bash - sudo pcs resource create virtualip ocf:heartbeat:IPaddr2 ip= - ``` - -### Add constraints - -1. To ensure that the IP address and the AG resource are running on the same node, a colocation constraint must be configured. Run the following command: - - **RHEL 7** - - ```bash - sudo pcs constraint colocation add virtualip ag_cluster-master INFINITY with-rsc-role=Master - ``` - - **RHEL 8** - - ```bash - sudo pcs constraint colocation add virtualip with master ag_cluster-clone INFINITY with-rsc-role=Master - ``` - -2. Create an ordering constraint to ensure that the AG resource is up and running before the IP address. While the colocation constraint implies an ordering constraint, this enforces it. - - **RHEL 7** - - ```bash - sudo pcs constraint order promote ag_cluster-master then start virtualip - ``` - - **RHEL 8** - - ```bash - sudo pcs constraint order promote ag_cluster-clone then start virtualip - ``` - -3. 
To verify the constraints, run the following command: - - ```bash - sudo pcs constraint list --full - ``` - - You should see the following output: - - **RHEL 7** - - ``` - Location Constraints: - Ordering Constraints: - promote ag_cluster-master then start virtualip (kind:Mandatory) (id:order-ag_cluster-master-virtualip-mandatory) - Colocation Constraints: - virtualip with ag_cluster-master (score:INFINITY) (with-rsc-role:Master) (id:colocation-virtualip-ag_cluster-master-INFINITY) - Ticket Constraints: - ``` - - **RHEL 8** - - ```output - Location Constraints: - Ordering Constraints: - promote ag_cluster-clone then start virtualip (kind:Mandatory) (id:order-ag_cluster-clone-virtualip-mandatory) - Colocation Constraints: - virtualip with ag_cluster-clone (score:INFINITY) (with-rsc-role:Master) (id:colocation-virtualip-ag_cluster-clone-INFINITY) - Ticket Constraints: - ``` - -### Re-enable stonith - -We're ready for testing. Re-enable stonith in the cluster by running the following command on Node 1: - -```bash -sudo pcs property set stonith-enabled=true -``` - -### Check cluster status - -You can check the status of your cluster resources using the following command: - -```output -[@VM1 ~]$ sudo pcs status -Cluster name: az-hacluster -Stack: corosync -Current DC: (version 1.1.19-8.el7_6.5-c3c624ea3d) - partition with quorum -Last updated: Sat Dec 7 00:18:38 2019 -Last change: Sat Dec 7 00:18:02 2019 by root via cibadmin on VM1 - -3 nodes configured -5 resources configured - -Online: [ ] - -Full list of resources: - - Master/Slave Set: ag_cluster-master [ag_cluster] - Masters: [ ] - Slaves: [ ] - virtualip (ocf::heartbeat:IPaddr2): Started - rsc_st_azure (stonith:fence_azure_arm): Started - -Daemon Status: - corosync: active/enabled - pacemaker: active/enabled - pcsd: active/enabled -``` - -## Test failover - -To ensure that the configuration has succeeded so far, we will test a failover. 
For more information, see [Always On availability group failover on Linux](/sql/linux/sql-server-linux-availability-group-failover-ha). - -1. Run the following command to manually fail over the primary replica to ``. Replace `` with the value of your server name. - - **RHEL 7** - - ```bash - sudo pcs resource move ag_cluster-master --master - ``` - - **RHEL 8** - - ```bash - sudo pcs resource move ag_cluster-clone --master - ``` - - You can also specify an additional option so that the temporary constraint that's created to move the resource to a desired node is disabled automatically, and you do not have to perform steps 2 and 3 below. - - **RHEL 7** - - ```bash - sudo pcs resource move ag_cluster-master --master lifetime=30S - ``` - - **RHEL 8** - - ```bash - sudo pcs resource move ag_cluster-clone --master lifetime=30S - ``` - - Another alternative to automate steps 2 and 3 below which clear the temporary constraint in the resource move command itself is by combining multiple commands in a single line. - - **RHEL 7** - - ```bash - sudo pcs resource move ag_cluster-master --master && sleep 30 && pcs resource clear ag_cluster-master - ``` - - **RHEL 8** - - ```bash - sudo pcs resource move ag_cluster-clone --master && sleep 30 && pcs resource clear ag_cluster-clone - ``` - -2. 
If you check your constraints again, you'll see that another constraint was added because of the manual failover: - - **RHEL 7** - - ```output - [@VM1 ~]$ sudo pcs constraint list --full - Location Constraints: - Resource: ag_cluster-master - Enabled on: VM2 (score:INFINITY) (role: Master) (id:cli-prefer-ag_cluster-master) - Ordering Constraints: - promote ag_cluster-master then start virtualip (kind:Mandatory) (id:order-ag_cluster-master-virtualip-mandatory) - Colocation Constraints: - virtualip with ag_cluster-master (score:INFINITY) (with-rsc-role:Master) (id:colocation-virtualip-ag_cluster-master-INFINITY) - Ticket Constraints: - ``` - - **RHEL 8** - - ```output - [@VM1 ~]$ sudo pcs constraint list --full - Location Constraints: - Resource: ag_cluster-master - Enabled on: VM2 (score:INFINITY) (role: Master) (id:cli-prefer-ag_cluster-clone) - Ordering Constraints: - promote ag_cluster-clone then start virtualip (kind:Mandatory) (id:order-ag_cluster-clone-virtualip-mandatory) - Colocation Constraints: - virtualip with ag_cluster-clone (score:INFINITY) (with-rsc-role:Master) (id:colocation-virtualip-ag_cluster-clone-INFINITY) - Ticket Constraints: - ``` - -3. Remove the constraint with ID `cli-prefer-ag_cluster-master` using the following command: - - **RHEL 7** - - ```bash - sudo pcs constraint remove cli-prefer-ag_cluster-master - ``` - - **RHEL 8** - - ```bash - sudo pcs constraint remove cli-prefer-ag_cluster-clone - ``` - -1. Check your cluster resources using the command `sudo pcs resource`, and you should see that the primary instance is now ``. 
- - ```output - [@ ~]$ sudo pcs resource - Master/Slave Set: ag_cluster-master [ag_cluster] - ag_cluster (ocf::mssql:ag): FAILED (Monitoring) - Masters: [ ] - Slaves: [ ] - virtualip (ocf::heartbeat:IPaddr2): Started - [@ ~]$ sudo pcs resource - Master/Slave Set: ag_cluster-master [ag_cluster] - Masters: [ ] - Slaves: [ ] - virtualip (ocf::heartbeat:IPaddr2): Started - ``` - -## Test fencing - -You can test STONITH by running the following command. Try running the below command from `` for ``. - -```bash -sudo pcs stonith fence --debug -``` - -> [!NOTE] -> By default, the fence action brings the node off and then on. If you only want to bring the node offline, use the option `--off` in the command. - -You should get the following output: - -```output -[@ ~]$ sudo pcs stonith fence --debug -Running: stonith_admin -B -Return Value: 0 ---Debug Output Start-- ---Debug Output End-- - -Node: fenced -``` -For more information on testing a fence device, see the following [Red Hat](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/high_availability_add-on_reference/s1-stonithtest-haar) article. - -## Next steps - -In order to utilize an availability group listener for your SQL Server instances, you will need to create and configure a load balancer. 
- -> [!div class="nextstepaction"] -> [Tutorial: Configure an availability group listener for SQL Server on RHEL virtual machines in Azure](rhel-high-availability-listener-tutorial.md) diff --git a/articles/azure-sql/virtual-machines/linux/sql-iaas-agent-extension-register-vm-linux.md b/articles/azure-sql/virtual-machines/linux/sql-iaas-agent-extension-register-vm-linux.md deleted file mode 100644 index 6c0d7e75f80f3..0000000000000 --- a/articles/azure-sql/virtual-machines/linux/sql-iaas-agent-extension-register-vm-linux.md +++ /dev/null @@ -1,161 +0,0 @@ ---- -title: Register with SQL IaaS Agent extension (Linux) -description: Learn how to register your SQL Server on Linux Azure VM with the SQL IaaS Agent extension to enable Azure features, as well as for compliance, and improved manageability. -services: virtual-machines-windows -documentationcenter: na -author: adbadram -tags: azure-resource-manager -ms.service: virtual-machines-sql -ms.subservice: management -ms.topic: how-to -ms.tgt_pltfrm: vm-Linux-sql-server -ms.workload: iaas-sql-server -ms.date: 10/26/2021 -ms.author: adbadram -ms.reviewer: mathoma -ms.custom: devx-track-azurecli, devx-track-azurepowershell, contperf-fy21q2 - ---- -# Register Linux SQL Server VM with SQL IaaS Agent extension -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!div class="op_single_selector"] -> * [Windows](../windows/sql-agent-extension-manually-register-single-vm.md) -> * [Linux](sql-iaas-agent-extension-register-vm-linux.md) - - -Register your SQL Server VM with the [SQL IaaS Agent extension](sql-server-iaas-agent-extension-linux.md) to unlock a wealth of feature benefits for your SQL Server on Linux Azure VM. - -## Overview - -Registering with the [SQL Server IaaS Agent extension](sql-server-iaas-agent-extension-linux.md) creates the **SQL virtual machine** _resource_ within your subscription, which is a _separate_ resource from the virtual machine resource. 
Unregistering your SQL Server VM from the extension removes the **SQL virtual machine** _resource_ but will not drop the actual virtual machine. - -To utilize the SQL IaaS Agent extension, you must first [register your subscription with the **Microsoft.SqlVirtualMachine** provider](#register-subscription-with-rp), which gives the SQL IaaS extension the ability to create resources within that specific subscription. - -> [!IMPORTANT] -> The SQL IaaS Agent extension collects data for the express purpose of giving customers optional benefits when using SQL Server within Azure Virtual Machines. Microsoft will not use this data for licensing audits without the customer's advance consent. See the [SQL Server privacy supplement](/sql/sql-server/sql-server-privacy#non-personal-data) for more information. - -## Prerequisites - -To register your SQL Server VM with the extension, you'll need: - -- An [Azure subscription](https://azure.microsoft.com/free/). -- An Azure Resource Model [Ubuntu Linux virtual machine](../../../virtual-machines/linux/quick-create-portal.md) with [SQL Server 2017 (or greater)](https://www.microsoft.com/sql-server/sql-server-downloads) deployed to the public or Azure Government cloud. -- The latest version of [Azure CLI](/cli/azure/install-azure-cli) or [Azure PowerShell (5.0 minimum)](/powershell/azure/install-az-ps). - -## Register subscription with RP - -To register your SQL Server VM with the SQL IaaS Agent extension, you must first register your subscription with the **Microsoft.SqlVirtualMachine** resource provider (RP). This gives the SQL IaaS Agent extension the ability to create resources within your subscription. You can do so by using the Azure portal, the Azure CLI, or Azure PowerShell. - -### Azure portal - -Register your subscription with the resource provider by using the Azure portal: - -1. Open the Azure portal and go to **All Services**. -1. Go to **Subscriptions** and select the subscription of interest. -1. 
On the **Subscriptions** page, select **Resource providers** under **Settings**. -1. Enter **sql** in the filter to bring up the SQL-related resource providers. -1. Select **Register**, **Re-register**, or **Unregister** for the **Microsoft.SqlVirtualMachine** provider, depending on your desired action. - - -![Modify the provider](../windows/media/sql-agent-extension-manually-register-single-vm/select-resource-provider-sql.png) - -### Command line - -Register your Azure subscription with the **Microsoft.SqlVirtualMachine** provider using either Azure CLI or Azure PowerShell. - -# [Azure CLI](#tab/bash) - -Register your subscription with the resource provider by using the Azure CLI: - -```azurecli-interactive -# Register the SQL IaaS Agent extension to your subscription -az provider register --namespace Microsoft.SqlVirtualMachine -``` - -# [Azure PowerShell](#tab/powershell) - -Register your subscription with the resource provider by using Azure PowerShell: - -```powershell-interactive -# Register the SQL IaaS Agent extension to your subscription -Register-AzResourceProvider -ProviderNamespace Microsoft.SqlVirtualMachine -``` - ---- - -## Register VM - -The SQL IaaS Agent extension on Linux is only available in lightweight mode, which supports only changing the license type and edition of SQL Server. Use the Azure CLI or Azure PowerShell to register your SQL Server VM with the extension in lightweight mode for limited functionality. - -Provide the SQL Server license type as either pay-as-you-go (`PAYG`) to pay per usage, Azure Hybrid Benefit (`AHUB`) to use your own license, or disaster recovery (`DR`) to activate the [free DR replica license](../windows/business-continuity-high-availability-disaster-recovery-hadr-overview.md#free-dr-replica-in-azure). 
- -# [Azure CLI](#tab/bash) - -Register a SQL Server VM in lightweight mode with the Azure CLI: - -```azurecli-interactive -# Register Enterprise or Standard self-installed VM in Lightweight mode -az sql vm create --name --resource-group --location --license-type -``` - -# [Azure PowerShell](#tab/powershell) - -Register a SQL Server VM in lightweight mode with Azure PowerShell: - -```powershell-interactive -# Get the existing compute VM -$vm = Get-AzVM -Name -ResourceGroupName -# Register SQL VM with 'Lightweight' SQL IaaS agent -New-AzSqlVM -Name $vm.Name -ResourceGroupName $vm.ResourceGroupName -Location $vm.Location ` - -LicenseType -SqlManagementType LightWeight -``` - ---- - -## Verify registration status - -You can verify if your SQL Server VM has already been registered with the SQL IaaS Agent extension by using the Azure portal, the Azure CLI, or Azure PowerShell. - - -### Azure portal - -Verify the registration status by using the Azure portal: - -1. Sign in to the [Azure portal](https://portal.azure.com). -1. Go to your SQL virtual machines resource. -1. Select your SQL Server VM from the list. If your SQL Server VM is not listed here, it likely hasn't been registered with the SQL IaaS Agent extension. - -### Command line - -Verify current SQL Server VM registration status using either Azure CLI or Azure PowerShell. `ProvisioningState` shows as `Succeeded` if registration was successful. - -# [Azure CLI](#tab/bash) - -Verify the registration status by using the Azure CLI: - -```azurecli-interactive -az sql vm show -n -g -``` - -# [Azure PowerShell](#tab/powershell) - -Verify the registration status by using the Azure PowerShell: - -```powershell-interactive -Get-AzSqlVM -Name -ResourceGroupName -``` - ---- - -An error indicates that the SQL Server VM has not been registered with the extension. 
- - -## Next steps - -For more information, see the following articles: - -* [Overview of SQL Server on a Windows VM](sql-server-on-linux-vm-what-is-iaas-overview.md) -* [FAQ for SQL Server on a Windows VM](frequently-asked-questions-faq.yml) -* [Pricing guidance for SQL Server on a Windows VM](../windows/pricing-guidance.md) -* [Release notes for SQL Server on a Windows VM](../windows/doc-changes-updates-release-notes.md) diff --git a/articles/azure-sql/virtual-machines/linux/sql-server-iaas-agent-extension-linux.md b/articles/azure-sql/virtual-machines/linux/sql-server-iaas-agent-extension-linux.md deleted file mode 100644 index e76b7877bfcf3..0000000000000 --- a/articles/azure-sql/virtual-machines/linux/sql-server-iaas-agent-extension-linux.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: SQL Server IaaS Agent extension for Linux -description: This article describes how the SQL Server IaaS Agent extension helps automate management specific administration tasks of SQL Server on Linux Azure VMs. -services: virtual-machines-windows -documentationcenter: '' -author: adbadram -tags: azure-resource-manager -ms.service: virtual-machines-sql -ms.subservice: management -ms.topic: conceptual -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 10/26/2021 -ms.author: adbadram -ms.reviewer: mathoma ---- -# SQL Server IaaS Agent extension for Linux -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!div class="op_single_selector"] -> * [Windows](../windows/sql-server-iaas-agent-extension-automate-management.md) -> * [Linux](sql-server-iaas-agent-extension-linux.md) - -The SQL Server IaaS Agent extension (SqlIaasExtension) runs on SQL Server on Linux Azure Virtual Machines (VMs) to automate management and administration tasks. - -This article provides an overview of the extension. See [Register with the extension](sql-iaas-agent-extension-register-vm-linux.md) to learn more. 
- - -## Overview - -The SQL Server IaaS Agent extension enables integration with the Azure portal and unlocks the following benefits for SQL Server on Linux Azure VMs: - -- **Compliance**: The extension offers a simplified method to fulfill the requirement of notifying Microsoft that the Azure Hybrid Benefit has been enabled as is specified in the product terms. This process negates needing to manage licensing registration forms for each resource. - -- **Simplified license management**: The extension simplifies SQL Server license management, and allows you to quickly identify SQL Server VMs with the Azure Hybrid Benefit enabled using the Azure portal, Azure PowerShell or the Azure CLI: - - # [PowerShell](#tab/azure-powershell) - - ```powershell-interactive - Get-AzSqlVM | Where-Object {$_.LicenseType -eq 'AHUB'} - ``` - - # [Azure CLI](#tab/azure-cli) - - ```azurecli-interactive - $ az sql vm list --query "[?sqlServerLicenseType=='AHUB']" - ``` - --- - -- **Free**: There is no additional cost associated with the extension. - - - -## Installation - -[Register](sql-iaas-agent-extension-register-vm-linux.md) your SQL Server VM with the SQL Server IaaS Agent extension to create the **SQL virtual machine** _resource_ within your subscription, which is a _separate_ resource from the virtual machine resource. Unregistering your SQL Server VM from the extension will remove the **SQL virtual machine** _resource_ from your subscription but will not drop the actual virtual machine. - -The SQL Server IaaS Agent extension for Linux is currently only available in lightweight mode. - - -## Verify extension status - -Use the Azure portal or Azure PowerShell to check the status of the extension. - -### Azure portal - -Verify the extension is installed by using the Azure portal. - -Go to your **Virtual machine** resource in the Azure portal (not the *SQL virtual machines* resource, but the resource for your VM). Select **Extensions** under **Settings**. 
You should see the **SqlIaasExtension** extension listed, as in the following example: - -![Check the Status of the SQL Server IaaS Agent extension SqlIaaSExtension in the Azure portal](../windows/media/sql-server-iaas-agent-extension-automate-management/azure-rm-sql-server-iaas-agent-portal.png) - - - - -### Azure PowerShell - -You can also use the **Get-AzVMSqlServerExtension** Azure PowerShell cmdlet: - -```powershell-interactive - Get-AzVMSqlServerExtension -VMName "vmname" -ResourceGroupName "resourcegroupname" -``` - -The previous command confirms that the agent is installed and provides general status information. You can get specific status information about automated backup and patching by using the following commands: - -```powershell-interactive - $sqlext = Get-AzVMSqlServerExtension -VMName "vmname" -ResourceGroupName "resourcegroupname" - $sqlext.AutoPatchingSettings - $sqlext.AutoBackupSettings -``` - -## Limitations - -The Linux SQL IaaS Agent extension has the following limitations: - -- Only SQL Server VMs running on the Ubuntu Linux operating system are supported. Other Linux distributions are not currently supported. -- SQL Server VMs running Ubuntu Linux Pro are not supported. -- SQL Server VMs running on generalized images are not supported. -- Only SQL Server VMs deployed through the Azure Resource Manager are supported. SQL Server VMs deployed through the classic model are not supported. -- SQL Server with only a single instance. Multiple instances are not supported. - -## Privacy statement - -When using SQL Server on Azure VMs and the SQL IaaS extension, consider the following privacy statements: - -- **Data collection**: The SQL IaaS Agent extension collects data for the express purpose of giving customers optional benefits when using SQL Server on Azure Virtual Machines. Microsoft **will not use this data for licensing audits** without the customer's advance consent. 
See the [SQL Server privacy supplement](/sql/sql-server/sql-server-privacy#non-personal-data) for more information. - -- **In-region data residency**: SQL Server on Azure VMs and SQL IaaS Agent Extension do not move or store customer data out of the region in which the VMs are deployed. - - -## Next steps - -For more information about running SQL Server on Azure Virtual Machines, see the [What is SQL Server on Azure Linux Virtual Machines?](sql-server-on-linux-vm-what-is-iaas-overview.md). - -To learn more, see [frequently asked questions](frequently-asked-questions-faq.yml). diff --git a/articles/azure-sql/virtual-machines/linux/sql-server-on-linux-vm-what-is-iaas-overview.md b/articles/azure-sql/virtual-machines/linux/sql-server-on-linux-vm-what-is-iaas-overview.md deleted file mode 100644 index ff44f3ce9979e..0000000000000 --- a/articles/azure-sql/virtual-machines/linux/sql-server-on-linux-vm-what-is-iaas-overview.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: Overview of SQL Server on Azure Virtual Machines for Linux| Microsoft Docs -description: Learn about how to run full SQL Server editions on Azure Virtual Machines for Linux. Get direct links to all Linux SQL Server VM images and related content. -services: virtual-machines-sql -documentationcenter: '' -author: MashaMSFT -tags: azure-service-management -ms.service: virtual-machines-sql -ms.subservice: service-overview - -ms.topic: overview -ms.workload: iaas-sql-server -ms.date: 10/26/2021 -ms.author: mathoma ---- -# Overview of SQL Server on Linux Azure Virtual Machines -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!div class="op_single_selector"] -> * [Windows](../windows/sql-server-on-azure-vm-iaas-what-is-overview.md) -> * [Linux](sql-server-on-linux-vm-what-is-iaas-overview.md) - -SQL Server on Azure Virtual Machines enables you to use full versions of SQL Server in the cloud without having to manage any on-premises hardware. 
SQL Server VMs also simplify licensing costs when you pay as you go. - -Azure virtual machines run in many different [geographic regions](https://azure.microsoft.com/regions/) around the world. They also offer a variety of [machine sizes](../../../virtual-machines/sizes.md). The virtual machine image gallery allows you to create a SQL Server VM with the right version, edition, and operating system. This makes virtual machines a good option for a many different SQL Server workloads. - -If you're new to Azure SQL, check out the *SQL Server on Azure VM Overview* video from our in-depth [Azure SQL video series](/shows/Azure-SQL-for-Beginners?WT.mc_id=azuresql4beg_azuresql-ch9-niner): -> [!VIDEO https://docs.microsoft.com/shows/Azure-SQL-for-Beginners/SQL-Server-on-Azure-VM-Overview-4-of-61/player] - -## Get started with SQL Server VMs - -To get started, choose a SQL Server virtual machine image with your required version, edition, and operating system. The following sections provide direct links to the Azure portal for the SQL Server virtual machine gallery images. - -> [!TIP] -> For more information about how to understand pricing for SQL Server images, see [the pricing page for Linux VMs running SQL Server](https://azure.microsoft.com/pricing/details/virtual-machines/linux/). 
- -| Version | Operating system | Edition | -| --- | --- | --- | -| **SQL Server 2019** | Ubuntu 18.04 | [Enterprise](https://portal.azure.com/#create/microsoftsqlserver.sql2019-ubuntu1804enterprise-ARM), [Standard](https://portal.azure.com/#create/microsoftsqlserver.sql2019-ubuntu1804standard-ARM), [Web](https://portal.azure.com/#create/microsoftsqlserver.sql2019-ubuntu1804web-ARM), [Developer](https://portal.azure.com/#create/microsoftsqlserver.sql2019-ubuntu1804sqldev-ARM) | -| **SQL Server 2019** | Red Hat Enterprise Linux (RHEL) 8 | [Enterprise](https://portal.azure.com/#create/microsoftsqlserver.sql2019-rhel8enterprise-ARM), [Standard](https://portal.azure.com/#create/microsoftsqlserver.sql2019-rhel8standard-ARM), [Web](https://portal.azure.com/#create/microsoftsqlserver.sql2019-rhel8web-ARM), [Developer](https://portal.azure.com/#create/microsoftsqlserver.sql2019-rhel8sqldev-ARM)| -| **SQL Server 2019** | SUSE Linux Enterprise Server (SLES) v12 SP5 | [Enterprise](https://portal.azure.com/#create/microsoftsqlserver.sql2019-sles12sp5enterprise-ARM), [Standard](https://portal.azure.com/#create/microsoftsqlserver.sql2019-sles12sp5standard-ARM), [Web](https://portal.azure.com/#create/microsoftsqlserver.sql2019-sles12sp5web-ARM), [Developer](https://portal.azure.com/#create/microsoftsqlserver.sql2019-sles12sp5sqldev-ARM)| -| **SQL Server 2017** | Red Hat Enterprise Linux (RHEL) 7.4 |[Enterprise](https://portal.azure.com/#create/Microsoft.SQLServer2017EnterpriseonRedHatEnterpriseLinux74), [Standard](https://portal.azure.com/#create/Microsoft.SQLServer2017StandardonRedHatEnterpriseLinux74), [Web](https://portal.azure.com/#create/Microsoft.SQLServer2017WebonRedHatEnterpriseLinux74), [Express](https://portal.azure.com/#create/Microsoft.FreeSQLServerLicenseSQLServer2017ExpressonRedHatEnterpriseLinux74), [Developer](https://portal.azure.com/#create/Microsoft.FreeSQLServerLicenseSQLServer2017DeveloperonRedHatEnterpriseLinux74) | -| **SQL Server 2017** | SUSE Linux 
Enterprise Server (SLES) v12 SP2 |[Enterprise](https://portal.azure.com/#create/Microsoft.SQLServer2017EnterpriseonSLES12SP2), [Standard](https://portal.azure.com/#create/Microsoft.SQLServer2017StandardonSLES12SP2), [Web](https://portal.azure.com/#create/Microsoft.SQLServer2017WebonSLES12SP2), [Express](https://portal.azure.com/#create/Microsoft.FreeSQLServerLicenseSQLServer2017ExpressonSLES12SP2), [Developer](https://portal.azure.com/#create/Microsoft.FreeSQLServerLicenseSQLServer2017DeveloperonSLES12SP2) | -| **SQL Server 2017** | Ubuntu 16.04 LTS |[Enterprise](https://portal.azure.com/#create/Microsoft.SQLServer2017EnterpriseonUbuntuServer1604LTS), [Standard](https://portal.azure.com/#create/Microsoft.SQLServer2017StandardonUbuntuServer1604LTS), [Web](https://portal.azure.com/#create/Microsoft.SQLServer2017WebonUbuntuServer1604LTS), [Express](https://portal.azure.com/#create/Microsoft.FreeSQLServerLicenseSQLServer2017ExpressonUbuntuServer1604LTS), [Developer](https://portal.azure.com/#create/Microsoft.FreeSQLServerLicenseSQLServer2017DeveloperonUbuntuServer1604LTS) | - -> [!NOTE] -> To see the available SQL Server virtual machine images for Windows, see [Overview of SQL Server on Azure Virtual Machines (Windows)](../windows/sql-server-on-azure-vm-iaas-what-is-overview.md). - -## Installed packages - -When you configure SQL Server on Linux, you install the Database Engine package and then several optional packages depending on your requirements. The Linux virtual machine images for SQL Server automatically install most packages for you. The following table shows which packages are installed for each distribution. 
- -| Distribution | [Database Engine](/sql/linux/sql-server-linux-setup) | [Tools](/sql/linux/sql-server-linux-setup-tools) | [SQL Server agent](/sql/linux/sql-server-linux-setup-sql-agent) | [Full-text search](/sql/linux/sql-server-linux-setup-full-text-search) | [SSIS](/sql/linux/sql-server-linux-setup-ssis) | [HA add-on](/sql/linux/sql-server-linux-business-continuity-dr) | -|---|---|---|---|---|---|---| -| RHEL | ![RHEL and database engine](./media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png) | ![RHEL and tools](./media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png) | ![RHEL and SQL Server agent](./media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png) | ![RHEL and full-text search](./media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png) | ![RHEL and SSIS](./media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png) | ![RHEL and HA add-on](./media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png) | -| SLES | ![SLES and database engine](./media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png) | ![SLES and tools](./media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png) | ![SLES and SQL Server agent](./media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png) | ![SLES and full-text search](./media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png) | ![SLES and SSIS](./media/sql-server-on-linux-vm-what-is-iaas-overview/no.png) | ![SLES and HA add-on](./media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png)| -| Ubuntu | ![Ubuntu and database engine](./media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png) | ![Ubuntu and tools](./media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png) | ![Ubuntu and SQL Server agent](./media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png) | ![Ubuntu and full-text search](./media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png) | ![Ubuntu and SSIS](./media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png) | ![Ubuntu and HA 
add-on](./media/sql-server-on-linux-vm-what-is-iaas-overview/yes.png) | - - -> [!NOTE] -> SQL IaaS Agent extension for SQL Server on Azure Linux Virtual Machines is only available for Ubuntu Linux distribution. - -## Related products and services - -### Linux virtual machines - -* [Azure Virtual Machines overview](../../../virtual-machines/linux/overview.md) - -### Storage - -* [Introduction to Microsoft Azure Storage](../../../storage/common/storage-introduction.md) - -### Networking - -* [Virtual Network overview](../../../virtual-network/virtual-networks-overview.md) -* [IP addresses in Azure](../../../virtual-network/ip-services/public-ip-addresses.md) -* [Create a Fully Qualified Domain Name in the Azure portal](../../../virtual-machines/create-fqdn.md) - -### SQL - -* [SQL Server on Linux documentation](/sql/linux) -* [Azure SQL Database comparison](../../azure-sql-iaas-vs-paas-what-is-overview.md) - -## Next steps - -Get started with SQL Server on Linux virtual machines: - -* [Create a SQL Server VM in the Azure portal](sql-vm-create-portal-quickstart.md) - -Get answers to commonly asked questions about SQL Server VMs on Linux: - -* [SQL Server on Azure Virtual Machines FAQ](frequently-asked-questions-faq.yml) \ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/linux/sql-vm-create-portal-quickstart.md b/articles/azure-sql/virtual-machines/linux/sql-vm-create-portal-quickstart.md deleted file mode 100644 index df802f49efb02..0000000000000 --- a/articles/azure-sql/virtual-machines/linux/sql-vm-create-portal-quickstart.md +++ /dev/null @@ -1,197 +0,0 @@ ---- -title: "Quickstart: Create a Linux SQL Server VM in Azure" -description: This tutorial shows how to create a Linux SQL Server 2017 virtual machine in the Azure portal. 
-services: virtual-machines-sql -author: MashaMSFT -ms.date: 10/22/2019 -tags: azure-service-management -ms.topic: quickstart -ms.service: virtual-machines-sql -ms.subservice: deployment -ms.workload: iaas-sql-server -ms.author: mathoma -ms.custom: mode-ui ---- - -# Provision a Linux virtual machine running SQL Server in the Azure portal -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!div class="op_single_selector"] -> * [Linux](sql-vm-create-portal-quickstart.md) -> * [Windows](../windows/sql-vm-create-portal-quickstart.md) - -In this quickstart tutorial, you use the Azure portal to create a Linux virtual machine with SQL Server 2017 installed. You learn the following: - - -* [Create a Linux VM running SQL Server from the gallery](#create) -* [Connect to the new VM with ssh](#connect) -* [Change the SA password](#password) -* [Configure for remote connections](#remote) - -## Prerequisites - -If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free) before you begin. - -## Create a Linux VM with SQL Server installed - -1. Sign in to the [Azure portal](https://portal.azure.com/). - -1. In the left pane, select **Create a resource**. - -1. In the **Create a resource** pane, select **Compute**. - -1. Select **See all** next to the **Featured** heading. - - ![See all VM images](./media/sql-vm-create-portal-quickstart/azure-compute-blade.png) - -1. In the search box, type **SQL Server 2019**, and select **Enter** to start the search. - -1. Limit the search results by selecting **Operating system** > **Redhat**. - - ![Search filter for SQL Server 2019 VM images](./media/sql-vm-create-portal-quickstart/searchfilter.png) - -1. Select a SQL Server 2019 Linux image from the search results. This tutorial uses **SQL Server 2019 on RHEL74**. - - > [!TIP] - > The Developer edition lets you test or develop with the features of the Enterprise edition but no SQL Server licensing costs. 
You only pay for the cost of running the Linux VM. - -1. Select **Create**. - - -### Set up your Linux VM - -1. In the **Basics** tab, select your **Subscription** and **Resource Group**. - - ![Basics window](./media/sql-vm-create-portal-quickstart/basics.png) - -1. In **Virtual machine name**, enter a name for your new Linux VM. -1. Then, type or select the following values: - * **Region**: Select the Azure region that's right for you. - * **Availability options**: Choose the availability and redundancy option that's best for your apps and data. - * **Change size**: Select this option to pick a machine size and when done, choose **Select**. For more information about VM machine sizes, see [VM sizes](../../../virtual-machines/sizes.md). - - ![Choose a VM size](./media/sql-vm-create-portal-quickstart/vmsizes.png) - - > [!TIP] - > For development and functional testing, use a VM size of **DS2** or higher. For performance testing, use **DS13** or higher. - - * **Authentication type**: Select **SSH public key**. - - > [!Note] - > You have the choice of using an SSH public key or a Password for authentication. SSH is more secure. For instructions on how to generate an SSH key, see [Create SSH keys on Linux and Mac for Linux VMs in Azure](../../../virtual-machines/linux/mac-create-ssh-keys.md). - - * **Username**: Enter the Administrator name for the VM. - * **SSH public key**: Enter your RSA public key. - * **Public inbound ports**: Choose **Allow selected ports** and pick the **SSH (22)** port in the **Select public inbound ports** list. In this quickstart, this step is necessary to connect and complete the SQL Server configuration. If you want to remotely connect to SQL Server, you will need to manually allow traffic to the default port (1433) used by Microsoft SQL Server for connections over the Internet after the virtual machine is created. - - ![Inbound ports](./media/sql-vm-create-portal-quickstart/port-settings.png) - -1. 
Make any changes you want to the settings in the following additional tabs or keep the default settings. - * **Disks** - * **Networking** - * **Management** - * **Guest config** - * **Tags** - -1. Select **Review + create**. -1. In the **Review + create** pane, select **Create**. - -## Connect to the Linux VM - -If you already use a BASH shell, connect to the Azure VM using the **ssh** command. In the following command, replace the VM user name and IP address to connect to your Linux VM. - -```bash -ssh azureadmin@40.55.55.555 -``` - -You can find the IP address of your VM in the Azure portal. - -![IP address in Azure portal](./media/sql-vm-create-portal-quickstart/vmproperties.png) - -If you're running on Windows and don't have a BASH shell, install an SSH client, such as PuTTY. - -1. [Download and install PuTTY](https://www.chiark.greenend.org.uk/~sgtatham/putty/download.html). - -1. Run PuTTY. - -1. On the PuTTY configuration screen, enter your VM's public IP address. - -1. Select **Open** and enter your username and password at the prompts. - -For more information about connecting to Linux VMs, see [Create a Linux VM on Azure using the Portal](../../../virtual-machines/linux/quick-create-portal.md). - -> [!NOTE] -> If you see a PuTTY security alert about the server's host key not being cached in the registry, choose from the following options. If you trust this host, select **Yes** to add the key to PuTTy's cache and continue connecting. If you want to carry on connecting just once, without adding the key to the cache, select **No**. If you don't trust this host, select **Cancel** to abandon the connection. - -## Change the SA password - -The new virtual machine installs SQL Server with a random SA password. Reset this password before you connect to SQL Server with the SA login. - -1. After connecting to your Linux VM, open a new command terminal. - -1. 
Change the SA password with the following commands: - - ```bash - sudo systemctl stop mssql-server - sudo /opt/mssql/bin/mssql-conf set-sa-password - ``` - - Enter a new SA password and password confirmation when prompted. - -1. Restart the SQL Server service. - - ```bash - sudo systemctl start mssql-server - ``` - -## Add the tools to your path (optional) - -Several SQL Server [packages](sql-server-on-linux-vm-what-is-iaas-overview.md#packages) are installed by default, including the SQL Server command-line Tools package. The tools package contains the **sqlcmd** and **bcp** tools. For convenience, you can optionally add the tools path, `/opt/mssql-tools/bin/`, to your **PATH** environment variable. - -1. Run the following commands to modify the **PATH** for both login sessions and interactive/non-login sessions: - - ```bash - echo 'export PATH="$PATH:/opt/mssql-tools/bin"' >> ~/.bash_profile - echo 'export PATH="$PATH:/opt/mssql-tools/bin"' >> ~/.bashrc - source ~/.bashrc - ``` - -## Configure for remote connections - -If you need to remotely connect to SQL Server on the Azure VM, you must configure an inbound rule on the network security group. The rule allows traffic on the port on which SQL Server listens (default of 1433). The following steps show how to use the Azure portal for this step. - -> [!TIP] -> If you selected the inbound port **MS SQL (1433)** in the settings during provisioning, these changes have been made for you. You can go to the next section on how to configure the firewall. - -1. In the portal, select **Virtual machines**, and then select your SQL Server VM. -1. In the left navigation pane, under **Settings**, select **Networking**. -1. In the Networking window, select **Add inbound port** under **Inbound Port Rules**. - - ![Inbound port rules](./media/sql-vm-create-portal-quickstart/networking.png) - -1. In the **Service** list, select **MS SQL**. 
- - ![MS SQL security group rule](./media/sql-vm-create-portal-quickstart/sqlnsgrule.png) - -1. Click **OK** to save the rule for your VM. - -### Open the firewall on RHEL - -This tutorial directed you to create a Red Hat Enterprise Linux (RHEL) VM. If you want to connect remotely to RHEL VMs, you also have to open up port 1433 on the Linux firewall. - -1. [Connect](#connect) to your RHEL VM. - -1. In the BASH shell, run the following commands: - - ```bash - sudo firewall-cmd --zone=public --add-port=1433/tcp --permanent - sudo firewall-cmd --reload - ``` - -## Next steps - -Now that you have a SQL Server 2017 virtual machine in Azure, you can connect locally with **sqlcmd** to run Transact-SQL queries. - -If you configured the Azure VM for remote SQL Server connections, you should be able to connect remotely. For an example of how to connect remotely to SQL Server on Linux from Windows, see [Use SSMS on Windows to connect to SQL Server on Linux](/sql/linux/sql-server-linux-develop-use-ssms). To connect with Visual Studio Code, see [Use Visual Studio Code to create and run Transact-SQL scripts for SQL Server](/sql/linux/sql-server-linux-develop-use-vscode) - -For more general information about SQL Server on Linux, see [Overview of SQL Server 2017 on Linux](/sql/linux/sql-server-linux-overview). For more information about using SQL Server 2017 Linux virtual machines, see [Overview of SQL Server 2017 virtual machines on Azure](sql-server-on-linux-vm-what-is-iaas-overview.md). 
diff --git a/articles/azure-sql/virtual-machines/windows/application-patterns-development-strategies.md b/articles/azure-sql/virtual-machines/windows/application-patterns-development-strategies.md deleted file mode 100644 index e1c545d44c09b..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/application-patterns-development-strategies.md +++ /dev/null @@ -1,277 +0,0 @@ ---- -title: SQL Server application patterns on VMs | Microsoft Docs -description: This article covers application patterns for SQL Server on Azure Virtual Machines. It provides solution architects and developers a foundation for good application architecture and design. -services: virtual-machines-windows -documentationcenter: na -author: bluefooted -editor: '' -tags: azure-service-management,azure-resource-manager - -ms.assetid: 41863c8d-f3a3-4584-ad86-b95094365e05 -ms.service: virtual-machines-sql -ms.subservice: management - -ms.topic: conceptual -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 05/31/2017 -ms.author: pamela -ms.reviewer: mathoma - ---- -# Application patterns and development strategies for SQL Server on Azure Virtual Machines -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -[!INCLUDE [learn-about-deployment-models](../../../../includes/learn-about-deployment-models-both-include.md)] - -## Summary: -Determining which application pattern or patterns to use for your SQL Server-based applications in an Azure environment is an important design decision and it requires a solid understanding of how SQL Server and each infrastructure component of Azure work together. With SQL Server in Azure Infrastructure Services, you can easily migrate, maintain, and monitor your existing SQL Server applications built on Windows Server to virtual machines (VMs) in Azure. 
- -The goal of this article is to provide solution architects and developers a foundation for good application architecture and design, which they can follow when migrating existing applications to Azure as well as developing new applications in Azure. - -For each application pattern, you will find an on-premises scenario, its respective cloud-enabled solution, and the related technical recommendations. In addition, the article discusses Azure-specific development strategies so that you can design your applications correctly. Due to the many possible application patterns, it’s recommended that architects and developers should choose the most appropriate pattern for their applications and users. - -**Technical contributors:** Luis Carlos Vargas Herring, Madhan Arumugam Ramakrishnan - -**Technical reviewers:** Corey Sanders, Drew McDaniel, Narayan Annamalai, Nir Mashkowski, Sanjay Mishra, Silvano Coriani, Stefan Schackow, Tim Hickey, Tim Wieman, Xin Jin - -## Introduction -You can develop many types of n-tier applications by separating the components of the different application layers on different machines as well as in separate components. For example, you can place the client application and business rules components in one machine, front-end web tier and data access tier components in another machine, and a back-end database tier in another machine. This kind of structuring helps isolate each tier from each other. If you change where data comes from, you don’t need to change the client or web application but only the data access tier components. - -A typical *n-tier* application includes the presentation tier, the business tier, and the data tier: - -| Tier | Description | -| --- | --- | -| **Presentation** |The *presentation tier* (web tier, front-end tier) is the layer in which users interact with an application. 
| -| **Business** |The *business tier* (middle tier) is the layer that the presentation tier and the data tier use to communicate with each other and includes the core functionality of the system. | -| **Data** |The *data tier* is basically the server that stores an application's data (for example, a server running SQL Server). | - -Application layers describe the logical groupings of the functionality and components in an application; whereas tiers describe the physical distribution of the functionality and components on separate physical servers, computers, networks, or remote locations. The layers of an application may reside on the same physical computer (the same tier) or may be distributed over separate computers (n-tier), and the components in each layer communicate with components in other layers through well-defined interfaces. You can think of the term tier as referring to physical distribution patterns such as two-tier, three-tier, and n-tier. A **2-tier application pattern** contains two application tiers: application server and database server. The direct communication happens between the application server and the database server. The application server contains both web-tier and business-tier components. In **3-tier application pattern**, there are three application tiers: web server, application server, which contains the business logic tier and/or business tier data access components, and the database server. The communication between the web server and the database server happens over the application server. For detailed information on application layers and tiers, see [Microsoft Application Architecture Guide](/previous-versions/msp-n-p/ff650706(v=pandp.10)). - -Before you start reading this article, you should have knowledge on the fundamental concepts of SQL Server and Azure. 
For information, see [SQL Server Books Online](/sql/sql-server/), [SQL Server on Azure Virtual Machines](sql-server-on-azure-vm-iaas-what-is-overview.md) and [Azure.com](https://azure.microsoft.com/). - -This article describes several application patterns that can be suitable for your simple applications as well as the highly complex enterprise applications. Before detailing each pattern, we recommend that you should familiarize yourself with the available data storage services in Azure, such as [Azure Storage](../../../storage/common/storage-introduction.md), [Azure SQL Database](../../database/sql-database-paas-overview.md), and [SQL Server in an Azure virtual machine](sql-server-on-azure-vm-iaas-what-is-overview.md). To make the best design decisions for your applications, understand when to use which data storage service clearly. - -### Choose SQL Server on Azure Virtual Machines, when: - -* You need control on SQL Server and Windows. For example, this might include the SQL Server version, special hotfixes, performance configuration, etc. -* You need a full compatibility with SQL Server and want to move existing applications to Azure as-is. -* You want to leverage the capabilities of the Azure environment but Azure SQL Database does not support all the features that your application requires. This could include the following areas: - - * **Database size**: At the time this article was updated, SQL Database supports a database of up to 1 TB of data. If your application requires more than 1 TB of data and you don’t want to implement custom sharding solutions, it’s recommended that you use SQL Server in an Azure virtual machine. For the latest information, see [Scaling Out Azure SQL Database](/previous-versions/azure/dn495641(v=azure.100)), [DTU-Based Purchasing Model](../../database/service-tiers-dtu.md), and [vCore-Based Purchasing Model](../../database/service-tiers-vcore.md)(preview). 
- * **HIPAA compliance**: Healthcare customers and Independent Software Vendors (ISVs) might choose [SQL Server on Azure Virtual Machines](sql-server-on-azure-vm-iaas-what-is-overview.md) instead of [Azure SQL Database](../../database/sql-database-paas-overview.md) because SQL Server on Azure Virtual Machines is covered by HIPAA Business Associate Agreement (BAA). For information on compliance, see [Microsoft Azure Trust Center: Compliance](https://azure.microsoft.com/support/trust-center/compliance/). - * **Instance-level features**: At this time, SQL Database doesn’t support features that live outside of the database (such as Linked Servers, Agent jobs, FileStream, Service Broker, etc.). For more information, see [Azure SQL Database Guidelines and Limitations](/previous-versions/azure/ff394102(v=azure.100)). - -## 1-tier (simple): Single virtual machine -In this application pattern, you deploy your SQL Server application and database to a standalone virtual machine in Azure. The same virtual machine contains your client/web application, business components, data access layer, and the database server. The presentation, business, and data access code are logically separated but are physically located in a single-server machine. Most customers start with this application pattern and then, they scale out by adding more web roles or virtual machines to their system. - -This application pattern is useful when: - -* You want to perform a simple migration to Azure platform to evaluate whether the platform answers your application’s requirements or not. -* You want to keep all the application tiers hosted in the same virtual machine in the same Azure data center to reduce the latency between tiers. -* You want to quickly provision development and test environments for short periods of time. -* You want to perform stress testing for varying workload levels but at the same time you do not want to own and maintain many physical machines all the time. 
- -The following diagram demonstrates a simple on-premises scenario and how you can deploy its cloud enabled solution in a single virtual machine in Azure. - -![1-tier application pattern](./media/application-patterns-development-strategies/IC728008.png) - -Deploying the business layer (business logic and data access components) on the same physical tier as the presentation layer can maximize application performance, unless you must use a separate tier due to scalability or security concerns. - -Since this is a very common pattern to start with, you might find the following article on migration useful for moving your data to your SQL Server VM: [Migrating a Database to SQL Server on an Azure VM](migrate-to-vm-from-sql-server.md). - -## 3-tier (simple): Multiple virtual machines -In this application pattern, you deploy a 3-tier application in Azure by placing each application tier in a different virtual machine. This provides a flexible environment for an easy scale-up and scale-out scenarios. When one virtual machine contains your client/web application, the other one hosts your business components, and the other one hosts the database server. - -This application pattern is useful when: - -* You want to perform a migration of complex database applications to Azure Virtual Machines. -* You want different application tiers to be hosted in different regions. For example, you might have shared databases that are deployed to multiple regions for reporting purposes. -* You want to move enterprise applications from on-premises virtualized platforms to Azure Virtual Machines. For a detailed discussion on enterprise applications, see [What is an Enterprise Application](/previous-versions/visualstudio/aa267045(v=vs.60)). -* You want to quickly provision development and test environments for short periods of time. -* You want to perform stress testing for varying workload levels but at the same time you do not want to own and maintain many physical machines all the time. 
- -The following diagram demonstrates how you can place a simple 3-tier application in Azure by placing each application tier in a different virtual machine. - -![3-tier application pattern](./media/application-patterns-development-strategies/IC728009.png) - -In this application pattern, there is only one virtual machine in each tier. If you have multiple VMs in Azure, we recommend that you set up a virtual network. [Azure Virtual Network](../../../virtual-network/virtual-networks-overview.md) creates a trusted security boundary and also allows VMs to communicate among themselves over the private IP address. In addition, always make sure that all Internet connections only go to the presentation tier. When following this application pattern, manage the network security group rules to control access. For more information, see [Allow external access to your VM using the Azure portal](../../../virtual-machines/windows/nsg-quickstart-portal.md?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). - -In the diagram, Internet Protocols can be TCP, UDP, HTTP, or HTTPS. - -> [!NOTE] -> Setting up a virtual network in Azure is free of charge. However, you are charged for the VPN gateway that connects to on-premises. This charge is based on the amount of time that connection is provisioned and available. -> -> - -## 2-tier and 3-tier with presentation tier scale-out -In this application pattern, you deploy 2-tier or 3-tier database application to Azure Virtual Machines by placing each application tier in a different virtual machine. In addition, you scale out the presentation tier due to increased volume of incoming client requests. - -This application pattern is useful when: - -* You want to move enterprise applications from on-premises virtualized platforms to Azure Virtual Machines. -* You want to scale out the presentation tier due to increased volume of incoming client requests. 
-* You want to quickly provision development and test environments for short periods of time. -* You want to perform stress testing for varying workload levels but at the same time you do not want to own and maintain many physical machines all the time. -* You want to own an infrastructure environment that can scale up and down on demand. - -The following diagram demonstrates how you can place the application tiers in multiple virtual machines in Azure by scaling out the presentation tier due to increased volume of incoming client requests. As seen in the diagram, Azure Load Balancer is responsible for distributing traffic across multiple virtual machines and also determining which web server to connect to. Having multiple instances of the web servers behind a load balancer ensures the high availability of the presentation tier. - -![Application pattern - presentation tier scale-out](./media/application-patterns-development-strategies/IC728010.png) - -### Best practices for 2-tier, 3-tier, or n-tier patterns that have multiple VMs in one tier -It’s recommended that you place the virtual machines that belong to the same tier in the same cloud service and in the same the availability set. For example, place a set of web servers in **CloudService1** and **AvailabilitySet1** and a set of database servers in **CloudService2** and **AvailabilitySet2**. An availability set in Azure enables you to place the high availability nodes into separate fault domains and upgrade domains. - -To leverage multiple VM instances of a tier, you need to configure Azure Load Balancer between application tiers. To configure Load Balancer in each tier, create a load-balanced endpoint on each tier’s VMs separately. For a specific tier, first create VMs in the same cloud service. This ensures that they have the same public Virtual IP address. Next, create an endpoint on one of the virtual machines on that tier. 
Then, assign the same endpoint to the other virtual machines on that tier for load balancing. By creating a load-balanced set, you distribute traffic across multiple virtual machines and also allow the Load Balancer to determine which node to connect when a backend VM node fails. For example, having multiple instances of the web servers behind a load balancer ensures the high availability of the presentation tier. - -As a best practice, always make sure that all internet connections first go to the presentation tier. The presentation layer accesses the business tier, and then the business tier accesses the data tier. For more information on how to allow access to the presentation layer, see [Allow external access to your VM using the Azure portal](../../../virtual-machines/windows/nsg-quickstart-portal.md?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). - -Note that the Load Balancer in Azure works similar to load balancers in an on-premises environment. For more information, see [Load balancing for Azure infrastructure services](../../../virtual-machines/windows/tutorial-load-balancer.md?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). - -In addition, we recommend that you set up a private network for your virtual machines by using Azure Virtual Network. This allows them to communicate among themselves over the private IP address. For more information, see [Azure Virtual Network](../../../virtual-network/virtual-networks-overview.md). - -## 2-tier and 3-tier with business tier scale-out -In this application pattern, you deploy a 2-tier or 3-tier database application to Azure Virtual Machines by placing each application tier in a different virtual machine. In addition, you might want to distribute the application server components to multiple virtual machines due to the complexity of your application. 
- -This application pattern is useful when: - -* You want to move enterprise applications from on-premises virtualized platforms to Azure Virtual Machines. -* You want to distribute the application server components to multiple virtual machines due to the complexity of your application. -* You want to move business logic heavy on-premises LOB (line-of-business) applications to Azure Virtual Machines. LOB applications are a set of critical computer applications that are vital to running an enterprise, such as accounting, human resources (HR), payroll, supply chain management, and resource planning applications. -* You want to quickly provision development and test environments for short periods of time. -* You want to perform stress testing for varying workload levels but at the same time you do not want to own and maintain many physical machines all the time. -* You want to own an infrastructure environment that can scale up and down on demand. - -The following diagram demonstrates an on-premises scenario and its cloud enabled solution. In this scenario, you place the application tiers in multiple virtual machines in Azure by scaling out the business tier, which contains the business logic tier and data access components. As seen in the diagram, Azure Load Balancer is responsible for distributing traffic across multiple virtual machines and also determining which web server to connect to. Having multiple instances of the application servers behind a load balancer ensures the high availability of the business tier. For more information, see [Best practices for 2-tier, 3-tier, or n-tier application patterns that have multiple virtual machines in one tier](#best-practices-for-2-tier-3-tier-or-n-tier-patterns-that-have-multiple-vms-in-one-tier). 
- -![Application pattern with business tier scale-out](./media/application-patterns-development-strategies/IC728011.png) - -## 2-tier and 3-tier with presentation and business tiers scale-out and HADR -In this application pattern, you deploy a 2-tier or 3-tier database application to Azure Virtual Machines by distributing the presentation tier (web server) and the business tier (application server) components to multiple virtual machines. In addition, you implement high-availability and disaster recovery (HADR) solutions for your databases in Azure Virtual Machines. - -This application pattern is useful when: - -* You want to move enterprise applications from virtualized platforms on-premises to Azure by implementing SQL Server high availability and disaster recovery capabilities. -* You want to scale out the presentation tier and the business tier due to increased volume of incoming client requests and the complexity of your application. -* You want to quickly provision development and test environments for short periods of time. -* You want to perform stress testing for varying workload levels but at the same time you do not want to own and maintain many physical machines all the time. -* You want to own an infrastructure environment that can scale up and down on demand. - -The following diagram demonstrates an on-premises scenario and its cloud enabled solution. In this scenario, you scale out the presentation tier and the business tier components in multiple virtual machines in Azure. In addition, you implement high availability and disaster recovery (HADR) techniques for SQL Server databases in Azure. - -Running multiple copies of an application in different VMs make sure that you are load balancing requests across them. When you have multiple virtual machines, you need to make sure that all your VMs are accessible and running at one point in time. 
If you configure load balancing, Azure Load Balancer tracks the health of VMs and directs incoming calls to the healthy functioning VM nodes properly. For information on how to set up load balancing of the virtual machines, see [Load balancing for Azure infrastructure services](../../../virtual-machines/windows/tutorial-load-balancer.md?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). Having multiple instances of web and application servers behind a load balancer ensures the high availability of the presentation and business tiers. - -![Scale-out and high availability](./media/application-patterns-development-strategies/IC728012.png) - -### Best practices for application patterns requiring SQL HADR -When you set up SQL Server high availability and disaster recovery solutions in Azure Virtual Machines, setting up a virtual network for your virtual machines using [Azure Virtual Network](../../../virtual-network/virtual-networks-overview.md) is mandatory. Virtual machines within a Virtual Network will have a stable private IP address even after a service downtime, thus you can avoid the update time required for DNS name resolution. In addition, the virtual network allows you to extend your on-premises network to Azure and creates a trusted security boundary. For example, if your application has corporate domain restrictions (such as, Windows authentication, Active Directory), setting up [Azure Virtual Network](../../../virtual-network/virtual-networks-overview.md) is necessary. - -Most of customers, who are running production code on Azure, are keeping both primary and secondary replicas in Azure. - -For comprehensive information and tutorials on high availability and disaster recovery techniques, see [High Availability and Disaster Recovery for SQL Server on Azure Virtual Machines](business-continuity-high-availability-disaster-recovery-hadr-overview.md). 
- -## 2-tier and 3-tier using Azure Virtual Machines and Cloud Services -In this application pattern, you deploy 2-tier or 3-tier application to Azure by using both [Azure Cloud Services](../../../cloud-services/cloud-services-choose-me.md) (web and worker roles - Platform as a Service (PaaS)) and [Azure Virtual Machines](../../../virtual-machines/windows/overview.md?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) (Infrastructure as a Service (IaaS)). Using [Azure Cloud Services](https://azure.microsoft.com/documentation/services/cloud-services/) for the presentation tier/business tier and SQL Server in [Azure Virtual Machines](../../../virtual-machines/windows/overview.md?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) for the data tier is beneficial for most applications running on Azure. The reason is that having a compute instance running on Cloud Services provides an easy management, deployment, monitoring, and scale-out. - -With Cloud Services, Azure maintains the infrastructure for you, performs routine maintenance, patches the operating systems, and attempts to recover from service and hardware failures. When your application needs scale-out, automatic, and manual scale-out options are available for your cloud service project by increasing or decreasing the number of instances or virtual machines that are used by your application. In addition, you can use on-premises Visual Studio to deploy your application to a cloud service project in Azure. - -In summary, if you don’t want to own extensive administrative tasks for the presentation/business tier and your application does not require any complex configuration of software or the operating system, use Azure Cloud Services. If Azure SQL Database does not support all the features you are looking for, use SQL Server in an Azure virtual machine for the data tier. Running an application on Azure Cloud Services and storing data in Azure Virtual Machines combines the benefits of both services. 
For a detailed comparison, see the section in this topic on [Comparing development strategies in Azure](#comparing-web-development-strategies-in-azure). - -In this application pattern, the presentation tier includes a web role, which is a Cloud Services component running in the Azure execution environment and it is customized for web application programming as supported by IIS and ASP.NET. The business or backend tier includes a worker role, which is a Cloud Services component running in the Azure execution environment and it is useful for generalized development, and may perform background processing for a web role. The database tier resides in a SQL Server virtual machine in Azure. The communication between the presentation tier and the database tier happens directly or over the business tier – worker role components. - -This application pattern is useful when: - -* You want to move enterprise applications from virtualized platforms on-premises to Azure by implementing SQL Server high availability and disaster recovery capabilities. -* You want to own an infrastructure environment that can scale up and down on demand. -* Azure SQL Database does not support all the features that your application or database needs. -* You want to perform stress testing for varying workload levels but at the same time you do not want to own and maintain many physical machines all the time. - -The following diagram demonstrates an on-premises scenario and its cloud enabled solution. In this scenario, you place the presentation tier in web roles, the business tier in worker roles but the data tier in virtual machines in Azure. Running multiple copies of the presentation tier in different web roles ensures to load balance requests across them. When you combine Azure Cloud Services with Azure Virtual Machines, we recommend that you set up [Azure Virtual Network](../../../virtual-network/virtual-networks-overview.md) as well. 
With [Azure Virtual Network](../../../virtual-network/virtual-networks-overview.md), you can have stable and persistent private IP addresses within the same cloud service in the cloud. Once you define a virtual network for your virtual machines and cloud services, they can start communicating among themselves over the private IP address. In addition, having virtual machines and Azure web/worker roles in the same [Azure Virtual Network](../../../virtual-network/virtual-networks-overview.md) provides low latency and more secure connectivity. For more information, see [What is a cloud service](../../../cloud-services/cloud-services-choose-me.md). - -As seen in the diagram, Azure Load Balancer distributes traffic across multiple virtual machines and also determines which web server or application server to connect to. Having multiple instances of the web and application servers behind a load balancer ensures the high availability of the presentation tier and the business tier. For more information, see [Best practices for application patterns requiring SQL HADR](#best-practices-for-application-patterns-requiring-sql-hadr). - -![Diagram shows on-premises physical or virtual machines connected to web role instances in an Azure virtual network through an Azure load balancer.](./media/application-patterns-development-strategies/IC728013.png) - -Another approach to implement this application pattern is to use a consolidated web role that contains both presentation tier and business tier components as shown in the following diagram. This application pattern is useful for applications that require stateful design. 
Since Azure provides stateless compute nodes on web and worker roles, we recommend that you implement a logic to store session state using one of the following technologies: [Azure Caching](https://azure.microsoft.com/documentation/services/azure-cache-for-redis/), [Azure Table Storage](../../../cosmos-db/tutorial-develop-table-dotnet.md) or [Azure SQL Database](../../database/sql-database-paas-overview.md). - -![Diagram shows on-premises physical or virtual machines connected to consolidated web/worker role instances in an Azure virtual network.](./media/application-patterns-development-strategies/IC728014.png) - -## Pattern with Azure Virtual Machines, Azure SQL Database, and Azure App Service (Web Apps) -The primary goal of this application pattern is to show you how to combine Azure infrastructure as a service (IaaS) components with Azure platform-as-a-service components (PaaS) in your solution. This pattern is focused on Azure SQL Database for relational data storage. It does not include SQL Server in an Azure virtual machine, which is part of the Azure infrastructure as a service offering. - -In this application pattern, you deploy a database application to Azure by placing the presentation and business tiers in the same virtual machine and accessing a database in Azure SQL Database (SQL Database) servers. You can implement the presentation tier by using traditional IIS-based web solutions. Or, you can implement a combined presentation and business tier by using [Azure App Service](https://azure.microsoft.com/documentation/services/app-service/web/). - -This application pattern is useful when: - -* You already have an existing SQL Database server configured in Azure and you want to test your application quickly. -* You want to test the capabilities of Azure environment. -* You want to quickly provision development and test environments for short periods of time. -* Your business logic and data access components can be self-contained within a web application. 
- -The following diagram demonstrates an on-premises scenario and its cloud enabled solution. In this scenario, you place the application tiers in a single virtual machine in Azure and access data in Azure SQL Database. - -![Mixed application pattern](./media/application-patterns-development-strategies/IC728015.png) - -If you choose to implement a combined web and application tier by using Azure Web Apps, we recommend that you keep the middle-tier or application tier as dynamic-link libraries (DLLs) in the context of a web application. - -In addition, review the recommendations given in the [Comparing web development strategies in Azure](#comparing-web-development-strategies-in-azure) section at the end of this article to learn more about programming techniques. - -## N-tier hybrid application pattern -In n-tier hybrid application pattern, you implement your application in multiple tiers distributed between on-premises and Azure. Therefore, you create a flexible and reusable hybrid system, which you can modify or add a specific tier without changing the other tiers. To extend your corporate network to the cloud, you use [Azure Virtual Network](../../../virtual-network/virtual-networks-overview.md) service. - -This hybrid application pattern is useful when: - -* You want to build applications that run partly in the cloud and partly on-premises. -* You want to migrate some or all elements of an existing on-premises application to the cloud. -* You want to move enterprise applications from on-premises virtualized platforms to Azure. -* You want to own an infrastructure environment that can scale up and down on demand. -* You want to quickly provision development and test environments for short periods of time. -* You want a cost effective way to take backups for enterprise database applications. - -The following diagram demonstrates an n-tier hybrid application pattern that spans across on-premises and Azure. 
As shown in the diagram, on-premises infrastructure includes [Active Directory Domain Services](/previous-versions/windows/it-pro/windows-server-2012-R2-and-2012/hh831484(v=ws.11)) domain controller to support user authentication and authorization. Note that the diagram demonstrates a scenario, where some parts of the data tier live in an on-premises data center whereas some parts of the data tier live in Azure. Depending on your application’s needs, you can implement several other hybrid scenarios. For example, you might keep the presentation tier and the business tier in an on-premises environment but the data tier in Azure. - -![N-tier application pattern](./media/application-patterns-development-strategies/IC728016.png) - -In Azure, you can use Active Directory as a standalone cloud directory for your organization, or you can also integrate existing on-premises Active Directory with [Azure Active Directory](https://azure.microsoft.com/documentation/services/active-directory/). As seen in the diagram, the business tier components can access to multiple data sources, such as to [SQL Server in Azure](sql-server-on-azure-vm-iaas-what-is-overview.md) via a private internal IP address, to on-premises SQL Server via [Azure Virtual Network](../../../virtual-network/virtual-networks-overview.md), or to [SQL Database](../../database/sql-database-paas-overview.md) using the .NET Framework data provider technologies. In this diagram, Azure SQL Database is an optional data storage service. - -In n-tier hybrid application pattern, you can implement the following workflow in the order specified: - -1. Identify enterprise database applications that need to be moved up to cloud by using the [Microsoft Assessment and Planning (MAP) Toolkit](https://microsoft.com/map). The MAP Toolkit gathers inventory and performance data from computers you are considering for virtualization and provides recommendations on capacity and assessment planning. -2. 
Plan the resources and configuration needed in the Azure platform, such as storage accounts and virtual machines. -3. Set up network connectivity between the corporate network on-premises and [Azure Virtual Network](../../../virtual-network/virtual-networks-overview.md). To set up the connection between the corporate network on-premises and a virtual machine in Azure, use one of the following two methods: - - 1. Establish a connection between on-premises and Azure via public end points on a virtual machine in Azure. This method provides an easy setup and enables you to use SQL Server authentication in your virtual machine. In addition, set up your network security group rules to control public traffic to the VM. For more information, see [Allow external access to your VM using the Azure portal](../../../virtual-machines/windows/nsg-quickstart-portal.md?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). - 2. Establish a connection between on-premises and Azure via Azure Virtual Private network (VPN) tunnel. This method allows you to extend domain policies to a virtual machine in Azure. In addition, you can set up firewall rules and use Windows authentication in your virtual machine. Currently, Azure supports secure site-to-site VPN and point-to-site VPN connections: - - * With secure site-to-site connection, you can establish network connectivity between your on-premises network and your virtual network in Azure. It is recommended for connecting your on-premises data center environment to Azure. - * With secure point-to-site connection, you can establish network connectivity between your virtual network in Azure and your individual computers running anywhere. It is mostly recommended for development and test purposes. - - For information on how to connect to SQL Server in Azure, see [Connect to a SQL Server virtual machine on Azure](ways-to-connect-to-sql.md). -4. Set up scheduled jobs and alerts that back up on-premises data in a virtual machine disk in Azure. 
For more information, see [SQL Server Backup and Restore with Azure Blob Storage](/sql/relational-databases/backup-restore/sql-server-backup-and-restore-with-microsoft-azure-blob-storage-service) and [Backup and Restore for SQL Server on Azure Virtual Machines](/azure/azure-sql/virtual-machines/windows/backup-restore). -5. Depending on your application’s needs, you can implement one of the following three common scenarios: - - 1. You can keep your web server, application server, and insensitive data in a database server in Azure whereas you keep the sensitive data on-premises. - 2. You can keep your web server and application server on-premises whereas you keep the database server in a virtual machine in Azure. - 3. You can keep your database server, web server, and application server on-premises whereas you keep the database replicas in virtual machines in Azure. This setting allows the on-premises web servers or reporting applications to access the database replicas in Azure. Therefore, you can lower the workload on an on-premises database. We recommend that you implement this scenario for heavy read workloads and developmental purposes. For information on creating database replicas in Azure, see AlwaysOn Availability Groups at [High Availability and Disaster Recovery for SQL Server on Azure Virtual Machines](business-continuity-high-availability-disaster-recovery-hadr-overview.md). - -## Comparing web development strategies in Azure -To implement and deploy a multi-tier SQL Server-based application in Azure, you can use one of the following two programming methods: - -* Set up a traditional web server (IIS - Internet Information Services) in Azure and access databases in SQL Server on Azure Virtual Machines. -* Implement and deploy a cloud service to Azure. Then, make sure that this cloud service can access databases in SQL Server on Azure Virtual Machines. A cloud service can include multiple web and worker roles. 
- -The following table provides a comparison of traditional web development with Azure Cloud Services and Azure Web Apps with respect to SQL Server on Azure Virtual Machines. The table includes Azure Web Apps as it is possible to use SQL Server in an Azure VM as a data source for Azure Web Apps via its public virtual IP address or DNS name. - -| | Traditional web development in Azure Virtual Machines | Cloud services in Azure | Web hosting with Azure Web Apps | -| --- | --- | --- | --- | -| **Application migration from on-premises** |Existing applications as-is. |Applications need web and worker roles. |Existing applications as-is but suited for self-contained web applications and web services that require quick scalability. | -| **Development and deployment** |Visual Studio, WebMatrix, Visual Web Developer, WebDeploy, FTP, TFS, IIS Manager, PowerShell. |Visual Studio, Azure SDK, TFS, PowerShell. Each cloud service has two environments to which you can deploy your service package and configuration: staging and production. You can deploy a cloud service to the staging environment to test it before you promote it to production. |Visual Studio, WebMatrix, Visual Web Developer, FTP, GIT, BitBucket, CodePlex, DropBox, GitHub, Mercurial, TFS, Web Deploy, PowerShell. | -| **Administration and setup** |You are responsible for administrative tasks on the application, data, firewall rules, virtual network, and operating system. |You are responsible for administrative tasks on the application, data, firewall rules, and virtual network. |You are responsible for administrative tasks on the application and data only. | -| **High availability and disaster recovery (HADR)** |We recommend that you place virtual machines in the same availability set and in the same cloud service. Keeping your VMs in the same availability set allows Azure to place the high availability nodes into separate fault domains and upgrade domains. 
Similarly, keeping your VMs in the same cloud service enables load balancing and VMs can communicate directly with one another over the local network within an Azure data center.

    You are responsible for implementing a high availability and disaster recovery solution for SQL Server on Azure Virtual Machines to avoid any downtime. For supported HADR technologies, see [High Availability and Disaster Recovery for SQL Server on Azure Virtual Machines](business-continuity-high-availability-disaster-recovery-hadr-overview.md).

    You are responsible for backing up your own data and application.

    Azure can move your virtual machines if the host machine in the data center fails due to hardware issues. In addition, there could be planned downtime of your VM when the host machine is updated for security or software updates. Therefore, we recommend that you maintain at least two VMs in each application tier to ensure continuous availability. Azure does not provide an SLA for a single virtual machine. |Azure manages the failures resulting from the underlying hardware or operating system software. We recommend that you implement multiple instances of a web or worker role to ensure the high availability of your application. For information, see [Cloud Services, Virtual Machines, and Virtual Network Service Level Agreement](https://azure.microsoft.com/support/legal/sla/virtual-machines/v1_8/).

    You are responsible for backing up your own data and application.

    For databases residing in a SQL Server database in an Azure VM, you are responsible for implementing a high availability and disaster recovery solution to avoid any downtime. For supported HADR technologies, see High Availability and Disaster Recovery for SQL Server on Azure Virtual Machines.

    **SQL Server Database Mirroring**: Use with Azure Cloud Services (web/worker roles). SQL Server VMs and a cloud service project can be in the same Azure Virtual Network. If SQL Server VM is not in the same Virtual Network, you need to create a SQL Server Alias to route communication to the instance of SQL Server. In addition, the alias name must match the SQL Server name. |High Availability is inherited from Azure worker roles, Azure blob storage, and Azure SQL Database. For example, Azure Storage maintains three replicas of all blob, table, and queue data. At any one time, Azure SQL Database keeps three replicas of data running—one primary replica and two secondary replicas. For more information, see [Azure Storage](https://azure.microsoft.com/documentation/services/storage/) and [Azure SQL Database](../../database/sql-database-paas-overview.md).

    When using SQL Server in an Azure VM as a data source for Azure Web Apps, keep in mind that Azure Web Apps does not support Azure Virtual Network. In other words, all connections from Azure Web Apps to SQL Server VMs in Azure must go through public end points of virtual machines. This might cause some limitations for high availability and disaster recovery scenarios. For example, the client application on Azure Web Apps connecting to SQL Server VM with database mirroring would not be able to connect to the new primary server as database mirroring requires that you set up Azure Virtual Network between SQL Server host VMs in Azure. Therefore, using **SQL Server Database Mirroring** with Azure Web Apps is not supported currently.

    **SQL Server AlwaysOn Availability Groups**: You can set up AlwaysOn Availability Groups when using Azure Web Apps with SQL Server VMs in Azure. But you need to configure AlwaysOn Availability Group Listener to route the communication to the primary replica via public load-balanced endpoints. | -| **Cross-premises connectivity** |You can use Azure Virtual Network to connect to on-premises. |You can use Azure Virtual Network to connect to on-premises. |Azure Virtual Network is supported. For more information, see [Web Apps Virtual Network Integration](https://azure.microsoft.com/blog/2014/09/15/azure-websites-virtual-network-integration/). | -| **Scalability** |Scale-up is available by increasing the virtual machine sizes or adding more disks. For more information about virtual machine sizes, see [Virtual machine Sizes for Azure](../../../virtual-machines/sizes.md?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

    **For Database Server**: Scale-out is available via database partitioning techniques and SQL Server AlwaysOn Availability groups.

    For heavy read workloads, you can use [AlwaysOn Availability Groups](/sql/database-engine/availability-groups/windows/always-on-availability-groups-sql-server) on multiple secondary nodes as well as SQL Server Replication.

    For heavy write workloads, you can implement horizontal partitioning data across multiple physical servers to provide application scale-out.

    In addition, you can implement a scale-out by using [SQL Server with Data Dependent Routing](/previous-versions/sql/sql-server-2005/administrator/cc966448(v=technet.10)). With Data Dependent Routing (DDR), you need to implement the partitioning mechanism in the client application, typically in the business tier layer, to route the database requests to multiple SQL Server nodes. The business tier contains mappings to how the data is partitioned and which node contains the data.

    You can scale applications that are running virtual machines. For more information, see [How to Scale an Application](../../../cloud-services/cloud-services-how-to-scale-portal.md).

    **Important Note**: The **AutoScale** feature in Azure allows you to automatically increase or decrease the virtual machines that are used by your application. This feature guarantees that the end-user experience is not affected negatively during peak periods, and VMs are shut down when the demand is low. It’s recommended that you do not set the AutoScale option for your cloud service if it includes SQL Server VMs. The reason is that the AutoScale feature lets Azure turn on a virtual machine when the CPU usage in that VM is higher than some threshold, and turn off a virtual machine when the CPU usage falls below that threshold. The AutoScale feature is useful for stateless applications, such as web servers, where any VM can manage the workload without any references to any previous state. However, the AutoScale feature is not useful for stateful applications, such as SQL Server, where only one instance allows writing to the database. |Scale-up is available by using multiple web and worker roles. For more information about virtual machine sizes for web roles and worker roles, see [Configure Sizes for Cloud Services](../../../cloud-services/cloud-services-sizes-specs.md).

    When using **Cloud Services**, you can define multiple roles to distribute processing and also achieve flexible scaling of your application. Each cloud service includes one or more web roles and/or worker roles, each with its own application files and configuration. You can scale-up a cloud service by increasing the number of role instances (virtual machines) deployed for a role and scale-down a cloud service by decreasing the number of role instances. For detailed information, see [Azure Execution Models](../../../cloud-services/cloud-services-choose-me.md).

    Scale-out is available via built-in Azure high availability support through [Cloud Services, Virtual Machines, and Virtual Network Service Level Agreement](https://azure.microsoft.com/support/legal/sla/virtual-machines/v1_8/) and Load Balancer.

    For a multi-tier application, we recommend that you connect web/worker roles application to database server VMs via Azure Virtual Network. In addition, Azure provides load balancing for VMs in the same cloud service, spreading user requests across them. Virtual machines connected in this way can communicate directly with one another over the local network within an Azure data center.

    You can set up **AutoScale** on the Azure portal as well as the schedule times. For more information, see [How to configure auto scaling for a Cloud Service in the portal](../../../cloud-services/cloud-services-how-to-scale-portal.md). |**Scale up and down**: You can increase/decrease the size of the instance (VM) reserved for your web site.

    **Scale out**: You can add more reserved instances (VMs) for your web site.

    You can set up **AutoScale** on the portal as well as the schedule times. For more information, see [How to Scale Web Apps](../../../app-service/manage-scale-up.md). | - -For more information on choosing between these programming methods, see [Azure Web Apps, Cloud Services, and VMs: When to use which](/azure/architecture/guide/technology-choices/compute-decision-tree). - -## Next steps -For more information on running SQL Server on Azure Virtual Machines, see [SQL Server on Azure Virtual Machines Overview](sql-server-on-azure-vm-iaas-what-is-overview.md). diff --git a/articles/azure-sql/virtual-machines/windows/automated-backup-sql-2014.md b/articles/azure-sql/virtual-machines/windows/automated-backup-sql-2014.md deleted file mode 100644 index 8f4f499a8e8be..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/automated-backup-sql-2014.md +++ /dev/null @@ -1,256 +0,0 @@ ---- -title: Automated Backup for SQL Server 2014 Azure virtual machines -description: Explains the Automated Backup feature for SQL Server 2014 VMs running in Azure. This article is specific to VMs using the Resource Manager. 
-services: virtual-machines-windows -documentationcenter: na -author: bluefooted -tags: azure-resource-manager -ms.assetid: bdc63fd1-db49-4e76-87d5-b5c6a890e53c -ms.service: virtual-machines-sql -ms.subservice: backup - -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 05/03/2018 -ms.author: mathoma -ms.reviewer: pamela -ms.custom: devx-track-azurepowershell ---- - -# Automated Backup for SQL Server 2014 virtual machines (Resource Manager) -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!div class="op_single_selector"] -> * [SQL Server 2014](automated-backup-sql-2014.md) -> * [SQL Server 2016/2017](automated-backup.md) - -Automated Backup automatically configures [Managed Backup to Microsoft Azure](/sql/relational-databases/backup-restore/sql-server-managed-backup-to-microsoft-azure) for all existing and new databases on an Azure VM running SQL Server 2014 Standard or Enterprise. This enables you to configure regular database backups that utilize durable Azure Blob storage. Automated Backup depends on the [SQL Server infrastructure as a service (IaaS) Agent Extension](sql-server-iaas-agent-extension-automate-management.md). - -[!INCLUDE [learn-about-deployment-models](../../../../includes/learn-about-deployment-models-rm-include.md)] - -## Prerequisites -To use Automated Backup, consider the following prerequisites: - - -**Operating system**: - -- Windows Server 2012 and greater - -**SQL Server version/edition**: - -- SQL Server 2014 Standard -- SQL Server 2014 Enterprise - -> [!NOTE] -> For SQL 2016 and greater, see [Automated Backup for SQL Server 2016](automated-backup.md). - -**Database configuration**: - -- Target _user_ databases must use the full recovery model. System databases do not have to use the full recovery model. However, if you require log backups to be taken for Model or MSDB, you must use the full recovery model. 
For more information about the impact of the full recovery model on backups, see [Backup under the full recovery model](/previous-versions/sql/sql-server-2008-r2/ms190217(v=sql.105)). -- The SQL Server VM has been registered with the SQL IaaS Agent extension in [full management mode](sql-agent-extension-manually-register-single-vm.md#upgrade-to-full). -- Automated backup relies on the full [SQL Server IaaS Agent Extension](sql-server-iaas-agent-extension-automate-management.md). As such, automated backup is only supported on target databases from the default instance, or a single named instance. If there is no default instance, and multiple named instances, the SQL IaaS extension fails and automated backup will not work. - -## Settings - -The following table describes the options that can be configured for Automated Backup. The actual configuration steps vary depending on whether you use the Azure portal or Azure Windows PowerShell commands. - -| Setting | Range (Default) | Description | -| --- | --- | --- | -| **Automated Backup** | Enable/Disable (Disabled) | Enables or disables Automated Backup for an Azure VM running SQL Server 2014 Standard or Enterprise. | -| **Retention Period** | 1-30 days (30 days) | The number of days to retain a backup. | -| **Storage Account** | Azure storage account | An Azure storage account to use for storing Automated Backup files in blob storage. A container is created at this location to store all backup files. The backup file naming convention includes the date, time, and machine name. | -| **Encryption** | Enable/Disable (Disabled) | Enables or disables encryption. When encryption is enabled, the certificates used to restore the backup are located in the specified storage account in the same `automaticbackup` container using the same naming convention. If the password changes, a new certificate is generated with that password, but the old certificate remains to restore prior backups. 
| -| **Password** | Password text | A password for encryption keys. This is only required if encryption is enabled. In order to restore an encrypted backup, you must have the correct password and related certificate that was used at the time the backup was taken. | - - -## Configure new VMs - -Use the Azure portal to configure Automated Backup when you create a new SQL Server 2014 virtual machine in the Resource Manager deployment model. - -On the **SQL Server settings** tab, scroll down to **Automated backup** and select **Enable**. The following Azure portal screenshot shows the **SQL Automated Backup** settings. - -![SQL Automated Backup configuration in the Azure portal](./media/automated-backup-sql-2014/azure-sql-arm-autobackup.png) - -## Configure existing VMs - -For existing SQL Server VMs, you can enable and disable automated backups, change the retention period, specify the storage account, and enable encryption from the Azure portal. - -Navigate to the [SQL virtual machines resource](manage-sql-vm-portal.md#access-the-resource) for your SQL Server 2014 virtual machine and then select **Backups**. - -![SQL Automated Backup for existing VMs](./media/automated-backup-sql-2014/azure-sql-rm-autobackup-existing-vms.png) - -When finished, select the **Apply** button on the bottom of the **Backups** page to save your changes. - -If you are enabling Automated Backup for the first time, Azure configures the SQL Server IaaS Agent in the background. During this time, the Azure portal might not show that Automated Backup is configured. Wait several minutes for the agent to be installed and configured. After that, the Azure portal will reflect the new settings. - -> [!NOTE] -> You can also configure Automated Backup using a template. For more information, see [Azure quickstart template for Automated Backup](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.compute/vm-sql-existing-autobackup-update). 
- -## Configure with PowerShell - -You can use PowerShell to configure Automated Backup. Before you begin, you must: - -- [Download and install the latest Azure PowerShell](https://aka.ms/webpi-azps). -- Open Windows PowerShell and associate it with your account with the **Connect-AzAccount** command. - -[!INCLUDE [updated-for-az.md](../../../../includes/updated-for-az.md)] - - -### Verify current settings - -If you enabled automated backup during provisioning, you can use PowerShell to check your current configuration. Run the **Get-AzVMSqlServerExtension** command and examine the **AutoBackupSettings** property: - -```powershell -(Get-AzVMSqlServerExtension -VMName $vmname -ResourceGroupName $resourcegroupname).AutoBackupSettings -``` - -You should get output similar to the following: - -``` -Enable : False -EnableEncryption : False -RetentionPeriod : -1 -StorageUrl : NOTSET -StorageAccessKey : -Password : -BackupSystemDbs : False -BackupScheduleType : -FullBackupFrequency : -FullBackupStartTime : -FullBackupWindowHours : -LogBackupFrequency : -``` - -If your output shows that **Enable** is set to **False**, then you have to enable automated backup. The good news is that you enable and configure Automated Backup in the same way. See the next section for this information. - -> [!NOTE] -> If you check the settings immediately after making a change, it is possible that you will get back the old configuration values. Wait a few minutes and check the settings again to make sure that your changes were applied. - -### Configure Automated Backup -You can use PowerShell to enable Automated Backup as well as to modify its configuration and behavior at any time. - -First, select or create a storage account for the backup files. The following script selects a storage account or creates it if it does not exist. 
- -```powershell -$storage_accountname = "yourstorageaccount" -$storage_resourcegroupname = $resourcegroupname - -$storage = Get-AzStorageAccount -ResourceGroupName $resourcegroupname ` - -Name $storage_accountname -ErrorAction SilentlyContinue -If (-Not $storage) - { $storage = New-AzStorageAccount -ResourceGroupName $storage_resourcegroupname ` - -Name $storage_accountname -SkuName Standard_GRS -Location $region } -``` - -> [!NOTE] -> Automated Backup does not support storing backups in premium storage, but it can take backups from VM disks which use Premium Storage. - -Then use the **New-AzVMSqlServerAutoBackupConfig** command to enable and configure the Automated Backup settings to store backups in the Azure storage account. In this example, the backups are retained for 10 days. The second command, **Set-AzVMSqlServerExtension**, updates the specified Azure VM with these settings. - -```powershell -$autobackupconfig = New-AzVMSqlServerAutoBackupConfig -Enable ` - -RetentionPeriodInDays 10 -StorageContext $storage.Context ` - -ResourceGroupName $storage_resourcegroupname - -Set-AzVMSqlServerExtension -AutoBackupSettings $autobackupconfig ` - -VMName $vmname -ResourceGroupName $resourcegroupname -``` - -It could take several minutes to install and configure the SQL Server IaaS Agent. - -> [!NOTE] -> There are other settings for **New-AzVMSqlServerAutoBackupConfig** that apply only to SQL Server 2016 and Automated Backup v2. SQL Server 2014 does not support the following settings: **BackupSystemDbs**, **BackupScheduleType**, **FullBackupFrequency**, **FullBackupStartHour**, **FullBackupWindowInHours**, and **LogBackupFrequencyInMinutes**. If you attempt to configure these settings on a SQL Server 2014 virtual machine, there is no error, but the settings do not get applied. If you want to use these settings on a SQL Server 2016 virtual machine, see [Automated Backup v2 for SQL Server 2016 Azure virtual machines](automated-backup.md). 
- -To enable encryption, modify the previous script to pass the **EnableEncryption** parameter along with a password (secure string) for the **CertificatePassword** parameter. The following script enables the Automated Backup settings in the previous example and adds encryption. - -```powershell -$password = "P@ssw0rd" -$encryptionpassword = $password | ConvertTo-SecureString -AsPlainText -Force - -$autobackupconfig = New-AzVMSqlServerAutoBackupConfig -Enable ` - -EnableEncryption -CertificatePassword $encryptionpassword ` - -RetentionPeriodInDays 10 -StorageContext $storage.Context ` - -ResourceGroupName $storage_resourcegroupname - -Set-AzVMSqlServerExtension -AutoBackupSettings $autobackupconfig ` - -VMName $vmname -ResourceGroupName $resourcegroupname -``` - -To confirm your settings are applied, [verify the Automated Backup configuration](#verifysettings). - -### Disable Automated Backup - -To disable Automated Backup, run the same script without the **-Enable** parameter to the **New-AzVMSqlServerAutoBackupConfig** command. The absence of the **-Enable** parameter signals the command to disable the feature. As with installation, it can take several minutes to disable Automated Backup. - -```powershell -$autobackupconfig = New-AzVMSqlServerAutoBackupConfig -ResourceGroupName $storage_resourcegroupname - -Set-AzVMSqlServerExtension -AutoBackupSettings $autobackupconfig ` - -VMName $vmname -ResourceGroupName $resourcegroupname -``` - -### Example script - -The following script provides a set of variables that you can customize to enable and configure Automated Backup for your VM. In your case, you might need to customize the script based on your requirements. For example, you would have to make changes if you wanted to disable the backup of system databases or enable encryption. 
- -```powershell -$vmname = "yourvmname" -$resourcegroupname = "vmresourcegroupname" -$region = "Azure region name such as EASTUS2" -$storage_accountname = "storageaccountname" -$storage_resourcegroupname = $resourcegroupname -$retentionperiod = 10 - -# ResourceGroupName is the resource group which is hosting the VM where you are deploying the SQL Server IaaS Extension - -Set-AzVMSqlServerExtension -VMName $vmname ` - -ResourceGroupName $resourcegroupname -Name "SQLIaasExtension" ` - -Version "2.0" -Location $region - -# Creates/use a storage account to store the backups - -$storage = Get-AzStorageAccount -ResourceGroupName $resourcegroupname ` - -Name $storage_accountname -ErrorAction SilentlyContinue -If (-Not $storage) - { $storage = New-AzStorageAccount -ResourceGroupName $storage_resourcegroupname ` - -Name $storage_accountname -SkuName Standard_GRS -Location $region } - -# Configure Automated Backup settings - -$autobackupconfig = New-AzVMSqlServerAutoBackupConfig -Enable ` - -RetentionPeriodInDays $retentionperiod -StorageContext $storage.Context ` - -ResourceGroupName $storage_resourcegroupname - -# Apply the Automated Backup settings to the VM - -Set-AzVMSqlServerExtension -AutoBackupSettings $autobackupconfig ` - -VMName $vmname -ResourceGroupName $resourcegroupname -``` - -## Monitoring - -To monitor Automated Backup on SQL Server 2014, you have two main options. Because Automated Backup uses the SQL Server Managed Backup feature, the same monitoring techniques apply to both. - -First, you can poll the status by calling [msdb.smart_admin.sp_get_backup_diagnostics](/sql/relational-databases/system-stored-procedures/managed-backup-sp-get-backup-diagnostics-transact-sql). Or query the [msdb.smart_admin.fn_get_health_status](/sql/relational-databases/system-functions/managed-backup-fn-get-health-status-transact-sql) table valued function. - -> [!NOTE] -> The schema for Managed Backup in SQL Server 2014 is **msdb.smart_admin**. 
In SQL Server 2016 this changed to **msdb.managed_backup**, and the reference topics use this newer schema. But for SQL Server 2014, you must continue to use the **smart_admin** schema for all Managed Backup objects. - -Another option is to take advantage of the built-in Database Mail feature for notifications. - -1. Call the [msdb.smart_admin.sp_set_parameter](/sql/relational-databases/system-stored-procedures/managed-backup-sp-set-parameter-transact-sql) stored procedure to assign an email address to the **SSMBackup2WANotificationEmailIds** parameter. -1. Enable [SendGrid](https://docs.sendgrid.com/for-developers/partners/microsoft-azure-2021#create-a-twilio-sendgrid-accountcreate-a-twilio-sendgrid-account) to send the emails from the Azure VM. -1. Use the SMTP server and user name to configure Database Mail. You can configure Database Mail in SQL Server Management Studio or with Transact-SQL commands. For more information, see [Database Mail](/sql/relational-databases/database-mail/database-mail). -1. [Configure SQL Server Agent to use Database Mail](/sql/relational-databases/database-mail/configure-sql-server-agent-mail-to-use-database-mail). -1. Verify that the SMTP port is allowed both through the local VM firewall and the network security group for the VM. - -## Next steps - -Automated Backup configures Managed Backup on Azure VMs. So it is important to [review the documentation for Managed Backup on SQL Server 2014](/sql/relational-databases/backup-restore/sql-server-managed-backup-to-microsoft-azure). - -You can find additional backup and restore guidance for SQL Server on Azure VMs in the following article: [Backup and restore for SQL Server on Azure virtual machines](backup-restore.md). - -For information about other available automation tasks, see [SQL Server IaaS Agent Extension](sql-server-iaas-agent-extension-automate-management.md). 
- -For more information about running SQL Server on Azure VMs, see [SQL Server on Azure virtual machines overview](sql-server-on-azure-vm-iaas-what-is-overview.md). \ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/windows/automated-backup.md b/articles/azure-sql/virtual-machines/windows/automated-backup.md deleted file mode 100644 index 1960cda3c4a5e..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/automated-backup.md +++ /dev/null @@ -1,345 +0,0 @@ ---- -title: Automated Backup v2 for SQL Server 2016/2017 Azure VMs | Microsoft Docs -description: This article explains the Automated Backup feature for SQL Server 2016/2017 VMs running on Azure. This article is specific to VMs using the Resource Manager. -services: virtual-machines-windows -documentationcenter: na -author: bluefooted -tags: azure-resource-manager -ms.assetid: ebd23868-821c-475b-b867-06d4a2e310c7 -ms.service: virtual-machines-sql -ms.subservice: backup - -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 12/21/2021 -ms.author: pamela -ms.reviewer: mathoma -ms.custom: devx-track-azurepowershell ---- - -# Automated Backup v2 for Azure virtual machines (Resource Manager) -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!div class="op_single_selector"] -> * [SQL Server 2014](automated-backup-sql-2014.md) -> * [SQL Server 2016 +](automated-backup.md) - -Automated Backup v2 automatically configures [Managed Backup to Microsoft Azure](/sql/relational-databases/backup-restore/sql-server-managed-backup-to-microsoft-azure) for all existing and new databases on an Azure VM running SQL Server 2016 or later Standard, Enterprise, or Developer editions. This enables you to configure regular database backups that utilize durable Azure blob storage. Automated Backup v2 depends on the [SQL Server infrastructure as a service (IaaS) Agent Extension](sql-server-iaas-agent-extension-automate-management.md). 
- -## Prerequisites -To use Automated Backup v2, review the following prerequisites: - -**Operating system**: - -- Windows Server 2012 R2 or higher - -**SQL Server version/edition**: - -- SQL Server 2016 or higher: Developer, Standard, or Enterprise - -> [!NOTE] -> For SQL Server 2014, see [Automated Backup for SQL Server 2014](automated-backup-sql-2014.md). - -**Database configuration**: - -- Target _user_ databases must use the full recovery model. System databases do not have to use the full recovery model. However, if you require log backups to be taken for Model or MSDB, you must use the full recovery model. For more information about the impact of the full recovery model on backups, see [Backup under the full recovery model](/previous-versions/sql/sql-server-2008-r2/ms190217(v=sql.105)). -- The SQL Server VM has been registered with the SQL IaaS Agent extension in [full management mode](sql-agent-extension-manually-register-single-vm.md#upgrade-to-full). -- Automated backup relies on the full [SQL Server IaaS Agent Extension](sql-server-iaas-agent-extension-automate-management.md). As such, automated backup is only supported on target databases from the default instance, or a single named instance. If there is no default instance, and multiple named instances, the SQL IaaS extension fails and automated backup will not work. - -## Settings -The following table describes the options that can be configured for Automated Backup v2. The actual configuration steps vary depending on whether you use the Azure portal or Azure Windows PowerShell commands. - -### Basic Settings - -| Setting | Range (Default) | Description | -| --- | --- | --- | -| **Automated Backup** | Enable/Disable (Disabled) | Enables or disables Automated Backup for an Azure VM running SQL Server 2016/2017 Developer, Standard, or Enterprise. | -| **Retention Period** | 1-30 days (30 days) | The number of days to retain backups. 
| -| **Storage Account** | Azure storage account | An Azure storage account to use for storing Automated Backup files in blob storage. A container is created at this location to store all backup files. The backup file naming convention includes the date, time, and database GUID. | -| **Encryption** |Enable/Disable (Disabled) | Enables or disables encryption. When encryption is enabled, the certificates used to restore the backup are located in the specified storage account. It uses the same **automatic backup** container with the same naming convention. If the password changes, a new certificate is generated with that password, but the old certificate remains to restore prior backups. | -| **Password** |Password text | A password for encryption keys. This password is only required if encryption is enabled. In order to restore an encrypted backup, you must have the correct password and related certificate that was used at the time the backup was taken. | - -### Advanced Settings - -| Setting | Range (Default) | Description | -| --- | --- | --- | -| **System Database Backups** | Enable/Disable (Disabled) | When enabled, this feature also backs up the system databases: Master, MSDB, and Model. For the MSDB and Model databases, verify that they are in full recovery mode if you want log backups to be taken. Log backups are never taken for Master. And no backups are taken for TempDB. | -| **Backup Schedule** | Manual/Automated (Automated) | By default, the backup schedule is automatically determined based on the log growth. Manual backup schedule allows the user to specify the time window for backups. In this case, backups only take place at the specified frequency and during the specified time window of a given day. | -| **Full backup frequency** | Daily/Weekly | Frequency of full backups. In both cases, full backups begin during the next scheduled time window. When weekly is selected, backups could span multiple days until all databases have successfully backed up. 
| -| **Full backup start time** | 00:00 – 23:00 (01:00) | Start time of a given day during which full backups can take place. | -| **Full backup time window** | 1 – 23 hours (1 hour) | Duration of the time window of a given day during which full backups can take place. | -| **Log backup frequency** | 5 – 60 minutes (60 minutes) | Frequency of log backups. | - -## Understanding full backup frequency -It is important to understand the difference between daily and weekly full backups. Consider the following two example scenarios. - -### Scenario 1: Weekly backups -You have a SQL Server VM that contains a number of large databases. - -On Monday, you enable Automated Backup v2 with the following settings: - -- Backup schedule: **Manual** -- Full backup frequency: **Weekly** -- Full backup start time: **01:00** -- Full backup time window: **1 hour** - -This means that the next available backup window is Tuesday at 1 AM for 1 hour. At that time, Automated Backup begins backing up your databases one at a time. In this scenario, your databases are large enough that full backups complete for the first couple databases. However, after one hour not all of the databases have been backed up. - -When this happens, Automated Backup begins backing up the remaining databases the next day, Wednesday at 1 AM for one hour. If not all databases have been backed up in that time, it tries again the next day at the same time. This continues until all databases have been successfully backed up. - -After it reaches Tuesday again, Automated Backup begins backing up all databases again. - -This scenario shows that Automated Backup only operates within the specified time window, and each database is backed up once per week. This also shows that it is possible for backups to span multiple days in the case where it is not possible to complete all backups in a single day. - -### Scenario 2: Daily backups -You have a SQL Server VM that contains a number of large databases. 
- -On Monday, you enable Automated Backup v2 with the following settings: - -- Backup schedule: Manual -- Full backup frequency: Daily -- Full backup start time: 22:00 -- Full backup time window: 6 hours - -This means that the next available backup window is Monday at 10 PM for 6 hours. At that time, Automated Backup begins backing up your databases one at a time. - -Then, on Tuesday at 10 PM for 6 hours, full backups of all databases start again. - - -> [!IMPORTANT] -> Backups happen sequentially during each interval. For instances with a large number of databases, schedule your backup interval with enough time to accommodate all backups. If backups cannot complete within the given interval, some backups may be skipped, and your time between backups for a single database may be higher than the configured backup interval time, which could negatively impact your restore point objective (RPO). - -## Configure new VMs - -Use the Azure portal to configure Automated Backup when you create a new SQL Server 2016 or later machine in the Resource Manager deployment model. - -In the **SQL Server settings** tab, select **Enable** under **Automated backup**. -When you enable automated backup, you can configure the following settings: - -* Retention period for backups (up to 90 days) -* Storage account, and storage container, to use for backups -* Encryption option and password for backups -* Backup system databases -* Configure backup schedule - -To encrypt the backup, select **Enable**. Then specify the **Password**. Azure creates a certificate to encrypt the backups and uses the specified password to protect that certificate. - -Choose **Select Storage Container** to specify the container where you want to store your backups. - -By default the schedule is set automatically, but you can create your own schedule by selecting **Manual**, which allows you to configure the backup frequency, backup time window, and the log backup frequency in minutes. 
- -The following Azure portal screenshot shows the **Automated Backup** settings when you create a new SQL Server VM: - -![Automated Backup configuration in the Azure portal](./media/automated-backup/automated-backup-blade.png) - - -## Configure existing VMs - -For existing SQL Server virtual machines, go to the [SQL virtual machines resource](manage-sql-vm-portal.md#access-the-resource) and then select **Backups** to configure your automated backups. - -Select **Enable** to configure your automated backup settings. - -You can configure the retention period (up to 90 days), the container for the storage account where you want to store your backups, as well as the encryption, and the backup schedule. By default, the schedule is automated. - -![Automated Backup for existing VMs](./media/automated-backup/sql-server-configuration.png) - -If you want to set your own backup schedule, choose **Manual** and configure the backup frequency, whether or not you want system databases backed up, and the transaction log backup interval in minutes. - -![Select manual to configure your own backup schedule](./media/automated-backup/configure-manual-backup-schedule.png) - -When finished, click the **Apply** button on the bottom of the **Backups** settings page to save your changes. - -If you are enabling Automated Backup for the first time, Azure configures the SQL Server IaaS Agent in the background. During this time, the Azure portal might not show that Automated Backup is configured. Wait several minutes for the agent to be installed and configured. After that, the Azure portal will reflect the new settings. - -## Configure with PowerShell - -You can use PowerShell to configure Automated Backup v2. Before you begin, you must: - -- [Download and install the latest Azure PowerShell](https://aka.ms/webpi-azps). -- Open Windows PowerShell and associate it with your account with the **Connect-AzAccount** command. 
- -[!INCLUDE [updated-for-az.md](../../../../includes/updated-for-az.md)] - -### Install the SQL Server IaaS Extension -If you provisioned a SQL Server virtual machine from the Azure portal, the SQL Server IaaS Extension should already be installed. You can determine whether it is installed for your VM by calling **Get-AzVM** command and examining the **Extensions** property. - -```powershell -$vmname = "vmname" -$resourcegroupname = "resourcegroupname" - -(Get-AzVM -Name $vmname -ResourceGroupName $resourcegroupname).Extensions -``` - -If the SQL Server IaaS Agent extension is installed, you should see it listed as "SqlIaaSAgent" or "SQLIaaSExtension." **ProvisioningState** for the extension should also show "Succeeded." - -If it is not installed or it has failed to be provisioned, you can install it with the following command. In addition to the VM name and resource group, you must also specify the region (**$region**) that your VM is located in. - -```powershell -$region = "EASTUS2" -Set-AzVMSqlServerExtension -VMName $vmname ` - -ResourceGroupName $resourcegroupname -Name "SQLIaasExtension" ` - -Version "2.0" -Location $region -``` - -### Verify current settings -If you enabled Automated Backup during provisioning, you can use PowerShell to check your current configuration. Run the **Get-AzVMSqlServerExtension** command and examine the **AutoBackupSettings** property: - -```powershell -(Get-AzVMSqlServerExtension -VMName $vmname -ResourceGroupName $resourcegroupname).AutoBackupSettings -``` - -You should get output similar to the following: - -``` -Enable : True -EnableEncryption : False -RetentionPeriod : 30 -StorageUrl : https://test.blob.core.windows.net/ -StorageAccessKey : -Password : -BackupSystemDbs : False -BackupScheduleType : Manual -FullBackupFrequency : WEEKLY -FullBackupStartTime : 2 -FullBackupWindowHours : 2 -LogBackupFrequency : 60 -``` - -If your output shows that **Enable** is set to **False**, then you have to enable Automated Backup. 
The good news is that you enable and configure Automated Backup in the same way. See the next section for this information. - -> [!NOTE] -> If you check the settings immediately after making a change, it is possible that you will get back the old configuration values. Wait a few minutes and check the settings again to make sure that your changes were applied. - -### Configure Automated Backup v2 -You can use PowerShell to enable Automated Backup as well as to modify its configuration and behavior at any time. - -First, select, or create a storage account for the backup files. The following script selects a storage account or creates it if it does not exist. - -```powershell -$storage_accountname = "yourstorageaccount" -$storage_resourcegroupname = $resourcegroupname - -$storage = Get-AzStorageAccount -ResourceGroupName $resourcegroupname ` - -Name $storage_accountname -ErrorAction SilentlyContinue -If (-Not $storage) - { $storage = New-AzStorageAccount -ResourceGroupName $storage_resourcegroupname ` - -Name $storage_accountname -SkuName Standard_GRS -Location $region } -``` - -> [!NOTE] -> Automated Backup does not support storing backups in premium storage, but it can take backups from VM disks which use Premium Storage. - -Then use the **New-AzVMSqlServerAutoBackupConfig** command to enable and configure the Automated Backup v2 settings to store backups in the Azure storage account. In this example, the backups are set to be retained for 10 days. System database backups are enabled. Full backups are scheduled for weekly with a time window starting at 20:00 for two hours. Log backups are scheduled for every 30 minutes. The second command, **Set-AzVMSqlServerExtension**, updates the specified Azure VM with these settings. 
- -```powershell -$autobackupconfig = New-AzVMSqlServerAutoBackupConfig -Enable ` - -RetentionPeriodInDays 10 -StorageContext $storage.Context ` - -ResourceGroupName $storage_resourcegroupname -BackupSystemDbs ` - -BackupScheduleType Manual -FullBackupFrequency Weekly ` - -FullBackupStartHour 20 -FullBackupWindowInHours 2 ` - -LogBackupFrequencyInMinutes 30 - -Set-AzVMSqlServerExtension -AutoBackupSettings $autobackupconfig ` - -VMName $vmname -ResourceGroupName $resourcegroupname -``` - -It could take several minutes to install and configure the SQL Server IaaS Agent. - -To enable encryption, modify the previous script to pass the **EnableEncryption** parameter along with a password (secure string) for the **CertificatePassword** parameter. The following script enables the Automated Backup settings in the previous example and adds encryption. - -```powershell -$password = "P@ssw0rd" -$encryptionpassword = $password | ConvertTo-SecureString -AsPlainText -Force - -$autobackupconfig = New-AzVMSqlServerAutoBackupConfig -Enable ` - -EnableEncryption -CertificatePassword $encryptionpassword ` - -RetentionPeriodInDays 10 -StorageContext $storage.Context ` - -ResourceGroupName $storage_resourcegroupname -BackupSystemDbs ` - -BackupScheduleType Manual -FullBackupFrequency Weekly ` - -FullBackupStartHour 20 -FullBackupWindowInHours 2 ` - -LogBackupFrequencyInMinutes 30 - -Set-AzVMSqlServerExtension -AutoBackupSettings $autobackupconfig ` - -VMName $vmname -ResourceGroupName $resourcegroupname -``` - -To confirm your settings are applied, [verify the Automated Backup configuration](#verify-current-settings). - -### Disable Automated Backup -To disable Automated Backup, run the same script without the **-Enable** parameter to the **New-AzVMSqlServerAutoBackupConfig** command. The absence of the **-Enable** parameter signals the command to disable the feature. As with installation, it can take several minutes to disable Automated Backup. 
- -```powershell -$autobackupconfig = New-AzVMSqlServerAutoBackupConfig -ResourceGroupName $storage_resourcegroupname - -Set-AzVMSqlServerExtension -AutoBackupSettings $autobackupconfig ` - -VMName $vmname -ResourceGroupName $resourcegroupname -``` - -### Example script -The following script provides a set of variables that you can customize to enable and configure Automated Backup for your VM. In your case, you might need to customize the script based on your requirements. For example, you would have to make changes if you wanted to disable the backup of system databases or enable encryption. - -```powershell -$vmname = "yourvmname" -$resourcegroupname = "vmresourcegroupname" -$region = "Azure region name such as EASTUS2" -$storage_accountname = "storageaccountname" -$storage_resourcegroupname = $resourcegroupname -$retentionperiod = 10 -$backupscheduletype = "Manual" -$fullbackupfrequency = "Weekly" -$fullbackupstarthour = "20" -$fullbackupwindow = "2" -$logbackupfrequency = "30" - -# ResourceGroupName is the resource group which is hosting the VM where you are deploying the SQL Server IaaS Extension - -Set-AzVMSqlServerExtension -VMName $vmname ` - -ResourceGroupName $resourcegroupname -Name "SQLIaasExtension" ` - -Version "2.0" -Location $region - -# Creates/use a storage account to store the backups - -$storage = Get-AzStorageAccount -ResourceGroupName $resourcegroupname ` - -Name $storage_accountname -ErrorAction SilentlyContinue -If (-Not $storage) - { $storage = New-AzStorageAccount -ResourceGroupName $storage_resourcegroupname ` - -Name $storage_accountname -SkuName Standard_GRS -Location $region } - -# Configure Automated Backup settings - -$autobackupconfig = New-AzVMSqlServerAutoBackupConfig -Enable ` - -RetentionPeriodInDays $retentionperiod -StorageContext $storage.Context ` - -ResourceGroupName $storage_resourcegroupname -BackupSystemDbs ` - -BackupScheduleType $backupscheduletype -FullBackupFrequency $fullbackupfrequency ` - -FullBackupStartHour 
$fullbackupstarthour -FullBackupWindowInHours $fullbackupwindow ` - -LogBackupFrequencyInMinutes $logbackupfrequency - -# Apply the Automated Backup settings to the VM - -Set-AzVMSqlServerExtension -AutoBackupSettings $autobackupconfig ` - -VMName $vmname -ResourceGroupName $resourcegroupname -``` - -## Monitoring - -To monitor Automated Backup on SQL Server 2016/2017, you have two main options. Because Automated Backup uses the SQL Server Managed Backup feature, the same monitoring techniques apply to both. - -First, you can poll the status by calling [msdb.managed_backup.sp_get_backup_diagnostics](/sql/relational-databases/system-stored-procedures/managed-backup-sp-get-backup-diagnostics-transact-sql). Or query the [msdb.managed_backup.fn_get_health_status](/sql/relational-databases/system-functions/managed-backup-fn-get-health-status-transact-sql) table-valued function. - -Another option is to take advantage of the built-in Database Mail feature for notifications. - -1. Call the [msdb.managed_backup.sp_set_parameter](/sql/relational-databases/system-stored-procedures/managed-backup-sp-set-parameter-transact-sql) stored procedure to assign an email address to the **SSMBackup2WANotificationEmailIds** parameter. -1. Enable [SendGrid](https://docs.sendgrid.com/for-developers/partners/microsoft-azure-2021#create-a-twilio-sendgrid-accountcreate-a-twilio-sendgrid-account) to send the emails from the Azure VM. -1. Use the SMTP server and user name to configure Database Mail. You can configure Database Mail in SQL Server Management Studio or with Transact-SQL commands. For more information, see [Database Mail](/sql/relational-databases/database-mail/database-mail). -1. [Configure SQL Server Agent to use Database Mail](/sql/relational-databases/database-mail/configure-sql-server-agent-mail-to-use-database-mail). -1. Verify that the SMTP port is allowed both through the local VM firewall and the network security group for the VM. 
- -## Next steps -Automated Backup v2 configures Managed Backup on Azure VMs. So it is important to [review the documentation for Managed Backup](/sql/relational-databases/backup-restore/sql-server-managed-backup-to-microsoft-azure) to understand the behavior and implications. - -You can find additional backup and restore guidance for SQL Server on Azure VMs in the following article: [Backup and restore for SQL Server on Azure virtual machines](backup-restore.md). - -For information about other available automation tasks, see [SQL Server IaaS Agent Extension](sql-server-iaas-agent-extension-automate-management.md). - -For more information about running SQL Server on Azure VMs, see [SQL Server on Azure virtual machines overview](sql-server-on-azure-vm-iaas-what-is-overview.md). diff --git a/articles/azure-sql/virtual-machines/windows/automated-patching.md b/articles/azure-sql/virtual-machines/windows/automated-patching.md deleted file mode 100644 index bacb663a518c4..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/automated-patching.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: Automated Patching for SQL Server VMs (Resource Manager) | Microsoft Docs -description: This article explains the Automated Patching feature for SQL Server virtual machines running on Azure using Resource Manager. 
-services: virtual-machines-windows -documentationcenter: na -author: bluefooted -editor: '' -tags: azure-resource-manager -ms.assetid: 58232e92-318f-456b-8f0a-2201a541e08d -ms.service: virtual-machines-sql -ms.subservice: management - -ms.topic: article -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 03/07/2018 -ms.author: pamela -ms.reviewer: mathoma -ms.custom: devx-track-azurepowershell ---- -# Automated Patching for SQL Server on Azure virtual machines (Resource Manager) -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -Automated Patching establishes a maintenance window for an Azure virtual machine running SQL Server. Automated Updates can only be installed during this maintenance window. For SQL Server, this restriction ensures that system updates and any associated restarts occur at the best possible time for the database. - -> [!IMPORTANT] -> Only Windows and SQL Server updates marked as **Important** or **Critical** are installed. Other SQL Server updates, such as service packs and cumulative updates that are not marked as **Important** or **Critical**, must be installed manually. - -Automated Patching depends on the [SQL Server infrastructure as a service (IaaS) Agent Extension](sql-server-iaas-agent-extension-automate-management.md). - -## Prerequisites -To use Automated Patching, consider the following prerequisites: - -**Operating system**: - -* Windows Server 2008 R2 -* Windows Server 2012 -* Windows Server 2012 R2 -* Windows Server 2016 -* Windows Server 2019 - -**SQL Server version**: - -* SQL Server 2008 R2 -* SQL Server 2012 -* SQL Server 2014 -* SQL Server 2016 -* SQL Server 2017 -* SQL Server 2019 - -**Azure PowerShell**: - -* [Install the latest Azure PowerShell commands](/powershell/azure/) if you plan to configure Automated Patching with PowerShell. 
- -[!INCLUDE [updated-for-az.md](../../../../includes/updated-for-az.md)] - -> [!NOTE] -> Automated Patching relies on the SQL Server IaaS Agent Extension. Current SQL virtual machine gallery images add this extension by default. For more information, see [SQL Server IaaS Agent Extension](sql-server-iaas-agent-extension-automate-management.md). -> -> - -## Settings -The following table describes the options that can be configured for Automated Patching. The actual configuration steps vary depending on whether you use the Azure portal or Azure Windows PowerShell commands. - -| Setting | Possible values | Description | -| --- | --- | --- | -| **Automated Patching** |Enable/Disable (Disabled) |Enables or disables Automated Patching for an Azure virtual machine. | -| **Maintenance schedule** |Everyday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday |The schedule for downloading and installing Windows, SQL Server, and Microsoft updates for your virtual machine. | -| **Maintenance start hour** |0-24 |The local start time to update the virtual machine. | -| **Maintenance window duration** |30-180 |The number of minutes permitted to complete the download and installation of updates. | -| **Patch Category** |Important | The category of Windows updates to download and install.| - -## Configure in the Azure portal -You can use the Azure portal to configure Automated Patching during provisioning or for existing VMs. - -### New VMs -Use the Azure portal to configure Automated Patching when you create a new SQL Server virtual machine in the Resource Manager deployment model. - -In the **SQL Server settings** tab, select **Change configuration** under **Automated patching**. The following Azure portal screenshot shows the **SQL Automated Patching** blade. - -![SQL Automated Patching in the Azure portal](./media/automated-patching/azure-sql-arm-patching.png) - -For more information, see [Provision a SQL Server virtual machine on Azure](create-sql-vm-portal.md). 
- -### Existing VMs - -For existing SQL Server virtual machines, open your [SQL virtual machines resource](manage-sql-vm-portal.md#access-the-resource) and select **Patching** under **Settings**. - -![SQL Automatic Patching for existing VMs](./media/automated-patching/azure-sql-rm-patching-existing-vms.png) - - -When you're finished, click the **OK** button on the bottom of the **SQL Server configuration** blade to save your changes. - -If you are enabling Automated Patching for the first time, Azure configures the SQL Server IaaS Agent in the background. During this time, the Azure portal might not show that Automated Patching is configured. Wait several minutes for the agent to be installed and configured. After that the Azure portal reflects the new settings. - -## Configure with PowerShell -After provisioning your SQL VM, use PowerShell to configure Automated Patching. - -In the following example, PowerShell is used to configure Automated Patching on an existing SQL Server VM. The **New-AzVMSqlServerAutoPatchingConfig** command configures a new maintenance window for automatic updates. - -```azurepowershell -$vmname = "vmname" -$resourcegroupname = "resourcegroupname" -$aps = New-AzVMSqlServerAutoPatchingConfig -Enable -DayOfWeek "Thursday" -MaintenanceWindowStartingHour 11 -MaintenanceWindowDuration 120 -PatchCategory "Important" -s -Set-AzVMSqlServerExtension -AutoPatchingSettings $aps -VMName $vmname -ResourceGroupName $resourcegroupname -``` - -Based on this example, the following table describes the practical effect on the target Azure VM: - -| Parameter | Effect | -| --- | --- | -| **DayOfWeek** |Patches installed every Thursday. | -| **MaintenanceWindowStartingHour** |Begin updates at 11:00am. | -| **MaintenanceWindowDuration** |Patches must be installed within 120 minutes. Based on the start time, they must complete by 1:00pm. | -| **PatchCategory** |The only possible setting for this parameter is **Important**. 
This installs Windows updates marked Important; it does not install any SQL Server updates that are not included in this category. | - -It could take several minutes to install and configure the SQL Server IaaS Agent. - -To disable Automated Patching, run the same script without the **-Enable** parameter to the **New-AzVMSqlServerAutoPatchingConfig**. The absence of the **-Enable** parameter signals the command to disable the feature. - -> [!NOTE] -> There are also several other ways to enable automatic patching of Azure VMs, such as [Update Management](../../../automation/update-management/overview.md) or [Automatic VM guest patching](../../../virtual-machines/automatic-vm-guest-patching.md). Choose only one option to automatically update your VM as overlapping tools may lead to failed updates. - - -## Next steps -For information about other available automation tasks, see [SQL Server IaaS Agent Extension](sql-server-iaas-agent-extension-automate-management.md). - -For more information about running SQL Server on Azure VMs, see [SQL Server on Azure virtual machines overview](sql-server-on-azure-vm-iaas-what-is-overview.md). \ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/windows/availability-group-az-commandline-configure.md b/articles/azure-sql/virtual-machines/windows/availability-group-az-commandline-configure.md deleted file mode 100644 index 25e9ce057f016..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/availability-group-az-commandline-configure.md +++ /dev/null @@ -1,532 +0,0 @@ ---- -title: Configure an availability group (PowerShell & Az CLI) -description: "Use either PowerShell or the Azure CLI to create the Windows failover cluster, the availability group listener, and the internal load balancer on a SQL Server VM in Azure." 
-services: virtual-machines-windows -documentationcenter: na -author: rajeshsetlem -tags: azure-resource-manager -ms.service: virtual-machines-sql -ms.subservice: hadr - -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.reviewer: mathoma -ms.custom: "seo-lt-2019, devx-track-azurecli, devx-track-azurepowershell" - ---- -# Use PowerShell or Az CLI to configure an availability group for SQL Server on Azure VM -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!TIP] -> Eliminate the need for an Azure Load Balancer for your Always On availability (AG) group by creating your SQL Server VMs in [multiple subnets](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md) within the same Azure virtual network. - -This article describes how to use [PowerShell](/powershell/scripting/install/installing-powershell) or the [Azure CLI](/cli/azure/sql/vm) to deploy a Windows failover cluster, add SQL Server VMs to the cluster, and create the internal load balancer and listener for an Always On availability group within a single subnet. - -Deployment of the availability group is still done manually through SQL Server Management Studio (SSMS) or Transact-SQL (T-SQL). - -While this article uses PowerShell and the Az CLI to configure the availability group environment, it is also possible to do so from the [Azure portal](availability-group-azure-portal-configure.md), using [Azure Quickstart templates](availability-group-quickstart-template-configure.md), or [Manually](availability-group-manually-configure-tutorial-single-subnet.md) as well. - -> [!NOTE] -> It's now possible to lift and shift your availability group solution to SQL Server on Azure VMs using Azure Migrate. See [Migrate availability group](../../migration-guides/virtual-machines/sql-server-availability-group-to-sql-on-azure-vm.md) to learn more. 
- -## Prerequisites - -To configure an Always On availability group, you must have the following prerequisites: - -- An [Azure subscription](https://azure.microsoft.com/free/). -- A resource group with a domain controller. -- One or more domain-joined [VMs in Azure running SQL Server 2016 (or later) Enterprise edition](./create-sql-vm-portal.md) in the *same* availability set or *different* availability zones that have been [registered with the SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md). -- The latest version of [PowerShell](/powershell/scripting/install/installing-powershell) or the [Azure CLI](/cli/azure/install-azure-cli). -- Two available (not used by any entity) IP addresses. One is for the internal load balancer. The other is for the availability group listener within the same subnet as the availability group. If you're using an existing load balancer, you only need one available IP address for the availability group listener. - -## Permissions - -You need the following account permissions to configure the Always On availability group by using the Azure CLI: - -- An existing domain user account that has **Create Computer Object** permission in the domain. For example, a domain admin account typically has sufficient permission (for example: account@domain.com). _This account should also be part of the local administrator group on each VM to create the cluster._ -- The domain user account that controls SQL Server. - -## Create a storage account - -The cluster needs a storage account to act as the cloud witness. You can use any existing storage account, or you can create a new storage account. If you want to use an existing storage account, skip ahead to the next section. 
- -The following code snippet creates the storage account: - -# [Azure CLI](#tab/azure-cli) - -```azurecli-interactive -# Create the storage account -# example: az storage account create -n 'cloudwitness' -g SQLVM-RG -l 'West US' ` -# --sku Standard_LRS --kind StorageV2 --access-tier Hot --https-only true - -az storage account create -n -g -l ` - --sku Standard_LRS --kind StorageV2 --access-tier Hot --https-only true -``` - ->[!TIP] -> You might see the error `az sql: 'vm' is not in the 'az sql' command group` if you're using an outdated version of the Azure CLI. Download the [latest version of Azure CLI](/cli/azure/install-azure-cli-windows) to get past this error. - - -# [PowerShell](#tab/azure-powershell) - -```powershell-interactive -# Create the storage account -# example: New-AzStorageAccount -ResourceGroupName SQLVM-RG -Name cloudwitness ` -# -SkuName Standard_LRS -Location West US -Kind StorageV2 ` -# -AccessTier Hot -EnableHttpsTrafficOnly $true - -New-AzStorageAccount -ResourceGroupName -Name ` - -SkuName Standard_LRS -Location -Kind StorageV2 ` - -AccessTier Hot -EnableHttpsTrafficOnly $true -``` - ---- - -## Define cluster metadata - -The Azure CLI [az sql vm group](/cli/azure/sql/vm/group) command group manages the metadata of the Windows Server Failover Cluster (WSFC) service that hosts the availability group. Cluster metadata includes the Active Directory domain, cluster accounts, storage accounts to be used as the cloud witness, and SQL Server version. Use [az sql vm group create](/cli/azure/sql/vm/group#az-sql-vm-group-create) to define the metadata for WSFC so that when the first SQL Server VM is added, the cluster is created as defined. 
- -The following code snippet defines the metadata for the cluster: - -# [Azure CLI](#tab/azure-cli) - -```azurecli-interactive -# Define the cluster metadata -# example: az sql vm group create -n Cluster -l 'West US' -g SQLVM-RG ` -# --image-offer SQL2017-WS2016 --image-sku Enterprise --domain-fqdn domain.com ` -# --operator-acc vmadmin@domain.com --bootstrap-acc vmadmin@domain.com --service-acc sqlservice@domain.com ` -# --sa-key '4Z4/i1Dn8/bpbseyWX' ` -# --storage-account 'https://cloudwitness.blob.core.windows.net/' - -az sql vm group create -n -l -g ` - --image-offer --image-sku Enterprise --domain-fqdn ` - --operator-acc --bootstrap-acc ` - --service-acc ` - --sa-key '' ` - --storage-account '' -``` - -# [PowerShell](#tab/azure-powershell) - -```powershell-interactive -# Define the cluster metadata -# example: $group = New-AzSqlVMGroup -Name Cluster -Location West US ' -# -ResourceGroupName SQLVM-RG -Offer SQL2017-WS2016 -# -Sku Enterprise -DomainFqdn domain.com -ClusterOperatorAccount vmadmin@domain.com -# -ClusterBootstrapAccount vmadmin@domain.com -SqlServiceAccount sqlservice@domain.com -# -StorageAccountUrl '' ` -# -StorageAccountPrimaryKey '4Z4/i1Dn8/bpbseyWX' - -$storageAccountPrimaryKey = ConvertTo-SecureString -String "" -AsPlainText -Force -$group = New-AzSqlVMGroup -Name -Location - -ResourceGroupName -Offer - -Sku Enterprise -DomainFqdn -ClusterOperatorAccount - -ClusterBootstrapAccount -SqlServiceAccount - -StorageAccountUrl '' ` - -StorageAccountPrimaryKey $storageAccountPrimaryKey -``` - ---- - -## Add VMs to the cluster - -Adding the first SQL Server VM to the cluster creates the cluster. The [az sql vm add-to-group](/cli/azure/sql/vm#az-sql-vm-add-to-group) command creates the cluster with the name previously given, installs the cluster role on the SQL Server VMs, and adds them to the cluster. Subsequent uses of the `az sql vm add-to-group` command add more SQL Server VMs to the newly created cluster. 
- -The following code snippet creates the cluster and adds the first SQL Server VM to it: - -# [Azure CLI](#tab/azure-cli) - -```azurecli-interactive -# Add SQL Server VMs to cluster -# example: az sql vm add-to-group -n SQLVM1 -g SQLVM-RG --sqlvm-group Cluster ` -# -b Str0ngAzur3P@ssword! -p Str0ngAzur3P@ssword! -s Str0ngAzur3P@ssword! -# example: az sql vm add-to-group -n SQLVM2 -g SQLVM-RG --sqlvm-group Cluster ` -# -b Str0ngAzur3P@ssword! -p Str0ngAzur3P@ssword! -s Str0ngAzur3P@ssword! - -az sql vm add-to-group -n -g --sqlvm-group ` - -b -p -s -az sql vm add-to-group -n -g --sqlvm-group ` - -b -p -s -``` -Use this command to add any other SQL Server VMs to the cluster. Modify only the `-n` parameter for the SQL Server VM name. - -# [PowerShell](#tab/azure-powershell) - -```powershell-interactive -# Add SQL Server VMs to cluster -# example: $sqlvm1 = Get-AzSqlVM -Name SQLVM1 -ResourceGroupName SQLVM-RG -# $sqlvm2 = Get-AzSqlVM -Name SQLVM2 -ResourceGroupName SQLVM-RG - -# $sqlvmconfig1 = Set-AzSqlVMConfigGroup -SqlVM $sqlvm1 ` -# -SqlVMGroup $group -ClusterOperatorAccountPassword Str0ngAzur3P@ssword! ` -# -SqlServiceAccountPassword Str0ngAzur3P@ssword! ` -# -ClusterBootstrapAccountPassword Str0ngAzur3P@ssword! - -# $sqlvmconfig2 = Set-AzSqlVMConfigGroup -SqlVM $sqlvm2 ` -# -SqlVMGroup $group -ClusterOperatorAccountPassword Str0ngAzur3P@ssword! ` -# -SqlServiceAccountPassword Str0ngAzur3P@ssword! ` -# - ClusterBootstrapAccountPassword Str0ngAzur3P@ssword! 
- -$sqlvm1 = Get-AzSqlVM -Name -ResourceGroupName -$sqlvm2 = Get-AzSqlVM -Name -ResourceGroupName - -$sqlvmconfig1 = Set-AzSqlVMConfigGroup -SqlVM $sqlvm1 ` - -SqlVMGroup $group -ClusterOperatorAccountPassword ` - -SqlServiceAccountPassword ` - -ClusterBootstrapAccountPassword - -Update-AzSqlVM -ResourceId $sqlvm1.ResourceId -SqlVM $sqlvmconfig1 - -$sqlvmconfig2 = Set-AzSqlVMConfigGroup -SqlVM $sqlvm2 ` - -SqlVMGroup $group -ClusterOperatorAccountPassword ` - -SqlServiceAccountPassword ` - -ClusterBootstrapAccountPassword - -Update-AzSqlVM -ResourceId $sqlvm2.ResourceId -SqlVM $sqlvmconfig2 -``` - ---- - -## Configure quorum - -Although the disk witness is the most resilient quorum option, it requires an Azure shared disk which imposes some limitations to the availability group. As such, the cloud witness is the recommended quorum solution for clusters hosting availability groups for SQL Server on Azure VMs. - -If you have an even number of votes in the cluster, configure the [quorum solution](hadr-cluster-quorum-configure-how-to.md) that best suits your business needs. For more information, see [Quorum with SQL Server VMs](hadr-windows-server-failover-cluster-overview.md#quorum). - - -## Validate cluster - -For a failover cluster to be supported by Microsoft, it must pass cluster validation. Connect to the VM using your preferred method, such as Remote Desktop Protocol (RDP) and validate that your cluster passes validation before proceeding further. Failure to do so leaves your cluster in an unsupported state. 
- -You can validate the cluster using Failover Cluster Manager (FCM) or the following PowerShell command: - - ```powershell - Test-Cluster –Node ("","") –Include "Inventory", "Network", "System Configuration" - ``` - -## Create availability group - -Manually create the availability group as you normally would, by using [SQL Server Management Studio](/sql/database-engine/availability-groups/windows/use-the-availability-group-wizard-sql-server-management-studio), [PowerShell](/sql/database-engine/availability-groups/windows/create-an-availability-group-sql-server-powershell), or [Transact-SQL](/sql/database-engine/availability-groups/windows/create-an-availability-group-transact-sql). - ->[!IMPORTANT] -> Do *not* create a listener at this time because this is done through the Azure CLI in the following sections. - -## Create internal load balancer - -[!INCLUDE [sql-ag-use-dnn-listener](../../includes/sql-ag-use-dnn-listener.md)] - -The Always On availability group listener requires an internal instance of Azure Load Balancer. The internal load balancer provides a “floating” IP address for the availability group listener that allows for faster failover and reconnection. If the SQL Server VMs in an availability group are part of the same availability set, you can use a Basic load balancer. Otherwise, you need to use a Standard load balancer. - -> [!NOTE] -> The internal load balancer should be in the same virtual network as the SQL Server VM instances. 
- -The following code snippet creates the internal load balancer: - -# [Azure CLI](#tab/azure-cli) - -```azurecli-interactive -# Create the internal load balancer -# example: az network lb create --name sqlILB -g SQLVM-RG --sku Standard ` -# --vnet-name SQLVMvNet --subnet default - -az network lb create --name sqlILB -g --sku Standard ` - --vnet-name --subnet -``` - -# [PowerShell](#tab/azure-powershell) - -```powershell-interactive -# Create the internal load balancer -# example: New-AzLoadBalancer -name sqlILB -ResourceGroupName SQLVM-RG ` -# -Sku Standard -Location West US - -New-AzLoadBalancer -name sqlILB -ResourceGroupName ` - -Sku Standard -Location -``` - ---- - ->[!IMPORTANT] -> The public IP resource for each SQL Server VM should have a Standard SKU to be compatible with the Standard load balancer. To determine the SKU of your VM's public IP resource, go to **Resource Group**, select your **Public IP Address** resource for the desired SQL Server VM, and locate the value under **SKU** in the **Overview** pane. - -## Create listener - -After you manually create the availability group, you can create the listener by using [az sql vm ag-listener](/cli/azure/sql/vm/group/ag-listener#az-sql-vm-group-ag-listener-create). - -The *subnet resource ID* is the value of `/subnets/` appended to the resource ID of the virtual network resource. To identify the subnet resource ID: - 1. Go to your resource group in the [Azure portal](https://portal.azure.com). - 1. Select the virtual network resource. - 1. Select **Properties** in the **Settings** pane. - 1. Identify the resource ID for the virtual network and append `/subnets/` to the end of it to create the subnet resource ID. 
For example: - - Your virtual network resource ID is: - `/subscriptions/a1a1-1a11a/resourceGroups/SQLVM-RG/providers/Microsoft.Network/virtualNetworks/SQLVMvNet` - - Your subnet name is: `default` - - Therefore, your subnet resource ID is: - `/subscriptions/a1a1-1a11a/resourceGroups/SQLVM-RG/providers/Microsoft.Network/virtualNetworks/SQLVMvNet/subnets/default` - - -The following code snippet creates the availability group listener: - -# [Azure CLI](#tab/azure-cli) - -```azurecli-interactive -# Create the availability group listener -# example: az sql vm group ag-listener create -n AGListener -g SQLVM-RG ` -# --ag-name SQLAG --group-name Cluster --ip-address 10.0.0.27 ` -# --load-balancer sqlilb --probe-port 59999 ` -# --subnet /subscriptions/a1a1-1a11a/resourceGroups/SQLVM-RG/providers/Microsoft.Network/virtualNetworks/SQLVMvNet/subnets/default ` -# --sqlvms sqlvm1 sqlvm2 - -az sql vm group ag-listener create -n -g ` - --ag-name --group-name --ip-address ` - --load-balancer --probe-port ` - --subnet ` - --sqlvms -``` - -# [PowerShell](#tab/azure-powershell) - -```powershell-interactive - -# example: New-AzAvailabilityGroupListener -Name AGListener -ResourceGroupName SQLVM-RG ` -# -AvailabilityGroupName SQLAG -GroupName Cluster ` -# -IpAddress 10.0.0.27 -LoadBalancerResourceId sqlilb ` -# -ProbePort 59999 ` -# -SubnetId /subscriptions/a1a1-1a11a/resourceGroups/SQLVM-RG/providers/Microsoft.Network/virtualNetworks/SQLVMvNet/subnets/default ` -# -SqlVirtualMachineId sqlvm1 sqlvm2 - - -New-AzAvailabilityGroupListener -Name -ResourceGroupName ` - -AvailabilityGroupName -GroupName ` - -IpAddress -LoadBalancerResourceId ` - -ProbePort ` - -SubnetId ` - -SqlVirtualMachineId -``` - ---- - -## Modify number of replicas -There's an added layer of complexity when you're deploying an availability group to SQL Server VMs hosted in Azure. The resource provider and the virtual machine group now manage the resources. 
As such, when you're adding or removing replicas in the availability group, there's an additional step of updating the listener metadata with information about the SQL Server VMs. When you're modifying the number of replicas in the availability group, you must also use the [az sql vm group ag-listener update](/cli/azure/sql/vm/group/ag-listener#az-sql-vm-group-ag-listener-update) command to update the listener with the metadata of the SQL Server VMs. - - -### Add a replica - -To add a new replica to the availability group: - -# [Azure CLI](#tab/azure-cli) - -1. Add the SQL Server VM to the cluster group: - ```azurecli-interactive - - # Add the SQL Server VM to the cluster group - # example: az sql vm add-to-group -n SQLVM3 -g SQLVM-RG --sqlvm-group Cluster ` - # -b Str0ngAzur3P@ssword! -p Str0ngAzur3P@ssword! -s Str0ngAzur3P@ssword! - - az sql vm add-to-group -n -g --sqlvm-group ` - -b -p -s - ``` - -1. Use SQL Server Management Studio to add the SQL Server instance as a replica within the availability group. -1. Add the SQL Server VM metadata to the listener: - - ```azurecli-interactive - # Update the listener metadata with the new VM - # example: az sql vm group ag-listener update -n AGListener ` - # -g sqlvm-rg --group-name Cluster --sqlvms sqlvm1 sqlvm2 sqlvm3 - - az sql vm group ag-listener update -n ` - -g --group-name --sqlvms - ``` - -# [PowerShell](#tab/azure-powershell) - -1. Add the SQL Server VM to the cluster group: - - ```powershell-interactive - # Add the SQL Server VM to the cluster group - # example: $sqlvm3 = Get-AzSqlVM -Name SQLVM3 -ResourceGroupName SQLVM-RG - # $group = Get-AzSqlVMGroup -ResourceGroupName SQLVM-RG -Name Cluster - # $sqlvmconfig3 = Set-AzSqlVMConfigGroup -SqlVM $sqlvm3 -SqlVMGroup $group ` - # -ClusterOperatorAccountPassword Str0ngAzur3P@ssword! ` - # -SqlServiceAccountPassword Str0ngAzur3P@ssword! ` - # -ClusterBootstrapAccountPassword Str0ngAzur3P@ssword! 
- - $sqlvm3 = Get-AzSqlVM -Name -ResourceGroupName - $group = Get-AzSqlVMGroup -ResourceGroupName -Name - $sqlvmconfig3 = Set-AzSqlVMConfigGroup -SqlVM $sqlvm3 -SqlVMGroup $group ` - -ClusterOperatorAccountPassword ` - -SqlServiceAccountPassword ` - -ClusterBootstrapAccountPassword - - Update-AzSqlVM -ResourceId $sqlvm3.ResourceId -SqlVM $sqlvmconfig3 - ``` - -1. Use SQL Server Management Studio to add the SQL Server instance as a replica within the availability group. -1. Add the SQL Server VM metadata to the listener: - - ```powershell-interactive - # Update the listener metadata with the new VM - # example: Update-AzAvailabilityGroupListener -Name AGListener -ResourceGroupName SQLVM-RG ` - # -SqlVMGroupName Cluster -SqlVirtualMachineId SQLVM3 - - Update-AzAvailabilityGroupListener -Name -ResourceGroupName ` - -SqlVMGroupName -SqlVirtualMachineId - - ``` - ---- - -### Remove a replica - -To remove a replica from the availability group: - -# [Azure CLI](#tab/azure-cli) - -1. Remove the replica from the availability group by using SQL Server Management Studio. -1. Remove the SQL Server VM metadata from the listener: - ```azurecli-interactive - # Update the listener metadata by removing the VM from the SQLVMs list - # example: az sql vm group ag-listener update -n AGListener ` - # -g sqlvm-rg --group-name Cluster --sqlvms sqlvm1 sqlvm2 - - az sql vm group ag-listener update -n ` - -g --group-name --sqlvms - ``` -1. Remove the SQL Server VM from the cluster: - ```azurecli-interactive - # Remove the SQL VM from the cluster - # example: az sql vm remove-from-group --name SQLVM3 --resource-group SQLVM-RG - - az sql vm remove-from-group --name --resource-group - ``` - -# [PowerShell](#tab/azure-powershell) - -1. Remove the replica from the availability group by using SQL Server Management Studio. -1. 
Remove the SQL Server VM metadata from the listener: - - ```powershell-interactive - # Update the listener metadata by removing the VM from the SQLVMs list - # example: Update-AzAvailabilityGroupListener -Name AGListener -ResourceGroupName SQLVM-RG ` - # -SqlVMGroupName Cluster -SqlVirtualMachineId SQLVM3 - - Update-AzAvailabilityGroupListener -Name -ResourceGroupName ` - -SqlVMGroupName -SqlVirtualMachineId - - ``` -1. Remove the SQL Server VM from the cluster: - - ```powershell-interactive - # Remove the SQL VM from the cluster - # example: $sqlvm = Get-AzSqlVM -Name SQLVM3 -ResourceGroupName SQLVM-RG - # $sqlvm. SqlVirtualMachineGroup = "" - # Update-AzSqlVM -ResourceId $sqlvm -SqlVM $sqlvm - - $sqlvm = Get-AzSqlVM -Name -ResourceGroupName - $sqlvm. SqlVirtualMachineGroup = "" - - Update-AzSqlVM -ResourceId $sqlvm -SqlVM $sqlvm - ``` - ---- - -## Remove listener -If you later need to remove the availability group listener configured with the Azure CLI, you must go through the SQL IaaS Agent extension. Because the listener is registered through the SQL IaaS Agent extension, just deleting it via SQL Server Management Studio is insufficient. - -The best method is to delete it through the SQL IaaS Agent extension by using the following code snippet in the Azure CLI. Doing so removes the availability group listener metadata from the SQL IaaS Agent extension. It also physically deletes the listener from the availability group. 
- -# [Azure CLI](#tab/azure-cli) - -```azurecli-interactive -# Remove the availability group listener -# example: az sql vm group ag-listener delete --group-name Cluster --name AGListener --resource-group SQLVM-RG - -az sql vm group ag-listener delete --group-name --name --resource-group -``` - -# [PowerShell](#tab/azure-powershell) - -```powershell-interactive -# Remove the availability group listener -# example: Remove-AzAvailabilityGroupListener -Name AGListener ` -# -ResourceGroupName SQLVM-RG -SqlVMGroupName Cluster - -Remove-AzAvailabilityGroupListener -Name ` - -ResourceGroupName -SqlVMGroupName -``` - ---- - -## Remove cluster - -Remove all of the nodes from the cluster to destroy it, and then remove the cluster metadata from the SQL IaaS Agent extension. You can do so by using the Azure CLI or PowerShell. - - -# [Azure CLI](#tab/azure-cli) - -First, remove all of the SQL Server VMs from the cluster: - -```azurecli-interactive -# Remove the VM from the cluster metadata -# example: az sql vm remove-from-group --name SQLVM2 --resource-group SQLVM-RG - -az sql vm remove-from-group --name --resource-group -az sql vm remove-from-group --name --resource-group -``` - -If these are the only VMs in the cluster, then the cluster will be destroyed. If there are any other VMs in the cluster apart from the SQL Server VMs that were removed, the other VMs will not be removed and the cluster will not be destroyed. - -Next, remove the cluster metadata from the SQL IaaS Agent extension: - -```azurecli-interactive -# Remove the cluster from the SQL VM RP metadata -# example: az sql vm group delete --name Cluster --resource-group SQLVM-RG - -az sql vm group delete --name Cluster --resource-group -``` - - - -# [PowerShell](#tab/azure-powershell) - -First, remove all of the SQL Server VMs from the cluster. 
This will physically remove the nodes from the cluster, and destroy the cluster: - -```powershell-interactive -# Remove the SQL VM from the cluster -# example: $sqlvm = Get-AzSqlVM -Name SQLVM3 -ResourceGroupName SQLVM-RG -# $sqlvm. SqlVirtualMachineGroup = "" -# Update-AzSqlVM -ResourceId $sqlvm -SqlVM $sqlvm - -$sqlvm = Get-AzSqlVM -Name -ResourceGroupName - $sqlvm. SqlVirtualMachineGroup = "" - - Update-AzSqlVM -ResourceId $sqlvm -SqlVM $sqlvm -``` - -If these are the only VMs in the cluster, then the cluster will be destroyed. If there are any other VMs in the cluster apart from the SQL Server VMs that were removed, the other VMs will not be removed and the cluster will not be destroyed. - -Next, remove the cluster metadata from the SQL IaaS Agent extension: - -```powershell-interactive -# Remove the cluster metadata -# example: Remove-AzSqlVMGroup -ResourceGroupName "SQLVM-RG" -Name "Cluster" - -Remove-AzSqlVMGroup -ResourceGroupName "" -Name " " -``` - ---- - -## Next steps - -Once the availability group is deployed, consider optimizing the [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md). 
- - -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Always On availability groups with SQL Server on Azure VMs](availability-group-overview.md) -- [Always On availability groups overview](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server) diff --git a/articles/azure-sql/virtual-machines/windows/availability-group-azure-portal-configure.md b/articles/azure-sql/virtual-machines/windows/availability-group-azure-portal-configure.md deleted file mode 100644 index ba390748f2c2e..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/availability-group-azure-portal-configure.md +++ /dev/null @@ -1,287 +0,0 @@ ---- -title: Configure an availability group (Azure portal) -description: "Use the Azure portal to create the Windows failover cluster, the availability group listener, and the internal load balancer on a SQL Server VM in Azure." -services: virtual-machines-windows -documentationcenter: na -author: rajeshsetlem -tags: azure-resource-manager -ms.service: virtual-machines-sql -ms.subservice: hadr - -ms.topic: article -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.reviewer: mathoma -ms.custom: "seo-lt-2019, devx-track-azurecli, devx-track-azurepowershell" - ---- -# Use Azure portal to configure an availability group (Preview) for SQL Server on Azure VM -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!TIP] -> Eliminate the need for an Azure Load Balancer for your Always On availability (AG) group by creating your SQL Server VMs in [multiple subnets](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md) within the same Azure virtual network. - -This article describes how to use the [Azure portal](https://portal.azure.com) to configure an availability group for SQL Server on Azure VMs within a single subnet. 
- -Use the Azure portal to create a new cluster or onboard an existing cluster, and then create the availability group, listener, and internal load balancer. - -This feature is currently in preview. - -While this article uses the Azure portal to configure the availability group environment, it is also possible to do so using [PowerShell or the Azure CLI](availability-group-az-commandline-configure.md), [Azure Quickstart templates](availability-group-quickstart-template-configure.md), or [Manually](availability-group-manually-configure-tutorial-single-subnet.md) as well. - -> [!NOTE] -> It's now possible to lift and shift your availability group solution to SQL Server on Azure VMs using Azure Migrate. See [Migrate availability group](../../migration-guides/virtual-machines/sql-server-availability-group-to-sql-on-azure-vm.md) to learn more. - - -## Prerequisites - -To configure an Always On availability group using the Azure portal, you must have the following prerequisites: - -- An [Azure subscription](https://azure.microsoft.com/free/). -- A resource group with a domain controller. -- One or more domain-joined [VMs in Azure running SQL Server 2016 (or later) Enterprise edition](./create-sql-vm-portal.md) in the *same* availability set or *different* availability zones that have been [registered with the SQL IaaS Agent extension in full manageability mode](sql-agent-extension-manually-register-single-vm.md) and are using the same domain account for the SQL Server service on each VM. -- Two available (not used by any entity) IP addresses. One is for the internal load balancer. The other is for the availability group listener within the same subnet as the availability group. If you're using an existing load balancer, you only need one available IP address for the availability group listener. 
- -## Permissions - -You need the following account permissions to configure the availability group by using the Azure portal: - -- An existing domain user account that has **Create Computer Object** permission in the domain. For example, a domain admin account typically has sufficient permission (for example: account@domain.com). _This account should also be part of the local administrator group on each VM to create the cluster._ -- The domain user account that controls SQL Server. This should be the same account for every SQL Server VM you intend to add to the availability group. - -## Configure cluster - -Configure the cluster by using the Azure portal. You can either create a new cluster, or if you already have an existing cluster, you can onboard it to the SQL IaaS Agent extension to for portal manageability. - - -### Create a new cluster - -If you already have a cluster, skip this section and move to [Onboard existing cluster](#onboard-existing-cluster) instead. - -If you do not already have an existing cluster, create it by using the Azure portal with these steps: - -1. Sign into the [Azure portal](https://portal.azure.com). -1. Navigate to your [SQL virtual machines](https://portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/Microsoft.SqlVirtualMachine%2FSqlVirtualMachines) resource. -1. Select **High Availability** under **Settings**. -1. Select **+ New Windows Server failover cluster** to open the **Configure Windows Failover cluster** page. - - :::image type="content" source="media/availability-group-az-portal-configure/create-new-cluster.png" alt-text="Create new cluster by selecting the + new cluster in the portal"::: - -1. Name your cluster and provide a storage account to use as the Cloud Witness. Use an existing storage account or select **Create new** to create a new storage account. Storage account name must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
- - :::image type="content" source="media/availability-group-az-portal-configure/configure-new-cluster-1.png" alt-text="Provide name, storage account, and credentials for the cluster"::: - -1. Expand **Windows Server Failover Cluster credentials** to provide [credentials](/rest/api/sqlvm/2021-11-01-preview/sql-virtual-machine-groups/create-or-update#wsfcdomainprofile) for the SQL Server service account, as well as the cluster operator and bootstrap accounts if they're different than the account used for the SQL Server service. - - :::image type="content" source="media/availability-group-az-portal-configure/configure-new-cluster-2.png" alt-text="Provide credentials for the SQL Service account, cluster operator account and cluster bootstrap account"::: - -1. Select the SQL Server VMs you want to add to the cluster. Note whether or not a restart is required, and proceed with caution. Only VMs that are registered with the SQL IaaS Agent extension in full manageability mode, and are in the same location, domain, and on the same virtual network as the primary SQL Server VM will be visible. -1. Select **Apply** to create the cluster. You can check the status of your deployment in the **Activity log** which is accessible from the bell icon in the top navigation bar. -1. For a failover cluster to be supported by Microsoft, it must pass cluster validation. Connect to the VM using your preferred method (such as Remote Desktop Protocol (RDP)) and validate that your cluster passes validation before proceeding further. Failure to do so leaves your cluster in an unsupported state. You can validate the cluster using Failover Cluster Manager (FCM) or the following PowerShell command: - - ```powershell - Test-Cluster –Node ("","") –Include "Inventory", "Network", "System Configuration" - ``` - - - -### Onboard existing cluster - -If you already have a cluster configured in your SQL Server VM environment, you can onboard it from the Azure portal. 
- -To do so, follow these steps: - -1. Sign into the [Azure portal](https://portal.azure.com). -1. Navigate to your [SQL virtual machines](https://portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/Microsoft.SqlVirtualMachine%2FSqlVirtualMachines) resource. -1. Select **High Availability** under **Settings**. -1. Select **Onboard existing Windows Server Failover Cluster** to open the **Onboard Windows Server Failover Cluster** page. - - :::image type="content" source="media/availability-group-az-portal-configure/onboard-existing-cluster.png" alt-text="Onboard an existing cluster from the High Availability page on your SQL virtual machines resource"::: - -1. Review the settings for your cluster. -1. Select **Apply** to onboard your cluster and then select **Yes** at the prompt to proceed. - -## Create availability group - -After your cluster was either created or onboarded, create the availability group by using the Azure portal. To do so, follow these steps: - -1. Sign into the [Azure portal](https://portal.azure.com). -1. Navigate to your [SQL virtual machines](https://portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/Microsoft.SqlVirtualMachine%2FSqlVirtualMachines) resource. -1. Select **High Availability** under **Settings**. -1. Select **+ New Always On availability group** to open the **Create availability group** page. - - :::image type="content" source="media/availability-group-az-portal-configure/create-new-availability-group.png" alt-text="Select new always on availability group to open the create availability group page."::: - -1. Enter a name for the availability group. -1. Select **Configure listener** to open the **Configure availability group listener** page. - - :::image type="content" source="media/availability-group-az-portal-configure/create-availability-group.png" alt-text="Provide a name for the availability group and configure a listener"::: - -1. 
Fill out the values, and either use an existing load balancer, or select **Create new** to create a new load balancer. Select **Apply** to save your settings and create your listener and load balancer. - - :::image type="content" source="media/availability-group-az-portal-configure/configure-new-listener.png" alt-text="Fill out the values in the form to create your new listener and load balancer"::: - -1. Choose **+ Select replica** to open the **Configure availability group replicas** page. -1. Select the virtual machines you want to add to the availability group, and choose the availability group settings that best suit your business needs. Select **Apply** to save your settings. - - :::image type="content" source="media/availability-group-az-portal-configure/add-replicas.png" alt-text="Choose VMs to add to your availability group and configure settings appropriate to your business"::: - -1. Verify your availability group settings and then select **Apply** to create your availability group. - -You can check the status of your deployment in the **Activity log** which is accessible from the bell icon in the top navigation bar. - - > [!NOTE] - > Your **Synchronization health** on the **High Availability** page of the Azure portal will show as **Not healthy** until you add databases to your availability group. - - -## Add database to availability group - -Add your databases to your availability group after deployment completes. The below steps use SQL Server Management Studio (SSMS) but you can use [Transact-SQL or PowerShell](/sql/database-engine/availability-groups/windows/availability-group-add-a-database) as well. - -To add databases to your availability group using SQL Server Management Studio, follow these steps: - -1. Connect to one of your SQL Server VMs by using your preferred method, such as Remote Desktop Connection (RDP). -1. Open SQL Server Management Studio (SSMS). -1. Connect to your SQL Server instance. -1. 
Expand **Always On High Availability** in **Object Explorer**. -1. Expand **Availability Groups**, right-click your availability group and choose to **Add database...**. - - :::image type="content" source="media/availability-group-az-portal-configure/add-database.png" alt-text="Right-click the availability group in object explorer and choose to Add database"::: - -1. Follow the prompts to select the database(s) you want to add to your availability group. -1. Select **OK** to save your settings and add your database to the availability group. -1. After the database is added, refresh **Object Explorer** to confirm the status of your database as `synchronized`. - -After databases are added, you can check the status of your availability group in the Azure portal: - -:::image type="content" source="media/availability-group-az-portal-configure/healthy-availability-group.png" alt-text="Check the status of your availability group from the high availability page from the Azure portal after databases are synchronized"::: - -## Add more VMs - -To add more SQL Server VMs to the cluster, follow these steps: - -1. Sign into the [Azure portal](https://portal.azure.com). -1. Navigate to your [SQL virtual machines](https://portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/Microsoft.SqlVirtualMachine%2FSqlVirtualMachines) resource. -1. Select **High Availability** under **Settings**. -1. Select **Configure Windows Server Failover Cluster** to open the **Configure Windows Server Failover Cluster** page. - - :::image type="content" source="media/availability-group-az-portal-configure/configure-existing-cluster.png" alt-text="Select Configure Windows Server Failover Cluster to add VMs to your cluster."::: - -1. Expand **Windows Server Failover Cluster credentials** and enter in the accounts used for the SQL Server service, cluster operator and cluster bootstrap accounts. -1. Select the SQL Server VMs you want to add to the cluster. -1. Select **Apply**. 
- -You can check the status of your deployment in the **Activity log** which is accessible from the bell icon in the top navigation bar. - -## Configure quorum - -Although the disk witness is the most resilient quorum option, it requires an Azure shared disk which imposes some limitations to the availability group. As such, the cloud witness is the recommended quorum solution for clusters hosting availability groups for SQL Server on Azure VMs. - -If you have an even number of votes in the cluster, configure the [quorum solution](hadr-cluster-quorum-configure-how-to.md) that best suits your business needs. For more information, see [Quorum with SQL Server VMs](hadr-windows-server-failover-cluster-overview.md#quorum). - - -## Modify availability group - - -You can **Add more replicas** to the availability group, **Configure the Listener**, or **Delete the Listener** from the **High Availability** page in the Azure portal by selecting the ellipses (...) next to your availability group: - -:::image type="content" source="media/availability-group-az-portal-configure/configure-listener.png" alt-text="Select the ellipses next to the availability group and then select add replica to add more replicas to the availability group."::: - -## Remove cluster - -Remove all of the SQL Server VMs from the cluster to destroy it, and then remove the cluster metadata from the SQL IaaS Agent extension. You can do so by using the latest version of the [Azure CLI](/cli/azure/install-azure-cli) or PowerShell. - -# [Azure CLI](#tab/azure-cli) - -First, remove all of the SQL Server VMs from the cluster. 
This will physically remove the nodes from the cluster, and destroy the cluster: - -```azurecli-interactive -# Remove the VM from the cluster metadata -# example: az sql vm remove-from-group --name SQLVM2 --resource-group SQLVM-RG - -az sql vm remove-from-group --name --resource-group -az sql vm remove-from-group --name --resource-group -``` - -If these are the only VMs in the cluster, then the cluster will be destroyed. If there are any other VMs in the cluster apart from the SQL Server VMs that were removed, the other VMs will not be removed and the cluster will not be destroyed. - -Next, remove the cluster metadata from the SQL IaaS Agent extension: - -```azurecli-interactive -# Remove the cluster from the SQL VM RP metadata -# example: az sql vm group delete --name Cluster --resource-group SQLVM-RG - -az sql vm group delete --name --resource-group -``` - -# [PowerShell](#tab/azure-powershell) - -First, remove all of the SQL Server VMs from the cluster. This will physically remove the nodes from the cluster, and destroy the cluster: - -```powershell-interactive -# Remove the SQL VM from the cluster -# example: $sqlvm = Get-AzSqlVM -Name SQLVM3 -ResourceGroupName SQLVM-RG -# $sqlvm. SqlVirtualMachineGroup = "" -# Update-AzSqlVM -ResourceId $sqlvm -SqlVM $sqlvm - -$sqlvm = Get-AzSqlVM -Name -ResourceGroupName - $sqlvm. SqlVirtualMachineGroup = "" - - Update-AzSqlVM -ResourceId $sqlvm -SqlVM $sqlvm -``` - -If these are the only VMs in the cluster, then the cluster will be destroyed. If there are any other VMs in the cluster apart from the SQL Server VMs that were removed, the other VMs will not be removed and the cluster will not be destroyed. 
- - -Next, remove the cluster metadata from the SQL IaaS Agent extension: - -```powershell-interactive -# Remove the cluster metadata -# example: Remove-AzSqlVMGroup -ResourceGroupName "SQLVM-RG" -Name "Cluster" - -Remove-AzSqlVMGroup -ResourceGroupName "" -Name "" -``` - ---- - -## Troubleshooting - -If you run into issues, you can check the deployment history, and review the common errors as well as their resolutions. - -### Check deployment history - -Changes to the cluster and availability group via the portal are done through deployments. Deployment history can provide greater detail if there are issues with creating, or onboarding the cluster, or with creating the availability group. - -To view the logs for the deployment, and check the deployment history, follow these steps: - -1. Sign into the [Azure portal](https://portal.azure.com). -1. Navigate to your resource group. -1. Select **Deployments** under **Settings**. -1. Select the deployment of interest to learn more about the deployment. - - - :::image type="content" source="media/availability-group-az-portal-configure/failed-deployment.png" alt-text="Select the deployment you're interested in learning more about." ::: - -### Common errors - -Review the following common errors and their resolutions. - -#### The account which is used to start up sql service is not a domain account - -This is an indication that the resource provider could not access the SQL Server service with the provided credentials. Some common resolutions: -- Ensure your domain controller is running. -- Validate the credentials provided in the portal match those of the SQL Server service. - - -## Next steps - -Once the availability group is deployed, consider optimizing the [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md). 
- - -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Always On availability groups with SQL Server on Azure VMs](availability-group-overview.md) -- [Always On availability groups overview](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server) diff --git a/articles/azure-sql/virtual-machines/windows/availability-group-clusterless-workgroup-configure.md b/articles/azure-sql/virtual-machines/windows/availability-group-clusterless-workgroup-configure.md deleted file mode 100644 index f1d7355e88948..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/availability-group-clusterless-workgroup-configure.md +++ /dev/null @@ -1,300 +0,0 @@ ---- -title: Configure a domain-independent workgroup availability group -description: Learn how to configure an Active Directory domain-independent workgroup Always On availability group on a SQL Server virtual machine in Azure. -services: virtual-machines-windows -documentationcenter: na -author: rajeshsetlem -editor: '' -tags: azure-service-management - -ms.assetid: 53981f7e-8370-4979-b26a-93a5988d905f -ms.service: virtual-machines-sql -ms.subservice: hadr - -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 01/29/2020 -ms.author: rsetlem -ms.reviewer: mathoma - ---- -# Configure a workgroup availability group -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article explains the steps necessary to create an Active Directory domain-independent cluster with an Always On availability group; this is also known as a workgroup cluster. This article focuses on the steps that are relevant to preparing and configuring the workgroup and availability group, and glosses over steps that are covered in other articles, such as how to create the cluster, or deploy the availability group. 
- - -## Prerequisites - -To configure a workgroup availability group, you need the following: -- At least two Windows Server 2016 (or higher) virtual machines running SQL Server 2016 (or higher), deployed to the same availability set, or different availability zones, using static IP addresses. -- A local network with a minimum of 4 free IP addresses on the subnet. -- An account on each machine in the administrator group that also has sysadmin rights within SQL Server. -- Open ports: TCP 1433, TCP 5022, TCP 59999. - -For reference, the following parameters are used in this article, but can be modified as is necessary: - -| **Name** | **Parameter** | -| :------ | :---------------------------------- | -| **Node1** | AGNode1 (10.0.0.4) | -| **Node2** | AGNode2 (10.0.0.5) | -| **Cluster name** | AGWGAG (10.0.0.6) | -| **Listener** | AGListener (10.0.0.7) | -| **DNS suffix** | ag.wgcluster.example.com | -| **Work group name** | AGWorkgroup | - - -## Set a DNS suffix - -In this step, configure the DNS suffix for both servers. For example, `ag.wgcluster.example.com`. This allows you to use the name of the object you want to connect to as a fully qualified address within your network, such as `AGNode1.ag.wgcluster.example.com`. - -To configure the DNS suffix, follow these steps: - -1. RDP in to your first node and open Server Manager. -1. Select **Local Server** and then select the name of your virtual machine under **Computer name**. -1. Select **Change...** under **To rename this computer...**. -1. Change the name of the workgroup name to be something meaningful, such as `AGWORKGROUP`: - - ![Change workgroup name](./media/availability-group-clusterless-workgroup-configure/1-change-workgroup-name.png) - -1. Select **More...** to open the **DNS Suffix and NetBIOS Computer Name** dialog box. -1. 
Type the name of your DNS suffix under **Primary DNS suffix of this computer**, such as `ag.wgcluster.example.com` and then select **OK**: - - ![Screenshot shows the D N S Suffix and NetBIOS Computer Name dialog box where you can enter the value.](./media/availability-group-clusterless-workgroup-configure/2-add-dns-suffix.png) - -1. Confirm that the **Full computer name** is now showing the DNS suffix, and then select **OK** to save your changes: - - ![Screenshot shows where to see your Full computer name.](./media/availability-group-clusterless-workgroup-configure/3-confirm-full-computer-name.png) - -1. Reboot the server when you are prompted to do so. -1. Repeat these steps on any other nodes to be used for the availability group. - -## Edit a host file - -Since there is no active directory, there is no way to authenticate Windows connections. As such, assign trust by editing the host file with a text editor. - -To edit the host file, follow these steps: - -1. RDP in to your virtual machine. -1. Use **File Explorer** to go to `c:\windows\system32\drivers\etc`. -1. Right-click the **hosts** file and open the file with **Notepad** (or any other text editor). -1. At the end of the file, add an entry for each node, the availability group, and the listener in the form of `IP Address, DNS Suffix #comment` like: - - ``` - 10.0.0.4 AGNode1.ag.wgcluster.example.com #Availability group node - 10.0.0.5 AGNode2.ag.wgcluster.example.com #Availability group node - 10.0.0.6 AGWGAG.ag.wgcluster.example.com #Cluster IP - 10.0.0.7 AGListener.ag.wgcluster.example.com #Listener IP - ``` - - ![Add entries for the IP address, cluster, and listener to the host file](./media/availability-group-clusterless-workgroup-configure/4-host-file.png) - -## Set permissions - -Since there is no Active Directory to manage permissions, you need to manually allow a non-builtin local administrator account to create the cluster. 
- -To do so, run the following PowerShell cmdlet in an administrative PowerShell session on every node: - -```PowerShell - -new-itemproperty -path HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System -Name LocalAccountTokenFilterPolicy -Value 1 -``` - -## Create the failover cluster - -In this step, you will create the failover cluster. If you're unfamiliar with these steps, you can follow them from the [failover cluster tutorial](failover-cluster-instance-storage-spaces-direct-manually-configure.md). - -Notable differences between the tutorial and what should be done for a workgroup cluster: -- Uncheck **Storage**, and **Storage Spaces Direct** when running the cluster validation. -- When adding the nodes to the cluster, add the fully qualified name, such as: - - `AGNode1.ag.wgcluster.example.com` - - `AGNode2.ag.wgcluster.example.com` -- Uncheck **Add all eligible storage to the cluster**. - -Once the cluster has been created, assign a static Cluster IP address. To do so, follow these steps: - -1. On one of the nodes, open **Failover Cluster Manager**, select the cluster, right-click the **Name: \** under **Cluster Core Resources** and then select **Properties**. - - ![Launch properties for the cluster name](./media/availability-group-clusterless-workgroup-configure/5-launch-cluster-name-properties.png) - -1. Select the IP address under **IP Addresses** and select **Edit**. -1. Select **Use Static**, provide the IP address of the cluster, and then select **OK**: - - ![Provide a static IP address for the cluster](./media/availability-group-clusterless-workgroup-configure/6-provide-static-ip-for-cluster.png) - -1. Verify that your settings look correct, and then select **OK** to save them: - - ![Verify cluster properties](./media/availability-group-clusterless-workgroup-configure/7-verify-cluster-properties.png) - -## Create a cloud witness - -In this step, configure a cloud share witness. 
If you're unfamiliar with the steps, see [Deploy a Cloud Witness for a Failover Cluster](/windows-server/failover-clustering/deploy-cloud-witness). - -## Enable the availability group feature - -In this step, enable the availability group feature. If you're unfamiliar with the steps, see the [availability group tutorial](availability-group-manually-configure-tutorial-single-subnet.md#enable-availability-groups). - -## Create keys and certificates - -In this step, create certificates that a SQL login uses on the encrypted endpoint. Create a folder on each node to hold the certificate backups, such as `c:\certs`. - -To configure the first node, follow these steps: - -1. Open **SQL Server Management Studio** and connect to your first node, such as `AGNode1`. -1. Open a **New Query** window and run the following Transact-SQL (T-SQL) statement after updating to a complex and secure password: - - ```sql - USE master; - CREATE MASTER KEY ENCRYPTION BY PASSWORD = 'PassWOrd123!'; - GO - - --create a cert from the master key - USE master; - CREATE CERTIFICATE AGNode1Cert - WITH SUBJECT = 'AGNode1 Certificate'; - GO - - --Backup the cert and transfer it to AGNode2 - BACKUP CERTIFICATE AGNode1Cert TO FILE = 'C:\certs\AGNode1Cert.crt'; - GO - ``` - -1. Next, create the HADR endpoint, and use the certificate for authentication by running this Transact-SQL (T-SQL) statement: - - ```sql - --CREATE or ALTER the mirroring endpoint - CREATE ENDPOINT hadr_endpoint - STATE = STARTED - AS TCP ( - LISTENER_PORT=5022 - , LISTENER_IP = ALL - ) - FOR DATABASE_MIRRORING ( - AUTHENTICATION = CERTIFICATE AGNode1Cert - , ENCRYPTION = REQUIRED ALGORITHM AES - , ROLE = ALL - ); - GO - ``` - -1. Use **File Explorer** to go to the file location where your certificate is, such as `c:\certs`. -1. Manually make a copy of the certificate, such as `AGNode1Cert.crt`, from the first node, and transfer it to the same location on the second node. - -To configure the second node, follow these steps: - -1. 
Connect to the second node with **SQL Server Management Studio**, such as `AGNode2`. -1. In a **New Query** window, run the following Transact-SQL (T-SQL) statement after updating to a complex and secure password: - - ```sql - USE master; - CREATE MASTER KEY ENCRYPTION BY PASSWORD = 'PassWOrd123!'; - GO - - --create a cert from the master key - USE master; - CREATE CERTIFICATE AGNode2Cert - WITH SUBJECT = 'AGNode2 Certificate'; - GO - --Backup the cert and transfer it to AGNode1 - BACKUP CERTIFICATE AGNode2Cert TO FILE = 'C:\certs\AGNode2Cert.crt'; - GO - ``` - -1. Next, create the HADR endpoint, and use the certificate for authentication by running this Transact-SQL (T-SQL) statement: - - ```sql - --CREATE or ALTER the mirroring endpoint - CREATE ENDPOINT hadr_endpoint - STATE = STARTED - AS TCP ( - LISTENER_PORT=5022 - , LISTENER_IP = ALL - ) - FOR DATABASE_MIRRORING ( - AUTHENTICATION = CERTIFICATE AGNode2Cert - , ENCRYPTION = REQUIRED ALGORITHM AES - , ROLE = ALL - ); - GO - ``` - -1. Use **File Explorer** to go to the file location where your certificate is, such as `c:\certs`. -1. Manually make a copy of the certificate, such as `AGNode2Cert.crt`, from the second node, and transfer it to the same location on the first node. - -If there are any other nodes in the cluster, repeat these steps there also, modifying the respective certificate names. - -## Create logins - -Certificate authentication is used to synchronize data across nodes. To allow this, create a login for the other node, create a user for the login, create a certificate for the login to use the backed-up certificate, and then grant connect on the mirroring endpoint. 
- -To do so, first run the following Transact-SQL (T-SQL) query on the first node, such as `AGNode1`: - -```sql ---create a login for the AGNode2 -USE master; -CREATE LOGIN AGNode2_Login WITH PASSWORD = 'PassWord123!'; -GO - ---create a user from the login -CREATE USER AGNode2_User FOR LOGIN AGNode2_Login; -GO - ---create a certificate that the login uses for authentication -CREATE CERTIFICATE AGNode2Cert - AUTHORIZATION AGNode2_User - FROM FILE = 'C:\certs\AGNode2Cert.crt' -GO - ---grant connect for login -GRANT CONNECT ON ENDPOINT::hadr_endpoint TO [AGNode2_login]; -GO -``` - -Next, run the following Transact-SQL (T-SQL) query on the second node, such as `AGNode2`: - -```sql ---create a login for the AGNode1 -USE master; -CREATE LOGIN AGNode1_Login WITH PASSWORD = 'PassWord123!'; -GO - ---create a user from the login -CREATE USER AGNode1_User FOR LOGIN AGNode1_Login; -GO - ---create a certificate that the login uses for authentication -CREATE CERTIFICATE AGNode1Cert - AUTHORIZATION AGNode1_User - FROM FILE = 'C:\certs\AGNode1Cert.crt' -GO - ---grant connect for login -GRANT CONNECT ON ENDPOINT::hadr_endpoint TO [AGNode1_login]; -GO -``` - -If there are any other nodes in the cluster, repeat these steps there also, modifying the respective certificate and user names. - -## Configure an availability group - -In this step, configure your availability group, and add your databases to it. Do not create a listener at this time. If you're not familiar with the steps, see the [availability group tutorial](availability-group-manually-configure-tutorial-single-subnet.md#create-the-availability-group). Be sure to initiate a failover and failback to verify that everything is working as it should be. - - > [!NOTE] - > If there is a failure during the synchronization process, you may need to grant `NT AUTHORITY\SYSTEM` sysadmin rights to create cluster resources on the first node, such as `AGNode1` temporarily. 
- -## Configure a load balancer - -In this final step, configure the load balancer using either the [Azure portal](availability-group-load-balancer-portal-configure.md) or [PowerShell](availability-group-listener-powershell-configure.md). - - -## Next steps - -Once the availability group is deployed, consider optimizing the [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md). - - -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Always On availability groups with SQL Server on Azure VMs](availability-group-overview.md) -- [Always On availability groups overview](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server) diff --git a/articles/azure-sql/virtual-machines/windows/availability-group-distributed-network-name-dnn-listener-configure.md b/articles/azure-sql/virtual-machines/windows/availability-group-distributed-network-name-dnn-listener-configure.md deleted file mode 100644 index 178c729de180f..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/availability-group-distributed-network-name-dnn-listener-configure.md +++ /dev/null @@ -1,191 +0,0 @@ ---- -title: Configure DNN listener for availability group -description: Learn how to configure a distributed network name (DNN) listener to replace your virtual network name (VNN) listener and route traffic to your Always On availability group on SQL Server on Azure VM. 
-services: virtual-machines-windows -documentationcenter: na -author: rajeshsetlem -tags: azure-resource-manager -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.reviewer: mathoma - ---- -# Configure a DNN listener for an availability group -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!TIP] -> Eliminate the need for a distributed network name for your Always On availability (AG) group by creating your SQL Server VMs in [multiple subnets](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md) within the same Azure virtual network. - -With SQL Server on Azure VMs in a single subnet, the distributed network name (DNN) routes traffic to the appropriate clustered resource. It provides an easier way to connect to an Always On availability group (AG) than the virtual network name (VNN) listener, without the need for an Azure Load Balancer. - -This article teaches you to configure a DNN listener to replace the VNN listener and route traffic to your availability group with SQL Server on Azure VMs for high availability and disaster recovery (HADR). - - -For an alternative connectivity option, consider a [VNN listener and Azure Load Balancer](availability-group-vnn-azure-load-balancer-configure.md) instead. - -## Overview - -A distributed network name (DNN) listener replaces the traditional virtual network name (VNN) availability group listener when used with [Always On availability groups on SQL Server VMs](availability-group-overview.md). This negates the need for an Azure Load Balancer to route traffic, simplifying deployment, maintenance, and improving failover. 
- -Use the DNN listener to replace an existing VNN listener, or alternatively, use it in conjunction with an existing VNN listener so that your availability group has two distinct connection points - one using the VNN listener name (and port if non-default), and one using the DNN listener name and port. - -> [!CAUTION] -> The routing behavior when using a DNN differs when using a VNN. Do not use port 1433. To learn more, see the [Port consideration](#port-considerations) section later in this article. - -## Prerequisites - -Before you complete the steps in this article, you should already have: - -- SQL Server starting with either [SQL Server 2019 CU8](https://support.microsoft.com/topic/cumulative-update-8-for-sql-server-2019-ed7f79d9-a3f0-a5c2-0bef-d0b7961d2d72) and later, [SQL Server 2017 CU25](https://support.microsoft.com/topic/kb5003830-cumulative-update-25-for-sql-server-2017-357b80dc-43b5-447c-b544-7503eee189e9) and later, or [SQL Server 2016 SP3](https://support.microsoft.com/topic/kb5003279-sql-server-2016-service-pack-3-release-information-46ab9543-5cf9-464d-bd63-796279591c31) and later on Windows Server 2016 and later. -- Decided that the distributed network name is the appropriate [connectivity option for your HADR solution](hadr-cluster-best-practices.md#connectivity). -- Configured your [Always On availability group](availability-group-overview.md). -- Installed the latest version of [PowerShell](/powershell/azure/install-az-ps). -- Identified the unique port that you will use for the DNN listener. The port used for a DNN listener must be unique across all replicas of the availability group or failover cluster instance. No other connection can share the same port. - - - -## Create script - -Use PowerShell to create the distributed network name (DNN) resource and associate it with your availability group. - -To do so, follow these steps: - -1. Open a text editor, such as Notepad. -1. 
Copy and paste the following script: - - ```powershell - param ( - [Parameter(Mandatory=$true)][string]$Ag, - [Parameter(Mandatory=$true)][string]$Dns, - [Parameter(Mandatory=$true)][string]$Port - ) - - Write-Host "Add a DNN listener for availability group $Ag with DNS name $Dns and port $Port" - - $ErrorActionPreference = "Stop" - - # create the DNN resource with the port as the resource name - Add-ClusterResource -Name $Port -ResourceType "Distributed Network Name" -Group $Ag - - # set the DNS name of the DNN resource - Get-ClusterResource -Name $Port | Set-ClusterParameter -Name DnsName -Value $Dns - - # start the DNN resource - Start-ClusterResource -Name $Port - - - $Dep = Get-ClusterResourceDependency -Resource $Ag - if ( $Dep.DependencyExpression -match '\s*\((.*)\)\s*' ) - { - $DepStr = "$($Matches.1) or [$Port]" - } - else - { - $DepStr = "[$Port]" - } - - Write-Host "$DepStr" - - # add the Dependency from availability group resource to the DNN resource - Set-ClusterResourceDependency -Resource $Ag -Dependency "$DepStr" - - - #bounce the AG resource - Stop-ClusterResource -Name $Ag - Start-ClusterResource -Name $Ag - ``` - -1. Save the script as a `.ps1` file, such as `add_dnn_listener.ps1`. - -## Execute script - -To create the DNN listener, execute the script passing in parameters for the name of the availability group, listener name, and port. - -For example, assuming an availability group name of `ag1`, listener name of `dnnlsnr`, and listener port as `6789`, follow these steps: - -1. Open a command-line interface tool, such as command prompt or PowerShell. -1. Navigate to where you saved the `.ps1` script, such as c:\Documents. -1. Execute the script: ```add_dnn_listener.ps1 ```. For example: - - ```console - c:\Documents> add_dnn_listener.ps1 ag1 dnnlsnr 6789 - ``` - -## Verify listener - -Use either SQL Server Management Studio or Transact-SQL to confirm your DNN listener is created successfully. 
- -### SQL Server Management Studio - -Expand **Availability Group Listeners** in [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms) to view your DNN listener: - -:::image type="content" source="media/availability-group-distributed-network-name-dnn-listener-configure/dnn-listener-in-ssms.png" alt-text="View the DNN listener under availability group listeners in SQL Server Management Studio (SSMS)"::: - -### Transact-SQL - -Use Transact-SQL to view the status of the DNN listener: - -```sql -SELECT * FROM SYS.AVAILABILITY_GROUP_LISTENERS -``` - -A value of `1` for `is_distributed_network_name` indicates the listener is a distributed network name (DNN) listener: - -:::image type="content" source="media/availability-group-distributed-network-name-dnn-listener-configure/dnn-listener-tsql.png" alt-text="Use sys.availability_group_listeners to identify DNN listeners that have a value of 1 in is_distributed_network_name"::: - -## Update connection string - -Update the connection string for any application that needs to connect to the DNN listener. The connection string to the DNN listener must provide the DNN port number, and specify `MultiSubnetFailover=True` in the connection string. If the SQL client does not support the `MultiSubnetFailover=True` parameter, then it is not compatible with a DNN listener. - -The following is an example of a connection string for listener name **DNN_Listener** and port 6789: - -`DataSource=DNN_Listener,6789,MultiSubnetFailover=True` - -## Test failover - -Test failover of the availability group to ensure functionality. - -To test failover, follow these steps: - -1. Connect to the DNN listener or one of the replicas by using [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms). -1. Expand **Always On Availability Group** in **Object Explorer**. -1. Right-click the availability group and choose **Failover** to open the **Failover Wizard**. -1. 
Follow the prompts to choose a failover target and fail the availability group over to a secondary replica. -1. Confirm the database is in a synchronized state on the new primary replica. -1. (Optional) Fail back to the original primary, or another secondary replica. - -## Test connectivity - -Test the connectivity to your DNN listener with these steps: - -1. Open [SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms). -1. Connect to your DNN listener. -1. Open a new query window and check which replica you're connected to by running `SELECT @@SERVERNAME`. -1. Fail the availability group over to another replica. -1. After a reasonable amount of time, run `SELECT @@SERVERNAME` to confirm your availability group is now hosted on another replica. - -## Limitations - -- DNN Listeners **MUST** be configured with a unique port. The port cannot be shared with any other connection on any replica. -- The client connecting to the DNN listener must support the `MultiSubnetFailover=True` parameter in the connection string. -- There might be additional considerations when you're working with other SQL Server features and an availability group with a DNN. For more information, see [AG with DNN interoperability](availability-group-dnn-interoperability.md). - -## Port considerations - -DNN listeners are designed to listen on all IP addresses, but on a specific, unique port. The DNS entry for the listener name should resolve to the addresses of all replicas in the availability group. This is done automatically with the PowerShell script provided in the [Create Script](#create-script) section. Since DNN listeners accept connections on all IP addresses, it is critical that the listener port be unique, and not in use by any other replica in the availability group. Since SQL Server listens on port 1433 by default, either directly or via the SQL Browser service, using port 1433 for the DNN listener is strongly discouraged. 
- -## Next steps - -Once the availability group is deployed, consider optimizing the [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md). - - -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Always On availability groups with SQL Server on Azure VMs](availability-group-overview.md) -- [Always On availability groups overview](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server) diff --git a/articles/azure-sql/virtual-machines/windows/availability-group-dnn-interoperability.md b/articles/azure-sql/virtual-machines/windows/availability-group-dnn-interoperability.md deleted file mode 100644 index 0ae4b439876bb..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/availability-group-dnn-interoperability.md +++ /dev/null @@ -1,143 +0,0 @@ ---- -title: Feature interoperability with availability groups and DNN listener -description: "Learn about the additional considerations when working with certain SQL Server features and a distributed network name (DNN) listener with an Always On availability group on SQL Server on Azure VMs. " -services: virtual-machines -documentationCenter: na -author: rajeshsetlem -editor: monicar -tags: azure-service-management -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.reviewer: mathoma ---- - -# Feature interoperability with AG and DNN listener -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!TIP] -> Eliminate the need for a distributed network name for your Always On availability (AG) group by creating your SQL Server VMs in [multiple subnets](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md) within the same Azure virtual network. 
- -There are certain SQL Server features that rely on a hard-coded virtual network name (VNN). As such, when using the distributed network name (DNN) listener with your Always On availability group and SQL Server on Azure VMs in a single subnet, there may be some additional considerations. - -This article details SQL Server features and interoperability with the availability group DNN listener. - -## Behavior differences - -There are some behavior differences between the functionality of the VNN listener and DNN listener that are important to note: - -- **Failover time**: Failover time is faster when using a DNN listener since there is no need to wait for the network load balancer to detect the failure event and change its routing. -- **Existing connections**: Connections made to a *specific database* within a failing-over availability group will close, but other connections to the primary replica will remain open since the DNN stays online during the failover process. This is different than a traditional VNN environment where all connections to the primary replica typically close when the availability group fails over, the listener goes offline, and the primary replica transitions to the secondary role. When using a DNN listener, you may need to adjust application connection strings to ensure that connections are redirected to the new primary replica upon failover. -- **Open transactions**: Open transactions against a database in a failing-over availability group will close and roll back, and you need to *manually* reconnect. For example, in SQL Server Management Studio, close the query window and open a new one. - -## Client drivers - -For ODBC, OLEDB, ADO.NET, JDBC, PHP, and Node.js drivers, users need to explicitly specify the DNN listener name and port as the server name in the connection string. To ensure rapid connectivity upon failover, add `MultiSubnetFailover=True` to the connection string if the SQL client supports it. 
- -## Tools - -Users of [SQL Server Management Studio](/sql/ssms/sql-server-management-studio-ssms), [sqlcmd](/sql/tools/sqlcmd-utility), [Azure Data Studio](/sql/azure-data-studio/what-is), and [SQL Server Data Tools](/sql/ssdt/sql-server-data-tools) need to explicitly specify the DNN listener name and port as the server name in the connection string to connect to the listener. - -Creating the DNN listener via the SQL Server Management Studio (SSMS) GUI is currently not supported. - - -## Availability groups and FCI - -You can configure an Always On availability group by using a failover cluster instance (FCI) as one of the replicas. For this configuration to work with the DNN listener, the [failover cluster instance must also use the DNN](failover-cluster-instance-distributed-network-name-dnn-configure.md) as there is no way to put the FCI virtual IP address in the AG DNN IP list. - -In this configuration, the mirroring endpoint URL for the FCI replica needs to use the FCI DNN. Likewise, if the FCI is used as a read-only replica, the read-only routing to the FCI replica needs to use the FCI DNN. - -The format for the mirroring endpoint is: `ENDPOINT_URL = 'TCP://:'`. - -For example, if your FCI DNN DNS name is `dnnlsnr`, and `5022` is the port of the FCI's mirroring endpoint, the Transact-SQL (T-SQL) code snippet to create the endpoint URL looks like: - -```sql -ENDPOINT_URL = 'TCP://dnnlsnr:5022' -``` - -Likewise, the format for the read-only routing URL is: `TCP://:`. - -For example, if your DNN DNS name is `dnnlsnr`, and `1444` is the port used by the read-only target SQL Server FCI, the T-SQL code snippet to create the read-only routing URL looks like: - -```sql -READ_ONLY_ROUTING_URL = 'TCP://dnnlsnr:1444' -``` - -You can omit the port in the URL if it is the default 1433 port. For a named instance, configure a static port for the named instance and specify it in the read-only routing URL. 
- -## Distributed availability group - -Distributed availability groups are not currently supported with the DNN listener. - -## Replication - -Transactional, Merge, and Snapshot Replication all support replacing the VNN listener with the DNN listener and port in replication objects that connect to the listener. - -For more information on how to use replication with availability groups, see [Publisher and AG](/sql/database-engine/availability-groups/windows/configure-replication-for-always-on-availability-groups-sql-server), [Subscriber and AG](/sql/database-engine/availability-groups/windows/replication-subscribers-and-always-on-availability-groups-sql-server), and [Distributor and AG](/sql/relational-databases/replication/configure-distribution-availability-group). - -## MSDTC - -Both local and clustered MSDTC are supported but MSDTC uses a dynamic port, which requires a standard Azure Load Balancer to configure the HA port. As such, either the VM must use a standard IP reservation, or it cannot be exposed to the internet. - -Define two rules, one for the RPC Endpoint Mapper port 135, and one for the real MSDTC port. After failover, modify the LB rule to the new MSDTC port after it changes on the new node. - -If the MSDTC is local, be sure to allow outbound communication. - -## Distributed query - -Distributed query relies on a linked server, which can be configured using the AG DNN listener and port. If the port is not 1433, choose the **Use other data source** option in SQL Server Management Studio (SSMS) when configuring your linked server. - -## FileStream - -Filestream is supported but not for scenarios where users access the scoped file share by using the Windows File API. - -## Filetable - -Filetable is supported but not for scenarios where users access the scoped file share by using the Windows File API. - -## Linked servers - -Configure the linked server using the AG DNN listener name and port. 
If the port is not 1433, choose the **Use other data source** option in SQL Server Management Studio (SSMS) when configuring your linked server. - - -## Frequently asked questions - - -- Which SQL Server version brings AG DNN listener support? - - SQL Server 2019 CU8 and later. - -- What is the expected failover time when the DNN listener is used? - - For DNN listener, the failover time will be just the AG failover time, without any additional time (like probe time when you're using Azure Load Balancer). - -- Is there any version requirement for SQL clients to support DNN with OLEDB and ODBC? - - We recommend `MultiSubnetFailover=True` connection string support for DNN listener. It's available starting with SQL Server 2012 (11.x). - -- Are any SQL Server configuration changes required for me to use the DNN listener? - - SQL Server does not require any configuration change to use DNN, but some SQL Server features might require more consideration. - -- Does DNN support multiple-subnet clusters? - - Yes. The cluster binds the DNN in DNS with the physical IP addresses of all replicas in the availability regardless of the subnet. The SQL client tries all IP addresses of the DNS name regardless of the subnet. - -- Does the availability group DNN listener support read-only routing? - - Yes. Read-only routing is supported with the DNN listener. 
- - -## Next steps - -To learn more, see: - -- [Always On availability groups with SQL Server on Azure VMs](availability-group-overview.md) -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Always On availability groups overview](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server) -- [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md) - diff --git a/articles/azure-sql/virtual-machines/windows/availability-group-listener-powershell-configure.md b/articles/azure-sql/virtual-machines/windows/availability-group-listener-powershell-configure.md deleted file mode 100644 index a97df6f993bca..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/availability-group-listener-powershell-configure.md +++ /dev/null @@ -1,260 +0,0 @@ ---- -title: Configure availability group listeners and load balancer (PowerShell) -description: Configure Availability Group listeners on the Azure Resource Manager model, using an internal load balancer with one or more IP addresses. -services: virtual-machines -documentationcenter: na -author: rajeshsetlem -editor: monicar -ms.assetid: 14b39cde-311c-4ddf-98f3-8694e01a7d3b -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.custom: "seo-lt-2019, devx-track-azurepowershell" -ms.reviewer: mathoma ---- -# Configure one or more Always On availability group listeners - -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!TIP] -> Eliminate the need for an Azure Load Balancer for your Always On availability (AG) group by creating your SQL Server VMs in [multiple subnets](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md) within the same Azure virtual network. 
- -This document shows you how to use PowerShell to do one of the following tasks: -- create a load balancer -- add IP addresses to an existing load balancer for SQL Server availability groups. - -An availability group listener is a virtual network name that clients connect to for database access. On Azure Virtual Machines in a single subnet, a load balancer holds the IP address for the listener. The load balancer routes traffic to the instance of SQL Server that is listening on the probe port. Usually, an availability group uses an internal load balancer. An Azure internal load balancer can host one or many IP addresses. Each IP address uses a specific probe port. - -The ability to assign multiple IP addresses to an internal load balancer is new to Azure and is only available in the Resource Manager model. To complete this task, you need to have a SQL Server availability group deployed on Azure Virtual Machines in the Resource Manager model. Both SQL Server virtual machines must belong to the same availability set. You can use the [Microsoft template](./availability-group-quickstart-template-configure.md) to automatically create the availability group in Azure Resource Manager. This template automatically creates the availability group, including the internal load balancer for you. If you prefer, you can [manually configure an Always On availability group](availability-group-manually-configure-tutorial-single-subnet.md). - -To complete the steps in this article, your availability groups need to be already configured. 
- -Related topics include: - -* [Configure AlwaysOn Availability Groups in Azure VM (GUI)](availability-group-manually-configure-tutorial-single-subnet.md) -* [Configure a VNet-to-VNet connection by using Azure Resource Manager and PowerShell](../../../vpn-gateway/vpn-gateway-vnet-vnet-rm-ps.md) - -[!INCLUDE [updated-for-az.md](../../../../includes/updated-for-az.md)] - -[!INCLUDE [Start your PowerShell session](../../../../includes/sql-vm-powershell.md)] - -## Verify PowerShell version - -The examples in this article are tested using Azure PowerShell module version 5.4.1. - -Verify that your PowerShell module is 5.4.1 or later. - -See [Install the Azure PowerShell module](/powershell/azure/install-az-ps). - -## Configure the Windows Firewall - -Configure the Windows Firewall to allow SQL Server access. The firewall rules allow TCP connections to the ports use by the SQL Server instance, and the listener probe. For detailed instructions, see [Configure a Windows Firewall for Database Engine Access](/sql/database-engine/configure-windows/configure-a-windows-firewall-for-database-engine-access#Anchor_1). Create an inbound rule for the SQL Server port and for the probe port. - -If you are restricting access with an Azure Network Security Group, ensure that the allow rules include the backend SQL Server VM IP addresses, and the load balancer floating IP addresses for the AG listener and the cluster core IP address, if applicable. - -## Determine the load balancer SKU required - -[Azure load balancer](../../../load-balancer/load-balancer-overview.md) is available in two SKUs: Basic & Standard. The standard load balancer is recommended. If the virtual machines are in an availability set, basic load balancer is permitted. If the virtual machines are in an availability zone, a standard load balancer is required. Standard load balancer requires that all VM IP addresses use standard IP addresses. 
- -The current [Microsoft template](./availability-group-quickstart-template-configure.md) for an availability group uses a basic load balancer with basic IP addresses. - - > [!NOTE] - > You will need to configure a [service endpoint](../../../storage/common/storage-network-security.md?toc=%2fazure%2fvirtual-network%2ftoc.json#grant-access-from-a-virtual-network) if you use a standard load balancer and Azure Storage for the cloud witness. - > - -The examples in this article specify a standard load balancer. In the examples, the script includes `-sku Standard`. - -```powershell -$ILB= New-AzLoadBalancer -Location $Location -Name $ILBName -ResourceGroupName $ResourceGroupName -FrontendIpConfiguration $FEConfig -BackendAddressPool $BEConfig -LoadBalancingRule $ILBRule -Probe $SQLHealthProbe -sku Standard -``` - -To create a basic load balancer, remove `-sku Standard` from the line that creates the load balancer. For example: - -```powershell -$ILB= New-AzLoadBalancer -Location $Location -Name $ILBName -ResourceGroupName $ResourceGroupName -FrontendIpConfiguration $FEConfig -BackendAddressPool $BEConfig -LoadBalancingRule $ILBRule -Probe $SQLHealthProbe -``` - -## Example Script: Create an internal load balancer with PowerShell - -> [!NOTE] -> If you created your availability group with the [Microsoft template](./availability-group-quickstart-template-configure.md), the internal load balancer was already created. - -The following PowerShell script creates an internal load balancer, configures the load-balancing rules, and sets an IP address for the load balancer. To run the script, open Windows PowerShell ISE, and then paste the script in the Script pane. Use `Connect-AzAccount` to log in to PowerShell. If you have multiple Azure subscriptions, use `Select-AzSubscription` to set the subscription. 
- -```powershell -# Connect-AzAccount -# Select-AzSubscription -SubscriptionId - -$ResourceGroupName = "" # Resource group name -$VNetName = "" # Virtual network name -$SubnetName = "" # Subnet name -$ILBName = "" # ILB name -$Location = "" # Azure location -$VMNames = "","" # Virtual machine names - -$ILBIP = "" # IP address -[int]$ListenerPort = "" # AG listener port -[int]$ProbePort = "" # Probe port - -$LBProbeName ="ILBPROBE_$ListenerPort" # The Load balancer Probe Object Name -$LBConfigRuleName = "ILBCR_$ListenerPort" # The Load Balancer Rule Object Name - -$FrontEndConfigurationName = "FE_SQLAGILB_1" # Object name for the front-end configuration -$BackEndConfigurationName ="BE_SQLAGILB_1" # Object name for the back-end configuration - -$VNet = Get-AzVirtualNetwork -Name $VNetName -ResourceGroupName $ResourceGroupName - -$Subnet = Get-AzVirtualNetworkSubnetConfig -VirtualNetwork $VNet -Name $SubnetName - -$FEConfig = New-AzLoadBalancerFrontendIpConfig -Name $FrontEndConfigurationName -PrivateIpAddress $ILBIP -SubnetId $Subnet.id - -$BEConfig = New-AzLoadBalancerBackendAddressPoolConfig -Name $BackEndConfigurationName - -$SQLHealthProbe = New-AzLoadBalancerProbeConfig -Name $LBProbeName -Protocol tcp -Port $ProbePort -IntervalInSeconds 15 -ProbeCount 2 - -$ILBRule = New-AzLoadBalancerRuleConfig -Name $LBConfigRuleName -FrontendIpConfiguration $FEConfig -BackendAddressPool $BEConfig -Probe $SQLHealthProbe -Protocol tcp -FrontendPort $ListenerPort -BackendPort $ListenerPort -LoadDistribution Default -EnableFloatingIP - -$ILB= New-AzLoadBalancer -Location $Location -Name $ILBName -ResourceGroupName $ResourceGroupName -FrontendIpConfiguration $FEConfig -BackendAddressPool $BEConfig -LoadBalancingRule $ILBRule -Probe $SQLHealthProbe - -$bepool = Get-AzLoadBalancerBackendAddressPoolConfig -Name $BackEndConfigurationName -LoadBalancer $ILB - -foreach($VMName in $VMNames) - { - $VM = Get-AzVM -ResourceGroupName $ResourceGroupName -Name $VMName - $NICName = 
($vm.NetworkProfile.NetworkInterfaces.Id.split('/') | select -last 1) - $NIC = Get-AzNetworkInterface -name $NICName -ResourceGroupName $ResourceGroupName - $NIC.IpConfigurations[0].LoadBalancerBackendAddressPools = $BEPool - Set-AzNetworkInterface -NetworkInterface $NIC - start-AzVM -ResourceGroupName $ResourceGroupName -Name $VM.Name - } -``` - -## Example script: Add an IP address to an existing load balancer with PowerShell - -To use more than one availability group, add an additional IP address to the load balancer. Each IP address requires its own load-balancing rule, probe port, and front port. -Add only the primary IP address of the VM to the back-end pool of the load balancer as the [secondary VM IP address does not support floating IP](../../../load-balancer/load-balancer-floating-ip.md). - -The front-end port is the port that applications use to connect to the SQL Server instance. IP addresses for different availability groups can use the same front-end port. - -> [!NOTE] -> For SQL Server availability groups, each IP address requires a specific probe port. For example, if one IP address on a load balancer uses probe port 59999, no other IP addresses on that load balancer can use probe port 59999. - -* For information about load balancer limits, see **Private front end IP per load balancer** under [Networking Limits - Azure Resource Manager](../../../azure-resource-manager/management/azure-subscription-service-limits.md#azure-resource-manager-virtual-networking-limits). -* For information about availability group limits, see [Restrictions (Availability Groups)](/sql/database-engine/availability-groups/windows/prereqs-restrictions-recommendations-always-on-availability#RestrictionsAG). - -The following script adds a new IP address to an existing load balancer. The ILB uses the listener port for the load-balancing front-end port. This port can be the port that SQL Server is listening on. For default instances of SQL Server, the port is 1433. 
The load-balancing rule for an availability group requires a floating IP (direct server return) so the back-end port is the same as the front-end port. Update the variables for your environment. - -```powershell -# Connect-AzAccount -# Select-AzSubscription -SubscriptionId - -$ResourceGroupName = "" # Resource group name -$VNetName = "" # Virtual network name -$SubnetName = "" # Subnet name -$ILBName = "" # ILB name - -$ILBIP = "" # IP address -[int]$ListenerPort = "" # AG listener port -[int]$ProbePort = "" # Probe port - -$ILB = Get-AzLoadBalancer -Name $ILBName -ResourceGroupName $ResourceGroupName - -$count = $ILB.FrontendIpConfigurations.Count+1 -$FrontEndConfigurationName ="FE_SQLAGILB_$count" - -$LBProbeName = "ILBPROBE_$count" -$LBConfigrulename = "ILBCR_$count" - -$VNet = Get-AzVirtualNetwork -Name $VNetName -ResourceGroupName $ResourceGroupName -$Subnet = Get-AzVirtualNetworkSubnetConfig -VirtualNetwork $VNet -Name $SubnetName - -$ILB | Add-AzLoadBalancerFrontendIpConfig -Name $FrontEndConfigurationName -PrivateIpAddress $ILBIP -SubnetId $Subnet.Id - -$ILB | Add-AzLoadBalancerProbeConfig -Name $LBProbeName -Protocol Tcp -Port $Probeport -ProbeCount 2 -IntervalInSeconds 15 | Set-AzLoadBalancer - -$ILB = Get-AzLoadBalancer -Name $ILBname -ResourceGroupName $ResourceGroupName - -$FEConfig = get-AzLoadBalancerFrontendIpConfig -Name $FrontEndConfigurationName -LoadBalancer $ILB - -$SQLHealthProbe = Get-AzLoadBalancerProbeConfig -Name $LBProbeName -LoadBalancer $ILB - -$BEConfig = Get-AzLoadBalancerBackendAddressPoolConfig -Name $ILB.BackendAddressPools[0].Name -LoadBalancer $ILB - -$ILB | Add-AzLoadBalancerRuleConfig -Name $LBConfigRuleName -FrontendIpConfiguration $FEConfig -BackendAddressPool $BEConfig -Probe $SQLHealthProbe -Protocol tcp -FrontendPort $ListenerPort -BackendPort $ListenerPort -LoadDistribution Default -EnableFloatingIP | Set-AzLoadBalancer -``` - -## Configure the listener - -[!INCLUDE 
[ag-listener-configure](../../../../includes/virtual-machines-ag-listener-configure.md)] - -## Set the listener port in SQL Server Management Studio - -1. Launch SQL Server Management Studio and connect to the primary replica. - -1. Navigate to **AlwaysOn High Availability** > **Availability Groups** > **Availability Group Listeners**. - -1. You should now see the listener name that you created in Failover Cluster Manager. Right-click the listener name and select **Properties**. - -1. In the **Port** box, specify the port number for the availability group listener by using the $EndpointPort you used earlier (1433 was the default), then select **OK**. - -## Test the connection to the listener - -To test the connection: - -1. Use Remote Desktop Protocol (RDP) to connect to a SQL Server that is in the same virtual network, but does not own the replica. It might be the other SQL Server in the cluster. - -1. Use **sqlcmd** utility to test the connection. For example, the following script establishes a **sqlcmd** connection to the primary replica through the listener with Windows authentication: - - ``` - sqlcmd -S -E - ``` - - If the listener is using a port other than the default port (1433), specify the port in the connection string. For example, the following sqlcmd command connects to a listener at port 1435: - - ``` - sqlcmd -S ,1435 -E - ``` - -The SQLCMD connection automatically connects to whichever instance of SQL Server hosts the primary replica. - -> [!NOTE] -> Make sure that the port you specify is open on the firewall of both SQL Servers. Both servers require an inbound rule for the TCP port that you use. For more information, see [Add or Edit Firewall Rule](/previous-versions/orphan-topics/ws.11/cc753558(v=ws.11)). -> - -## Guidelines and limitations - -Note the following guidelines on availability group listener in Azure using internal load balancer: - -* With an internal load balancer, you only access the listener from within the same virtual network. 
- -* If you're restricting access with an Azure Network Security Group, ensure that the allow rules include: - - The backend SQL Server VM IP addresses - - The load balancer floating IP addresses for the AG listener - - The cluster core IP address, if applicable. - -* Create a service endpoint when using a standard load balancer with Azure Storage for the cloud witness. For more information, see [Grant access from a virtual network](../../../storage/common/storage-network-security.md?toc=%2fazure%2fvirtual-network%2ftoc.json#grant-access-from-a-virtual-network). - -## PowerShell cmdlets - -Use the following PowerShell cmdlets to create an internal load balancer for Azure Virtual Machines. - -* [New-AzLoadBalancer](/powershell/module/Azurerm.Network/New-AzureRmLoadBalancer) creates a load balancer. -* [New-AzLoadBalancerFrontendIpConfig](/powershell/module/Azurerm.Network/New-AzureRmLoadBalancerFrontendIpConfig) creates a front-end IP configuration for a load balancer. -* [New-AzLoadBalancerRuleConfig](/powershell/module/Azurerm.Network/New-AzureRmLoadBalancerRuleConfig) creates a rule configuration for a load balancer. -* [New-AzLoadBalancerBackendAddressPoolConfig](/powershell/module/Azurerm.Network/New-AzureRmLoadBalancerBackendAddressPoolConfig) creates a backend address pool configuration for a load balancer. -* [New-AzLoadBalancerProbeConfig](/powershell/module/Azurerm.Network/New-AzureRmLoadBalancerProbeConfig) creates a probe configuration for a load balancer. -* [Remove-AzLoadBalancer](/powershell/module/Azurerm.Network/Remove-AzureRmLoadBalancer) removes a load balancer from an Azure resource group. 
- -## Next steps - - -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Always On availability groups with SQL Server on Azure VMs](availability-group-overview.md) -- [Always On availability groups overview](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server) -- [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md) \ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/windows/availability-group-load-balancer-portal-configure.md b/articles/azure-sql/virtual-machines/windows/availability-group-load-balancer-portal-configure.md deleted file mode 100644 index 2bb29fde5f7b9..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/availability-group-load-balancer-portal-configure.md +++ /dev/null @@ -1,328 +0,0 @@ ---- -title: Configure a load balancer & availability group listener (Azure portal) -description: Step-by-step instructions for creating a listener for an Always On availability group for SQL Server in Azure virtual machines -services: virtual-machines -documentationcenter: na -author: rajeshsetlem -editor: monicar - -ms.assetid: d1f291e9-9af2-41ba-9d29-9541e3adcfcf -ms.service: virtual-machines-sql -ms.subservice: hadr - -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.custom: "seo-lt-2019" -ms.reviewer: mathoma ---- -# Configure a load balancer & availability group listener (SQL Server on Azure VMs) - -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!TIP] -> Eliminate the need for an Azure Load Balancer for your Always On availability (AG) group by creating your SQL Server VMs in [multiple subnets](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md) within the same Azure virtual network. 
- - -This article explains how to create a load balancer for a SQL Server Always On availability group in Azure Virtual Machines within a single subnet that are running with Azure Resource Manager. An availability group requires a load balancer when the SQL Server instances are on Azure Virtual Machines. The load balancer stores the IP address for the availability group listener. If an availability group spans multiple regions, each region needs a load balancer. - -To complete this task, you need to have a SQL Server Always On availability group deployed in Azure VMs that are running with Resource Manager. Both SQL Server virtual machines must belong to the same availability set. You can use the [Microsoft template](./availability-group-quickstart-template-configure.md) to automatically create the availability group in Resource Manager. This template automatically creates an internal load balancer for you. - -If you prefer, you can [manually configure an availability group](availability-group-manually-configure-tutorial-single-subnet.md). - -This article requires that your availability groups are already configured. - -View related articles: - -* [Configure Always On availability groups in Azure VM (GUI)](availability-group-manually-configure-tutorial-single-subnet.md) -* [Configure a VNet-to-VNet connection by using Azure Resource Manager and PowerShell](../../../vpn-gateway/vpn-gateway-vnet-vnet-rm-ps.md) - -By walking through this article, you create and configure a load balancer in the Azure portal. After the process is complete, you configure the cluster to use the IP address from the load balancer for the availability group listener. - -## Create & configure load balancer - -In this portion of the task, do the following steps: - -1. In the Azure portal, create the load balancer and configure the IP address. -2. Configure the back-end pool. -3. Create the probe. -4. Set the load-balancing rules. 
- -> [!NOTE] -> If the SQL Server instances are in multiple resource groups and regions, perform each step twice, once in each resource group. -> - -### Step 1: Create the load balancer and configure the IP address - -First, create the load balancer. - -1. In the Azure portal, open the resource group that contains the SQL Server virtual machines. - -2. In the resource group, select **Add**. - -3. Search for **load balancer**. Choose **Load Balancer** (published by **Microsoft**) in the search results. - -4. On the **Load Balancer** blade, select **Create**. - -5. In the **Create load balancer** dialog box, configure the load balancer as follows: - - | Setting | Value | - | --- | --- | - | **Name** |A text name representing the load balancer. For example, **sqlLB**. | - | **Type** |**Internal**: Most implementations use an internal load balancer, which allows applications within the same virtual network to connect to the availability group.
    **External**: Allows applications to connect to the availability group through a public Internet connection. | - | **SKU** |**Basic**: Default option. Only valid if SQL Server instances are in the same availability set.
    **Standard**: Preferred. Valid if SQL Server instances are in the same availability set. Required if your SQL Server instances are in different availability zones. | - | **Virtual network** |Select the virtual network that the SQL Server instances are in. | - | **Subnet** |Select the subnet that the SQL Server instances are in. | - | **IP address assignment** |**Static** | - | **Private IP address** |Specify an available IP address from the subnet. Use this IP address when you create a listener on the cluster. In a PowerShell script, later in this article, use this address for the `$ListenerILBIP` variable. | - | **Subscription** |If you have multiple subscriptions, this field might appear. Select the subscription that you want to associate with this resource. It's normally the same subscription as all the resources for the availability group. | - | **Resource group** |Select the resource group that the SQL Server instances are in. | - | **Location** |Select the Azure location that the SQL Server instances are in. | - -6. Select **Create**. - -Azure creates the load balancer. The load balancer belongs to a specific network, subnet, resource group, and location. After Azure completes the task, verify the load balancer settings in Azure. - -### Step 2: Configure the back-end pool - -Azure calls the back-end address pool *backend pool*. In this case, the back-end pool is the addresses of the two SQL Server instances in your availability group. - -1. In your resource group, select the load balancer that you created. - -2. On **Settings**, select **Backend pools**. - -3. On **Backend pools**, select **Add** to create a back-end address pool. - -4. On **Add backend pool**, under **Name**, type a name for the back-end pool. - -5. Under **Virtual machines**, select **Add a virtual machine**. Only add the primary IP address of the VM, do not add any secondary IP addresses. - -6. 
Under **Choose virtual machines**, select **Choose an availability set**, and then specify the availability set that the SQL Server virtual machines belong to. - -7. After you have chosen the availability set, select **Choose the virtual machines**, select the two virtual machines that host the SQL Server instances in the availability group, and then choose **Select**. - -8. Select **OK** to close the blades for **Choose virtual machines**, and **Add backend pool**. - -Azure updates the settings for the back-end address pool. Now your availability set has a pool of two SQL Server instances. - -### Step 3: Create a probe - -The probe defines how Azure verifies which of the SQL Server instances currently owns the availability group listener. Azure probes the service based on the IP address on a port that you define when you create the probe. - -1. On the load balancer **Settings** blade, select **Health probes**. - -2. On the **Health probes** blade, select **Add**. - -3. Configure the probe on the **Add probe** blade. Use the following values to configure the probe: - - | Setting | Value | - | --- | --- | - | **Name** |A text name representing the probe. For example, **SQLAlwaysOnEndPointProbe**. | - | **Protocol** |**TCP** | - | **Port** |You can use any available port. For example, *59999*. | - | **Interval** |*5* | - | **Unhealthy threshold** |*2* | - -4. Select **OK**. - -> [!NOTE] -> Make sure that the port you specify is open on the firewall of both SQL Server instances. Both instances require an inbound rule for the TCP port that you use. For more information, see [Add or Edit Firewall Rule](/previous-versions/orphan-topics/ws.11/cc753558(v=ws.11)). -> - -Azure creates the probe and then uses it to test which SQL Server instance has the listener for the availability group. - -### Step 4: Set the load-balancing rules - -The load-balancing rules configure how the load balancer routes traffic to the SQL Server instances. 
For this load balancer, you enable direct server return because only one of the two SQL Server instances owns the availability group listener resource at a time. - -1. On the load balancer **Settings** blade, select **Load balancing rules**. - -2. On the **Load balancing rules** blade, select **Add**. - -3. On the **Add load balancing rules** blade, configure the load-balancing rule. Use the following settings: - - | Setting | Value | - | --- | --- | - | **Name** |A text name representing the load-balancing rules. For example, **SQLAlwaysOnEndPointListener**. | - | **Protocol** |**TCP** | - | **Port** |*1433* | - | **Backend Port** |*1433*. This value is ignored because this rule uses **Floating IP (direct server return)**. | - | **Probe** |Use the name of the probe that you created for this load balancer. | - | **Session persistence** |**None** | - | **Idle timeout (minutes)** |*4* | - | **Floating IP (direct server return)** |**Enabled** | - - > [!NOTE] - > You might have to scroll down the blade to view all the settings. - > - -4. Select **OK**. - -5. Azure configures the load-balancing rule. Now the load balancer is configured to route traffic to the SQL Server instance that hosts the listener for the availability group. - -At this point, the resource group has a load balancer that connects to both SQL Server machines. The load balancer also contains an IP address for the SQL Server Always On availability group listener, so that either machine can respond to requests for the availability groups. - -> [!NOTE] -> If your SQL Server instances are in two separate regions, repeat the steps in the other region. Each region requires a load balancer. -> - -## Configure the cluster to use the load balancer IP address - -The next step is to configure the listener on the cluster, and bring the listener online. Do the following steps: - -1. Create the availability group listener on the failover cluster. - -2. Bring the listener online. 
- -### Step 5: Create the availability group listener on the failover cluster - -In this step, you manually create the availability group listener in Failover Cluster Manager and SQL Server Management Studio. - -[!INCLUDE [ag-listener-configure](../../../../includes/virtual-machines-ag-listener-configure.md)] - -### Verify the configuration of the listener - -If the cluster resources and dependencies are correctly configured, you should be able to view the listener in SQL Server Management Studio. To set the listener port, do the following steps: - -1. Start SQL Server Management Studio, and then connect to the primary replica. - -2. Go to **AlwaysOn High Availability** > **Availability Groups** > **Availability Group Listeners**. - - You should now see the listener name that you created in Failover Cluster Manager. - -3. Right-click the listener name, and then select **Properties**. - -4. In the **Port** box, specify the port number for the availability group listener by using the $EndpointPort you used earlier (1433 was the default), and then select **OK**. - -You now have an availability group in Azure virtual machines running in Resource Manager mode. - -## Test the connection to the listener - -Test the connection by doing the following steps: - -1. Use remote desktop protocol (RDP) to connect to a SQL Server instance that's in the same virtual network, but does not own the replica. This server can be the other SQL Server instance in the cluster. - -2. Use **sqlcmd** utility to test the connection. For example, the following script establishes a **sqlcmd** connection to the primary replica through the listener with Windows authentication: - - ```console - sqlcmd -S -E - ``` - -The SQLCMD connection automatically connects to the SQL Server instance that hosts the primary replica. - -## Create an IP address for an additional availability group - -Each availability group uses a separate listener. Each listener has its own IP address. 
Use the same load balancer to hold the IP address for additional listeners. Add only the primary IP address of the VM to the back-end pool of the load balancer as the [secondary VM IP address does not support floating IP](../../../load-balancer/load-balancer-floating-ip.md). - -To add an IP address to a load balancer with the Azure portal, do the following steps: - -1. In the Azure portal, open the resource group that contains the load balancer, and then select the load balancer. - -2. Under **SETTINGS**, select **Frontend IP pool**, and then select **Add**. - -3. Under **Add frontend IP address**, assign a name for the front end. - -4. Verify that the **Virtual network** and the **Subnet** are the same as the SQL Server instances. - -5. Set the IP address for the listener. - - >[!TIP] - >You can set the IP address to static and type an address that is not currently used in the subnet. Alternatively, you can set the IP address to dynamic and save the new front-end IP pool. When you do so, the Azure portal automatically assigns an available IP address to the pool. You can then reopen the front-end IP pool and change the assignment to static. - -6. Save the IP address for the listener. - -7. Add a health probe by using the following settings: - - |Setting |Value - |:-----|:---- - |**Name** |A name to identify the probe. - |**Protocol** |TCP - |**Port** |An unused TCP port, which must be available on all virtual machines. It cannot be used for any other purpose. No two listeners can use the same probe port. - |**Interval** |The amount of time between probe attempts. Use the default (5). - |**Unhealthy threshold** |The number of consecutive thresholds that should fail before a virtual machine is considered unhealthy. - -8. Select **OK** to save the probe. - -9. Create a load-balancing rule. Select **Load balancing rules**, and then select **Add**. - -10. 
Configure the new load-balancing rule by using the following settings: - - |Setting |Value - |:-----|:---- - |**Name** |A name to identify the load-balancing rule. - |**Frontend IP address** |Select the IP address you created. - |**Protocol** |TCP - |**Port** |Use the port that the SQL Server instances are using. A default instance uses port 1433, unless you changed it. - |**Backend port** |Use the same value as **Port**. - |**Backend pool** |The pool that contains the virtual machines with the SQL Server instances. - |**Health probe** |Choose the probe you created. - |**Session persistence** |None - |**Idle timeout (minutes)** |Default (4) - |**Floating IP (direct server return)** | Enabled - -### Configure the availability group to use the new IP address - -To finish configuring the cluster, repeat the steps that you followed when you made the first availability group. That is, configure the [cluster to use the new IP address](#configure-the-cluster-to-use-the-load-balancer-ip-address). - -After you have added an IP address for the listener, configure the additional availability group by doing the following steps: - -1. Verify that the probe port for the new IP address is open on both SQL Server virtual machines. - -2. [In Cluster Manager, add the client access point](#addcap). - -3. [Configure the IP resource for the availability group](#congroup). - - >[!IMPORTANT] - >When you create the IP address, use the IP address that you added to the load balancer. - -4. [Make the SQL Server availability group resource dependent on the client access point](#dependencyGroup). - -5. [Make the client access point resource dependent on the IP address](#listname). - -6. [Set the cluster parameters in PowerShell](#setparam). - -After you configure the availability group to use the new IP address, configure the connection to the listener. 
- -## Add load-balancing rule for distributed availability group - -If an availability group participates in a distributed availability group, the load balancer needs an additional rule. This rule stores the port used by the distributed availability group listener. - ->[!IMPORTANT] ->This step only applies if the availability group participates in a [distributed availability group](/sql/database-engine/availability-groups/windows/configure-distributed-availability-groups). - -1. On each server that participates in the distributed availability group, create an inbound rule on the distributed availability group listener TCP port. In many examples, documentation uses 5022. - -1. In the Azure portal, select the load balancer, select **Load balancing rules**, and then select **+Add**. - -1. Create the load balancing rule with the following settings: - - |Setting |Value - |:-----|:---- - |**Name** |A name to identify the load balancing rule for the distributed availability group. - |**Frontend IP address** |Use the same frontend IP address as the availability group. - |**Protocol** |TCP - |**Port** |5022 - The port for the [distributed availability group endpoint listener](/sql/database-engine/availability-groups/windows/configure-distributed-availability-groups).
    Can be any available port. - |**Backend port** | 5022 - Use the same value as **Port**. - |**Backend pool** |The pool that contains the virtual machines with the SQL Server instances. - |**Health probe** |Choose the probe you created. - |**Session persistence** |None - |**Idle timeout (minutes)** |Default (4) - |**Floating IP (direct server return)** | Enabled - -Repeat these steps for the load balancer on the other availability groups that participate in the distributed availability groups. - -If you have an Azure Network Security Group to restrict access, make sure that the allow rules include: -- The backend SQL Server VM IP addresses -- The load balancer floating IP addresses for the AG listener -- The cluster core IP address, if applicable. - -## Next steps - -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Always On availability groups with SQL Server on Azure VMs](availability-group-overview.md) -- [Always On availability groups overview](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server) -- [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md) \ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/windows/availability-group-manually-configure-multiple-regions.md b/articles/azure-sql/virtual-machines/windows/availability-group-manually-configure-multiple-regions.md deleted file mode 100644 index ed03595ae406d..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/availability-group-manually-configure-multiple-regions.md +++ /dev/null @@ -1,203 +0,0 @@ ---- -title: Configure a SQL Server Always On availability group across different regions -description: This article explains how to configure a SQL Server Always On availability group on Azure virtual machines with a replica in a different region. 
-services: virtual-machines -documentationCenter: na -author: rajeshsetlem -editor: monicar -tags: azure-service-management - -ms.assetid: 388c464e-a16e-4c9d-a0d5-bb7cf5974689 -ms.service: virtual-machines-sql -ms.subservice: hadr - -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: "05/02/2017" -ms.author: rsetlem -ms.custom: "seo-lt-2019" -ms.reviewer: mathoma ---- - -# Configure a SQL Server Always On availability group across different Azure regions - -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article explains how to configure a SQL Server Always On availability group replica on Azure virtual machines in a remote Azure location. Use this configuration to support disaster recovery. - -This article applies to Azure Virtual Machines in Resource Manager mode. - -The following image shows a common deployment of an availability group on Azure virtual machines: - -:::image type="content" source="./media/availability-group-manually-configure-multiple-regions/00-availability-group-basic.png" alt-text="Diagram that shows the Azure load balancer and the Availability set with a Windows Server Failover Cluster and Always On Availability Group"::: - -In this deployment, all virtual machines are in one Azure region. The availability group replicas can have synchronous commit with automatic failover on SQL-1 and SQL-2. To build this architecture, see [Availability Group template or tutorial](availability-group-overview.md). - -This architecture is vulnerable to downtime if the Azure region becomes inaccessible. To overcome this vulnerability, add a replica in a different Azure region. The following diagram shows how the new architecture would look: - - :::image type="content" source="./media/availability-group-manually-configure-multiple-regions/00-availability-group-basic-dr.png" alt-text="Availability Group DR"::: - -The preceding diagram shows a new virtual machine called SQL-3. 
SQL-3 is in a different Azure region. SQL-3 is added to the Windows Server Failover Cluster. SQL-3 can host an availability group replica. Finally, notice that the Azure region for SQL-3 has a new Azure load balancer. - ->[!NOTE] -> An Azure availability set is required when more than one virtual machine is in the same region. If only one virtual machine is in the region, then the availability set is not required. You can only place a virtual machine in an availability set at creation time. If the virtual machine is already in an availability set, you can add a virtual machine for an additional replica later. - -In this architecture, the replica in the remote region is normally configured with asynchronous commit availability mode and manual failover mode. - -When availability group replicas are on Azure virtual machines in different Azure regions, then you can connect the Virtual Networks using the recommended [Virtual Network Peering](../../../virtual-network/virtual-network-peering-overview.md) or [Site to Site VPN Gateway](../../../vpn-gateway/vpn-gateway-about-vpngateways.md) - - ->[!IMPORTANT] ->This architecture incurs outbound data charges for data replicated between Azure regions. See [Bandwidth Pricing](https://azure.microsoft.com/pricing/details/bandwidth/). - -## Create remote replica - -To create a replica in a remote data center, do the following steps: - -1. [Create a virtual network in the new region](../../../virtual-network/manage-virtual-network.md#create-a-virtual-network). - -1. Connect the Virtual Networks in the two Azure regions using one of the following methods: - - [Virtual Network Peering - Connect virtual networks with virtual network peering using the Azure portal](../../../virtual-network/tutorial-connect-virtual-networks-portal.md) (Recommended) - - or - - [Site to Site VPN Gateway - Configure a VNet-to-VNet connection using the Azure portal](../../../vpn-gateway/vpn-gateway-howto-vnet-vnet-resource-manager-portal.md). 
- - >[!NOTE] - >In some cases, you may have to use PowerShell to create the VNet-to-VNet connection. For example, if you use different Azure accounts you cannot configure the connection in the portal. In this case, see [Configure a VNet-to-VNet connection using PowerShell](../../../vpn-gateway/vpn-gateway-vnet-vnet-rm-ps.md). - -1. [Create a domain controller in the new region](/windows-server/identity/ad-ds/introduction-to-active-directory-domain-services-ad-ds-virtualization-level-100). - - This domain controller provides authentication if the domain controller in the primary site is not available. - -1. [Create a SQL Server virtual machine in the new region](create-sql-vm-portal.md). - -1. [Create an Azure load balancer in the network on the new region](availability-group-manually-configure-tutorial-single-subnet.md#configure-internal-load-balancer). - - This load balancer must: - - - Be in the same network and subnet as the new virtual machine. - - Have a static IP address for the availability group listener. - - Include a backend pool consisting of only the virtual machines in the same region as the load balancer. - - Use a TCP port probe specific to the IP address. - - Have a load balancing rule specific to the SQL Server in the same region. - - Be a Standard Load Balancer if the virtual machines in the backend pool are not part of either a single availability set or virtual machine scale set. For additional information, review [Azure Load Balancer Standard overview](../../../load-balancer/load-balancer-overview.md). - - Be a Standard Load Balancer if the two virtual networks in two different regions are peered over global VNet peering. For more information, see [Azure Virtual Network frequently asked questions (FAQ)](../../../virtual-network/virtual-networks-faq.md#what-are-the-constraints-related-to-global-vnet-peering-and-load-balancers). - -1. 
[Add Failover Clustering feature to the new SQL Server](availability-group-manually-configure-prerequisites-tutorial-single-subnet.md#add-failover-clustering-features-to-both-sql-server-vms). - -1. [Join the new SQL Server to the domain](availability-group-manually-configure-prerequisites-tutorial-single-subnet.md#joinDomain). - -1. [Set the new SQL Server service account to use a domain account](availability-group-manually-configure-prerequisites-tutorial-single-subnet.md#setServiceAccount). - -1. [Add the new SQL Server to the Windows Server Failover Cluster](availability-group-manually-configure-tutorial-single-subnet.md#addNode). - -1. Add an IP address resource to the cluster. - - You can create the IP address resource in Failover Cluster Manager. Select the name of the cluster, then right-click the cluster name under **Cluster Core Resources** and select **Properties**: - - :::image type="content" source="./media/availability-group-manually-configure-multiple-regions/cluster-name-properties.png" alt-text="Screenshot that shows the Failover Cluster Manager with a cluster name Server Name and Properties selected."::: - - On the **Properties** dialog box, select **Add** under **IP Address**, and then add the IP address of the cluster name from the remote network region. Select **OK** on the **IP Address** dialog box, and then select **OK** again on the **Cluster Properties** dialog box to save the new IP address. - - :::image type="content" source="./media/availability-group-manually-configure-multiple-regions/add-cluster-ip-address.png" alt-text="Add cluster IP"::: - - -1. Add the IP address as a dependency for the core cluster name. - - Open the cluster properties once more and select the **Dependencies** tab. Configure an OR dependency for the two IP addresses: - - :::image type="content" source="./media/availability-group-manually-configure-multiple-regions/cluster-ip-dependencies.png" alt-text="Cluster properties"::: - -1. 
Add an IP address resource to the availability group role in the cluster. - - Right-click the availability group role in Failover Cluster Manager, choose **Add Resource**, **More Resources**, and select **IP Address**. - - :::image type="content" source="./media/availability-group-manually-configure-multiple-regions/20-add-ip-resource.png" alt-text="Create IP Address"::: - - Configure this IP address as follows: - - - Use the network from the remote data center. - - Assign the IP address from the new Azure load balancer. - -1. Add the IP address resource as a dependency for the listener client access point (network name) cluster. - - The following screenshot shows a properly configured IP address cluster resource: - - :::image type="content" source="./media/availability-group-manually-configure-multiple-regions/50-configure-dependency-multiple-ip.png" alt-text="Availability Group"::: - - >[!IMPORTANT] - >The cluster resource group includes both IP addresses. Both IP addresses are dependencies for the listener client access point. Use the **OR** operator in the cluster dependency configuration. - -1. [Set the cluster parameters in PowerShell](availability-group-manually-configure-tutorial-single-subnet.md#setparam). - - Run the PowerShell script with the cluster network name, IP address, and probe port that you configured on the load balancer in the new region. - - ```powershell - $ClusterNetworkName = "" # The cluster name for the network in the new region (Use Get-ClusterNetwork on Windows Server 2012 or higher to find the name). - $IPResourceName = "" # The cluster name for the new IP Address resource. - $ILBIP = "" # The IP Address of the Internal Load Balancer (ILB) in the new region. This is the static IP address for the load balancer you configured in the Azure portal. - [int]$ProbePort = # The probe port you set on the ILB. 
- - Import-Module FailoverClusters - - Get-ClusterResource $IPResourceName | Set-ClusterParameter -Multiple @{"Address"="$ILBIP";"ProbePort"=$ProbePort;"SubnetMask"="255.255.255.255";"Network"="$ClusterNetworkName";"EnableDhcp"=0} - ``` - -1. On the new SQL Server in SQL Server Configuration Manager, [enable Always On Availability Groups](/sql/database-engine/availability-groups/windows/enable-and-disable-always-on-availability-groups-sql-server). - -1. [Open firewall ports on the new SQL Server](availability-group-manually-configure-prerequisites-tutorial-single-subnet.md#endpoint-firewall). The port numbers you need to open depend on your environment. Open ports for the mirroring endpoint and Azure load balancer health probe. -1. On the new SQL Server in SQL Server Management Studio, [configure system account permissions](availability-group-manually-configure-prerequisites-tutorial-single-subnet.md#configure-system-account-permissions). - -1. [Add a replica to the availability group on the new SQL Server](/sql/database-engine/availability-groups/windows/use-the-add-replica-to-availability-group-wizard-sql-server-management-studio). For a replica in a remote Azure region, set it for asynchronous replication with manual failover. - -## Set connection for multiple subnets - -The replica in the remote data center is part of the availability group but it is in a different subnet. If this replica becomes the primary replica, application connection time-outs may occur. This behavior is the same as an on-premises availability group in a multi-subnet deployment. To allow connections from client applications, either update the client connection or configure name resolution caching on the cluster network name resource. - -Preferably, update the cluster configuration to set `RegisterAllProvidersIP=1` and the client connection strings to set `MultiSubnetFailover=Yes`. 
See [Connecting With MultiSubnetFailover](/sql/relational-databases/native-client/features/sql-server-native-client-support-for-high-availability-disaster-recovery#Anchor_0). - -If you cannot modify the connection strings, you can configure name resolution caching. See [Time-out error and you cannot connect to a SQL Server 2012 AlwaysOn availability group listener in a multi-subnet environment](https://support.microsoft.com/help/2792139/time-out-error-and-you-cannot-connect-to-a-sql-server-2012-alwayson-av). - -## Fail over to remote region - -To test listener connectivity to the remote region, you can fail over the replica to the remote region. While the replica is asynchronous, failover is vulnerable to potential data loss. To fail over without data loss, change the availability mode to synchronous and set the failover mode to automatic. Use the following steps: - -1. In **Object Explorer**, connect to the instance of SQL Server that hosts the primary replica. -1. Under **AlwaysOn Availability Groups**, **Availability Groups**, right-click your availability group and select **Properties**. -1. On the **General** page, under **Availability Replicas**, set the secondary replica in the DR site to use **Synchronous Commit** availability mode and **Automatic** failover mode. -1. If you have a secondary replica in the same site as your primary replica for high availability, set this replica to **Asynchronous Commit** and **Manual**. -1. Select OK. -1. In **Object Explorer**, right-click the availability group, and select **Show Dashboard**. -1. On the dashboard, verify that the replica on the DR site is synchronized. -1. In **Object Explorer**, right-click the availability group, and select **Failover...**. SQL Server Management Studio opens a wizard to fail over SQL Server. -1. Select **Next**, and select the SQL Server instance in the DR site. Select **Next** again. -1. Connect to the SQL Server instance in the DR site and select **Next**. -1. 
On the **Summary** page, verify the settings and select **Finish**. - -After testing connectivity, move the primary replica back to your primary data center and set the availability mode back to their normal operating settings. The following table shows the normal operational settings for the architecture described in this document: - -| Location | Server Instance | Role | Availability Mode | Failover Mode -| ----- | ----- | ----- | ----- | ----- -| Primary data center | SQL-1 | Primary | Synchronous | Automatic -| Primary data center | SQL-2 | Secondary | Synchronous | Automatic -| Secondary or remote data center | SQL-3 | Secondary | Asynchronous | Manual - - -### More information about planned and forced manual failover - -For more information, see the following topics: - -- [Perform a Planned Manual Failover of an Availability Group (SQL Server)](/sql/database-engine/availability-groups/windows/perform-a-planned-manual-failover-of-an-availability-group-sql-server) -- [Perform a Forced Manual Failover of an Availability Group (SQL Server)](/sql/database-engine/availability-groups/windows/perform-a-forced-manual-failover-of-an-availability-group-sql-server) - -## Next steps - -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Always On availability groups with SQL Server on Azure VMs](availability-group-overview.md) -- [Always On availability groups overview](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server) -- [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md) diff --git a/articles/azure-sql/virtual-machines/windows/availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md b/articles/azure-sql/virtual-machines/windows/availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md deleted file mode 100644 index 2e64446e7eaab..0000000000000 --- 
a/articles/azure-sql/virtual-machines/windows/availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md +++ /dev/null @@ -1,588 +0,0 @@ ---- -title: "Tutorial: Prerequisites for AG in multiple subnets" -description: "This tutorial shows how to configure the prerequisites for creating an Always On availability group in multiple subnets for SQL Server on Azure Virtual Machines (VMs). " -services: virtual-machines -documentationCenter: na -author: rajeshsetlem -editor: monicar -tags: azure-service-management - -ms.assetid: 08a00342-fee2-4afe-8824-0db1ed4b8fca -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.topic: tutorial -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.reviewer: mathoma ---- - -# Tutorial: Prerequisites for availability groups in multiple subnets (SQL Server on Azure VMs) -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!TIP] -> Eliminate the need for an Azure Load Balancer or distributed network name (DNN) for your Always On availability group by creating your SQL Server VMs in multiple subnets within the same Azure virtual network. - -In this tutorial, complete the prerequisites for creating an [Always On availability group for SQL Server on Azure Virtual Machines (VMs) in multiple subnets](availability-group-manually-configure-tutorial-multi-subnet.md). At the end of this tutorial, you will have a domain controller on two Azure virtual machines, two SQL Server VMs in multiple subnets, and a storage account in a single resource group. - -**Time estimate**: This tutorial creates several resources in Azure and may take up to 30 minutes to complete. 
- -The following diagram illustrates the resources you deploy in this tutorial: - -:::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/multi-subnet-availability-group-diagram.png" alt-text="The following diagram illustrates the resources you deploy in this tutorial"::: - - -## Prerequisites - -To complete this tutorial, you need the following: - -- An Azure subscription. You can [open a free Azure account](https://signup.azure.com/signup?offer=ms-azr-0044p&appId=102&ref=azureplat-generic) or [activate Visual Studio subscriber benefits](/visualstudio/subscriptions/subscriber-benefits). -- A basic understanding of, and familiarity with, [Always On availability groups in SQL Server](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server). - - -## Create resource group - -To create the resource group in the Azure portal, follow these steps: - -1. Sign in to the [Azure portal](https://portal.azure.com). -1. Select **+ Create a resource** to create a new resource in the portal. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/01-portal-plus.png" alt-text="Select +Create a resource to create a new resource in the portal."::: - -1. Search for **resource group** in the **Marketplace** search box and choose the **Resource group** tile from Microsoft. Select **Create** on the **Resource group** page. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/01-resource-group-search.png" alt-text="Search for resource group in the Marketplace and then choose to create the Resource group. "::: - -1. On the **Create a resource group** page, fill out the values to create the resource group: - 1. Choose the appropriate Azure subscription from the drop-down. - 1. Provide a name for your resource group, such as **SQL-HA-RG**. - 1. 
Choose a region from the drop-down, such as **West US 2**. Be sure to deploy all subsequent resources to this location as well. - 1. Select **Review + create** to review your resource parameters, and then select **Create** to create your resource group. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/01-resource-group-create-complete.png" alt-text="Fill out the values to create your resource group in the Azure portal. "::: - - -## Create network and subnets - -Next, create the virtual network and three subnets. To learn more, see [Virtual network overview](../../../virtual-network/virtual-networks-overview.md). - -To create the virtual network in the Azure portal, follow these steps: - -1. Go to your resource group in the [Azure portal](https://portal.azure.com) and select **+ Create** - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/02-create-resource-rg.png" alt-text="Create new resource in your resource group"::: - -1. Search for **virtual network** in the **Marketplace** search box and choose the **virtual network** tile from Microsoft. Select **Create** on the **Virtual network** page. -1. On the **Create virtual network** page, enter the following information on the **Basics** tab: - 1. Under **Project details**, choose the appropriate Azure **Subscription**, and the **Resource group** you created previously, such as **SQL-HA-RG**. - 1. Under **Instance details**, provide a name for your virtual network, such as **SQLHAVNET**, and choose the same region as your resource group from the drop-down. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/03-create-vnet-basics.png" alt-text="Choose the resource group you made previously, then provide a name for your virtual network, such as SQLHAVNET"::: - -1. 
On the **IP addresses** tab, select the **default** subnet to open the **Edit subnet** page. Change the name to **DC-subnet** to use for the domain controller subnet. Select **Save**. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/04-create-vnet-ip-address-rename-default-subnet.png" alt-text="On the IP addresses tab, select the default subnet to open the Edit subnet page. Change the name to DC-subnet to use for the domain controller subnet. Select Save"::: - -1. Select **+ Add subnet** to add an additional subnet for your first SQL Server VM, and fill in the following values: - 1. Provide a value for the **Subnet name**, such as **SQL-subnet-1**. - 1. Provide a unique subnet address range within the virtual network address space. For example, you can iterate the third octet of DC-subnet address range by 1. - - For example, if your **DC-subnet** range is *10.38.0.0/24*, enter the IP address range `10.38.1.0/24` for **SQL-subnet-1**. - - Likewise, if your **DC-subnet** IP range is *10.5.0.0/24*, then enter `10.5.1.0/24` for the new subnet. - 1. Select **Add** to add your new subnet. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/05-create-vnet-ip-address-add-sql-subnet-1.png" alt-text="Name your first subnet, such as sql-subnet-1, and then iterate the third octet by 1, so that if your DC-subnet IP address is 10.5.0.0, your new subnet should be 10.5.1.0"::: - -1. Repeat the previous step to add an additional unique subnet range for your second SQL Server VM with a name such as **SQL-subnet-2**. You can iterate the third octet by one again. 
- - For example, if your **DC-subnet** IP range is *10.38.0.0/24*, and your **SQL-subnet-1** is *10.38.1.0/24*, then enter `10.38.2.0/24` for the new subnet - - Likewise, if your **DC-subnet** IP range is *10.5.0.0/24*, and your **SQL-subnet-1** is *10.5.1.0/24*, then enter the IP address range `10.5.2.0/24` for **SQL-subnet-2** . - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/06-create-vnet-ip-address-add-sql-subnet-2.png" alt-text="Name your second subnet, such as sql-subnet-2, and then iterate the third octet by 2, so that if your DC-subnet IP address is 10.38.0.0/24, your new subnet should be 10.38.2.0/24"::: - -1. After you've added the second subnet, review your subnet names and ranges (your IP address ranges may differ from the image). If everything looks correct, select **Review + create**, then **Create** to create your new virtual network. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/07-create-vnet-ip-address.png" alt-text="After you've added the second subnet, review your subnet names and ranges, like the image example (though your IP addresses may be different). If everything looks correct, select Review + create, then Create to create your new virtual network."::: - - Azure returns you to the portal dashboard and notifies you when the new network is created. - - -## Create domain controllers - -After your network and subnets are ready, create a virtual machine (or two optionally, for high availability) and configure it as your domain controller. - -### Create DC virtual machines - -To create your domain controller (DC) virtual machines in the Azure portal, follow these steps: - -1. 
Go to your resource group in the [Azure portal](https://portal.azure.com) and select **+ Create** - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/02-create-resource-rg.png" alt-text="Create new resource in your resource group"::: - -1. Search for **Windows Server** in the **Marketplace** search box. -1. On the **Windows Server** tile from Microsoft, select the **Create** drop-down and choose the **Windows Server 2016 Datacenter** image. -1. Fill out the values on the **Create a virtual machine** page to create your domain controller VM, such as **DC-VM-1**. Optionally, create an additional VM, such as **DC-VM-2** to provide high availability for the Active Directory Domain Services. Use the values in the following table to create your VM(s): - - | **Field** | Value | - | --- | --- | - | **Subscription** |*Your subscription* | - | **Resource group** |SQL-HA-RG | - | **Virtual machine name** |First domain controller: *DC-VM-1*.
    Second domain controller *DC-VM-2*. | - | **Region** |*The location where you deployed your resource group and virtual network.* | - | **Availability options** |Availability zone
    *For Azure regions that do not support Availability zones, use Availability sets instead. Create a new availability set and place all VMs created in this tutorial inside the availability set.* | - | **Availability zone** |Specify 1 for DC-VM-1.
    Specify 2 for DC-VM-2. | - | **Size** |D2s_v3 (2 vCPUs, 8 GB RAM) | - | **User name** |DomainAdmin | - | **Password** |Contoso!0000 | - | **Public inbound ports** | *Allow selected ports* | - | **Select inbound ports** | *RDP (3389)* | - | **OS disk type** | Premium SSD (locally redundant storage) | - | **Virtual network** |SQLHAVNET | - | **Subnet** |DC-subnet | - | **Public IP** |*Same name as the VM, such as DC-VM-1 or DC-VM-2* | - | **NIC network security group**| Basic | - | **Public inbound ports**| Allow selected ports | - | **Select inbound ports**| RDP (3389) | - | **Boot diagnostics** |Enable with managed storage account (recommended). | - - Azure notifies you when your virtual machines are created and ready to use. - - -### Configure the domain controller - -After your DC virtual machines are ready, configure the domain controller for corp.contoso.com. - -To configure **DC-VM-1** as the domain controller, follow these steps: - -1. Go to your resource group in the [Azure portal](https://portal.azure.com) and select the **DC-VM-1** machine. -1. On the **DC-VM-1** page, select **Connect** to download an RDP file for remote desktop access and then open the file. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/08-dc-vm-1-rdp-connect.png" alt-text="Connect to a virtual machine"::: - -1. Connect to the RDP session using your configured administrator account (**DomainAdmin**) and password (**Contoso!0000**). -1. Open the **Server Manager** dashboard (which may open by default) and choose to **Add roles and features**. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/09-add-features.png" alt-text="Server Manager - Add roles"::: - -1. Select **Next** until you get to the **Server Roles** section. -1. Select the **Active Directory Domain Services** and **DNS Server** roles. 
When you're prompted, add any additional features that are required by these roles. - - > [!NOTE] - > Windows warns you that there is no static IP address. If you're testing the configuration, select **Continue**. For production scenarios, set the IP address to static in the Azure portal, or [use PowerShell to set the static IP address of the domain controller machine](/azure/virtual-network/virtual-networks-static-private-ip-arm-ps). - > - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/10-add-roles.png" alt-text="Add Roles dialog"::: - -1. Select **Next** until you reach the **Confirmation** section. Select the **Restart the destination server automatically if required** check box. -1. Select **Install**. -1. After the features finish installing, return to the **Server Manager** dashboard. -1. Select the new **AD DS** option on the left-hand pane. -1. Select the **More** link on the yellow warning bar. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/11-ad-ds-more.png" alt-text="AD DS dialog on the DNS Server VM"::: - -1. In the **Action** column of the **All Server Task Details** dialog, select **Promote this server to a domain controller**. -1. In the **Active Directory Domain Services Configuration Wizard**, use the following values: - - | **Page** | Setting | - | --- | --- | - | **Deployment Configuration** |**Add a new forest**
    **Root domain name** = corp.contoso.com | - | **Domain Controller Options** |**DSRM Password** = Contoso!0000
    **Confirm Password** = Contoso!0000 | - -1. Select **Next** to go through the other pages in the wizard. On the **Prerequisites Check** page, verify that you see the following message: **All prerequisite checks passed successfully**. You can review any applicable warning messages, but it's possible to continue with the installation. -1. Select **Install**. The **DC-VM-1** virtual machine automatically reboots. - -### Identify DNS IP address - -Use the primary domain controller for DNS. To do so, identify the private IP address of the VM used for the primary domain controller. - -To identify the private IP address of the VM in the Azure portal, follow these steps: - -1. Go to your resource group in the [Azure portal](https://portal.azure.com) and select the primary domain controller, **DC-VM-1**. -1. On the **DC-VM-1** page, choose **Networking** in the **Settings** pane. -1. Note the **NIC Private IP** address. Use this IP address as the DNS server for the other virtual machines. In the example image, the private IP address is **10.38.0.4**. - -:::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/12-dc-vm-1-private-ip.png" alt-text="On the DC-VM-1 page, choose Networking in the Settings pane, and then note the NIC private IP address. Use this IP address as the DNS server. "::: - -### Configure virtual network DNS - -After you create the first domain controller and enable DNS, configure the virtual network to use this VM for DNS. - -To configure your virtual network for DNS, follow these steps: - -1. Go to your resource group in the [Azure portal](https://portal.azure.com), and select your virtual network, such as **SQLHAVNET**. -1. Select **DNS servers** under the **Settings** pane and then select **Custom**. -1. Enter the private IP address you identified previously in the **IP Address** field, such as `10.38.0.4`. -1. Select **Save**. 
- -:::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/12-identify-dns-ip-address.png" alt-text=" Select DNS servers under the Settings pane and then select Custom. Enter the private IP address you identified previously in the IP Address field, such as 10.38.0.4. "::: - - -### Configure second domain controller - -After the primary domain controller reboots, you can optionally configure the second domain controller for the purpose of high availability. If you do not want to configure a second domain controller, skip this step. However, note that a second domain controller is recommended in production environments. - -Set the preferred DNS server address, join the domain, and then configure the secondary domain controller. - -#### Set preferred DNS server address - -First, change the preferred DNS server address. To do so, follow these steps: - -1. Go to your resource group in the [Azure portal](https://portal.azure.com) and select the second domain controller machine, such as **DC-VM-2**. -1. On the **DC-VM-2** page, select **Connect** to download the RDP file for remote desktop access and then open the file. -1. Connect to the RDP session using your configured administrator account (**DomainAdmin**) and password (**Contoso!0000**). -1. Open the **Network and Sharing Center** and select the network interface. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/13-network-interface.png" alt-text="Network interface"::: - -1. Open the **Properties** page. -1. Choose the **Internet Protocol Version 4 (TCP/IPv4)** and then select **Properties**. -1. Select **Use the following DNS server addresses** and then specify the private IP address of the primary domain controller in **Preferred DNS server**, such as `10.38.0.4`. -1. Select **OK** and then **Close** to commit the changes. 
If you lose your remote desktop connection after changing the DNS IP address, go to the virtual machine in the [Azure portal](https://portal.azure.com) and restart the VM. - -### Join the domain - -Next, join the **corp.contoso.com** domain. To do so, follow these steps: - -1. Remotely connect to the virtual machine using the **BUILTIN\DomainAdmin** account. -1. Open **Server Manager**, and select **Local Server**. -1. Select **WORKGROUP**. -1. In the **Computer Name** section, select **Change**. -1. Select the **Domain** checkbox and type **corp.contoso.com** in the text box. Select **OK**. -1. In the **Windows Security** popup dialog, specify the credentials for the default domain administrator account (**CORP\DomainAdmin**) and the password (**Contoso!0000**). -1. When you see the "Welcome to the corp.contoso.com domain" message, select **OK**. -1. Select **Close**, and then select **Restart Now** in the popup dialog. - - -#### Configure domain controller - -Once your server has joined the domain, you can configure it as the second domain controller. To do so, follow these steps: - -1. If you're not already connected, open an RDP session to your secondary domain controller, and open **Server Manager Dashboard** (which may be open by default). -1. Select the **Add roles and features** link on the dashboard. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/09-add-features.png" alt-text="Server Manager - Add roles"::: - -1. Select **Next** until you get to the **Server Roles** section. -1. Select the **Active Directory Domain Services** and **DNS Server** roles. When you're prompted, add any additional features that are required by these roles. -1. After the features finish installing, return to the **Server Manager** dashboard. -1. Select the new **AD DS** option on the left-hand pane. -1. Select the **More** link on the yellow warning bar. -1. 
In the **Action** column of the **All Server Task Details** dialog, select **Promote this server to a domain controller**. -1. Under **Deployment Configuration**, select **Add a domain controller to an existing domain**. -1. Click **Select**. -1. Connect by using the administrator account (**CORP.CONTOSO.COM\domainadmin**) and password (**Contoso!0000**). -1. In **Select a domain from the forest**, choose your domain and then select **OK**. -1. In **Domain Controller Options**, use the default values and set a DSRM password. - - >[!NOTE] - >The **DNS Options** page might warn you that a delegation for this DNS server can't be created. You can ignore this warning in non-production environments. - > - -1. Select **Next** until the dialog reaches the **Prerequisites** check. Then select **Install**. - -After the server finishes the configuration changes, restart the server. - -### Add second DC IP address to DNS - -After your second domain controller is configured, follow the same steps as before to [identify the private IP address of the VM](#identify-dns-ip-address), and [add the private IP address as a secondary custom DNS server](#configure-virtual-network-dns) in the virtual network of your resource group. Adding the secondary DNS server in the Azure portal enables redundancy of the DNS service. - - - -## Configure domain accounts - -After your domain controller(s) have been configured, and you've set your DNS server(s) in the Azure portal, create domain accounts for the user who is installing SQL Server, and for the SQL Server service account. - -Configure three accounts in total, one installation account for both SQL Server VMs, and then a service account for each SQL Server VM. For example, use the values in the following table for the accounts: - -|Account | VM |Full domain name |Description | -|---------|---------|---------|---------| -|Install |Both| Corp\Install |Log into either VM with this account to configure the cluster and availability group. 
| -|SQLSvc1 |SQL-VM-1 |Corp\SQLSvc1 | Use this account for the SQL Server service on the first SQL Server VM. | -|SQLSvc2 |SQL-VM-2 |Corp\SQLSvc2| Use this account for the SQL Server service on the second SQL Server VM.| - -Follow these steps to create each account: - -1. Connect to your primary domain controller machine, such as **DC-VM-1**. -1. In **Server Manager**, select **Tools**, and then select **Active Directory Administrative Center**. -1. Select **corp (local)** from the left pane. -1. On the right **Tasks** pane, select **New**, and then select **User**. -1. Enter the new user account and set a complex password. For non-production environments, set the user account to never expire. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/14-ad-dc-new-user.png" alt-text="Active Directory Administrative Center"::: - -1. Select **OK** to create the user. -1. Repeat these steps to create all three accounts. - -### Grant installation account permissions - -Once the accounts are created, grant required domain permissions to the installation account so the account is able to create objects in AD. - -To grant the permissions to the installation account, follow these steps: - -1. Open the **Active Directory Administrative Center** from **Server Manager**, if it's not open already. -1. Select **corp (local)** in the left pane. -1. In the right-hand **Tasks** pane, verify you see **corp (local)** in the drop-down, and then select **Properties** underneath. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/15-ad-dc-properties.png" alt-text="CORP user properties"::: - -1. Select **Extensions**, and then select the **Advanced** button on the **Security** tab. -1. On the **Advanced Security Settings for corp** dialog box, select **Add**. -1. Select **Select a principal**, search for **CORP\Install**, and then select **OK**. -1. 
Check the boxes next to **Read all properties** and **Create Computer Objects**. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/16-add-permissions.png" alt-text="Corp user permissions"::: - -1. Select **OK**, and then select **OK** again. Close the **corp** properties window. - -Now that you've finished configuring Active Directory and the user objects, you are ready to create your SQL Server VMs. - -## Create SQL Server VMs - -Once your AD, DNS, and user accounts are configured, you are ready to create your SQL Server VMs. For simplicity, use the SQL Server VM images in the marketplace. - -However, before creating your SQL Server VMs, consider the following design decisions: - -**Availability - Availability Zones** - -For the highest level of redundancy, resiliency and availability deploy the VMs within separate Availability Zones. Availability Zones are unique physical locations within an Azure region. Each zone is made up of one or more datacenters with independent power, cooling, and networking. For Azure regions that do not support Availability Zones yet, use Availability Sets instead. Place all the VMs within the same Availability Set. - -**Storage - Azure Managed Disks** - -For the virtual machine storage, use Azure Managed Disks. Microsoft recommends Managed Disks for SQL Server virtual machines as they handle storage behind the scenes. For more information, see [Azure Managed Disks Overview](../../../virtual-machines/managed-disks-overview.md). - -**Network - Private IP addresses in production** - -For the virtual machines, this tutorial uses public IP addresses. A public IP address enables remote connection directly to the virtual machine over the internet and makes configuration steps easier. In production environments, Microsoft recommends only private IP addresses in order to reduce the vulnerability footprint of the SQL Server instance VM resource. 
- -**Network - Single NIC per server** - -Use a single NIC per server (cluster node). Azure networking has physical redundancy, which makes additional NICs unnecessary on a failover cluster deployed to an Azure virtual machine. The cluster validation report will warn you that the nodes are reachable only on a single network. You can ignore this warning when your failover cluster is on Azure virtual machines. - -To create your VMs, follow these steps: - -1. Go to your resource group in the [Azure portal](https://portal.azure.com) and select **+ Create**. -1. Search for **Azure SQL** and select the **Azure SQL** tile from Microsoft. -1. On the **Azure SQL** page, select **Create** and then choose the **SQL Server 2016 SP2 Enterprise on Windows Server 2016** image from the drop-down. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/18-select-sql-vm-image.png" alt-text=" On the Azure SQL page of the portal, select Create and then choose the SQL Server 2016 SP2 Enterprise on Windows Server 2016 image from the drop-down."::: - -Use the following table to fill out the values on the **Create a virtual machine** page to create both SQL Server VMs, such as **SQL-VM-1** and **SQL-VM-2** (your IP addresses may differ from the examples in the table): - -| Configuration | SQL-VM-1 | SQL-VM-2 | -| --- | --- | --- | -| Gallery image |**SQL Server 2016 SP2 Enterprise on Windows Server 2016** |**SQL Server 2016 SP2 Enterprise on Windows Server 2016** | -| **VM basics** |**Name** = SQL-VM-1
    **User Name** = DomainAdmin
    **Password** = Contoso!0000
    **Subscription** = Your subscription
    **Resource group** = SQL-HA-RG
    **Location** = Your Azure location |**Name** = SQL-VM-2
    **User Name** = DomainAdmin
    **Password** = Contoso!0000
    **Subscription** = Your subscription
    **Resource group** = SQL-HA-RG
    **Location** = Your Azure location | -| **VM Size** |**SIZE** = E2ds_v4 (2 vCPUs, 16 GB RAM)
    |**SIZE** = E2ds_v4 (2 vCPUs, 16 GB RAM) | -| **VM Settings** |**Availability options** = Availability zone
    **Availability zone** = 1
    **Public inbound ports** = Allow selected ports
    **Select inbound ports** = RDP (3389)
    **OS disk type** = Premium SSD (locally-redundant storage)
    **Virtual network** = SQLHAVNET
    **Subnet** = SQL-subnet-1(10.38.1.0/24)
    **Public IP address** = Automatically generated.
    **NIC network security group** = Basic
    **Public inbound ports** = Allow selected ports
    **Select inbound ports** = RDP (3389)
    **Boot Diagnostics** = Enable with managed storage account (recommended)
    |**Availability options** = Availability zone
    **Availability zone** = 2
    **Public inbound ports** = Allow selected ports
    **Select inbound ports** = RDP (3389)
    **OS disk type** = Premium SSD (locally-redundant storage)
    **Virtual network** = SQLHAVNET
    **Subnet** = SQL-subnet-2(10.38.2.0/24)
    **Public IP address** = Automatically generated.
    **NIC network security group** = Basic
    **Public inbound ports** = Allow selected ports
    **Select inbound ports** = RDP (3389)
    **Boot Diagnostics** = Enable with managed storage account (recommended)
    | -| **SQL Server settings** |**SQL connectivity** = Private (within Virtual Network)
    **Port** = 1433
    **SQL Authentication** = Disable
    **Azure Key Vault integration** = Disable
    **Storage optimization** = Transactional processing
    **SQL Data** = 1024 GiB, 5000 IOPS, 200 MB/s
    **SQL Log** = 1024 GiB, 5000 IOPS, 200 MB/s
    **SQL TempDb** = Use local SSD drive
    **Automated patching** = Sunday at 2:00
    **Automated backup** = Disable |**SQL connectivity** = Private (within Virtual Network)
    **Port** = 1433
    **SQL Authentication** = Disable
    **Azure Key Vault integration** = Disable
    **Storage optimization** = Transactional processing
    **SQL Data** = 1024 GiB, 5000 IOPS, 200 MB/s
    **SQL Log** = 1024 GiB, 5000 IOPS, 200 MB/s
    **SQL TempDb** = Use local SSD drive
    **Automated patching** = Sunday at 2:00
    **Automated backup** = Disable | - -
    - -> [!NOTE] -> These suggested machine sizes are only intended for testing availability groups in Azure Virtual Machines. For optimized production workloads, see the size recommendations in [Performance best practices for SQL Server on Azure VMs](./performance-guidelines-best-practices-checklist.md?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). -> - -## Configure SQL Server VMs - -After VM creation completes, configure your SQL Server VMs by adding a secondary IP address to each VM, and joining them to the domain. - - -### Add secondary IPs to SQL Server VMs - -In the multi-subnet environment, assign secondary IP addresses to each SQL Server VM to use for the availability group listener, and for Windows Server 2016 and earlier, assign secondary IP addresses to each SQL Server VM for the cluster IP address as well. Doing this negates the need for an Azure Load Balancer, as is the requirement in a single subnet environment. - -On Windows Server 2016 and earlier, you need to assign an additional secondary IP address to each SQL Server VM to use for the windows cluster IP since the cluster uses the **Cluster Network Name** rather than the default Distributed Network Name (DNN) introduced in Windows Server 2019. With a DNN, the cluster name object (CNO) is automatically registered with the IP addresses for all the nodes of the cluster, eliminating the need for a dedicated windows cluster IP address. - -If you're on Windows Server 2016 and prior, follow the steps in this section to assign a secondary IP address to each SQL Server VM for *both* the availability group listener, *and* the cluster. - -If you're on Windows Server 2019 or later, only assign a secondary IP address for the availability group listener, and skip the steps to assign a windows cluster IP, unless you plan to configure your cluster with a virtual network name (VNN), in which case assign both IP addresses to each SQL Server VM as you would for Windows Server 2016. 
- -To assign additional secondary IPs to the VMs, follow these steps: - -1. Go to your resource group in the [Azure portal](https://portal.azure.com/) and select the first SQL Server VM, such as **SQL-VM-1**. -1. Select **Networking** in the **Settings** pane, and then select the **Network Interface**: - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/19-sql-vm-network-interface.png" alt-text="Select Networking in the Settings pane, and then select the Network Interface"::: - -1. On the **Network Interface** page, select **IP configurations** in the **Settings** pane and then choose **+ Add** to add an additional IP address: - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/20-ip-configurations-add.png" alt-text="IP configurations"::: - -1. On the **Add IP configuration** page, do the following: - 1. Specify the **Name** as the Windows Cluster IP, such as **windows-cluster-ip** for Windows 2016 and earlier. Skip this step if you're on Windows Server 2019 or later. - 1. Set the **Allocation** to **Static**. - 1. Enter an unused **IP address** in the same subnet (**SQL-subnet-1**) as the SQL Server VM (**SQL-VM-1**), such as `10.38.1.10`. - 1. Leave the **Public IP address** at the default of **Disassociate**. - 1. Select **OK** to finish adding the IP configuration. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/21-add-ip-windows-cluster.png" alt-text="Add Cluster IP by entering an unused IP address in the subnet of the first SQL Server VM"::: - -1. 
Select **+ Add** again to configure an additional IP address for the availability group listener (with a name such as **availability-group-listener**), again specifying an unused IP address in **SQL-subnet-1** such as `10.38.1.11`: - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/22-add-ip-ag-listener.png" alt-text="Select + Add again to configure an additional IP address for the availability group listener (with a name such as availability-group-listener), again using an unused IP address in SQL-subnet-1 such as 10.38.1.11"::: - -1. Repeat these steps again for the second SQL Server VM, such as **SQL-VM-2**. Assign two unused secondary IP addresses within **SQL-subnet-2**. Use the values from the following table to add the IP configuration: - - - | **Field** | Input | Input | - | --- | --- | --- | - | **Name** |windows-cluster-ip | availability-group-listener | - | **Allocation** | Static | Static | - | **IP address** | 10.38.2.10 | 10.38.2.11 | - - -Now you are ready to join the **corp.contoso.com** domain. - -### Join the servers to the domain - -Once your two secondary IP addresses have been assigned to both SQL Server VMs, join each SQL Server VM to the **corp.contoso.com** domain. - -To join the corp.contoso.com domain, follow the same steps for the SQL Server VM as you did when you [joined the domain](#join-the-domain) with the secondary domain controller. - -Wait for each SQL Server VM to restart, and then you can add your accounts. - - -## Add accounts - -Add the installation account as an administrator on each VM, grant permission to the installation account and local accounts within SQL Server, and update the SQL Server service account. - -### Add install account - -Once both SQL Server VMs have joined the domain, add **CORP\Install** as a member of the local administrators group. - ->[!TIP] -> Be sure you sign in with the *domain* administrator account. 
In previous steps, you were using the **BUILTIN** administrator account. Now that the server is part of the domain, use the domain account. In your RDP session, specify *DOMAIN*\\*username*, such as **CORP\DomainAdmin**. - -To add the account as an admin, follow these steps: - -1. Wait until the VM is restarted, then launch the RDP file again from the first SQL Server VM to sign in to **SQL-VM-1** by using the **CORP\DomainAdmin** account. -1. In **Server Manager**, select **Tools**, and then select **Computer Management**. -1. In the **Computer Management** window, expand **Local Users and Groups**, and then select **Groups**. -1. Double-click the **Administrators** group. -1. In the **Administrators Properties** dialog, select the **Add** button. -1. Enter the user **CORP\Install**, and then select **OK**. -1. Select **OK** to close the **Administrator Properties** dialog. -1. Repeat these steps on **SQL-VM-2**. - -### Add account to sysadmin - -The installation account (CORP\install) used to configure the availability group must be part of the **sysadmin** fixed server role on each SQL Server VM. - -To grant **sysadmin** rights to the installation account, follow these steps: - -1. Connect to the server through the Remote Desktop Protocol (RDP) by using the *\\DomainAdmin* account, such as `SQL-VM-1\DomainAdmin`. -1. Open SQL Server Management Studio and connect to the local instance of SQL Server. -1. In **Object Explorer**, select **Security**. -1. Right-click **Logins**. Select **New Login**. -1. In **Login - New**, select **Search**. -1. Select **Locations**. -1. Enter the domain administrator network credentials. -1. Use the installation account (CORP\install). -1. Set the sign-in to be a member of the **sysadmin** fixed server role. -1. Select **OK**. -1. Repeat these steps on the second SQL Server VM, such as **SQL-VM-2**, connecting with the relevant machine name account, such as `SQL-VM-2\DomainAdmin`. 
- - -### Add system account - -In later versions of SQL Server, the [NT AUTHORITY\SYSTEM] account does not have permissions to SQL Server by default, and must be granted manually. - -To add the [NT AUTHORITY\SYSTEM] and grant appropriate permissions, follow these steps: - -1. Connect to the first SQL Server VM through the Remote Desktop Protocol (RDP) by using the *\\DomainAdmin* account, such as `SQL-VM-1\DomainAdmin`. -1. Open SQL Server Management Studio and connect to the local instance of SQL Server. -1. Create an account for `[NT AUTHORITY\SYSTEM]` on each SQL Server instance by using the following Transact-SQL (T-SQL) command: - - ```sql - USE [master] - GO - CREATE LOGIN [NT AUTHORITY\SYSTEM] FROM WINDOWS WITH DEFAULT_DATABASE=[master] - GO - ``` - -1. Grant the following permissions to `[NT AUTHORITY\SYSTEM]` on each SQL Server instance: - - - `ALTER ANY AVAILABILITY GROUP` - - `CONNECT SQL` - - `VIEW SERVER STATE` - - To grant these permissions, use the following Transact-SQL (T-SQL) command: - - ```sql - GRANT ALTER ANY AVAILABILITY GROUP TO [NT AUTHORITY\SYSTEM] - GO - GRANT CONNECT SQL TO [NT AUTHORITY\SYSTEM] - GO - GRANT VIEW SERVER STATE TO [NT AUTHORITY\SYSTEM] - GO - ``` - -1. Repeat these steps on the second SQL Server VM, such as **SQL-VM-2**, connecting with the relevant machine name account, such as `SQL-VM-2\DomainAdmin`. - -### Set the SQL Server service accounts - -The SQL Server service on each VM needs to use a dedicated domain account. Use the domain accounts you created earlier: **Corp\SQLSvc1** for **SQL-VM-1** and **Corp\SQLSvc2** for **SQL-VM-2**. - -To set the service account, follow these steps: - -1. Connect to the first SQL Server VM through the Remote Desktop Protocol (RDP) by using the *\\DomainAdmin* account, such as `SQL-VM-1\DomainAdmin`. -1. Open **SQL Server Configuration Manager**. -1. Right-click the SQL Server service, and then select **Properties**. -1. Provide the account (**Corp\SQLSvc1**) and password. -1. 
Select **Apply** to commit your change and restart the SQL Server service. -1. Repeat these steps on the other SQL Server VM (SQL-VM-2), signing in with the machine domain account, such as `SQL-VM-2\DomainAdmin`, and providing the second service account (**Corp\SQLSvc2**). - - -## Create Azure Storage Account - -To deploy a two-node Windows Server Failover Cluster, a third member is necessary to establish quorum. On Azure VMs, the cloud witness is the recommended quorum option. To configure a cloud witness, you need an Azure Storage account. To learn more, see [Deploy a Cloud Witness for a Failover Cluster](/windows-server/failover-clustering/deploy-cloud-witness). - -To create the Azure Storage Account in the portal: - -1. In the portal, open the **SQL-HA-RG** resource group and select **+ Create** -1. Search for **storage account**. -1. Select **Storage account** and select **Create**, configuring it with the following values: - - 1. Select your subscription and select the resource group **SQL-HA-RG.** - 1. Enter a **Storage Account Name** for your storage account. - Storage account names must be between 3 and 24 characters in length and may contain numbers and lowercase letters only. The storage account name must also be unique within Azure. - 1. Select your **Region.** - 1. For **Performance**, select **Standard: Recommended for most scenarios (general-purpose v2 account)**. Azure Premium Storage is not supported for a cloud witness. - 1. For **Redundancy**, select **Locally-redundant storage (LRS).** - Failover Clustering uses the blob file as the arbitration point, which requires some consistency guarantees when reading the data. Therefore you must select Locally-redundant storage for the Replication type. - 1. Select **Review + create** - -## Configure the firewall - -The availability group feature relies on traffic through the following TCP ports: - -- **SQL Server VM**: Port 1433 for a default instance of SQL Server. 
-- **Database mirroring endpoint:** Any available port. Examples frequently use 5022. - -Open these firewall ports on both SQL Server VMs. The method of opening the ports depends on the firewall solution that you use, and may vary from the Windows Firewall example provided in this section. - -To open these ports on a Windows Firewall, follow these steps: - -1. On the first SQL Server **Start** screen, launch **Windows Firewall with Advanced Security**. -1. On the left pane, select **Inbound Rules**. On the right pane, select **New Rule**. -1. For **Rule Type**, choose **Port**. -1. For the port, specify **TCP** and type the appropriate port numbers. See the following example: - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/17-firewall-tcp-ports.png" alt-text="SQL firewall"::: - -1. Select **Next**. -1. On the **Action** page, select **Allow the connection**, and then select **Next**. -1. On the **Profile** page, accept the default settings, and then select **Next**. -1. On the **Name** page, specify a rule name (such as **SQL Inbound**) in the **Name** text box, and then select **Finish**. -1. Repeat these steps on the second SQL Server VM. - -## Next steps - -Now that you've configured the prerequisites, get started with [configuring your availability group](availability-group-manually-configure-tutorial-multi-subnet.md) in multiple subnets. 
- -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Always On availability groups with SQL Server on Azure VMs](availability-group-overview.md) -- [Always On availability groups overview](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server) -- [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md) \ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/windows/availability-group-manually-configure-prerequisites-tutorial-single-subnet.md b/articles/azure-sql/virtual-machines/windows/availability-group-manually-configure-prerequisites-tutorial-single-subnet.md deleted file mode 100644 index 02633ced7479d..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/availability-group-manually-configure-prerequisites-tutorial-single-subnet.md +++ /dev/null @@ -1,552 +0,0 @@ ---- -title: "Tutorial: Prerequisites for a single-subnet availability group" -description: "This tutorial shows how to configure the prerequisites for creating a SQL Server Always On availability group on Azure Virtual Machines in a single subnet." 
-services: virtual-machines -documentationCenter: na -author: rajeshsetlem -editor: monicar -tags: azure-service-management -ms.assetid: c492db4c-3faa-4645-849f-5a1a663be55a -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.custom: "seo-lt-2019" -ms.reviewer: mathoma ---- - -# Tutorial: Prerequisites for creating availability groups on SQL Server on Azure Virtual Machines - -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!TIP] -> Eliminate the need for an Azure Load Balancer or distributed network name (DNN) for your Always On availability group by creating your SQL Server VMs in multiple subnets within the same Azure virtual network. - - -This tutorial shows how to complete the prerequisites for creating a [SQL Server Always On availability group on Azure Virtual Machines (VMs)](availability-group-manually-configure-tutorial-single-subnet.md) within a single subnet. When you've completed the prerequisites, you'll have a domain controller, two SQL Server VMs, and a witness server in a single resource group. - -While this article manually configures the availability group environment, it is also possible to do so using the [Azure portal](availability-group-azure-portal-configure.md), [PowerShell or the Azure CLI](availability-group-az-commandline-configure.md), or [Azure Quickstart templates](availability-group-quickstart-template-configure.md) as well. - -**Time estimate**: It might take a couple of hours to complete the prerequisites. Much of this time is spent creating virtual machines. - -The following diagram illustrates what you build in the tutorial. 
- -:::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/00-EndstateSampleNoELB.png" alt-text="Availability group"::: - ->[!NOTE] -> It's now possible to lift and shift your availability group solution to SQL Server on Azure VMs using Azure Migrate. See [Migrate availability group](../../migration-guides/virtual-machines/sql-server-availability-group-to-sql-on-azure-vm.md) to learn more. - -## Review availability group documentation - -This tutorial assumes that you have a basic understanding of SQL Server Always On availability groups. If you're not familiar with this technology, see [Overview of Always On availability groups (SQL Server)](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server). - - -## Create an Azure account - -You need an Azure account. You can [open a free Azure account](https://signup.azure.com/signup?offer=ms-azr-0044p&appId=102&ref=azureplat-generic) or [activate Visual Studio subscriber benefits](/visualstudio/subscriptions/subscriber-benefits). - -## Create a resource group - -1. Sign in to the [Azure portal](https://portal.azure.com). -2. Select **+** to create a new object in the portal. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/01-portalplus.png" alt-text="New object"::: - -3. Type **resource group** in the **Marketplace** search window. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/01-resourcegroupsymbol.png" alt-text="Resource group"::: - -4. Select **Resource group**. -5. Select **Create**. -6. Under **Resource group name**, type a name for the resource group. For example, type **sql-ha-rg**. -7. If you have multiple Azure subscriptions, verify that the subscription is the Azure subscription that you want to create the availability group in. -8. Select a location. 
The location is the Azure region where you want to create the availability group. This article builds all resources in one Azure location. -9. Verify that **Pin to dashboard** is checked. This optional setting places a shortcut for the resource group on the Azure portal dashboard. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/01-resourcegroup.png" alt-text="Resource group shortcut for the Azure portal"::: - -10. Select **Create** to create the resource group. - -Azure creates the resource group and pins a shortcut to the resource group in the portal. - -## Create the network and subnet - -The next step is to create the networks and subnet in the Azure resource group. - -The solution uses one virtual network and one subnet. The [Virtual network overview](../../../virtual-network/virtual-networks-overview.md) provides more information about networks in Azure. - -To create the virtual network in the Azure portal: - -1. In your resource group, select **+ Add**. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/02-newiteminrg.png" alt-text="New item"::: -2. Search for **virtual network**. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/04-findvirtualnetwork.png" alt-text="Search virtual network"::: -3. Select **Virtual network**. -4. On the **Virtual network**, select the **Resource Manager** deployment model, and then select **Create**. - - The following table shows the settings for the virtual network: - - | **Field** | Value | - | --- | --- | - | **Name** |autoHAVNET | - | **Address space** |10.0.0.0/24 | - | **Subnet name** |Admin | - | **Subnet address range** |10.0.0.0/29 | - | **Subscription** |Specify the subscription that you intend to use. **Subscription** is blank if you only have one subscription. 
| - | **Resource group** |Choose **Use existing** and pick the name of the resource group. | - | **Location** |Specify the Azure location. | - - Your address space and subnet address range might be different from the table. Depending on your subscription, the portal suggests an available address space and corresponding subnet address range. If no sufficient address space is available, use a different subscription. - - The example uses the subnet name **Admin**. This subnet is for the domain controllers and SQL Server VMs. - -5. Select **Create**. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/06-configurevirtualnetwork.png" alt-text="Configure the virtual network"::: - -Azure returns you to the portal dashboard and notifies you when the new network is created. - -## Create availability sets - -Before you create virtual machines, you need to create availability sets. Availability sets reduce the downtime for planned or unplanned maintenance events. An Azure availability set is a logical group of resources that Azure places on physical fault domains and update domains. A fault domain ensures that the members of the availability set have separate power and network resources. An update domain ensures that members of the availability set aren't brought down for maintenance at the same time. For more information, see [Manage the availability of virtual machines](../../../virtual-machines/availability.md?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). - -You need two availability sets. One is for the domain controllers. The second is for the SQL Server VMs. - -To create an availability set, go to the resource group and select **Add**. Filter the results by typing **availability set**. Select **Availability Set** in the results, and then select **Create**. 
- -Configure two availability sets according to the parameters in the following table: - -| **Field** | Domain controller availability set | SQL Server availability set | -| --- | --- | --- | -| **Name** |adavailabilityset |sqlavailabilityset | -| **Resource group** |SQL-HA-RG |SQL-HA-RG | -| **Fault domains** |3 |3 | -| **Update domains** |5 |3 | - -After you create the availability sets, return to the resource group in the Azure portal. - -## Create domain controllers - -After you've created the network, subnet, and availability sets, you're ready to create the virtual machines for the domain controllers. - -### Create virtual machines for the domain controllers - -To create and configure the domain controllers, return to the **SQL-HA-RG** resource group. - -1. Select **Add**. -2. Type **Windows Server 2016 Datacenter**. -3. Select **Windows Server 2016 Datacenter**. In **Windows Server 2016 Datacenter**, verify that the deployment model is **Resource Manager**, and then select **Create**. - -Repeat the preceding steps to create two virtual machines. Name the two virtual machines: - -* ad-primary-dc -* ad-secondary-dc - - > [!NOTE] - > The **ad-secondary-dc** virtual machine is optional, to provide high availability for Active Directory Domain Services. - > - -The following table shows the settings for these two machines: - -| **Field** | Value | -| --- | --- | -| **Name** |First domain controller: *ad-primary-dc*.
    Second domain controller *ad-secondary-dc*. | -| **VM disk type** |SSD | -| **User name** |DomainAdmin | -| **Password** |Contoso!0000 | -| **Subscription** |*Your subscription* | -| **Resource group** |SQL-HA-RG | -| **Location** |*Your location* | -| **Size** |DS1_V2 | -| **Storage** | **Use managed disks** - **Yes** | -| **Virtual network** |autoHAVNET | -| **Subnet** |admin | -| **Public IP address** |*Same name as the VM* | -| **Network security group** |*Same name as the VM* | -| **Availability set** |adavailabilityset
    **Fault domains**:2
    **Update domains**:2| -| **Diagnostics** |Enabled | -| **Diagnostics storage account** |*Automatically created* | - - >[!IMPORTANT] - >You can only place a VM in an availability set when you create it. You can't change the availability set after a VM is created. See [Manage the availability of virtual machines](../../../virtual-machines/availability.md). - -Azure creates the virtual machines. - -After the virtual machines are created, configure the domain controller. - -### Configure the domain controller - -In the following steps, configure the **ad-primary-dc** machine as a domain controller for corp.contoso.com. - -1. In the portal, open the **SQL-HA-RG** resource group and select the **ad-primary-dc** machine. On **ad-primary-dc**, select **Connect** to open an RDP file for remote desktop access. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/20-connectrdp.png" alt-text="Connect to a virtual machine"::: - -2. Sign in with your configured administrator account (**\DomainAdmin**) and password (**Contoso!0000**). -3. By default, the **Server Manager** dashboard should be displayed. -4. Select the **Add roles and features** link on the dashboard. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/22-add-features.png" alt-text="Select the Add roles and features link on the dashboard."::: - -5. Select **Next** until you get to the **Server Roles** section. -6. Select the **Active Directory Domain Services** and **DNS Server** roles. When you're prompted, add any additional features that are required by these roles. - - > [!NOTE] - > Windows warns you that there is no static IP address. If you're testing the configuration, select **Continue**. 
For production scenarios, set the IP address to static in the Azure portal, or [use PowerShell to set the static IP address of the domain controller machine](/previous-versions/azure/virtual-network/virtual-networks-reserved-private-ip). - > - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/23-add-roles.png" alt-text=" Select the Active Directory Domain Services and DNS Server roles."::: - -7. Select **Next** until you reach the **Confirmation** section. Select the **Restart the destination server automatically if required** check box. -8. Select **Install**. -9. After the features finish installing, return to the **Server Manager** dashboard. -10. Select the new **AD DS** option on the left-hand pane. -11. Select the **More** link on the yellow warning bar. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/24-addsmore.png" alt-text="AD DS dialog on the DNS Server VM"::: - -12. In the **Action** column of the **All Server Task Details** dialog, select **Promote this server to a domain controller**. -13. In the **Active Directory Domain Services Configuration Wizard**, use the following values: - - | **Page** | Setting | - | --- | --- | - | **Deployment Configuration** |**Add a new forest**
    **Root domain name** = corp.contoso.com | - | **Domain Controller Options** |**DSRM Password** = Contoso!0000
    **Confirm Password** = Contoso!0000 | - -14. Select **Next** to go through the other pages in the wizard. On the **Prerequisites Check** page, verify that you see the following message: **All prerequisite checks passed successfully**. You can review any applicable warning messages, but it's possible to continue with the installation. -15. Select **Install**. The **ad-primary-dc** virtual machine automatically reboots. - -### Note the IP address of the primary domain controller - -Use the primary domain controller for DNS. Note the primary domain controller IP address. - -One way to get the primary domain controller IP address is through the Azure portal. - -1. On the Azure portal, open the resource group. - -2. Select the primary domain controller. - -3. On the primary domain controller, select **Network interfaces**. - -:::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/25-primarydcip.png" alt-text="Network interfaces"::: - -Note the private IP address for this server. - -### Configure the virtual network DNS - -After you create the first domain controller and enable DNS on the first server, configure the virtual network to use this server for DNS. - -1. In the Azure portal, select on the virtual network. - -2. Under **Settings**, select **DNS Server**. - -3. Select **Custom**, and type the private IP address of the primary domain controller. - -4. Select **Save**. - -### Configure the second domain controller - -After the primary domain controller reboots, you can configure the second domain controller. This optional step is for high availability. Follow these steps to configure the second domain controller: - -1. In the portal, open the **SQL-HA-RG** resource group and select the **ad-secondary-dc** machine. On **ad-secondary-dc**, select **Connect** to open an RDP file for remote desktop access. -2. 
Sign in to the VM by using your configured administrator account (**BUILTIN\DomainAdmin**) and password (**Contoso!0000**). -3. Change the preferred DNS server address to the address of the domain controller. -4. In **Network and Sharing Center**, select the network interface. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/26-networkinterface.png" alt-text="Network interface"::: - -5. Select **Properties**. -6. Select **Internet Protocol Version 4 (TCP/IPv4)** and then select **Properties**. -7. Select **Use the following DNS server addresses** and then specify the address of the primary domain controller in **Preferred DNS server**. -8. Select **OK**, and then **Close** to commit the changes. You are now able to join the VM to **corp.contoso.com**. - - >[!IMPORTANT] - >If you lose the connection to your remote desktop after changing the DNS setting, go to the Azure portal and restart the virtual machine. - -9. From the remote desktop to the secondary domain controller, open **Server Manager Dashboard**. -10. Select the **Add roles and features** link on the dashboard. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/22-add-features.png" alt-text="Select the Add roles and features link on the dashboard."::: - -11. Select **Next** until you get to the **Server Roles** section. -12. Select the **Active Directory Domain Services** and **DNS Server** roles. When you're prompted, add any additional features that are required by these roles. -13. After the features finish installing, return to the **Server Manager** dashboard. -14. Select the new **AD DS** option on the left-hand pane. -15. Select the **More** link on the yellow warning bar. -16. In the **Action** column of the **All Server Task Details** dialog, select **Promote this server to a domain controller**. -17. 
Under **Deployment Configuration**, select **Add a domain controller to an existing domain**. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/28-deploymentconfig.png" alt-text="Deployment configuration"::: - -18. Click **Select**. -19. Connect by using the administrator account (**CORP.CONTOSO.COM\domainadmin**) and password (**Contoso!0000**). -20. In **Select a domain from the forest**, choose your domain and then select **OK**. -21. In **Domain Controller Options**, use the default values and set a DSRM password. - - >[!NOTE] - >The **DNS Options** page might warn you that a delegation for this DNS server can't be created. You can ignore this warning in non-production environments. - > - -22. Select **Next** until the dialog reaches the **Prerequisites** check. Then select **Install**. - -After the server finishes the configuration changes, restart the server. - -### Add the private IP address to the second domain controller to the VPN DNS Server - -In the Azure portal, under virtual network, change the DNS Server to include the IP address of the secondary domain controller. This setting allows the DNS service redundancy. - -### Configure the domain accounts - -In the next steps, you configure the Active Directory accounts. The following table shows the accounts: - -| |Installation account
    |sqlserver-0
    SQL Server and SQL Agent Service account |sqlserver-1
    SQL Server and SQL Agent Service account -| --- | --- | --- | --- -|**First Name** |Install |SQLSvc1 | SQLSvc2 -|**User SamAccountName** |Install |SQLSvc1 | SQLSvc2 - -Use the following steps to create each account. - -1. Sign in to the **ad-primary-dc** machine. -2. In **Server Manager**, select **Tools**, and then select **Active Directory Administrative Center**. -3. Select **corp (local)** from the left pane. -4. On the right **Tasks** pane, select **New**, and then select **User**. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/29-addcnewuser.png" alt-text="Active Directory Administrative Center"::: - - >[!TIP] - >Set a complex password for each account.
    For non-production environments, set the user account to never expire. - > - -5. Select **OK** to create the user. -6. Repeat the preceding steps for each of the three accounts. - -### Grant the required permissions to the installation account - -1. In the **Active Directory Administrative Center**, select **corp (local)** in the left pane. Then in the right-hand **Tasks** pane, select **Properties**. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/31-addcproperties.png" alt-text="CORP user properties"::: - -2. Select **Extensions**, and then select the **Advanced** button on the **Security** tab. -3. In the **Advanced Security Settings for corp** dialog, select **Add**. -4. Click **Select a principal**, search for **CORP\Install**, and then select **OK**. -5. Select the **Read all properties** check box. - -6. Select the **Create Computer objects** check box. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/33-addpermissions.png" alt-text="Corp user permissions"::: - -7. Select **OK**, and then select **OK** again. Close the **corp** properties window. - -Now that you've finished configuring Active Directory and the user objects, create two SQL Server VMs and a witness server VM. Then join all three to the domain. - -## Create SQL Server VMs - -Create three additional virtual machines. The solution requires two virtual machines with SQL Server instances. A third virtual machine will function as a witness. Windows Server 2016 can use a [cloud witness](/windows-server/failover-clustering/deploy-cloud-witness). However for consistency with previous operating systems, this article uses a virtual machine for a witness. - -Before you proceed consider the following design decisions. - -* **Storage - Azure Managed Disks** - - For the virtual machine storage, use Azure Managed Disks. 
Microsoft recommends Managed Disks for SQL Server virtual machines. Managed Disks handles storage behind the scenes. In addition, when virtual machines with Managed Disks are in the same availability set, Azure distributes the storage resources to provide appropriate redundancy. For more information, see [Azure Managed Disks Overview](../../../virtual-machines/managed-disks-overview.md). For specifics about managed disks in an availability set, see [Use Managed Disks for VMs in an availability set](../../../virtual-machines/availability.md). - -* **Network - Private IP addresses in production** - - For the virtual machines, this tutorial uses public IP addresses. A public IP address enables remote connection directly to the virtual machine over the internet and makes configuration steps easier. In production environments, Microsoft recommends only private IP addresses in order to reduce the vulnerability footprint of the SQL Server instance VM resource. - -* **Network - Recommend a single NIC per server** - -Use a single NIC per server (cluster node) and a single subnet. Azure networking has physical redundancy, which makes additional NICs and subnets unnecessary on an Azure virtual machine guest cluster. The cluster validation report will warn you that the nodes are reachable only on a single network. You can ignore this warning on Azure virtual machine guest failover clusters. - -### Create and configure the SQL Server VMs - -Next, create three VMs - two SQL Server VMs and one VM for an additional cluster node. To create each of the VMs, go back to the **SQL-HA-RG** resource group, and then select **Add**. Search for the appropriate gallery item, select **Virtual Machine**, and then select **From Gallery**. 
Use the information in the following table to help you create the VMs: - - -| Page | VM1 | VM2 | VM3 | -| --- | --- | --- | --- | -| Select the appropriate gallery item |**Windows Server 2016 Datacenter** |**SQL Server 2016 SP1 Enterprise on Windows Server 2016** |**SQL Server 2016 SP1 Enterprise on Windows Server 2016** | -| Virtual machine configuration **Basics** |**Name** = cluster-fsw
    **User Name** = DomainAdmin
    **Password** = Contoso!0000
    **Subscription** = Your subscription
    **Resource group** = SQL-HA-RG
    **Location** = Your Azure location |**Name** = sqlserver-0
    **User Name** = DomainAdmin
    **Password** = Contoso!0000
    **Subscription** = Your subscription
    **Resource group** = SQL-HA-RG
    **Location** = Your Azure location |**Name** = sqlserver-1
    **User Name** = DomainAdmin
    **Password** = Contoso!0000
    **Subscription** = Your subscription
    **Resource group** = SQL-HA-RG
    **Location** = Your Azure location | -| Virtual machine configuration **Size** |**SIZE** = DS1\_V2 (1 vCPU, 3.5 GB) |**SIZE** = DS2\_V2 (2 vCPUs, 7 GB)
    The size must support SSD storage (Premium disk support). |**SIZE** = DS2\_V2 (2 vCPUs, 7 GB) | -| Virtual machine configuration **Settings** |**Storage**: Use managed disks.
    **Virtual network** = autoHAVNET
    **Subnet** = sqlsubnet(10.1.1.0/24)
    **Public IP address** automatically generated.
    **Network security group** = None
    **Monitoring Diagnostics** = Enabled
    **Diagnostics storage account** = Use an automatically generated storage account
    **Availability set** = sqlAvailabilitySet
    |**Storage**: Use managed disks.
    **Virtual network** = autoHAVNET
    **Subnet** = sqlsubnet(10.1.1.0/24)
    **Public IP address** automatically generated.
    **Network security group** = None
    **Monitoring Diagnostics** = Enabled
    **Diagnostics storage account** = Use an automatically generated storage account
    **Availability set** = sqlAvailabilitySet
    |**Storage**: Use managed disks.
    **Virtual network** = autoHAVNET
    **Subnet** = sqlsubnet(10.1.1.0/24)
    **Public IP address** automatically generated.
    **Network security group** = None
    **Monitoring Diagnostics** = Enabled
    **Diagnostics storage account** = Use an automatically generated storage account
    **Availability set** = sqlAvailabilitySet
    | -| Virtual machine configuration **SQL Server settings** |Not applicable |**SQL connectivity** = Private (within Virtual Network)
    **Port** = 1433
    **SQL Authentication** = Disable
    **Storage configuration** = General
    **Automated patching** = Sunday at 2:00
    **Automated backup** = Disabled
    **Azure Key Vault integration** = Disabled |**SQL connectivity** = Private (within Virtual Network)
    **Port** = 1433
    **SQL Authentication** = Disable
    **Storage configuration** = General
    **Automated patching** = Sunday at 2:00
    **Automated backup** = Disabled
    **Azure Key Vault integration** = Disabled | - -
    - -> [!NOTE] -> The machine sizes suggested here are meant for testing availability groups in Azure Virtual Machines. For the best performance on production workloads, see the recommendations for SQL Server machine sizes and configuration in [Performance best practices for SQL Server in Azure Virtual Machines](./performance-guidelines-best-practices-checklist.md?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). -> - -After the three VMs are fully provisioned, you need to join them to the **corp.contoso.com** domain and grant CORP\Install administrative rights to the machines. - -### Join the servers to the domain - -You're now able to join the VMs to **corp.contoso.com**. Do the following steps for both the SQL Server VMs and the file share witness server: - -1. Remotely connect to the virtual machine with **BUILTIN\DomainAdmin**. -2. In **Server Manager**, select **Local Server**. -3. Select the **WORKGROUP** link. -4. In the **Computer Name** section, select **Change**. -5. Select the **Domain** check box and type **corp.contoso.com** in the text box. Select **OK**. -6. In the **Windows Security** popup dialog, specify the credentials for the default domain administrator account (**CORP\DomainAdmin**) and the password (**Contoso!0000**). -7. When you see the "Welcome to the corp.contoso.com domain" message, select **OK**. -8. Select **Close**, and then select **Restart Now** in the popup dialog. - -## Add accounts - -Add the installation account as an administrator on each VM, grant permission to the installation account and local accounts within SQL Server, and update the SQL Server service account. - -### Add the Corp\Install user as an administrator on each cluster VM - -After each virtual machine restarts as a member of the domain, add **CORP\Install** as a member of the local administrators group. - -1. 
    Wait until the VM is restarted, then launch the RDP file again from the primary domain controller to sign in to **sqlserver-0** by using the **CORP\DomainAdmin** account. - - >[!TIP] - >Make sure that you sign in with the domain administrator account. In the previous steps, you were using the BUILTIN administrator account. Now that the server is in the domain, use the domain account. In your RDP session, specify *DOMAIN*\\*username*. - > - -2. In **Server Manager**, select **Tools**, and then select **Computer Management**. -3. In the **Computer Management** window, expand **Local Users and Groups**, and then select **Groups**. -4. Double-click the **Administrators** group. -5. In the **Administrators Properties** dialog, select the **Add** button. -6. Enter the user **CORP\Install**, and then select **OK**. -7. Select **OK** to close the **Administrator Properties** dialog. -8. Repeat the previous steps on **sqlserver-1** and **cluster-fsw**. - - -### Create a sign-in on each SQL Server VM for the installation account - -Use the installation account (CORP\install) to configure the availability group. This account needs to be a member of the **sysadmin** fixed server role on each SQL Server VM. The following steps create a sign-in for the installation account: - -1. Connect to the server through the Remote Desktop Protocol (RDP) by using the *\\DomainAdmin* account. - -1. Open SQL Server Management Studio and connect to the local instance of SQL Server. - -1. In **Object Explorer**, select **Security**. - -1. Right-click **Logins**. Select **New Login**. - -1. In **Login - New**, select **Search**. - -1. Select **Locations**. - -1. Enter the domain administrator network credentials. - -1. Use the installation account (CORP\install). - -1. Set the sign-in to be a member of the **sysadmin** fixed server role. - -1. Select **OK**. - -Repeat the preceding steps on the other SQL Server VM. 
- -### Configure system account permissions - -To create an account for the system account and grant appropriate permissions, complete the following steps on each SQL Server instance: - -1. Create an account for `[NT AUTHORITY\SYSTEM]` on each SQL Server instance. The following script creates this account: - - ```sql - USE [master] - GO - CREATE LOGIN [NT AUTHORITY\SYSTEM] FROM WINDOWS WITH DEFAULT_DATABASE=[master] - GO - ``` - -1. Grant the following permissions to `[NT AUTHORITY\SYSTEM]` on each SQL Server instance: - - - `ALTER ANY AVAILABILITY GROUP` - - `CONNECT SQL` - - `VIEW SERVER STATE` - - The following script grants these permissions: - - ```sql - GRANT ALTER ANY AVAILABILITY GROUP TO [NT AUTHORITY\SYSTEM] - GO - GRANT CONNECT SQL TO [NT AUTHORITY\SYSTEM] - GO - GRANT VIEW SERVER STATE TO [NT AUTHORITY\SYSTEM] - GO - ``` - -### Set the SQL Server service accounts - -On each SQL Server VM, set the SQL Server service account. Use the accounts that you created when you configured the domain accounts. - -1. Open **SQL Server Configuration Manager**. -2. Right-click the SQL Server service, and then select **Properties**. -3. Set the account and password. -4. Repeat these steps on the other SQL Server VM. - -For SQL Server availability groups, each SQL Server VM needs to run as a domain account. - -## Add Failover Clustering features to both SQL Server VMs - -To add Failover Clustering features, do the following steps on both SQL Server VMs: - -1. Connect to the SQL Server virtual machine through the Remote Desktop Protocol (RDP) by using the *CORP\install* account. Open **Server Manager Dashboard**. -2. Select the **Add roles and features** link on the dashboard. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/22-add-features.png" alt-text="Select the Add roles and features link on the dashboard."::: - -3. Select **Next** until you get to the **Server Features** section. -4. 
In **Features**, select **Failover Clustering**. -5. Add any additional required features. -6. Select **Install** to add the features. - -Repeat the steps on the other SQL Server VM. - - >[!NOTE] - > This step, along with actually joining the SQL Server VMs to the failover cluster, can now be automated with [Azure SQL VM CLI](./availability-group-az-commandline-configure.md) and [Azure Quickstart Templates](availability-group-quickstart-template-configure.md). - > - -### Tuning Failover Cluster Network Thresholds - -When running Windows Failover Cluster nodes in Azure VMs with SQL Server availability groups, change the cluster setting to a more relaxed monitoring state. This will make the cluster much more stable and reliable. For details on this, see [IaaS with SQL Server - Tuning Failover Cluster Network Thresholds](/windows-server/troubleshoot/iaas-sql-failover-cluster). - - -## Configure the firewall on each SQL Server VM - -The solution requires the following TCP ports to be open in the firewall: - -- **SQL Server VM**: Port 1433 for a default instance of SQL Server. -- **Azure load balancer probe:** Any available port. Examples frequently use 59999. -- **Database mirroring endpoint:** Any available port. Examples frequently use 5022. - -The firewall ports need to be open on both SQL Server VMs. - -The method of opening the ports depends on the firewall solution that you use. The next section explains how to open the ports in Windows Firewall. Open the required ports on each of your SQL Server VMs. - -### Open a TCP port in the firewall - -1. On the first SQL Server **Start** screen, launch **Windows Firewall with Advanced Security**. -2. On the left pane, select **Inbound Rules**. On the right pane, select **New Rule**. -3. For **Rule Type**, choose **Port**. -4. For the port, specify **TCP** and type the appropriate port numbers. 
See the following example: - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/35-tcpports.png" alt-text="SQL firewall"::: - -5. Select **Next**. -6. On the **Action** page, keep **Allow the connection** selected, and then select **Next**. -7. On the **Profile** page, accept the default settings, and then select **Next**. -8. On the **Name** page, specify a rule name (such as **Azure LB Probe**) in the **Name** text box, and then select **Finish**. - -Repeat these steps on the second SQL Server VM. - - -## Next steps - -Now that you've configured the prerequisites, get started with [configuring your availability group](availability-group-manually-configure-tutorial-single-subnet.md) - -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Always On availability groups with SQL Server on Azure VMs](availability-group-overview.md) -- [Always On availability groups overview](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server) -- [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md) \ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/windows/availability-group-manually-configure-tutorial-multi-subnet.md b/articles/azure-sql/virtual-machines/windows/availability-group-manually-configure-tutorial-multi-subnet.md deleted file mode 100644 index 781f9da886cb2..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/availability-group-manually-configure-tutorial-multi-subnet.md +++ /dev/null @@ -1,388 +0,0 @@ ---- -title: "Tutorial: Configure availability group in multiple subnets" -description: "This tutorial shows how to create an Always On availability group within multiple subnets for SQL Server on Azure Virtual Machines (VMs). 
" -services: virtual-machines -documentationCenter: na -author: rajeshsetlem -editor: monicar -tags: azure-service-management - -ms.assetid: 08a00342-fee2-4afe-8824-0db1ed4b8fca -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.topic: tutorial -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.reviewer: mathoma ---- - -# Tutorial: Configure an availability group in multiple subnets (SQL Server on Azure VMs) -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!TIP] -> Eliminate the need for an Azure Load Balancer for your Always On availability (AG) group by creating your SQL Server VMs in multiple subnets within the same Azure virtual network. - -This tutorial shows how to create an Always On availability group for SQL Server on Azure Virtual Machines (VMs) within multiple subnets. The complete tutorial creates a Windows Server Failover Cluster, and an availability group with a two SQL Server replicas and a listener. - - -**Time estimate**: Assuming your [prerequisites](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md) are complete, this tutorial should take about 30 minutes to complete. - - -## Prerequisites - -The following table lists the [prerequisites](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md) that you need to complete before starting this tutorial: - -| Requirement | Description | -|----- |----- | -|:::image type="icon" source="./media/availability-group-manually-configure-tutorial-multi-subnet/square.png" border="false"::: **Two SQL Server instances** | - Each VM in two different Azure availability zones or the same availability set
    - In separate subnets within an Azure Virtual Network
    - With two secondary IPs assigned to each VM
    - In a single domain
    | -|:::image type="icon" source="./media/availability-group-manually-configure-tutorial-multi-subnet/square.png" border="false"::: **SQL Server service account** | A domain account used by the SQL Server service for each machine | -|:::image type="icon" source="./media/availability-group-manually-configure-tutorial-multi-subnet/square.png" border="false"::: **Open firewall ports** | - SQL Server: **1433** for default instance
    - Database mirroring endpoint: **5022** or any available port
    | -|:::image type="icon" source="./media/availability-group-manually-configure-tutorial-multi-subnet/square.png" border="false"::: **Domain installation account** | - Local administrator on each SQL Server
    - Member of SQL Server sysadmin fixed server role for each instance of SQL Server | - -The tutorial assumes you have a basic understanding of [SQL Server Always On availability groups](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server). - - - -## Create the cluster - -The Always On availability group lives on top of the Windows Server Failover Cluster infrastructure, so before deploying your availability group, you must first configure the Windows Server Failover Cluster, which includes adding the feature, creating the cluster, and setting the cluster IP address. - -### Add failover cluster feature - -Add the failover cluster feature to both SQL Server VMs. To do so, follow these steps: - -1. Connect to the SQL Server virtual machine through the Remote Desktop Protocol (RDP) using a domain account that has permissions to create objects in AD, such as the **CORP\Install** domain account created in the [prerequisites article](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md). -1. Open **Server Manager Dashboard**. -1. Select the **Add roles and features** link on the dashboard. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/09-add-features.png" alt-text="Select the Add roles and features link on the dashboard."::: - -1. Select **Next** until you get to the **Server Features** section. -1. In **Features**, select **Failover Clustering**. -1. Add any additional required features. -1. Select **Install** to add the features. -1. Repeat the steps on the other SQL Server VM. - - -### Create cluster - -After the cluster feature has been added to each SQL Server VM, you are ready to create the Windows Server Failover Cluster. - -To create the cluster, follow these steps: - -1. 
Use Remote Desktop Protocol (RDP) to connect to the first SQL Server VM (such as **SQL-VM-1**) using a domain account that has permissions to create objects in AD, such as the **CORP\Install** domain account created in the [prerequisites article](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md). -2. In the **Server Manager** dashboard, select **Tools**, and then select **Failover Cluster Manager**. -3. In the left pane, right-click **Failover Cluster Manager**, and then select **Create a Cluster**. - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/01-create-cluster.png" alt-text="Create Cluster"::: - -4. In the **Create Cluster Wizard**, create a two-node cluster by stepping through the pages using the settings provided in the following table: - - | Page | Settings | - | --- | --- | - | Before You Begin |Use defaults. | - | Select Servers |Type the first SQL Server name (such as **SQL-VM-1**) in **Enter server name** and select **Add**.
    Type the second SQL Server name (such as **SQL-VM-2**) in **Enter server name** and select **Add**. | - | Validation Warning |Select **Yes. When I click Next, run configuration validation tests, and then return to the process of creating the cluster**. | - | Before you Begin | Select Next. | - | Testing Options | Choose **Run only the tests I select**. | - | Test Selection | Uncheck Storage. Ensure **Inventory**, **Network** and **System Configuration** are selected. - | Confirmation | Select Next.
    Wait for the validation to complete.
    Select **View Report** to review the report. You can safely ignore the warning regarding VMs being reachable on only one network interface. Azure infrastructure has physical redundancy and therefore it is not required to add additional network interfaces.
    Select **Finish**.| - | Access Point for Administering the Cluster |Type a cluster name, for example **SQLAGCluster1** in **Cluster Name**.| - | Confirmation | Uncheck **Add all eligible storage to the cluster** and select **Next**. | - | Summary | Select **Finish**. | - - >[!WARNING] - >If you do not uncheck **Add all eligible storage to the cluster**, Windows detaches the virtual disks during the clustering process. As a result, they don't appear in Disk Manager or Explorer until the storage is removed from the cluster and reattached using PowerShell. - > - - -### Set the failover cluster IP address - -Typically, the IP address assigned to the cluster is the same IP address assigned to the VM, which means that in Azure, the cluster IP address will be in a failed state, and cannot be brought online. Change the cluster IP address to bring the IP resource online. - -During the prerequisites, you should have [assigned secondary IP addresses](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md#add-secondary-ips-to-sql-server-vms) to each SQL Server VM, as the example table below (your specific IP addresses may vary): - - | VM Name | Subnet name | Subnet address range | Secondary IP name | Secondary IP address | - | --- | --- | --- | --- | --- | - | SQL-VM-1 | SQL-subnet-1 | 10.38.1.0/24 | windows-cluster-ip | 10.38.1.10 | - | SQL-VM-2 | SQL-subnet-2 | 10.38.2.0/24 | windows-cluster-ip | 10.38.2.10 - -Assign these IP addresses as the cluster IP addresses for each relevant subnet. - -> [!NOTE] -> On Windows Server 2019, the cluster creates a **Distributed Server Name** instead of the **Cluster Network Name**, and the cluster name object (CNO) is automatically registered with the IP addresses for all of the nodes in the cluster, eliminating the need for a dedicated windows cluster IP address. 
 If you're on Windows Server 2019, either skip this section and any other steps that refer to the **Cluster Core Resources**, or create a virtual network name (VNN)-based cluster using [PowerShell](failover-cluster-instance-storage-spaces-direct-manually-configure.md#create-windows-failover-cluster). See the blog [Failover Cluster: Cluster Network Object](https://blogs.windows.com/windowsexperience/2018/08/14/announcing-windows-server-2019-insider-preview-build-17733/#W0YAxO8BfwBRbkzG.97) for more information. - - -To change the cluster IP address, follow these steps: - -1. In **Failover Cluster Manager**, scroll down to **Cluster Core Resources** and expand the cluster details. You should see the **Name** and two **IP Address** resources from each subnet in the **Failed** state. -1. Right-click the first failed **IP Address** resource, and then select **Properties**. - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/02-failed-ip-address.png" alt-text="Cluster Properties"::: - -1. Select **Static IP Address** and update the IP address to the [dedicated windows cluster IP address](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md#add-secondary-ips-to-sql-server-vms) in the subnet you assigned to the first SQL Server VM (such as **SQL-VM-1**). Select **OK**. - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/03-first-static-ip-address.png" alt-text=" Select **Static IP Address** and update the IP address to the dedicated windows cluster IP address in the same subnet you assigned to the SQL Server VM in the prerequisites article"::: - -1. Repeat the steps for the second failed **IP Address** resource, using the [dedicated windows cluster IP address](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md#add-secondary-ips-to-sql-server-vms) for the subnet of the second SQL Server VM (such as **SQL-VM-2**). 
- - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/04-second-static-ip-address.png" alt-text="Repeat the steps for the second failed **IP Address** resource, using the dedicated windows cluster IP address for the subnet of the other SQL Server VM."::: - -1. In the **Cluster Core Resources** section, right-click cluster name and select **Bring Online**. Wait until the name and one of the IP address resource are online. - -Since the SQL Server VMs are in different subnets the cluster will have an OR dependency on the two dedicated windows cluster IP addresses. When the cluster name resource comes online, it updates the domain controller (DC) server with a new Active Directory (AD) computer account. If the cluster core resources move nodes, one IP address goes offline, while the other comes online, updating the DC server with the new IP address association. - ->[!TIP] -> When running the cluster on Azure VMs in a production environment, change the cluster settings to a more relaxed monitoring state to improve cluster stability and reliability in a cloud environment. To learn more, see [SQL Server VM - HADR configuration best practices](hadr-cluster-best-practices.md#checklist). - -## Configure quorum - -On a two node cluster, a quorum device is necessary for cluster reliability and stability. On Azure VMs, the cloud witness is the recommended quorum configuration, though there are [other options available](hadr-cluster-quorum-configure-how-to.md). The steps in this section configure a cloud witness for quorum. Identify the access keys to the storage account and then configure the cloud witness. - -## Get access keys for storage account - -When you create a Microsoft Azure Storage Account, it is associated with two Access Keys that are automatically generated - primary access key and secondary access key. 
Use the primary access key the first time you create the cloud witness, but subsequently there are no restrictions on which key to use for the cloud witness. - -Use the Azure portal to view and copy storage access keys for the Azure Storage Account created in the [prerequisites article](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md#create-azure-storage-account). - - -To view and copy the storage access keys, follow these steps: - -1. Go to your resource group in the [Azure portal](https://portal.azure.com) and select the storage account you created. -1. Select **Access Keys** under **Security + networking**. -1. Select **Show Keys** and copy the key. - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/05-storage-account-keys.png" alt-text="Select **Show Keys** and copy the key"::: - -### Configure cloud witness - -After you have the access key copied, create the cloud witness for the cluster quorum. - -To create the cloud witness, follow these steps: - -1. Connect to the first SQL Server VM **SQL-VM-1** with remote desktop. -1. Open **Windows PowerShell** in Administrator mode. -1. Run the PowerShell script to set TLS (Transport Layer Security) value for the connection to 1.2: - - ```powershell - [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 - ``` - -1. Use PowerShell to configure the cloud witness. Replace the values for storage account name and access key with your specific information: - - ```powershell - Set-ClusterQuorum -CloudWitness -AccountName "Storage_Account_Name" -AccessKey "Storage_Account_Access_Key" - ``` - -1. The following example output indicates success: - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/06-configure-quorum.png" alt-text="Your output should display the Cloud Witness as this example"::: - -The cluster core resources are configured with a cloud witness. 
- -## Enable AG feature - -The Always On availability group feature is disabled by default. Use the **SQL Server Configuration Manager** to enable the feature on both SQL Server instances. - -To enable the availability group feature, follow these steps: - -1. Launch the RDP file to the first SQL Server VM (such as **SQL-VM-1**) with a domain account that is a member of the sysadmin fixed server role, such as the **CORP\Install** domain account created in the [prerequisites document](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md). -1. From the **Start** screen of one of your SQL Server VMs, launch **SQL Server Configuration Manager**. -1. In the browser tree, highlight **SQL Server Services**, right-click the **SQL Server (MSSQLSERVER)** service and select **Properties**. -1. Select the **AlwaysOn High Availability** tab, then check the box to **Enable AlwaysOn availability groups**: - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/08-enable-always-on.png" alt-text="Enable AlwaysOn availability groups"::: - -1. Select **Apply**. Select **OK** in the pop-up dialog. -1. Restart the SQL Server service. -1. Repeat these steps for the other SQL Server instance. - - -## Create database - -For your database, you can either follow the steps in this section to create a new database, or restore an [AdventureWorks database](/sql/samples/sql-samples-where-are). You also need to back up the database to initialize the log chain. Databases that have not been backed up do not meet the prerequisites for an availability group. - -To create a database, follow these steps: - -1. Launch the RDP file to the first SQL Server VM (such as **SQL-VM-1**) with a domain account that is a member of the sysadmin fixed server role, such as the **CORP\Install** domain account created in the [prerequisites document](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md). -1. 
Open **SQL Server Management Studio** and connect to the SQL Server instance. -1. In **Object Explorer**, right-click **Databases** and select **New Database**. -1. In **Database name**, type **MyDB1**. -1. Select the **Options** page, and choose **Full** from the **Recovery model** drop-down, if it's not full by default. The database must be in full recovery mode to meet the prerequisites of participating in an availability group. -1. Select **OK** to close the **New Database** page and create your new database. - - -To back up the database, follow these steps: - -1. In **Object Explorer**, right-click the database, highlight **Tasks**, and then select **Back Up...**. - -1. Select **OK** to take a full backup of the database to the default backup location. - -## Create file share - -Create a backup file share that both SQL Server VMs and their service accounts have access to. - -To create the backup file share, follow these steps: - - -1. On the first SQL Server VM in **Server Manager**, select **Tools**. Open **Computer Management**. - -2. Select **Shared Folders**. - -3. Right-click **Shares**, and select **New Share...** and then use the **Create a Shared Folder Wizard** to create a share. - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/09-new-share.png" alt-text="Select New Share"::: - -4. For **Folder Path**, select **Browse** and locate or create a path for the database backup shared folder, such as `C:\Backup`. Select **Next**. - -5. In **Name, Description, and Settings** verify the share name and path. Select **Next**. - -6. On **Shared Folder Permissions** set **Customize permissions**. Select **Custom...**. - -7. On **Customize Permissions**, select **Add...**. - -8. 
Check **Full Control** to grant full access to the share for *both* SQL Server service accounts (`Corp\SQLSvc1` and `Corp\SQLSvc2`): - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/10-backup-share-permission.png" alt-text="Make sure that the SQL Server service accounts for both servers have full control."::: - -9. Select **OK**. - -10. In **Shared Folder Permissions**, select **Finish**. Select **Finish** again. - -## Create availability group - -After your database has been backed up, you are ready to create your availability group, which automatically takes a full backup and transaction log backup from the primary SQL Server replica and restores it on the secondary SQL Server instance with the **NORECOVERY** option. - -To create your availability group, follow these steps. - -1. In **Object Explorer** in SQL Server Management Studio (SSMS) on the first SQL Server VM (such as **SQL-VM-1**), right-click **Always On High Availability** and select **New Availability Group Wizard**. - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/11-new-ag-wizard.png" alt-text="Launch New availability group Wizard"::: - -1. On the **Introduction** page, select **Next**. In the **Specify availability group Name** page, type a name for the availability group in **Availability group name**, such as **AG1**. Select **Next**. - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/12-new-ag-name.png" alt-text="New availability group Wizard, Specify availability group Name"::: - -1. On the **Select Databases** page, select your database, and then select **Next**. 
If your database does not meet the prerequisites, make sure it's in full recovery mode, and [take a backup](#create-database): - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/13-new-ag-select-database.png" alt-text="New availability group Wizard, Select Databases"::: - -1. On the **Specify Replicas** page, select **Add Replica**. - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/14-new-ag-add-replica.png" alt-text="New availability group Wizard, Specify Replicas"::: - -1. The **Connect to Server** dialog pops up. Type the name of the second server in **Server name**, such as **SQL-VM-2**. Select **Connect**. -1. On the **Specify Replicas** page, check the boxes for **Automatic Failover** and choose **Synchronous commit** for the availability mode from the drop-down: - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/15-new-ag-replica.png" alt-text=" On the **Specify Replicas** page, check the boxes for Automatic Failover and choose Synchronous commit for the availability mode"::: - -1. Select the **Endpoints** tab to confirm the ports used for the database mirroring endpoint are those you [opened in the firewall](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md#configure-the-firewall): - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/16-endpoint.png" alt-text="New availability group Wizard, Select Initial Data Synchronization"::: - -1. Select the **Listener** tab and choose to **Create an availability group listener** using the following values for the listener: - - |Field | Value | - | --- | --- | - | Listener DNS Name: | AG1-Listener | - | Port | Use the default SQL Server port. 1433 | - | Network Mode: | Static IP | - -1. 
Select **Add** to provide the secondary dedicated IP address for the listener for both SQL Server VMs. - - The following table shows the example IP addresses created for the listener from the [prerequisites document](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md) (though your specific IP addresses may vary): - - | VM Name | Subnet name | Subnet address range | Secondary IP name | Secondary IP address | - | --- | --- | --- | --- | --- | - | SQL-VM-1 | SQL-subnet-1 | 10.38.1.0/24 | availability-group-listener | 10.38.1.11 | - | SQL-VM-2 | SQL-subnet-2 | 10.38.2.0/24 | availability-group-listener | 10.38.2.11 | - -1. Choose the first subnet (such as 10.38.1.0/24) from the drop-down on the **Add IP address** dialog box and then provide the secondary dedicated listener **IPv4 address**, such as `10.38.1.11`. Select **OK**. - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/18-add-listener-ip-subnet-1.png" alt-text="Choose the first subnet (such as 10.38.1.0/24) from the drop-down on the Add IP address dialog box, and then provide the secondary dedicated listener IPv4 address, such as 10.38.1.11"::: - -1. Repeat this step, but choose the other subnet from the drop-down (such as 10.38.2.0/24), and provide the secondary dedicated listener **IPv4 address** from the other SQL Server VM, such as `10.38.2.11`. Select **OK**. - - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/19-add-listener-ip-subnet-2.png" alt-text="Add Listener IP"::: - -1. After reviewing the values on the **Listener** page, select **Next**: - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/20-listener.png" alt-text="After reviewing the values on the Listener page, select Next:"::: - -1. 
On the **Select Initial Data Synchronization** page, choose **Full database and log backup** and provide the [network share location you created previously](#create-file-share), such as `\\SQL-VM-1\Backup`. - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/21-full-data-sync.png" alt-text="Choose full data synchronization"::: - - > [!NOTE] - > Full synchronization takes a full backup of the database on the first instance of SQL Server and restores it to the second instance. For large databases, full synchronization is not recommended because it may take a long time. You can reduce this time by manually taking a backup of the database and restoring it with `NO RECOVERY`. If the database is already restored with `NO RECOVERY` on the second SQL Server before configuring the availability group, choose **Join only**. If you want to take the backup after configuring the availability group, choose **Skip initial data synchronization**. - - -1. On the **Validation** page, confirm that all validation checks have passed, and then choose **Next**: - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/22-validation.png" alt-text="New availability group Wizard, Validation"::: - - -1. On the **Summary** page, select **Finish** and wait for the wizard to configure your new availability group. Choose **More details** on the **Progress** page to view the detailed progress. When you see that the **wizard completed successfully** on the **Results** page, inspect the summary to verify the availability group and listener were created successfully. - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/23-results.png" alt-text="New availability group Wizard, Results"::: - -1. Select **Close** to exit the wizard. 
- -## Check availability group - -You can check the health of the availability group by using **SQL Server Management Studio**, and the **Failover Cluster Manager**. - -To check the status of the availability group, follow these steps: - -1. In **Object Explorer**, expand **Always On High Availability**, and then expand **availability groups**. You should now see the new availability group in this container. Right-click the availability group and select **Show Dashboard**. - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/24-show-dashboard.png" alt-text="Show availability group Dashboard"::: - - The availability group dashboard shows the replica, the failover mode of each replica, and the synchronization state, such as the following example: - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/25-ag-dashboard.png" alt-text="availability group Dashboard"::: - - -2. Open the **Failover Cluster Manager**, select your cluster, and choose **Roles** to view the availability group role you created within the cluster. Choose the role **AG1** and select the **Resources** tab to view the listener and the associated IP addresses, such as the following example: - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/26-cluster-manager.png" alt-text="availability group in Failover Cluster Manager"::: - -At this point, you have an availability group with replicas on two instances of SQL Server and a corresponding availability group listener as well. You can connect using the listener and you can move the availability group between instances using **SQL Server Management Studio**. - -> [!WARNING] -> Do not try to fail over the availability group by using the Failover Cluster Manager. 
All failover operations should be performed from within **SQL Server Management Studio**, such as by using the **Always On Dashboard** or Transact-SQL (T-SQL). For more information, see [Restrictions for using the Failover Cluster Manager with availability groups](/sql/database-engine/availability-groups/windows/failover-clustering-and-always-on-availability-groups-sql-server). - - - -## Test listener connection - -After your availability group is ready, and your listener has been configured with the appropriate secondary IP addresses, test the connection to the listener. - -To test the connection, follow these steps: - -1. Use RDP to connect to a SQL Server that is in the same virtual network, but does not own the replica, such as the other SQL Server instance within the cluster, or any other VM with **SQL Server Management Studio** installed to it. - -2. Open **SQL Server Management Studio**, and in the **Connect to Server** dialog box type the name of the listener (such as **AG1-Listener**) in **Server name:**, and then select **Options**: - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/27-ssms-listener-connect.png" alt-text="Open SQL Server Management Studio and in Server name: type the name of the listener, such as AG1-Listener"::: - -3. Enter `MultiSubnetFailover=True` in the **Additional Connection Parameters** window and then choose **Connect** to automatically connect to whichever instance is hosting the primary SQL Server replica: - - :::image type="content" source="./media/availability-group-manually-configure-tutorial-multi-subnet/28-ssms-connection-parameters.png" alt-text="SSMS connection"::: - -> [!NOTE] -> - While connecting to availability group on different subnets, setting `MultiSubnetFailover=true` provides faster detection of and connection to the current primary replica. 
See [Connecting with MultiSubnetFailover](/dotnet/framework/data/adonet/sql/sqlclient-support-for-high-availability-disaster-recovery#connecting-with-multisubnetfailover) -> - Setting `MultiSubnetFailover=True` isn't required with .NET Framework 4.6.1 or later versions. - -## Next steps - - -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Always On availability groups with SQL Server on Azure VMs](availability-group-overview.md) -- [Always On availability groups overview](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server) -- [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md) \ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/windows/availability-group-manually-configure-tutorial-single-subnet.md b/articles/azure-sql/virtual-machines/windows/availability-group-manually-configure-tutorial-single-subnet.md deleted file mode 100644 index 594a81594d118..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/availability-group-manually-configure-tutorial-single-subnet.md +++ /dev/null @@ -1,557 +0,0 @@ ---- -title: "Tutorial: Configure a SQL Server Always On availability group" -description: "This tutorial shows how to create a SQL Server Always On availability group on Azure Virtual Machines." 
-services: virtual-machines -documentationCenter: na -author: rajeshsetlem -editor: monicar -tags: azure-service-management - -ms.assetid: 08a00342-fee2-4afe-8824-0db1ed4b8fca -ms.service: virtual-machines-sql -ms.subservice: hadr - - -ms.topic: tutorial -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.custom: "seo-lt-2019" -ms.reviewer: mathoma ---- - -# Tutorial: Manually configure an availability group (SQL Server on Azure VMs) -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!TIP] -> Eliminate the need for an Azure Load Balancer for your Always On availability (AG) group by creating your SQL Server VMs in [multiple subnets](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md) within the same Azure virtual network. - -This tutorial shows how to create an Always On availability group for SQL Server on Azure Virtual Machines (VMs) within a single subnet. The complete tutorial creates an availability group with a database replica on two SQL Servers. - -While this article manually configures the availability group environment, it is also possible to do so using the [Azure portal](availability-group-azure-portal-configure.md), [PowerShell or the Azure CLI](availability-group-az-commandline-configure.md), or [Azure Quickstart templates](availability-group-quickstart-template-configure.md) as well. - - -**Time estimate**: Takes about 30 minutes to complete once the [prerequisites](availability-group-manually-configure-prerequisites-tutorial-single-subnet.md) are met. - - -## Prerequisites - -The tutorial assumes you have a basic understanding of SQL Server Always On availability groups. If you need more information, see [Overview of Always On availability groups (SQL Server)](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server). 
- -Before you begin the tutorial, you need to [Complete prerequisites for creating Always On availability groups in Azure Virtual Machines](availability-group-manually-configure-prerequisites-tutorial-single-subnet.md). If these prerequisites are completed already, you can jump to [Create Cluster](#CreateCluster). - -The following table lists the prerequisites that you need to complete before starting this tutorial: - -| Requirement |Description | -|----- |----- | -|:::image type="icon" source="./media/availability-group-manually-configure-tutorial-single-subnet/square.png" border="false"::: **Two SQL Server instances** | - In an Azure availability set&nbsp;
    - In a single domain
    - With Failover Clustering feature installed | -|:::image type="icon" source="./media/availability-group-manually-configure-tutorial-single-subnet/square.png" border="false"::: **Windows Server** | File share for cluster witness | -|:::image type="icon" source="./media/availability-group-manually-configure-tutorial-single-subnet/square.png" border="false"::: **SQL Server service account** | Domain account | -|:::image type="icon" source="./media/availability-group-manually-configure-tutorial-single-subnet/square.png" border="false"::: **SQL Server Agent service account** | Domain account | -|:::image type="icon" source="./media/availability-group-manually-configure-tutorial-single-subnet/square.png" border="false"::: **Firewall ports open** | - SQL Server: **1433** for default instance
    - Database mirroring endpoint: **5022** or any available port
    - Availability group load balancer IP address health probe: **59999** or any available port
    - Cluster core load balancer IP address health probe: **58888** or any available port | -|:::image type="icon" source="./media/availability-group-manually-configure-tutorial-single-subnet/square.png" border="false"::: **Add Failover Clustering Feature** | Both SQL Server instances require this feature | -|:::image type="icon" source="./media/availability-group-manually-configure-tutorial-single-subnet/square.png" border="false"::: **Installation domain account** | - Local administrator on each SQL Server
    - Member of SQL Server sysadmin fixed server role for each instance of SQL Server | - ->[!NOTE] -> Many of the steps provided in this tutorial can now be automated with the [Azure portal](availability-group-azure-portal-configure.md), [PowerShell and the Az CLI](./availability-group-az-commandline-configure.md) and [Azure Quickstart Templates](availability-group-quickstart-template-configure.md). - - - - - - -## Create the cluster - -After the prerequisites are completed, the first step is to create a Windows Server Failover Cluster that includes two SQL Servers and a witness server. - -1. Use Remote Desktop Protocol (RDP) to connect to the first SQL Server. Use a domain account that is an administrator on both SQL Servers and the witness server. - - >[!TIP] - >If you followed the [prerequisites document](availability-group-manually-configure-prerequisites-tutorial-single-subnet.md), you created an account called **CORP\Install**. Use this account. - -2. In the **Server Manager** dashboard, select **Tools**, and then select **Failover Cluster Manager**. -3. In the left pane, right-click **Failover Cluster Manager**, and then select **Create a Cluster**. - - ![Create Cluster](./media/availability-group-manually-configure-tutorial-single-subnet/40-createcluster.png) - -4. In the Create Cluster Wizard, create a one-node cluster by stepping through the pages with the settings in the following table: - - | Page | Settings | - | --- | --- | - | Before You Begin |Use defaults | - | Select Servers |Type the first SQL Server name in **Enter server name** and select **Add**. | - | Validation Warning |Select **No. I do not require support from Microsoft for this cluster, and therefore do not want to run the validation tests. When I select Next, continue Creating the cluster**. | - | Access Point for Administering the Cluster |Type a cluster name, for example **SQLAGCluster1** in **Cluster Name**.| - | Confirmation |Use defaults unless you are using Storage Spaces. 
See the note following this table. | - -### Set the Windows server failover cluster IP address - - > [!NOTE] - > On Windows Server 2019, the cluster creates a **Distributed Server Name** instead of the **Cluster Network Name**. If you're using Windows Server 2019, skip any steps that refer to the cluster core name in this tutorial. You can create a cluster network name using [PowerShell](failover-cluster-instance-storage-spaces-direct-manually-configure.md#create-windows-failover-cluster). Review the blog [Failover Cluster: Cluster Network Object](https://blogs.windows.com/windowsexperience/2018/08/14/announcing-windows-server-2019-insider-preview-build-17733/#W0YAxO8BfwBRbkzG.97) for more information. - -1. In **Failover Cluster Manager**, scroll down to **Cluster Core Resources** and expand the cluster details. You should see both the **Name** and the **IP Address** resources in the **Failed** state. The IP address resource cannot be brought online because the cluster is assigned the same IP address as the machine itself, therefore it is a duplicate address. - -2. Right-click the failed **IP Address** resource, and then select **Properties**. - - ![Cluster Properties](./media/availability-group-manually-configure-tutorial-single-subnet/42_IPProperties.png) - -3. Select **Static IP Address** and specify an available address from the same subnet as your virtual machines. - -4. In the **Cluster Core Resources** section, right-click cluster name and select **Bring Online**. Wait until both resources are online. When the cluster name resource comes online, it updates the domain controller (DC) server with a new Active Directory (AD) computer account. Use this AD account to run the availability group clustered service later. - -### Add the other SQL Server to cluster - -Add the other SQL Server to the cluster. - -1. In the browser tree, right-click the cluster and select **Add Node**. 
- - ![Add Node to the Cluster](./media/availability-group-manually-configure-tutorial-single-subnet/44-addnode.png) - -1. In the **Add Node Wizard**, select **Next**. In the **Select Servers** page, add the second SQL Server. Type the server name in **Enter server name** and then select **Add**. When you are done, select **Next**. - -1. In the **Validation Warning** page, select **No** (in a production scenario you should perform the validation tests). Then, select **Next**. - -8. In the **Confirmation** page if you are using Storage Spaces, clear the checkbox labeled **Add all eligible storage to the cluster.** - - ![Add Node Confirmation](./media/availability-group-manually-configure-tutorial-single-subnet/46-addnodeconfirmation.png) - - >[!WARNING] - >If you do not uncheck **Add all eligible storage to the cluster**, Windows detaches the virtual disks during the clustering process. As a result, they don't appear in Disk Manager or Explorer until the storage is removed from the cluster and reattached using PowerShell. - > - -1. Select **Next**. - -1. Select **Finish**. - - Failover Cluster Manager shows that your cluster has a new node and lists it in the **Nodes** container. - -10. Log out of the remote desktop session. - -### Add a cluster quorum file share - -In this example, the Windows cluster uses a file share to create a cluster quorum. This tutorial uses a Node and File Share Majority quorum. For more information, see [Configure and Manage Quorum](/windows-server/failover-clustering/manage-cluster-quorum). - -1. Connect to the file share witness member server with a remote desktop session. - -1. On **Server Manager**, select **Tools**. Open **Computer Management**. - -1. Select **Shared Folders**. - -1. Right-click **Shares**, and select **New Share...**. - - ![Right-click shares and select new share](./media/availability-group-manually-configure-tutorial-single-subnet/48-newshare.png) - - Use **Create a Shared Folder Wizard** to create a share. - -1. 
On **Folder Path**, select **Browse** and locate or create a path for the shared folder. Select **Next**. - -1. In **Name, Description, and Settings** verify the share name and path. Select **Next**. - -1. On **Shared Folder Permissions** set **Customize permissions**. Select **Custom...**. - -1. On **Customize Permissions**, select **Add...**. - -1. Make sure that the account used to create the cluster has full control. - - ![Make sure the account used to create the cluster has full control](./media/availability-group-manually-configure-tutorial-single-subnet/50-filesharepermissions.png) - -1. Select **OK**. - -1. In **Shared Folder Permissions**, select **Finish**. Select **Finish** again. - -1. Log out of the server. - -### Configure the cluster quorum - -Next, set the cluster quorum. - - > [!NOTE] - > Depending on the configuration of your availability group it may be necessary to change the quorum vote of a node participating in the Windows Server Failover Cluster. For more information, see [Configure Cluster Quorum for SQL Server on Azure VMs](hadr-cluster-quorum-configure-how-to.md). - > - -1. Connect to the first cluster node with remote desktop. - -1. In **Failover Cluster Manager**, right-click the cluster, point to **More Actions**, and select **Configure Cluster Quorum Settings...**. - - ![Select configure cluster quorum settings](./media/availability-group-manually-configure-tutorial-single-subnet/52-configurequorum.png) - -1. In **Configure Cluster Quorum Wizard**, select **Next**. - -1. In **Select Quorum Configuration Option**, choose **Select the quorum witness**, and select **Next**. - -1. On **Select Quorum Witness**, select **Configure a file share witness**. - - >[!TIP] - >Windows Server 2016 supports a cloud witness. If you choose this type of witness, you do not need a file share witness. For more information, see [Deploy a cloud witness for a Failover Cluster](/windows-server/failover-clustering/deploy-cloud-witness). 
This tutorial uses a file share witness, which is supported by previous operating systems. - > - -1. On **Configure File Share Witness**, type the path for the share you created. Select **Next**. - -1. Verify the settings on **Confirmation**. Select **Next**. - -1. Select **Finish**. - -The cluster core resources are configured with a file share witness. - -## Enable availability groups - -Next, enable the **AlwaysOn availability groups** feature. Do these steps on both SQL Servers. - -1. From the **Start** screen, launch **SQL Server Configuration Manager**. -2. In the browser tree, select **SQL Server Services**, then right-click the **SQL Server (MSSQLSERVER)** service and select **Properties**. -3. Select the **AlwaysOn High Availability** tab, then select **Enable AlwaysOn availability groups**, as follows: - - ![Enable AlwaysOn availability groups](./media/availability-group-manually-configure-tutorial-single-subnet/54-enableAlwaysOn.png) - -4. Select **Apply**. Select **OK** in the pop-up dialog. - -5. Restart the SQL Server service. - -Repeat these steps on the other SQL Server. - - - -## Create a database on the first SQL Server - -1. Launch the RDP file to the first SQL Server with a domain account that is a member of sysadmin fixed server role. -1. Open SQL Server Management Studio and connect to the first SQL Server. -7. In **Object Explorer**, right-click **Databases** and select **New Database**. -8. In **Database name**, type **MyDB1**, then select **OK**. - -### Create a backup share - -1. On the first SQL Server in **Server Manager**, select **Tools**. Open **Computer Management**. - -1. Select **Shared Folders**. - -1. Right-click **Shares**, and select **New Share...**. - - ![Select New Share](./media/availability-group-manually-configure-tutorial-single-subnet/48-newshare.png) - - Use **Create a Shared Folder Wizard** to create a share. - -1. On **Folder Path**, select **Browse** and locate or create a path for the database backup shared folder. 
Select **Next**. - -1. In **Name, Description, and Settings** verify the share name and path. Select **Next**. - -1. On **Shared Folder Permissions** set **Customize permissions**. Select **Custom...**. - -1. On **Customize Permissions**, select **Add...**. - -1. Make sure that the SQL Server and SQL Server Agent service accounts for both servers have full control. - - ![Make sure that the SQL Server and SQL Server Agent service accounts for both servers have full control.](./media/availability-group-manually-configure-tutorial-single-subnet/68-backupsharepermission.png) - -1. Select **OK**. - -1. In **Shared Folder Permissions**, select **Finish**. Select **Finish** again. - -### Take a full backup of the database - -You need to back up the new database to initialize the log chain. If you do not take a backup of the new database, it cannot be included in an availability group. - -1. In **Object Explorer**, right-click the database, point to **Tasks...**, select **Back Up**. - -1. Select **OK** to take a full backup to the default backup location. - -## Create the availability group - -You are now ready to configure an availability group using the following steps: - -* Create a database on the first SQL Server. -* Take both a full backup and a transaction log backup of the database. -* Restore the full and log backups to the second SQL Server with the **NORECOVERY** option. -* Create the availability group (**AG1**) with synchronous commit, automatic failover, and readable secondary replicas. - -### Create the availability group: - -1. On remote desktop session to the first SQL Server. In **Object Explorer** in SSMS, right-click **AlwaysOn High Availability** and select **New availability group Wizard**. - - ![Launch New availability group Wizard](./media/availability-group-manually-configure-tutorial-single-subnet/56-newagwiz.png) - -2. In the **Introduction** page, select **Next**. 
In the **Specify availability group Name** page, type a name for the availability group in **Availability group name**. For example, **AG1**. Select **Next**. - - ![New availability group Wizard, Specify availability group Name](./media/availability-group-manually-configure-tutorial-single-subnet/58-newagname.png) - -3. In the **Select Databases** page, select your database, and then select **Next**. - - >[!NOTE] - >The database meets the prerequisites for an availability group because you have taken at least one full backup on the intended primary replica. - > - - ![New availability group Wizard, Select Databases](./media/availability-group-manually-configure-tutorial-single-subnet/60-newagselectdatabase.png) - -4. In the **Specify Replicas** page, select **Add Replica**. - - ![New availability group Wizard, Specify Replicas](./media/availability-group-manually-configure-tutorial-single-subnet/62-newagaddreplica.png) - -5. The **Connect to Server** dialog pops up. Type the name of the second server in **Server name**. Select **Connect**. - - Back in the **Specify Replicas** page, you should now see the second server listed in **Availability Replicas**. Configure the replicas as follows. - - ![New availability group Wizard, Specify Replicas (Complete)](./media/availability-group-manually-configure-tutorial-single-subnet/64-newagreplica.png) - -6. Select **Endpoints** to see the database mirroring endpoint for this availability group. Use the same port that you used when you set the [firewall rule for database mirroring endpoints](availability-group-manually-configure-prerequisites-tutorial-single-subnet.md#endpoint-firewall). - - ![New availability group Wizard, Select Initial Data Synchronization](./media/availability-group-manually-configure-tutorial-single-subnet/66-endpoint.png) - -8. In the **Select Initial Data Synchronization** page, select **Full** and specify a shared network location. 
For the location, use the [backup share that you created](#backupshare). In the example it was, **\\\\\Backup\\**. Select **Next**. - - >[!NOTE] - >Full synchronization takes a full backup of the database on the first instance of SQL Server and restores it to the second instance. For large databases, full synchronization is not recommended because it may take a long time. You can reduce this time by manually taking a backup of the database and restoring it with `NO RECOVERY`. If the database is already restored with `NO RECOVERY` on the second SQL Server before configuring the availability group, choose **Join only**. If you want to take the backup after configuring the availability group, choose **Skip initial data synchronization**. - > - - ![Choose Skip initial data synchronization](./media/availability-group-manually-configure-tutorial-single-subnet/70-datasynchronization.png) - -9. In the **Validation** page, select **Next**. This page should look similar to the following image: - - ![New availability group Wizard, Validation](./media/availability-group-manually-configure-tutorial-single-subnet/72-validation.png) - - >[!NOTE] - >There is a warning for the listener configuration because you have not configured an availability group listener. You can ignore this warning because on Azure virtual machines you create the listener after creating the Azure load balancer. - -10. In the **Summary** page, select **Finish**, then wait while the wizard configures the new availability group. In the **Progress** page, you can select **More details** to view the detailed progress. Once the wizard is finished, inspect the **Results** page to verify that the availability group is successfully created. - - ![New availability group Wizard, Results](./media/availability-group-manually-configure-tutorial-single-subnet/74-results.png) - -11. Select **Close** to exit the wizard. - -### Check the availability group - -1. 
In **Object Explorer**, expand **AlwaysOn High Availability**, and then expand **availability groups**. You should now see the new availability group in this container. Right-click the availability group and select **Show Dashboard**. - - ![Show availability group Dashboard](./media/availability-group-manually-configure-tutorial-single-subnet/76-showdashboard.png) - - Your **AlwaysOn Dashboard** should look similar to the following screenshot: - - ![availability group Dashboard](./media/availability-group-manually-configure-tutorial-single-subnet/78-agdashboard.png) - - You can see the replicas, the failover mode of each replica, and the synchronization state. - -2. In **Failover Cluster Manager**, select your cluster. Select **Roles**. The availability group name you used is a role on the cluster. That availability group does not have an IP address for client connections because you did not configure a listener. You will configure the listener after you create an Azure load balancer. - - ![availability group in Failover Cluster Manager](./media/availability-group-manually-configure-tutorial-single-subnet/80-clustermanager.png) - - > [!WARNING] - > Do not try to fail over the availability group from the Failover Cluster Manager. All failover operations should be performed from within **AlwaysOn Dashboard** in SSMS. For more information, see [Restrictions on Using The Failover Cluster Manager with availability groups](/sql/database-engine/availability-groups/windows/failover-clustering-and-always-on-availability-groups-sql-server). - > - -At this point, you have an availability group with replicas on two instances of SQL Server. You can move the availability group between instances. You cannot connect to the availability group yet because you do not have a listener. In Azure virtual machines, the listener requires a load balancer. The next step is to create the load balancer in Azure. 
- - - -## Create an Azure load balancer - -[!INCLUDE [sql-ag-use-dnn-listener](../../includes/sql-ag-use-dnn-listener.md)] - -On Azure virtual machines, a SQL Server availability group requires a load balancer. The load balancer holds the IP addresses for the availability group listeners and the Windows Server Failover Cluster. This section summarizes how to create the load balancer in the Azure portal. - -A load balancer in Azure can be either a Standard Load Balancer or a Basic Load Balancer. Standard Load Balancer has more features than the Basic Load Balancer. For an availability group, the Standard Load Balancer is required if you use an Availability Zone (instead of an Availability Set). For details on the difference between the load balancer SKUs, see [Load Balancer SKU comparison](../../../load-balancer/skus.md). - -1. In the Azure portal, go to the resource group where your SQL Servers are and select **+ Add**. -1. Search for **Load Balancer**. Choose the load balancer published by Microsoft. - - ![Choose the load balancer published by Microsoft](./media/availability-group-manually-configure-tutorial-single-subnet/82-azureloadbalancer.png) - -1. Select **Create**. -1. Configure the following parameters for the load balancer. - - | Setting | Field | - | --- | --- | - | **Name** |Use a text name for the load balancer, for example **sqlLB**. | - | **Type** |Internal | - | **Virtual network** |Use the name of the Azure virtual network. | - | **Subnet** |Use the name of the subnet that the virtual machine is in. | - | **IP address assignment** |Static | - | **IP address** |Use an available address from subnet. Use this address for your availability group listener. Note that this is different from your cluster IP address. | - | **Subscription** |Use the same subscription as the virtual machine. | - | **Location** |Use the same location as the virtual machine. 
| - - The Azure portal blade should look like this: - - ![Create Load Balancer](./media/availability-group-manually-configure-tutorial-single-subnet/84-createloadbalancer.png) - -1. Select **Create**, to create the load balancer. - -To configure the load balancer, you need to create a backend pool, a probe, and set the load balancing rules. Do these in the Azure portal. - -### Add a backend pool for the availability group listener - -1. In the Azure portal, go to your availability group. You might need to refresh the view to see the newly created load balancer. - - ![Find Load Balancer in Resource Group](./media/availability-group-manually-configure-tutorial-single-subnet/86-findloadbalancer.png) - -1. Select the load balancer, select **Backend pools**, and select **+Add**. - -1. Type a name for the backend pool. - -1. Associate the backend pool with the availability set that contains the VMs. - -1. Under **Target network IP configurations**, check **VIRTUAL MACHINE** and choose both of the virtual machines that will host availability group replicas. Do not include the file share witness server. - - >[!NOTE] - >If both virtual machines are not specified, connections will only succeed to the primary replica. - -1. Select **OK** to create the backend pool. - -### Set the probe - -1. Select the load balancer, choose **Health probes**, and then select **+Add**. - -1. Set the listener health probe as follows: - - | Setting | Description | Example - | --- | --- |--- - | **Name** | Text | SQLAlwaysOnEndPointProbe | - | **Protocol** | Choose TCP | TCP | - | **Port** | Any unused port | 59999 | - | **Interval** | The amount of time between probe attempts in seconds |5 | - | **Unhealthy threshold** | The number of consecutive probe failures that must occur for a virtual machine to be considered unhealthy | 2 | - -1. Select **OK** to set the health probe. - -### Set the load balancing rules - -1. Select the load balancer, choose **Load balancing rules**, and select **+Add**. 
- -1. Set the listener load balancing rules as follows. - - | Setting | Description | Example - | --- | --- |--- - | **Name** | Text | SQLAlwaysOnEndPointListener | - | **Frontend IP address** | Choose an address |Use the address that you created when you created the load balancer. | - | **Protocol** | Choose TCP |TCP | - | **Port** | Use the port for the availability group listener | 1433 | - | **Backend Port** | This field is not used when Floating IP is set for direct server return | 1433 | - | **Probe** |The name you specified for the probe | SQLAlwaysOnEndPointProbe | - | **Session Persistence** | Drop down list | **None** | - | **Idle Timeout** | Minutes to keep a TCP connection open | 4 | - | **Floating IP (direct server return)** | |Enabled | - - > [!WARNING] - > Direct server return is set during creation. It cannot be changed. - > - -1. Select **OK** to set the listener load balancing rules. - -### Add the cluster core IP address for the Windows Server Failover Cluster (WSFC) - -The WSFC IP address also needs to be on the load balancer. - -1. In the Azure portal, go to the same Azure load balancer. Select **Frontend IP configuration** and select **+Add**. Use the IP Address you configured for the WSFC in the cluster core resources. Set the IP address as static. - -1. On the load balancer, select **Health probes**, and then select **+Add**. - -1. Set the WSFC cluster core IP address health probe as follows: - - | Setting | Description | Example - | --- | --- |--- - | **Name** | Text | WSFCEndPointProbe | - | **Protocol** | Choose TCP | TCP | - | **Port** | Any unused port | 58888 | - | **Interval** | The amount of time between probe attempts in seconds |5 | - | **Unhealthy threshold** | The number of consecutive probe failures that must occur for a virtual machine to be considered unhealthy | 2 | - -1. Select **OK** to set the health probe. - -1. Set the load balancing rules. Select **Load balancing rules**, and select **+Add**. - -1. 
Set the cluster core IP address load balancing rules as follows. - - | Setting | Description | Example - | --- | --- |--- - | **Name** | Text | WSFCEndPoint | - | **Frontend IP address** | Choose an address |Use the address that you created when you configured the WSFC IP address. This is different from the listener IP address | - | **Protocol** | Choose TCP |TCP | - | **Port** | Use the port for the cluster IP address. This is an available port that is not used for the listener probe port. | 58888 | - | **Backend Port** | This field is not used when Floating IP is set for direct server return | 58888 | - | **Probe** |The name you specified for the probe | WSFCEndPointProbe | - | **Session Persistence** | Drop down list | **None** | - | **Idle Timeout** | Minutes to keep a TCP connection open | 4 | - | **Floating IP (direct server return)** | |Enabled | - - > [!WARNING] - > Direct server return is set during creation. It cannot be changed. - > - -1. Select **OK** to set the load balancing rules. - -## Configure the listener - -The next thing to do is to configure an availability group listener on the failover cluster. - -> [!NOTE] -> This tutorial shows how to create a single listener, with one ILB IP address. To create one or more listeners using one or more IP addresses, see [Create availability group listener and load balancer | Azure](availability-group-listener-powershell-configure.md?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). -> - -[!INCLUDE [ag-listener-configure](../../../../includes/virtual-machines-ag-listener-configure.md)] - -## Set listener port - -In SQL Server Management Studio, set the listener port. - -1. Launch SQL Server Management Studio and connect to the primary replica. - -1. Navigate to **AlwaysOn High Availability** > **availability groups** > **availability group Listeners**. - -1. You should now see the listener name that you created in Failover Cluster Manager. Right-click the listener name and select **Properties**. - -1. 
In the **Port** box, specify the port number for the availability group listener. 1433 is the default. Select **OK**. - -You now have a SQL Server availability group in Azure virtual machines running in Resource Manager mode. - -## Test connection to listener - -To test the connection: - -1. Use RDP to connect to a SQL Server that is in the same virtual network, but does not own the replica. You can use the other SQL Server in the cluster. - -1. Use the **sqlcmd** utility to test the connection. For example, the following script establishes a **sqlcmd** connection to the primary replica through the listener with Windows authentication: - - ```cmd - sqlcmd -S <listenerName> -E - ``` - - If the listener is using a port other than the default port (1433), specify the port in the connection string. For example, the following `sqlcmd` command connects to a listener at port 1435: - - ```cmd - sqlcmd -S <listenerName>,1435 -E - ``` - -The SQLCMD connection automatically connects to whichever instance of SQL Server hosts the primary replica. - -> [!TIP] -> Make sure that the port you specify is open on the firewall of both SQL Servers. Both servers require an inbound rule for the TCP port that you use. For more information, see [Add or Edit Firewall Rule](/previous-versions/orphan-topics/ws.11/cc753558(v=ws.11)). -> - -## Next steps - -- [Add an IP address to a load balancer for a second availability group](availability-group-listener-powershell-configure.md#Add-IP). 
- -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Always On availability groups with SQL Server on Azure VMs](availability-group-overview.md) -- [Always On availability groups overview](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server) -- [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md) diff --git a/articles/azure-sql/virtual-machines/windows/availability-group-overview.md b/articles/azure-sql/virtual-machines/windows/availability-group-overview.md deleted file mode 100644 index bfc045b662800..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/availability-group-overview.md +++ /dev/null @@ -1,146 +0,0 @@ ---- -title: Overview of SQL Server Always On availability groups -description: This article introduces SQL Server Always On availability groups on Azure Virtual Machines. -services: virtual-machines -documentationCenter: na -author: rajeshsetlem -editor: monicar -tags: azure-service-management - -ms.assetid: 601eebb1-fc2c-4f5b-9c05-0e6ffd0e5334 -ms.service: virtual-machines-sql -ms.subservice: hadr - -ms.topic: overview -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.custom: "seo-lt-2019" -ms.reviewer: mathoma ---- - -# Always On availability group on SQL Server on Azure VMs -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article introduces Always On availability groups (AG) for SQL Server on Azure Virtual Machines (VMs). - -To get started, see the [availability group tutorial](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md). 
- -## Overview - -Always On availability groups on Azure Virtual Machines are similar to [Always On availability groups on-premises](/sql/database-engine/availability-groups/windows/always-on-availability-groups-sql-server), and rely on the underlying [Windows Server Failover Cluster](hadr-windows-server-failover-cluster-overview.md). However, since the virtual machines are hosted in Azure, there are a few additional considerations as well, such as VM redundancy, and routing traffic on the Azure network. - -The following diagram illustrates an availability group for SQL Server on Azure VMs: - -![Availability Group](./media/availability-group-overview/00-EndstateSampleNoELB.png) - -> [!NOTE] -> It's now possible to lift and shift your availability group solution to SQL Server on Azure VMs using Azure Migrate. See [Migrate availability group](../../migration-guides/virtual-machines/sql-server-availability-group-to-sql-on-azure-vm.md) to learn more. - -## VM redundancy - -To increase redundancy and high availability, SQL Server VMs should either be in the same [availability set](../../../virtual-machines/availability-set-overview.md), or different [availability zones](../../../availability-zones/az-overview.md). - -Placing a set of VMs in the same availability set protects from outages within a data center caused by equipment failure (VMs within an Availability Set do not share resources) or from updates (VMs within an availability set are not updated at the same time). - -Availability Zones protect against the failure of an entire data center, with each Zone representing a set of data centers within a region. By ensuring resources are placed in different Availability Zones, no data center-level outage can take all of your VMs offline. - -When creating Azure VMs, you must choose between configuring Availability Sets vs Availability Zones. An Azure VM cannot participate in both. 
- -While Availability Zones may provide better availability than Availability Sets (99.99% vs 99.95%), performance should also be a consideration. VMs within an Availability Set can be placed in a [proximity placement group](../../../virtual-machines/co-location.md) which guarantees that they are close to each other, minimizing network latency between them. VMs located in different Availability Zones will have greater network latency between them, which can increase the time it takes to synchronize data between the primary and secondary replica(s). This may cause delays on the primary replica as well as increase the chance of data loss in the event of an unplanned failover. It is important to test the proposed solution under load and ensure that it meets SLAs for both performance and availability. - -## Connectivity - -To match the on-premises experience for connecting to your availability group listener, deploy your SQL Server VMs to [multiple subnets](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md) within the same virtual network. Having multiple subnets negates the need for the extra dependency on an Azure Load Balancer, or a distributed network name (DNN) to route your traffic to your listener. - -If you deploy your SQL Server VMs to a single subnet, you can configure a virtual network name (VNN) and an Azure Load Balancer, or a distributed network name (DNN) to route traffic to your availability group listener. [Review the differences between the two](hadr-windows-server-failover-cluster-overview.md) and then deploy either a [distributed network name (DNN)](availability-group-distributed-network-name-dnn-listener-configure.md) or a [virtual network name (VNN)](availability-group-vnn-azure-load-balancer-configure.md) for your availability group. - -Most SQL Server features work transparently with availability groups when using the DNN, but there are certain features that may require special consideration. 
See [AG and DNN interoperability](availability-group-dnn-interoperability.md) to learn more. - -Additionally, there are some behavior differences between the functionality of the VNN listener and DNN listener that are important to note: - -- **Failover time**: Failover time is faster when using a DNN listener since there is no need to wait for the network load balancer to detect the failure event and change its routing. -- **Existing connections**: Connections made to a *specific database* within a failing-over availability group will close, but other connections to the primary replica will remain open since the DNN stays online during the failover process. This is different than a traditional VNN environment where all connections to the primary replica typically close when the availability group fails over, the listener goes offline, and the primary replica transitions to the secondary role. When using a DNN listener, you may need to adjust application connection strings to ensure that connections are redirected to the new primary replica upon failover. -- **Open transactions**: Open transactions against a database in a failing-over availability group will close and roll back, and you need to *manually* reconnect. For example, in SQL Server Management Studio, close the query window and open a new one. - -Setting up a VNN listener in Azure requires a load balancer. There are two main options for load balancers in Azure: external (public) or internal. The external (public) load balancer is internet-facing and is associated with a public virtual IP that's accessible over the internet. An internal load balancer supports only clients within the same virtual network. For either load balancer type, you must enable [Direct Server Return](../../../load-balancer/load-balancer-multivip-overview.md#rule-type-2-backend-port-reuse-by-using-floating-ip). - -You can still connect to each availability replica separately by connecting directly to the service instance. 
Also, because availability groups are backward compatible with database mirroring clients, you can connect to the availability replicas like database mirroring partners as long as the replicas are configured similarly to database mirroring: - -* There's one primary replica and one secondary replica. -* The secondary replica is configured as non-readable (**Readable Secondary** option set to **No**). - -The following is an example client connection string that corresponds to this database mirroring-like configuration using ADO.NET or SQL Server Native Client: - -```console -Data Source=ReplicaServer1;Failover Partner=ReplicaServer2;Initial Catalog=AvailabilityDatabase; -``` - -For more information on client connectivity, see: - -* [Using Connection String Keywords with SQL Server Native Client](/sql/relational-databases/native-client/applications/using-connection-string-keywords-with-sql-server-native-client) -* [Connect Clients to a Database Mirroring Session (SQL Server)](/sql/database-engine/database-mirroring/connect-clients-to-a-database-mirroring-session-sql-server) -* [Connecting to Availability Group Listener in Hybrid IT](/archive/blogs/sqlalwayson/connecting-to-availability-group-listener-in-hybrid-it) -* [Availability Group Listeners, Client Connectivity, and Application Failover (SQL Server)](/sql/database-engine/availability-groups/windows/listeners-client-connectivity-application-failover) -* [Using Database-Mirroring Connection Strings with Availability Groups](/sql/database-engine/availability-groups/windows/listeners-client-connectivity-application-failover) - -## Lease mechanism - -For SQL Server, the AG resource DLL determines the health of the AG based on the AG lease mechanism and Always On health detection. The AG resource DLL exposes resource health through the *IsAlive* operation. The resource monitor polls IsAlive at the cluster heartbeat interval, which is set by the **CrossSubnetDelay** and **SameSubnetDelay** cluster-wide values. 
On a primary node, the cluster service initiates failover whenever the IsAlive call to the resource DLL returns that the AG is not healthy. - -The AG resource DLL monitors the status of internal SQL Server components. Sp_server_diagnostics reports the health of these components to SQL Server on an interval controlled by **HealthCheckTimeout**. - -Unlike other failover mechanisms, the SQL Server instance plays an active role in the lease mechanism. The lease mechanism is used as a *LooksAlive* validation between the Cluster resource host and the SQL Server process. The mechanism is used to ensure that the two sides (the Cluster Service and SQL Server service) are in frequent contact, checking each other's state and ultimately preventing a split-brain scenario. - -When configuring an AG in Azure VMs, there is often a need to configure these thresholds differently than they would be configured in an on-premises environment. To configure threshold settings according to best practices for Azure VMs, see the [cluster best practices](hadr-cluster-best-practices.md). - - -## Network configuration - -Deploy your SQL Server VMs to multiple subnets whenever possible to avoid the dependency on an Azure Load Balancer or a distributed network name (DNN) to route traffic to your availability group listener. - -On an Azure VM failover cluster, we recommend a single NIC per server (cluster node). Azure networking has physical redundancy, which makes additional NICs unnecessary on an Azure VM failover cluster. Although the cluster validation report will issue a warning that the nodes are only reachable on a single network, this warning can be safely ignored on Azure VM failover clusters. - -## Basic availability group - -As basic availability group does not allow more than one secondary replica and there is no read access to the secondary replica, you can use the database mirroring connection strings for basic availability groups. 
Using the connection string eliminates the need to have listeners. Removing the listener dependency is helpful for availability groups on Azure VMs as it eliminates the need for a load balancer or having to add additional IPs to the load balancer when you have multiple listeners for additional databases. - -For example, to explicitly connect using TCP/IP to the AG database AdventureWorks on either Replica_A or Replica_B of a Basic AG (or any AG that has only one secondary replica and the read access is not allowed in the secondary replica), a client application could supply the following database mirroring connection string to successfully connect to the AG - -`Server=Replica_A; Failover_Partner=Replica_B; Database=AdventureWorks; Network=dbmssocn` - - -## Deployment options - -There are multiple options for deploying an availability group to SQL Server on Azure VMs, some with more automation than others. - -The following table provides a comparison of the options available: - -| | [Azure portal](availability-group-azure-portal-configure.md), | [Azure CLI / PowerShell](./availability-group-az-commandline-configure.md) | [Quickstart Templates](availability-group-quickstart-template-configure.md) | [Manual (single subnet)](availability-group-manually-configure-prerequisites-tutorial-single-subnet.md) | [Manual (multi-subnet)](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md) -|---------|---------|---------|---------|---------| -|**SQL Server version** |2016 + |2016 +|2016 +|2012 +|2012 +| -|**SQL Server edition** |Enterprise |Enterprise |Enterprise |Enterprise, Standard|Enterprise, Standard| -|**Windows Server version**| 2016 + | 2016 + | 2016 + | All| All| -|**Creates the cluster for you**|Yes|Yes | Yes |No| No| -|**Creates the availability group for you** |Yes |No|No|No| No| -|**Creates listener and load balancer independently** |No|No|No|Yes|N/A| -|**Possible to create DNN listener using this method?**|No|No|No|Yes|N/A| -|**WSFC 
quorum configuration**|Cloud witness|Cloud witness|Cloud witness|All|All| -|**DR with multiple regions** |No|No|No|Yes|Yes| -|**Multisubnet support** |No|No|No|N/A|Yes| -|**Support for an existing AD**|Yes|Yes|Yes|Yes|Yes| -|**DR with multizone in the same region**|Yes|Yes|Yes|Yes|Yes| -|**Distributed AG with no AD**|No|No|No|Yes| Yes| -|**Distributed AG with no cluster** |No|No|No|Yes|Yes| -|**Requires load balancer or DNN**| Yes | Yes | Yes | Yes | No| - -## Next steps - -To get started, review the [HADR best practices](hadr-cluster-best-practices.md), and then deploy your availability group manually with the [availability group tutorial](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md). - - -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Always On availability groups overview](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server) diff --git a/articles/azure-sql/virtual-machines/windows/availability-group-quickstart-template-configure.md b/articles/azure-sql/virtual-machines/windows/availability-group-quickstart-template-configure.md deleted file mode 100644 index c400c908d09c7..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/availability-group-quickstart-template-configure.md +++ /dev/null @@ -1,240 +0,0 @@ ---- -title: Configure availability group (Azure quickstart template) -description: "Use Azure quickstart templates to create the Windows Failover Cluster, join SQL Server VMs to the cluster, create the listener, and configure the internal load balancer in Azure." 
-services: virtual-machines-windows -documentationcenter: na -author: rajeshsetlem -tags: azure-resource-manager -ms.assetid: aa5bf144-37a3-4781-892d-e0e300913d03 -ms.service: virtual-machines-sql -ms.subservice: hadr - -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.reviewer: mathoma -ms.custom: "seo-lt-2019, devx-track-azurepowershell" - ---- -# Use Azure quickstart templates to configure an availability group for SQL Server on Azure VM -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!TIP] -> Eliminate the need for an Azure Load Balancer for your Always On availability (AG) group by creating your SQL Server VMs in [multiple subnets](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md) within the same Azure virtual network. - -This article describes how to use the Azure quickstart templates to partially automate the deployment of an Always On availability group configuration for SQL Server virtual machines (VMs) within a single subnet in Azure. Two Azure quickstart templates are used in this process: - - | Template | Description | - | --- | --- | - | [sql-vm-ag-setup](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sqlvirtualmachine/sql-vm-ag-setup) | Creates the Windows failover cluster and joins the SQL Server VMs to it. | - | [sql-vm-aglistener-setup](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sqlvirtualmachine/sql-vm-aglistener-setup) | Creates the availability group listener and configures the internal load balancer. This template can be used only if the Windows failover cluster was created with the **101-sql-vm-ag-setup** template. | - - -Other parts of the availability group configuration must be done manually, such as creating the availability group and creating the internal load balancer. 
This article provides the sequence of automated and manual steps. - -While this article uses the Azure Quickstart templates to configure the availability group environment, it is also possible to do so using the [Azure portal](availability-group-azure-portal-configure.md), [PowerShell or the Azure CLI](availability-group-az-commandline-configure.md), or [Manually](availability-group-manually-configure-tutorial-single-subnet.md) as well. - -> [!NOTE] -> It's now possible to lift and shift your availability group solution to SQL Server on Azure VMs using Azure Migrate. See [Migrate availability group](../../migration-guides/virtual-machines/sql-server-availability-group-to-sql-on-azure-vm.md) to learn more. - - -## Prerequisites -To automate the setup of an Always On availability group by using quickstart templates, you must have the following prerequisites: -- An [Azure subscription](https://azure.microsoft.com/free/). -- A resource group with a domain controller. -- One or more domain-joined [VMs in Azure running SQL Server 2016 (or later) Enterprise edition](./create-sql-vm-portal.md) that are in the same availability set or availability zone and that have been [registered with the SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md). -- An internal Azure Load Balancer and an available (not used by any entity) IP address for the availability group listener within the same subnet as the SQL Server VM. - - -## Permissions -The following permissions are necessary to configure the Always On availability group by using Azure quickstart templates: - -- An existing domain user account that has **Create Computer Object** permission in the domain. For example, a domain admin account typically has sufficient permission (for example: account@domain.com). _This account should also be part of the local administrator group on each VM to create the cluster._ -- The domain user account that controls SQL Server. 
- - -## Create cluster -After your SQL Server VMs have been registered with the SQL IaaS Agent extension, you can join your SQL Server VMs to *SqlVirtualMachineGroups*. This resource defines the metadata of the Windows failover cluster. Metadata includes the version, edition, fully qualified domain name, Active Directory accounts to manage both the cluster and SQL Server, and the storage account as the cloud witness. - -Adding SQL Server VMs to the *SqlVirtualMachineGroups* resource group bootstraps the Windows Failover Cluster Service to create the cluster and then joins those SQL Server VMs to that cluster. This step is automated with the **101-sql-vm-ag-setup** quickstart template. You can implement it by using the following steps: - -1. Go to the [**sql-vm-ag-setup**](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sqlvirtualmachine/sql-vm-ag-setup) quickstart template. Then, select **Deploy to Azure** to open the quickstart template in the Azure portal. -1. Fill out the required fields to configure the metadata for the Windows failover cluster. You can leave the optional fields blank. - - The following table shows the necessary values for the template: - - | **Field** | Value | - | --- | --- | - | **Subscription** | The subscription where your SQL Server VMs exist. | - |**Resource group** | The resource group where your SQL Server VMs reside. | - |**Failover Cluster Name** | The name that you want for your new Windows failover cluster. | - | **Existing Vm List** | The SQL Server VMs that you want to participate in the availability group and be part of this new cluster. Separate these values with a comma and a space (for example: *SQLVM1, SQLVM2*). | - | **SQL Server Version** | The SQL Server version of your SQL Server VMs. Select it from the drop-down list. Currently, only SQL Server 2016 and SQL Server 2017 images are supported. 
| - | **Existing Fully Qualified Domain Name** | The existing FQDN for the domain in which your SQL Server VMs reside. | - | **Existing Domain Account** | An existing domain user account that has **Create Computer Object** permission in the domain as the [CNO](/windows-server/failover-clustering/prestage-cluster-adds) is created during template deployment. For example, a domain admin account typically has sufficient permission (for example: account@domain.com). *This account should also be part of the local administrator group on each VM to create the cluster.*| - | **Domain Account Password** | The password for the previously mentioned domain user account. | - | **Existing Sql Service Account** | The domain user account that controls the [SQL Server service](/sql/database-engine/configure-windows/configure-windows-service-accounts-and-permissions) during availability group deployment (for example: account@domain.com). | - | **Sql Service Password** | The password used by the domain user account that controls SQL Server. | - | **Cloud Witness Name** | A new Azure storage account that will be created and used for the cloud witness. You can modify this name. | - | **\_artifacts Location** | This field is set by default and should not be modified. | - | **\_artifacts Location SaS Token** | This field is intentionally left blank. | - - -1. If you agree to the terms and conditions, select the **I Agree to the terms and conditions stated above** check box. Then select **Purchase** to finish deployment of the quickstart template. -1. To monitor your deployment, either select the deployment from the **Notifications** bell icon in the top navigation banner or go to **Resource Group** in the Azure portal. Select **Deployments** under **Settings**, and choose the **Microsoft.Template** deployment. - ->[!NOTE] -> Credentials provided during template deployment are stored only for the length of the deployment. After deployment finishes, those passwords are removed. 
You'll be asked to provide them again if you add more SQL Server VMs to the cluster. - -## Configure quorum - -Although the disk witness is the most resilient quorum option, it requires an Azure shared disk which imposes some limitations to the availability group. As such, the cloud witness is the recommended quorum solution for clusters hosting availability groups for SQL Server on Azure VMs. - -If you have an even number of votes in the cluster, configure the [quorum solution](hadr-cluster-quorum-configure-how-to.md) that best suits your business needs. For more information, see [Quorum with SQL Server VMs](hadr-windows-server-failover-cluster-overview.md#quorum). - -## Validate cluster - -For a failover cluster to be supported by Microsoft, it must pass cluster validation. Connect to the VM using your preferred method, such as Remote Desktop Protocol (RDP) and validate that your cluster passes validation before proceeding further. Failure to do so leaves your cluster in an unsupported state. - -You can validate the cluster using Failover Cluster Manager (FCM) or the following PowerShell command: - - ```powershell - Test-Cluster –Node ("","") –Include "Inventory", "Network", "System Configuration" - ``` - - -## Create availability group -Manually create the availability group as you normally would, by using [SQL Server Management Studio](/sql/database-engine/availability-groups/windows/use-the-availability-group-wizard-sql-server-management-studio), [PowerShell](/sql/database-engine/availability-groups/windows/create-an-availability-group-sql-server-powershell), or [Transact-SQL](/sql/database-engine/availability-groups/windows/create-an-availability-group-transact-sql). - ->[!IMPORTANT] -> Do *not* create a listener at this time, because the **101-sql-vm-aglistener-setup** quickstart template does that automatically in step 4. 
- -## Create load balancer - -[!INCLUDE [sql-ag-use-dnn-listener](../../includes/sql-ag-use-dnn-listener.md)] - -The Always On availability group listener requires an internal instance of Azure Load Balancer. The internal load balancer provides a "floating" IP address for the availability group listener that allows for faster failover and reconnection. If the SQL Server VMs in an availability group are part of the same availability set, you can use a Basic load balancer. Otherwise, you need to use a Standard load balancer. - -> [!IMPORTANT] -> The internal load balancer should be in the same virtual network as the SQL Server VM instances. - -You just need to create the internal load balancer. In step 4, the **101-sql-vm-aglistener-setup** quickstart template handles the rest of the configuration (such as the backend pool, health probe, and load-balancing rules). - -1. In the Azure portal, open the resource group that contains the SQL Server virtual machines. -2. In the resource group, select **Add**. -3. Search for **load balancer**. In the search results, select **Load Balancer**, which is published by **Microsoft**. -4. On the **Load Balancer** blade, select **Create**. -5. In the **Create load balancer** dialog box, configure the load balancer as follows: - - | Setting | Value | - | --- | --- | - | **Name** |Enter a text name that represents the load balancer. For example, enter **sqlLB**. | - | **Type** |**Internal**: Most implementations use an internal load balancer, which allows applications within the same virtual network to connect to the availability group.
    **External**: Allows applications to connect to the availability group through a public internet connection. | - | **Virtual network** | Select the virtual network that the SQL Server instances are in. | - | **Subnet** | Select the subnet that the SQL Server instances are in. | - | **IP address assignment** |**Static** | - | **Private IP address** | Specify an available IP address from the subnet. | - | **Subscription** |If you have multiple subscriptions, this field might appear. Select the subscription that you want to associate with this resource. It's normally the same subscription as all the resources for the availability group. | - | **Resource group** |Select the resource group that the SQL Server instances are in. | - | **Location** |Select the Azure location that the SQL Server instances are in. | - - -6. Select **Create**. - - ->[!IMPORTANT] -> The public IP resource for each SQL Server VM should have a Standard SKU to be compatible with the Standard load balancer. To determine the SKU of your VM's public IP resource, go to **Resource Group**, select your **Public IP Address** resource for the SQL Server VM, and locate the value under **SKU** in the **Overview** pane. - -## Create listener - -Create the availability group listener and configure the internal load balancer automatically by using the **101-sql-vm-aglistener-setup** quickstart template. The template provisions the Microsoft.SqlVirtualMachine/SqlVirtualMachineGroups/AvailabilityGroupListener resource. The **101-sql-vm-aglistener-setup** quickstart template, via the SQL IaaS Agent extension, does the following actions: - -- Creates a new frontend IP resource (based on the IP address value provided during deployment) for the listener. -- Configures the network settings for the cluster and the internal load balancer. -- Configures the backend pool for the internal load balancer, the health probe, and the load-balancing rules. 
-- Creates the availability group listener with the given IP address and name. - ->[!NOTE] -> You can use **101-sql-vm-aglistener-setup** only if the Windows failover cluster was created with the **101-sql-vm-ag-setup** template. - - -To configure the internal load balancer and create the availability group listener, do the following: -1. Go to the [sql-vm-aglistener-setup](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sqlvirtualmachine/sql-vm-aglistener-setup) quickstart template and select **Deploy to Azure** to start the quickstart template in the Azure portal. -1. Fill out the required fields to configure the internal load balancer, and create the availability group listener. You can leave the optional fields blank. - - The following table shows the necessary values for the template: - - | **Field** | Value | - | --- | --- | - |**Resource group** | The resource group where your SQL Server VMs and availability group exist. | - |**Existing Failover Cluster Name** | The name of the cluster that your SQL Server VMs are joined to. | - | **Existing Sql Availability Group**| The name of the availability group that your SQL Server VMs are a part of. | - | **Existing Vm List** | The names of the SQL Server VMs that are part of the previously mentioned availability group. Separate the names with a comma and a space (for example: *SQLVM1, SQLVM2*). | - | **Listener** | The DNS name that you want to assign to the listener. By default, this template specifies the name "aglistener," but you can change it. The name should not exceed 15 characters. | - | **Listener Port** | The port that you want the listener to use. Typically, this port should be the default of 1433. This is the port number that the template specifies. But if your default port has been changed, the listener port should use that value instead. | - | **Listener IP** | The IP address that you want the listener to use. 
This address will be created during template deployment, so provide one that isn't already in use. | - | **Existing Subnet** | The name of the internal subnet of your SQL Server VMs (for example: *default*). You can determine this value by going to **Resource Group**, selecting your virtual network, selecting **Subnets** in the **Settings** pane, and copying the value under **Name**. | - | **Existing Internal Load Balancer** | The name of the internal load balancer that you created in step 3. | - | **Probe Port** | The probe port that you want the internal load balancer to use. The template uses 59999 by default, but you can change this value. | - - -1. If you agree to the terms and conditions, select the **I Agree to the terms and conditions stated above** check box. Select **Purchase** to finish deployment of the quickstart template. -1. To monitor your deployment, either select the deployment from the **Notifications** bell icon in the top navigation banner or go to **Resource Group** in the Azure portal. Select **Deployments** under **Settings**, and choose the **Microsoft.Template** deployment. - ->[!NOTE] ->If your deployment fails halfway through, you'll need to manually [remove the newly created listener](#remove-listener) by using PowerShell before you redeploy the **101-sql-vm-aglistener-setup** quickstart template. - -## Remove listener -If you later need to remove the availability group listener that the template configured, you must go through the SQL IaaS Agent extension. Because the listener is registered through the SQL IaaS Agent extension, just deleting it via SQL Server Management Studio is insufficient. - -The best method is to delete it through the SQL IaaS Agent extension by using the following code snippet in PowerShell. Doing so removes the availability group listener metadata from the SQL IaaS Agent extension. It also physically deletes the listener from the availability group. 
- -```PowerShell -# Remove the availability group listener -# example: Remove-AzResource -ResourceId '/subscriptions/a1a11a11-1a1a-aa11-aa11-1aa1a11aa11a/resourceGroups/SQLAG-RG/providers/Microsoft.SqlVirtualMachine/SqlVirtualMachineGroups/Cluster/availabilitygrouplisteners/aglistener' -Force -Remove-AzResource -ResourceId '/subscriptions//resourceGroups//providers/Microsoft.SqlVirtualMachine/SqlVirtualMachineGroups//availabilitygrouplisteners/' -Force -``` - -## Common errors -This section discusses some known issues and their possible resolution. - -**Availability group listener for availability group '\' already exists** -The selected availability group used in the Azure quickstart template for the availability group listener already contains a listener. Either it is physically within the availability group, or its metadata remains within the SQL IaaS Agent extension. Remove the listener by using [PowerShell](#remove-listener) before redeploying the **101-sql-vm-aglistener-setup** quickstart template. - -**Connection only works from primary replica** -This behavior is likely from a failed **101-sql-vm-aglistener-setup** template deployment that has left the configuration of the internal load balancer in an inconsistent state. Verify that the backend pool lists the availability set, and that rules exist for the health probe and for the load-balancing rules. If anything is missing, the configuration of the internal load balancer is an inconsistent state. - -To resolve this behavior, remove the listener by using [PowerShell](#remove-listener), delete the internal load balancer via the Azure portal, and start again at step 3. - -**BadRequest - Only SQL virtual machine list can be updated** -This error might occur when you're deploying the **101-sql-vm-aglistener-setup** template if the listener was deleted via SQL Server Management Studio (SSMS), but was not deleted from the SQL IaaS Agent extension. 
Deleting the listener via SSMS does not remove the metadata of the listener from the SQL IaaS Agent extension. The listener must be deleted from the resource provider through [PowerShell](#remove-listener). - -**Domain account does not exist** -This error can have two causes. Either the specified domain account doesn't exist, or it's missing the [User Principal Name (UPN)](/windows/desktop/ad/naming-properties#userprincipalname) data. The **101-sql-vm-ag-setup** template expects a domain account in the UPN form (that is, user@domain.com), but some domain accounts might be missing it. This typically happens when a local user has been migrated to be the first domain administrator account when the server was promoted to a domain controller, or when a user was created through PowerShell. - -Verify that the account exists. If it does, you might be running into the second situation. To resolve it, do the following: - -1. On the domain controller, open the **Active Directory Users and Computers** window from the **Tools** option in **Server Manager**. -2. Go to the account by selecting **Users** in the left pane. -3. Right-click the account, and select **Properties**. -4. Select the **Account** tab. If the **User logon name** box is blank, this is the cause of your error. - - ![Blank user account indicates missing UPN](./media/availability-group-quickstart-template-configure/account-missing-upn.png) - -5. Fill in the **User logon name** box to match the name of the user, and select the proper domain from the drop-down list. -6. Select **Apply** to save your changes, and close the dialog box by selecting **OK**. - -After you make these changes, try to deploy the Azure quickstart template once more. 
- - -## Next steps - -To learn more, see: - -* [Overview of SQL Server VMs](sql-server-on-azure-vm-iaas-what-is-overview.md) -* [FAQ for SQL Server VMs](frequently-asked-questions-faq.yml) -* [Pricing guidance for SQL Server VMs](pricing-guidance.md) -* [What's new in SQL Server on Azure VMs](doc-changes-updates-release-notes-whats-new.md) -* [Switching licensing models for a SQL Server VM](licensing-model-azure-hybrid-benefit-ahb-change.md) diff --git a/articles/azure-sql/virtual-machines/windows/availability-group-vnn-azure-load-balancer-configure.md b/articles/azure-sql/virtual-machines/windows/availability-group-vnn-azure-load-balancer-configure.md deleted file mode 100644 index b2ec11279aaa8..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/availability-group-vnn-azure-load-balancer-configure.md +++ /dev/null @@ -1,273 +0,0 @@ ---- -title: Configure load balancer for AG VNN listener -description: Learn to configure an Azure Load Balancer to route traffic to the virtual network name (VNN) listener for your availability group with SQL Server on Azure VMs for high availability and disaster recovery (HADR). -services: virtual-machines-windows -documentationcenter: na -author: rajeshsetlem -tags: azure-resource-manager -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.reviewer: mathoma - ---- -# Configure load balancer for AG VNN listener -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!TIP] -> Eliminate the need for an Azure Load Balancer for your Always On availability (AG) group by creating your SQL Server VMs in [multiple subnets](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md) within the same Azure virtual network. - -On Azure Virtual Machines, clusters use a load balancer to hold an IP address that needs to be on one cluster node at a time. 
In this solution, the load balancer holds the IP address for the virtual network name (VNN) listener for the Always On availability group (AG) when the SQL Server VMs are in a single subnet. - -This article teaches you to configure a load balancer by using the Azure Load Balancer service. The load balancer will route traffic to your [availability group (AG) listener](availability-group-overview.md) with SQL Server on Azure VMs for high availability and disaster recovery (HADR). - -For an alternative connectivity option for customers that are on SQL Server 2019 CU8 and later, consider a [DNN listener](availability-group-vnn-azure-load-balancer-configure.md) instead for simplified configuration and improved failover. - - - -## Prerequisites - -Before you complete the steps in this article, you should already have: - -- Decided that Azure Load Balancer is the appropriate [connectivity option for your availability group](hadr-windows-server-failover-cluster-overview.md#virtual-network-name-vnn). -- Configured your [availability group listener](availability-group-overview.md). -- Installed the latest version of [PowerShell](/powershell/scripting/install/installing-powershell-core-on-windows). - - -## Create load balancer - -You can create either an internal load balancer or an external load balancer. An internal load balancer can only be from accessed private resources that are internal to the network. An external load balancer can route traffic from the public to internal resources. When you configure an internal load balancer, use the same IP address as the availability group listener resource for the frontend IP when configuring the load-balancing rules. When you configure an external load balancer, you cannot use the same IP address as the availability group listener as the the listener IP address cannot be a public IP address. 
As such, to use an external load balancer, logically allocate an IP address in the same subnet as the availability group that does not conflict with any other IP address, and use this address as the frontend IP address for the load-balancing rules. - -Use the [Azure portal](https://portal.azure.com) to create the load balancer: - -1. In the Azure portal, go to the resource group that contains the virtual machines. - -1. Select **Add**. Search Azure Marketplace for **Load Balancer**. Select **Load Balancer**. - -1. Select **Create**. - -1. Set up the load balancer by using the following values: - - - **Subscription**: Your Azure subscription. - - **Resource group**: The resource group that contains your virtual machines. - - **Name**: A name that identifies the load balancer. - - **Region**: The Azure location that contains your virtual machines. - - **Type**: Either public or private. A private load balancer can be accessed from within the virtual network. Most Azure applications can use a private load balancer. If your application needs access to SQL Server directly over the internet, use a public load balancer. - - **SKU**: Standard. - - **Virtual network**: The same network as the virtual machines. - - **IP address assignment**: Static. - - **Private IP address**: The IP address that you assigned to the clustered network resource. - - The following image shows the **Create load balancer** UI: - - ![Set up the load balancer](./media/failover-cluster-instance-premium-file-share-manually-configure/30-load-balancer-create.png) - - -## Configure backend pool - -1. Return to the Azure resource group that contains the virtual machines and locate the new load balancer. You might need to refresh the view on the resource group. Select the load balancer. - -1. Select **Backend pools**, and then select **Add**. - -1. Associate the backend pool with the availability set that contains the VMs. - -1. 
Under **Target network IP configurations**, select **VIRTUAL MACHINE** and choose the virtual machines that will participate as cluster nodes. Be sure to include all virtual machines that will host the availability group. - -1. Select **OK** to create the backend pool. - -## Configure health probe - -1. On the load balancer pane, select **Health probes**. - -1. Select **Add**. - -1. On the **Add health probe** pane, set the following health probe parameters: - - - **Name**: A name for the health probe. - - **Protocol**: TCP. - - **Port**: The port you created in the firewall for the health probe [when preparing the VM](failover-cluster-instance-prepare-vm.md#uninstall-sql-server-1). In this article, the example uses TCP port `59999`. - - **Interval**: 5 Seconds. - - **Unhealthy threshold**: 2 consecutive failures. - -1. Select **OK**. - -## Set load-balancing rules - -Set the load-balancing rules for the load balancer. - -# [Private load balancer](#tab/ilb) - -1. On the load balancer pane, select **Load-balancing rules**. - -1. Select **Add**. - -1. Set the load-balancing rule parameters: - - - **Name**: A name for the load-balancing rules. - - **Frontend IP address**: The IP address for the AG listener's clustered network resource. - - **Port**: The SQL Server TCP port. The default instance port is 1433. - - **Backend port**: The same port as the **Port** value when you enable **Floating IP (direct server return)**. - - **Backend pool**: The backend pool name that you configured earlier. - - **Health probe**: The health probe that you configured earlier. - - **Session persistence**: None. - - **Idle timeout (minutes)**: 4. - - **Floating IP (direct server return)**: Enabled. - -1. Select **OK**. - -# [Public load balancer](#tab/elb) - -1. On the load balancer pane, select **Load-balancing rules**. - -1. Select **Add**. - -1. Set the load-balancing rule parameters: - - - **Name**: A name for the load-balancing rules. 
- - **Frontend IP address**: The public IP address that clients use to connect to the public endpoint. - - **Port**: The SQL Server TCP port. The default instance port is 1433. - - **Backend port**: The same port used by the listener of the AG. The port is 1433 by default. - - **Backend pool**: The backend pool name that you configured earlier. - - **Health probe**: The health probe that you configured earlier. - - **Session persistence**: None. - - **Idle timeout (minutes)**: 4. - - **Floating IP (direct server return)**: Disabled. - -1. Select **OK**. - ---- - -## Configure cluster probe - -Set the cluster probe port parameter in PowerShell. - -# [Private load balancer](#tab/ilb) - -To set the cluster probe port parameter, update the variables in the following script with values from your environment. Remove the angle brackets (`<` and `>`) from the script. - -```powershell -$ClusterNetworkName = "" -$IPResourceName = "" -$ILBIP = "" -[int]$ProbePort = - -Import-Module FailoverClusters - -Get-ClusterResource $IPResourceName | Set-ClusterParameter -Multiple @{"Address"="$ILBIP";"ProbePort"=$ProbePort;"SubnetMask"="255.255.255.255";"Network"="$ClusterNetworkName";"EnableDhcp"=0} -``` - -The following table describes the values that you need to update: - - -|**Value**|**Description**| -|---------|---------| -|`Cluster Network Name`| The Windows Server Failover Cluster name for the network. In **Failover Cluster Manager** > **Networks**, right-click the network and select **Properties**. The correct value is under **Name** on the **General** tab.| -|`AG listener IP Address Resource Name`|The resource name for the IP address of the AG listener. In **Failover Cluster Manager** > **Roles**, under the availability group role, under **Server Name**, right-click the IP address resource and select **Properties**. The correct value is under **Name** on the **General** tab.| -|`ILBIP`|The IP address of the internal load balancer (ILB). 
This address is configured in the Azure portal as the frontend address of the ILB. This is the same IP address as the availability group listener. You can find it in **Failover Cluster Manager** on the same properties page where you located the ``.| -|`nnnnn`|The probe port that you configured in the health probe of the load balancer. Any unused TCP port is valid.| -|"SubnetMask"| The subnet mask for the cluster parameter. It must be the TCP IP broadcast address: `255.255.255.255`.| - - -After you set the cluster probe, you can see all the cluster parameters in PowerShell. Run this script: - -```powershell -Get-ClusterResource $IPResourceName | Get-ClusterParameter -``` - -# [Public load balancer](#tab/elb) - -To set the cluster probe port parameter, update the variables in the following script with values from your environment. Remove the angle brackets (`<` and `>`) from the script. - -```powershell -$ClusterNetworkName = "" -$IPResourceName = "" -$ELBIP = "" -[int]$ProbePort = - -Import-Module FailoverClusters - -Get-ClusterResource $IPResourceName | Set-ClusterParameter -Multiple @{"Address"="$ELBIP";"ProbePort"=$ProbePort;"SubnetMask"="255.255.255.255";"Network"="$ClusterNetworkName";"EnableDhcp"=0} -``` - -The following table describes the values that you need to update: - - -|**Value**|**Description**| -|---------|---------| -|`Cluster Network Name`| The Windows Server Failover Cluster name for the network. In **Failover Cluster Manager** > **Networks**, right-click the network and select **Properties**. The correct value is under **Name** on the **General** tab.| -|`AG listener IP Address Resource Name`|The resource name for the IP address of the AG listener.In **Failover Cluster Manager** > **Roles**, under the availability group role, under **Server Name**, right-click the IP address resource and select **Properties**. The correct value is under **Name** on the **General** tab.| -|`ELBIP`|The IP address of the external load balancer (ELB). 
This address is configured in the Azure portal as the frontend address of the ELB and is used to connect to the public load balancer from external resources.| -|`nnnnn`|The probe port that you configured in the health probe of the load balancer. Any unused TCP port is valid.| -|"SubnetMask"| The subnet mask for the cluster parameter. It must be the TCP IP broadcast address: `255.255.255.255`.| - - -After you set the cluster probe, you can see all the cluster parameters in PowerShell. Run this script: - -```powershell -Get-ClusterResource $IPResourceName | Get-ClusterParameter -``` - -> [!NOTE] -> Since there is no private IP address for the external load balancer, users cannot directly use the VNN DNS name as it resolves the IP address within the subnet. Use either the public IP address of the public LB or configure another DNS mapping on the DNS server. - - ---- - -## Modify connection string - -For clients that support it, add the `MultiSubnetFailover=True` to the connection string. While the MultiSubnetFailover connection option is not required, it does provide the benefit of a faster subnet failover. This is because the client driver will attempt to open up a TCP socket for each IP address in parallel. The client driver will wait for the first IP to respond with success and once it does, will then use it for the connection. - -If your client does not support the MultiSubnetFailover parameter, you can modify the RegisterAllProvidersIP and HostRecordTTL settings to prevent connectivity delays post-failover. - -Use PowerShell to modify the RegisterAllProvidersIp and HostRecordTTL settings: - -```powershell -Get-ClusterResource yourListenerName | Set-ClusterParameter RegisterAllProvidersIP 0 -Get-ClusterResource yourListenerName|Set-ClusterParameter HostRecordTTL 300 -``` - -To learn more, see the SQL Server [listener connection timeout](/troubleshoot/sql/availability-groups/listener-connection-times-out) documentation. 
- - -> [!TIP] -> - Set the MultiSubnetFailover parameter = true in the connection string even for HADR solutions that span a single subnet to support future spanning of subnets without the need to update connection strings. -> - By default, clients cache cluster DNS records for 20 minutes. By reducing HostRecordTTL you reduce the Time to Live (TTL) for the cached record, legacy clients may reconnect more quickly. As such, reducing the HostRecordTTL setting may result in increased traffic to the DNS servers. - -## Test failover - -Test failover of the clustered resource to validate cluster functionality. - -Take the following steps: - -1. Open [SQL Server Management Studio)](/sql/ssms/download-sql-server-management-studio-ssms) and connect to your availability group listener. -1. Expand **Always On Availability Group** in **Object Explorer**. -1. Right-click the availability group and select **Failover**. -1. Follow the wizard prompts to fail over the availability group to a secondary replica. - -Failover succeeds when the replicas switch roles and are both synchronized. - - -## Test connectivity - -To test connectivity, sign in to another virtual machine in the same virtual network. Open **SQL Server Management Studio** and connect to the availability group listener. - ->[!NOTE] ->If you need to, you can [download SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms). - -## Next steps - -Once the VNN is created, consider optimizing the [cluster settings for SQL Server VMs](hadr-cluster-best-practices.md). 
- -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Always On availability groups with SQL Server on Azure VMs](availability-group-overview.md) -- [Always On availability groups overview](/sql/database-engine/availability-groups/windows/overview-of-always-on-availability-groups-sql-server) -- [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md) - - - diff --git a/articles/azure-sql/virtual-machines/windows/azure-key-vault-integration-configure.md b/articles/azure-sql/virtual-machines/windows/azure-key-vault-integration-configure.md deleted file mode 100644 index 8cacda7f2b36d..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/azure-key-vault-integration-configure.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: Integrate Key Vault with SQL Server on Windows VMs in Azure (Resource Manager) | Microsoft Docs -description: Learn how to automate the configuration of SQL Server encryption for use with Azure Key Vault. This topic explains how to use Azure Key Vault Integration with SQL virtual machines created with Resource Manager. 
-services: virtual-machines-windows -documentationcenter: '' -author: rajeshsetlem -editor: '' -tags: azure-service-management -ms.assetid: cd66dfb1-0e9b-4fb0-a471-9deaf4ab4ab8 -ms.service: virtual-machines-sql -ms.subservice: security - -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 02/10/2022 -ms.author: rsetlem -ms.reviewer: mathoma ---- -# Configure Azure Key Vault integration for SQL Server on Azure VMs (Resource Manager) -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -There are multiple SQL Server encryption features, such as [transparent data encryption (TDE)](/sql/relational-databases/security/encryption/transparent-data-encryption), [column level encryption (CLE)](/sql/t-sql/functions/cryptographic-functions-transact-sql), and [backup encryption](/sql/relational-databases/backup-restore/backup-encryption). These forms of encryption require you to manage and store the cryptographic keys you use for encryption. The Azure Key Vault service is designed to improve the security and management of these keys in a secure and highly available location. The [SQL Server Connector](https://www.microsoft.com/download/details.aspx?id=45344) enables SQL Server to use these keys from Azure Key Vault. - -If you are running SQL Server on-premises, there are steps you can follow to [access Azure Key Vault from your on-premises SQL Server instance](/sql/relational-databases/security/encryption/extensible-key-management-using-azure-key-vault-sql-server). But for SQL Server on Azure VMs, you can save time by using the *Azure Key Vault Integration* feature. - -> [!NOTE] -> The Azure Key Vault integration is available only for the Enterprise, Developer, and Evaluation Editions of SQL Server. Starting with SQL Server 2019, Standard edition is also supported. 
- -When this feature is enabled, it automatically installs the SQL Server Connector, configures the EKM provider to access Azure Key Vault, and creates the credential to allow you to access your vault. If you looked at the steps in the previously mentioned on-premises documentation, you can see that this feature automates steps 2 and 3. The only thing you would still need to do manually is to create the key vault and keys. From there, the entire setup of your SQL Server VM is automated. Once this feature has completed this setup, you can execute Transact-SQL (T-SQL) statements to begin encrypting your databases or backups as you normally would. - -> [!NOTE] -> You can also configure Key Vault integration by using a template. For more information, see [Azure quickstart template for Azure Key Vault integration](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.compute/vm-sql-existing-keyvault-update). - - -[!INCLUDE [Prepare for Key Vault integration](../../../../includes/virtual-machines-sql-server-akv-prepare.md)] - - >[!NOTE] - > Extensible Key Management (EKM) Provider version 1.0.4.0 is installed on the SQL Server VM through the [SQL infrastructure as a service (IaaS) extension](./sql-server-iaas-agent-extension-automate-management.md). Upgrading the SQL IaaS extension will not update the provider version. Please considering manually upgrading the EKM provider version if needed (for example, when migrating to a SQL Managed Instance). - - -## Enable and configure Key Vault integration -You can enable Key Vault integration during provisioning or configure it for existing VMs. - -### New VMs -If you are provisioning a new SQL virtual machine with Resource Manager, the Azure portal provides a way to enable Azure Key Vault integration. 
- -![SQL Azure Key Vault Integration](./media/azure-key-vault-integration-configure/azure-sql-arm-akv.png) - -For a detailed walkthrough of provisioning, see [Provision a SQL virtual machine in the Azure portal](create-sql-vm-portal.md). - -### Existing VMs - -For existing SQL virtual machines, open your [SQL virtual machines resource](manage-sql-vm-portal.md#access-the-resource) and select **Security** under **Settings**. Select **Enable** to enable Azure Key Vault integration. - -![SQL Key Vault integration for existing VMs](./media/azure-key-vault-integration-configure/azure-sql-rm-akv-existing-vms.png) - -When you're finished, select the **Apply** button on the bottom of the **Security** page to save your changes. - -> [!NOTE] -> The credential name we created here will be mapped to a SQL login later. This allows the SQL login to access the key vault. - -[!INCLUDE [Key Vault integration next steps](../../../../includes/virtual-machines-sql-server-akv-next-steps.md)] diff --git a/articles/azure-sql/virtual-machines/windows/azure-storage-sql-server-backup-restore-use.md b/articles/azure-sql/virtual-machines/windows/azure-storage-sql-server-backup-restore-use.md deleted file mode 100644 index 9659d12213872..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/azure-storage-sql-server-backup-restore-use.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: How to use Azure Storage for SQL Server backup and restore | Microsoft Docs -description: Learn how to back up SQL Server to Azure Storage. Explains the benefits of backing up SQL databases to Azure Storage. 
-services: virtual-machines-windows -documentationcenter: '' -author: rajeshsetlem -tags: azure-service-management - -ms.assetid: 0db7667d-ef63-4e2b-bd4d-574802090f8b -ms.service: virtual-machines-sql -ms.subservice: backup - -ms.topic: conceptual -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 01/31/2017 -ms.author: rsetlem -ms.reviewer: mathoma ---- -# Use Azure Storage for SQL Server backup and restore -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -Starting with SQL Server 2012 SP1 CU2, you can now write back up SQL Server databases directly to Azure Blob storage. Use this functionality to back up to and restore from Azure Blob storage. Back up to the cloud offers benefits of availability, limitless geo-replicated off-site storage, and ease of migration of data to and from the cloud. You can issue `BACKUP` or `RESTORE` statements by using Transact-SQL or SMO. - -## Overview -SQL Server 2016 introduces new capabilities; you can use [file-snapshot backup](/sql/relational-databases/backup-restore/file-snapshot-backups-for-database-files-in-azure) to perform nearly instantaneous backups and incredibly quick restores. - -This topic explains why you might choose to use Azure Storage for SQL Server backups and then describes the components involved. You can use the resources provided at the end of the article to access walk-throughs and additional information to start using this service with your SQL Server backups. - -## Benefits of using Azure Blob storage for SQL Server backups -There are several challenges that you face when backing up SQL Server. These challenges include storage management, risk of storage failure, access to off-site storage, and hardware configuration. Many of these challenges are addressed by using Azure Blob storage for SQL Server backups. Consider the following benefits: - -* **Ease of use**: Storing your backups in Azure blobs can be a convenient, flexible, and easy to access off-site option. 
Creating off-site storage for your SQL Server backups can be as easy as modifying your existing scripts/jobs to use the **BACKUP TO URL** syntax. Off-site storage should typically be far enough from the production database location to prevent a single disaster that might impact both the off-site and production database locations. By choosing to [geo-replicate your Azure blobs](../../../storage/common/storage-redundancy.md), you have an extra layer of protection in the event of a disaster that could affect the whole region. -* **Backup archive**: Azure Blob storage offers a better alternative to the often used tape option to archive backups. Tape storage might require physical transportation to an off-site facility and measures to protect the media. Storing your backups in Azure Blob storage provides an instant, highly available, and a durable archiving option. -* **Managed hardware**: There is no overhead of hardware management with Azure services. Azure services manage the hardware and provide geo-replication for redundancy and protection against hardware failures. -* **Unlimited storage**: By enabling a direct backup to Azure blobs, you have access to virtually unlimited storage. Alternatively, backing up to an Azure virtual machine disk has limits based on machine size. There is a limit to the number of disks you can attach to an Azure virtual machine for backups. This limit is 16 disks for an extra large instance and fewer for smaller instances. -* **Backup availability**: Backups stored in Azure blobs are available from anywhere and at any time and can easily be accessed for restores to a SQL Server instance, without the need for database attach/detach or downloading and attaching the VHD. -* **Cost**: Pay only for the service that is used. Can be cost-effective as an off-site and backup archive option. 
See the [Azure pricing calculator](https://go.microsoft.com/fwlink/?LinkId=277060 "Pricing Calculator"), and the [Azure Pricing article](https://go.microsoft.com/fwlink/?LinkId=277059 "Pricing article") for more information. -* **Storage snapshots**: When database files are stored in an Azure blob and you are using SQL Server 2016, you can use [file-snapshot backup](/sql/relational-databases/backup-restore/file-snapshot-backups-for-database-files-in-azure) to perform nearly instantaneous backups and incredibly quick restores. - -For more details, see [SQL Server Backup and Restore with Azure Blob storage](/sql/relational-databases/backup-restore/sql-server-backup-and-restore-with-microsoft-azure-blob-storage-service). - -The following two sections introduce Azure Blob storage, including the required SQL Server components. It is important to understand the components and their interaction to successfully use backup and restore from Azure Blob storage. - -## Azure Blob storage components -The following Azure components are used when backing up to Azure Blob storage. - -| Component | Description | -| --- | --- | -| **Storage account** |The storage account is the starting point for all storage services. To access Azure Blob storage, first create an Azure Storage account. SQL Server is agnostic to the type of storage redundancy used. Backup to Page blobs and block blobs is supported for every storage redundancy (LRS\ZRS\GRS\RA-GRS\RA-GZRS\etc.). For more information about Azure Blob storage, see [How to use Azure Blob storage](https://azure.microsoft.com/develop/net/how-to-guides/blob-storage/). | -| **Container** |A container provides a grouping of a set of blobs, and can store an unlimited number of Blobs. To write a SQL Server backup to Azure Blob storage, you must have at least the root container created. | -| **Blob** |A file of any type and size. Blobs are addressable using the following URL format: `https://.blob.core.windows.net//`. 
For more information about page Blobs, see [Understanding Block and Page Blobs](/rest/api/storageservices/Understanding-Block-Blobs--Append-Blobs--and-Page-Blobs) | - -## SQL Server components -The following SQL Server components are used when backing up to Azure Blob storage. - -| Component | Description | -| --- | --- | -| **URL** |A URL specifies a Uniform Resource Identifier (URI) to a unique backup file. The URL provides the location and name of the SQL Server backup file. The URL must point to an actual blob, not just a container. If the blob does not exist, Azure creates it. If an existing blob is specified, the backup command fails, unless the `WITH FORMAT` option is specified. The following is an example of the URL you would specify in the BACKUP command: `https://.blob.core.windows.net//`.

    HTTPS is recommended but not required. | -| **Credential** |The information that is required to connect and authenticate to Azure Blob storage is stored as a credential. In order for SQL Server to write backups to an Azure Blob or restore from it, a SQL Server credential must be created. For more information, see [SQL Server Credential](/sql/t-sql/statements/create-credential-transact-sql). | - -> [!NOTE] -> SQL Server 2016 has been updated to support block blobs. Please see [Tutorial: Using Microsoft Azure Blob storage with SQL Server 2016 databases](/sql/relational-databases/tutorial-use-azure-blob-storage-service-with-sql-server-2016) for more details. -> - -## Next steps - -1. Create an Azure account if you don't already have one. If you are evaluating Azure, consider the [free trial](https://azure.microsoft.com/free/). -2. Then go through one of the following tutorials that walk you through creating a storage account and performing a restore. - - * **SQL Server 2014**: [Tutorial: SQL Server 2014 Backup and Restore to Microsoft Azure Blob storage](/previous-versions/sql/2014/relational-databases/backup-restore/sql-server-backup-to-url). - * **SQL Server 2016**: [Tutorial: Using the Microsoft Azure Blob storage with SQL Server 2016 databases](/sql/relational-databases/tutorial-use-azure-blob-storage-service-with-sql-server-2016) -3. Review additional documentation starting with [SQL Server Backup and Restore with Microsoft Azure Blob storage](/sql/relational-databases/backup-restore/sql-server-backup-and-restore-with-microsoft-azure-blob-storage-service). - -If you have any problems, review the topic [SQL Server Backup to URL Best Practices and Troubleshooting](/sql/relational-databases/backup-restore/sql-server-backup-to-url-best-practices-and-troubleshooting). - -For other SQL Server backup and restore options, see [Backup and Restore for SQL Server on Azure Virtual Machines](backup-restore.md). 
diff --git a/articles/azure-sql/virtual-machines/windows/backup-restore.md b/articles/azure-sql/virtual-machines/windows/backup-restore.md deleted file mode 100644 index fc6745f35de7f..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/backup-restore.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: Backup and restore for SQL Server on Azure VMs | Microsoft Docs -description: Describes backup and restore considerations for SQL Server databases running on Azure Virtual Machines. -services: virtual-machines-windows -documentationcenter: na -author: rajeshsetlem -editor: '' -tags: azure-resource-management -ms.assetid: 95a89072-0edf-49b5-88ed-584891c0e066 -ms.service: virtual-machines-sql -ms.subservice: backup - -ms.topic: conceptual -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 06/04/2018 -ms.author: rsetlem -ms.reviewer: mathoma ---- -# Backup and restore for SQL Server on Azure VMs -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article provides guidance on the backup and restore options available for SQL Server running on a Windows virtual machine (VM) in Azure. Azure Storage maintains three copies of every Azure VM disk to guarantee protection against data loss or physical data corruption. Thus, unlike SQL Server on-premises, you don't need to focus on hardware failures. However, you should still back up your SQL Server databases to protect against application or user errors, such as inadvertent data insertions or deletions. In this situation, it is important to be able to restore to a specific point in time. - -The first part of this article provides an overview of the available backup and restore options. This is followed by sections that provide more information on each strategy. 
- -## Backup and restore options - -The following table provides information on various backup and restore options for SQL Server on Azure VMs: - -| Strategy | SQL versions | Description | -|---|---|---| -| [Automated Backup](#automated) | 2014
    2016
    2017
    2019 | Automated Backup allows you to schedule regular backups for all databases on a SQL Server VM. Backups are stored in Azure storage for up to 30 days. Beginning with SQL Server 2016, Automated Backup v2 offers additional options such as configuring manual scheduling and the frequency of full and log backups. | -| [Azure Backup for SQL VMs](#azbackup) | 2008
    2012
    2014
    2016
    2017
    2019 | Azure Backup provides an Enterprise class backup capability for SQL Server on Azure VMs. With this service, you can centrally manage backups for multiple servers and thousands of databases. Databases can be restored to a specific point in time in the portal. It offers a customizable retention policy that can maintain backups for years. | -| [Manual backup](#manual) | All | Depending on your version of SQL Server, there are various techniques to manually backup and restore SQL Server on Azure VM. In this scenario, you are responsible for how your databases are backed up and the storage location and management of these backups. | - -The following sections describe each option in more detail. The final section of this article provides a summary in the form of a feature matrix. - -## Automated Backup - -Automated Backup provides an automatic backup service for SQL Server Standard and Enterprise editions running on a Windows VM in Azure. This service is provided by the [SQL Server IaaS Agent Extension](sql-server-iaas-agent-extension-automate-management.md), which is automatically installed on SQL Server Windows virtual machine images in the Azure portal. - -All databases are backed up to an Azure storage account that you configure. Backups can be encrypted and retained for up to 30 days. - -SQL Server 2016 and higher VMs offer more customization options with Automated Backup v2. These improvements include: - -- System database backups -- Manual backup schedule and time window -- Full and log file backup frequency - -To restore a database, you must locate the required backup file(s) in the storage account and perform a restore on your SQL VM using SQL Server Management Studio (SSMS) or Transact-SQL commands. 
- -For more information on how to configure Automated Backup for SQL VMs, see one of the following articles: - -- **SQL Server 2016/2017**: [Automated Backup v2 for Azure Virtual Machines](automated-backup.md) -- **SQL Server 2014**: [Automated Backup for SQL Server 2014 Virtual Machines](automated-backup-sql-2014.md) - -## Azure Backup for SQL VMs - -[Azure Backup](../../../backup/index.yml) provides an Enterprise class backup capability for SQL Server on Azure VMs. All backups are stored and managed in a Recovery Services vault. There are several advantages that this solution provides, especially for Enterprises: - -- **Zero-infrastructure backup**: You do not have to manage backup servers or storage locations. -- **Scale**: Protect many SQL VMs and thousands of databases. -- **Pay-As-You-Go**: This capability is a separate service provided by Azure Backup, but as with all Azure services, you only pay for what you use. -- **Central management and monitoring**: Centrally manage all of your backups, including other workloads that Azure Backup supports, from a single dashboard in Azure. -- **Policy driven backup and retention**: Create standard backup policies for regular backups. Establish retention policies to maintain backups for years. -- **Support for SQL Always On**: Detect and protect a SQL Server Always On configuration and honor the backup Availability Group backup preference. -- **15-minute Recovery Point Objective (RPO)**: Configure SQL transaction log backups up to every 15 minutes. -- **Point in time restore**: Use the portal to recover databases to a specific point in time without having to manually restore multiple full, differential, and log backups. -- **Consolidated email alerts for failures**: Configure consolidated email notifications for any failures. -- **Azure role-based access control**: Determine who can manage backup and restore operations through the portal. - -This Azure Backup solution for SQL VMs is generally available. 
For more information, see [Back up SQL Server database to Azure](../../../backup/backup-azure-sql-database.md). - -## Manual backup - -If you want to manually manage backup and restore operations on your SQL VMs, there are several options depending on the version of SQL Server you are using. For an overview of backup and restore, see one of the following articles based on your version of SQL Server: - -- [Backup and restore for SQL Server 2016 and later](/sql/relational-databases/backup-restore/back-up-and-restore-of-sql-server-databases) -- [Backup and restore for SQL Server 2014](/sql/relational-databases/backup-restore/back-up-and-restore-of-sql-server-databases?viewFallbackFrom=sql-server-2014) -- [Backup and restore for SQL Server 2012](/previous-versions/sql/sql-server-2012/ms187048(v=sql.110)) -- [Backup and restore for SQL Server 2008 R2](/previous-versions/sql/sql-server-2008-r2/ms187048(v=sql.105)) -- [Backup and restore for SQL Server 2008](/previous-versions/sql/sql-server-2008/ms187048(v=sql.100)) - -The following sections describe several manual backup and restore options in more detail. - -### Backup to attached disks - -For SQL Server on Azure VMs, you can use native backup and restore techniques using attached disks on the VM for the destination of the backup files. However, there is a limit to the number of disks you can attach to an Azure virtual machine, based on the [size of the virtual machine](../../../virtual-machines/sizes.md?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). There is also the overhead of disk management to consider. - -For an example of how to manually create a full database backup using SQL Server Management Studio (SSMS) or Transact-SQL, see [Create a Full Database Backup](/sql/relational-databases/backup-restore/create-a-full-database-backup-sql-server). 
- -### Backup to URL - -Beginning with SQL Server 2012 SP1 CU2, you can back up and restore directly to Microsoft Azure Blob storage, which is also known as backup to URL. SQL Server 2016 also introduced the following enhancements for this feature: - -| 2016 enhancement | Details | -| --- | --- | -| **Striping** |When backing up to Microsoft Azure blob storage, SQL Server 2016 supports backing up to multiple blobs to enable backing up large databases, up to a maximum of 12.8 TB. | -| **Snapshot Backup** |Through the use of Azure snapshots, SQL Server File-Snapshot Backup provides nearly instantaneous backups and rapid restores for database files stored using the Azure Blob storage service. This capability enables you to simplify your backup and restore policies. File-snapshot backup also supports point in time restore. For more information, see [Snapshot Backups for Database Files in Azure](/sql/relational-databases/backup-restore/file-snapshot-backups-for-database-files-in-azure). | - -For more information, see one of the following articles based on your version of SQL Server: - -- **SQL Server 2016/2017**: [SQL Server Backup to URL](/sql/relational-databases/backup-restore/sql-server-backup-and-restore-with-microsoft-azure-blob-storage-service) -- **SQL Server 2014**: [SQL Server 2014 Backup to URL](/sql/relational-databases/backup-restore/sql-server-backup-and-restore-with-microsoft-azure-blob-storage-service?viewFallbackFrom=sql-server-2014) -- **SQL Server 2012**: [SQL Server 2012 Backup to URL](/previous-versions/sql/sql-server-2012/jj919148(v=sql.110)) - -### Managed Backup - -Beginning with SQL Server 2014, Managed Backup automates the creation of backups to Azure storage. Behind the scenes, Managed Backup makes use of the Backup to URL feature described in the previous section of this article. Managed Backup is also the underlying feature that supports the SQL Server VM Automated Backup service. 
- -Beginning in SQL Server 2016, Managed Backup got additional options for scheduling, system database backup, and full and log backup frequency. - -For more information, see one of the following articles based on your version of SQL Server: - -- [Managed Backup to Microsoft Azure for SQL Server 2016 and later](/sql/relational-databases/backup-restore/sql-server-managed-backup-to-microsoft-azure) -- [Managed Backup to Microsoft Azure for SQL Server 2014](/sql/relational-databases/backup-restore/sql-server-managed-backup-to-microsoft-azure?viewFallbackFrom=sql-server-2014) - -## Decision matrix - -The following table summarizes the capabilities of each backup and restore option for SQL Server virtual machines in Azure. - -| Option | Automated Backup | Azure Backup for SQL | Manual backup | -|---|---|---|---| -| Requires additional Azure service | | ![Green checkmark.](./media/backup-restore/yes.png) | | -| Configure backup policy in Azure portal | ![Green checkmark.](./media/backup-restore/yes.png) | ![Green checkmark.](./media/backup-restore/yes.png) | | -| Restore databases in Azure portal | | ![Green checkmark.](./media/backup-restore/yes.png) | | -| Manage multiple servers in one dashboard | | ![Green checkmark.](./media/backup-restore/yes.png) | | -| Point-in-time restore | ![Green checkmark.](./media/backup-restore/yes.png) | ![Green checkmark.](./media/backup-restore/yes.png) | ![Green checkmark.](./media/backup-restore/yes.png) | -| 15-minute Recovery Point Objective (RPO) | ![Green checkmark.](./media/backup-restore/yes.png) | ![Green checkmark.](./media/backup-restore/yes.png) | ![Green checkmark.](./media/backup-restore/yes.png) | -| Short-term backup retention policy (days) | ![Green checkmark.](./media/backup-restore/yes.png) | ![Green checkmark.](./media/backup-restore/yes.png) | | -| Long-term backup retention policy (months, years) | | ![Green checkmark.](./media/backup-restore/yes.png) | | -| Built-in support for SQL Server Always On | | ![Green 
checkmark.](./media/backup-restore/yes.png) | | -| Backup to Azure Storage account(s) | ![Green checkmark.](./media/backup-restore/yes.png)(automatic) | ![Green checkmark.](./media/backup-restore/yes.png)(automatic) | ![Green checkmark.](./media/backup-restore/yes.png)(customer managed) | -| Management of storage and backup files | | ![Green checkmark.](./media/backup-restore/yes.png) | | -| Backup to attached disks on the VM | | | ![Green checkmark.](./media/backup-restore/yes.png) | -| Central customizable backup reports | | ![Green checkmark.](./media/backup-restore/yes.png) | | -| Consolidated email alerts for failures | | ![Green checkmark.](./media/backup-restore/yes.png) | | -| Customize monitoring based on Azure Monitor logs | | ![Green checkmark.](./media/backup-restore/yes.png) | | -| Monitor backup jobs with SSMS or Transact-SQL scripts | ![Green checkmark.](./media/backup-restore/yes.png) | ![Green checkmark.](./media/backup-restore/yes.png) | ![Green checkmark.](./media/backup-restore/yes.png) | -| Restore databases with SSMS or Transact-SQL scripts | ![Green checkmark.](./media/backup-restore/yes.png) | | ![Green checkmark.](./media/backup-restore/yes.png) | - -## Next steps - -If you are planning your deployment of SQL Server on Azure VM, you can find provisioning guidance in the following guide: [How to provision a Windows SQL Server virtual machine in the Azure portal](create-sql-vm-portal.md). - -Although backup and restore can be used to migrate your data, there are potentially easier data migration paths to SQL Server on VM. For a full discussion of migration options and recommendations, see [Migrating a Database to SQL Server on Azure VM](migrate-to-vm-from-sql-server.md). 
diff --git a/articles/azure-sql/virtual-machines/windows/business-continuity-high-availability-disaster-recovery-hadr-overview.md b/articles/azure-sql/virtual-machines/windows/business-continuity-high-availability-disaster-recovery-hadr-overview.md deleted file mode 100644 index a7436775afca4..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/business-continuity-high-availability-disaster-recovery-hadr-overview.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: High availability, disaster recovery, business continuity -description: Learn about the high availability, disaster recovery (HADR), and business continuity options available for SQL Server on Azure VMs, such as Always On availability groups, failover cluster instance, database mirroring, log shipping, and backup & restore to Azure Storage. -services: virtual-machines-windows -documentationcenter: na -author: rajeshsetlem -editor: '' -tags: azure-service-management - -ms.assetid: 53981f7e-8370-4979-b26a-93a5988d905f -ms.service: virtual-machines-sql -ms.subservice: hadr - -ms.topic: conceptual -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 06/27/2020 -ms.author: rsetlem -ms.reviewer: mathoma ---- -# Business continuity and HADR for SQL Server on Azure Virtual Machines -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -Business continuity means continuing your business in the event of a disaster, planning for recovery, and ensuring that your data is highly available. SQL Server on Azure Virtual Machines can help lower the cost of a high-availability and disaster recovery (HADR) database solution. - -Most SQL Server HADR solutions are supported on virtual machines (VMs), as both Azure-only and hybrid solutions. In an Azure-only solution, the entire HADR system runs in Azure. In a hybrid configuration, part of the solution runs in Azure and the other part runs on-premises in your organization. 
The flexibility of the Azure environment enables you to move partially or completely to Azure to satisfy the budget and HADR requirements of your SQL Server database systems. - -This article compares and contrasts the business continuity solutions available for SQL Server on Azure VMs. - -## Overview - -It's up to you to ensure that your database system has the HADR capabilities that the service-level agreement (SLA) requires. The fact that Azure provides high-availability mechanisms, such as service healing for cloud services and failure recovery detection for virtual machines, does not itself guarantee that you can meet the SLA. Although these mechanisms help protect the high availability of the virtual machine, they don't protect the availability of SQL Server running inside the VM. - -It's possible for the SQL Server instance to fail while the VM is online and healthy. Even the high-availability mechanisms provided by Azure allow for downtime of the VMs due to events like recovery from software or hardware failures and operating system upgrades. - -Geo-redundant storage (GRS) in Azure is implemented with a feature called geo-replication. GRS might not be an adequate disaster recovery solution for your databases. Because geo-replication sends data asynchronously, recent updates can be lost in a disaster. More information about geo-replication limitations is covered in the [Geo-replication support](#geo-replication-support) section. - -> [!NOTE] -> It's now possible to lift and shift both your [failover cluster instance](../../migration-guides/virtual-machines/sql-server-failover-cluster-instance-to-sql-on-azure-vm.md) and [availability group](../../migration-guides/virtual-machines/sql-server-availability-group-to-sql-on-azure-vm.md) solution to SQL Server on Azure VMs using Azure Migrate. 
- - -## Deployment architectures - -Azure supports these SQL Server technologies for business continuity: - -* [Always On availability groups](/sql/database-engine/availability-groups/windows/always-on-availability-groups-sql-server) -* [Always On failover cluster instances (FCIs)](/sql/sql-server/failover-clusters/windows/always-on-failover-cluster-instances-sql-server) -* [Log shipping](/sql/database-engine/log-shipping/about-log-shipping-sql-server) -* [SQL Server backup and restore with Azure Blob storage](/sql/relational-databases/backup-restore/sql-server-backup-and-restore-with-microsoft-azure-blob-storage-service) -* [Database mirroring](/sql/database-engine/database-mirroring/database-mirroring-sql-server) - Deprecated in SQL Server 2016 -* [Azure Site Recovery](../../../site-recovery/site-recovery-sql.md) - -You can combine the technologies to implement a SQL Server solution that has both high-availability and disaster recovery capabilities. Depending on the technology that you use, a hybrid deployment might require a VPN tunnel with the Azure virtual network. The following sections show you some example deployment architectures. - -## Azure only: High-availability solutions - -You can have a high-availability solution for SQL Server at a database level with Always On availability groups. You can also create a high-availability solution at an instance level with Always On failover cluster instances. For additional protection, you can create redundancy at both levels by creating availability groups on failover cluster instances. - -| Technology | Example architectures | -| --- | --- | -| [**Availability groups**](availability-group-overview.md) |Availability replicas running in Azure VMs in the same region provide high availability. You need to configure a domain controller VM, because Windows failover clustering requires an Active Directory domain.

    For higher redundancy and availability, the Azure VMs can be deployed in different [availability zones](../../../availability-zones/az-overview.md) as documented in the [availability group overview](availability-group-overview.md). ![Diagram that shows the "Domain Controller" above the "WSFC Cluster" made of the "Primary Replica", "Secondary Replica", and "File Share Witness".](./media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-ha-always-on.png)
    To get started, review the [availability group tutorial](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md). | -| [**Failover cluster instances**](failover-cluster-instance-overview.md) |Failover cluster instances are supported on SQL Server VMs. Because the FCI feature requires shared storage, five solutions will work with SQL Server on Azure VMs:

    - Using [Azure shared disks](failover-cluster-instance-azure-shared-disks-manually-configure.md) for Windows Server 2019. Shared managed disks are an Azure product that allows attaching a managed disk to multiple virtual machines simultaneously. VMs in the cluster can read or write to your attached disk based on the reservation chosen by the clustered application through SCSI Persistent Reservations (SCSI PR). SCSI PR is an industry-standard storage solution that's used by applications running on a storage area network (SAN) on-premises. Enabling SCSI PR on a managed disk allows you to migrate these applications to Azure as is.

    - Using [Storage Spaces Direct \(S2D\)](failover-cluster-instance-storage-spaces-direct-manually-configure.md) to provide a software-based virtual SAN for Windows Server 2016 and later.

    - Using a [Premium file share](failover-cluster-instance-premium-file-share-manually-configure.md) for Windows Server 2012 and later. Premium file shares are SSD backed, have consistently low latency, and are fully supported for use with FCI.

    - Using storage supported by a partner solution for clustering. For a specific example that uses SIOS DataKeeper, see the blog entry [Failover clustering and SIOS DataKeeper](https://azure.microsoft.com/blog/high-availability-for-a-file-share-using-wsfc-ilb-and-3rd-party-software-sios-datakeeper/).

    - Using shared block storage for a remote iSCSI target via Azure ExpressRoute. For example, NetApp Private Storage (NPS) exposes an iSCSI target via ExpressRoute with Equinix to Azure VMs.

    For shared storage and data replication solutions from Microsoft partners, contact the vendor for any issues related to accessing data on failover.

    To get started, [prepare your VM for FCI](failover-cluster-instance-prepare-vm.md)| - -## Azure only: Disaster recovery solutions -You can have a disaster recovery solution for your SQL Server databases in Azure by using availability groups, database mirroring, or backup and restore with storage blobs. - -| Technology | Example architectures | -| --- | --- | -| [**Availability groups**](availability-group-overview.md) |Availability replicas running across multiple datacenters in Azure VMs for disaster recovery. This cross-region solution helps protect against a complete site outage.
    ![Diagram that shows two regions with a "Primary Replica" and "Secondary Replica" connected by an "Asynchronous Commit".](./media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-dr-alwayson.png)
    Within a region, all replicas should be within the same cloud service and the same virtual network. Because each region will have a separate virtual network, these solutions require network-to-network connectivity. For more information, see [Configure a network-to-network connection by using the Azure portal](../../../vpn-gateway/vpn-gateway-howto-vnet-vnet-resource-manager-portal.md). For detailed instructions, see [Configure a SQL Server Always On availability group across different Azure regions](availability-group-manually-configure-multiple-regions.md).| -| **Database mirroring** |Principal and mirror and servers running in different datacenters for disaster recovery. You must deploy them by using server certificates. SQL Server database mirroring is not supported for SQL Server 2008 or SQL Server 2008 R2 on an Azure VM.
    ![Diagram that shows the "Principal" in one region connected to the "Mirror in another region with "High Performance".](./media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-dr-dbmirroring.png) | -| **Backup and restore with Azure Blob storage** |Production databases backed up directly to Blob storage in a different datacenter for disaster recovery.
    ![Diagram that shows a "Database" in one region backing up to "Blob Storage" in another region.](./media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-dr-backup-restore.png)
    For more information, see [Backup and restore for SQL Server on Azure VMs](/azure/azure-sql/virtual-machines/windows/backup-restore). | -| **Replicate and fail over SQL Server to Azure with Azure Site Recovery** |Production SQL Server instance in one Azure datacenter replicated directly to Azure Storage in a different Azure datacenter for disaster recovery.
    ![Diagram that shows a "Database" in one Azure datacenter using "ASR Replication" for disaster recovery in another datacenter. ](./media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-dr-standalone-sqlserver-asr.png)
    For more information, see [Protect SQL Server using SQL Server disaster recovery and Azure Site Recovery](../../../site-recovery/site-recovery-sql.md). | - - -## Hybrid IT: Disaster recovery solutions -You can have a disaster recovery solution for your SQL Server databases in a hybrid IT environment by using availability groups, database mirroring, log shipping, and backup and restore with Azure Blob storage. - -| Technology | Example Architectures | -| --- | --- | -| [**Availability groups**](availability-group-overview.md) |Some availability replicas running in Azure VMs and other replicas running on-premises for cross-site disaster recovery. The production site can be either on-premises or in an Azure datacenter.
    ![Availability groups](./media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-alwayson.png)
    Because all availability replicas must be in the same failover cluster, the cluster must span both networks (a multi-subnet failover cluster). This configuration requires a VPN connection between Azure and the on-premises network.

    For successful disaster recovery of your databases, you should also install a replica domain controller at the disaster recovery site. To get started, review the [availability group tutorial](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md).| -| **Database mirroring** |One partner running in an Azure VM and the other running on-premises for cross-site disaster recovery by using server certificates. Partners don't need to be in the same Active Directory domain, and no VPN connection is required.
    ![Database mirroring](./media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-dbmirroring.png)
    Another database mirroring scenario involves one partner running in an Azure VM and the other running on-premises in the same Active Directory domain for cross-site disaster recovery. A [VPN connection between the Azure virtual network and the on-premises network](../../../vpn-gateway/tutorial-site-to-site-portal.md) is required.

    For successful disaster recovery of your databases, you should also install a replica domain controller at the disaster recovery site. SQL Server database mirroring is not supported for SQL Server 2008 or SQL Server 2008 R2 on an Azure VM. | -| **Log shipping** |One server running in an Azure VM and the other running on-premises for cross-site disaster recovery. Log shipping depends on Windows file sharing, so a VPN connection between the Azure virtual network and the on-premises network is required.
    ![Log shipping](./media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-log-shipping.png)
    For successful disaster recovery of your databases, you should also install a replica domain controller at the disaster recovery site. | -| **Backup and restore with Azure Blob storage** |On-premises production databases backed up directly to Azure Blob storage for disaster recovery.
    ![Backup and restore](./media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-backup-restore.png)
    For more information, see [Backup and restore for SQL Server on Azure Virtual Machines](/azure/azure-sql/virtual-machines/windows/backup-restore). | -| **Replicate and fail over SQL Server to Azure with Azure Site Recovery** |On-premises production SQL Server instance replicated directly to Azure Storage for disaster recovery.
    ![Replicate using Azure Site Recovery](./media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-standalone-sqlserver-asr.png)
    For more information, see [Protect SQL Server using SQL Server disaster recovery and Azure Site Recovery](../../../site-recovery/site-recovery-sql.md). | - - -## Free DR replica in Azure - -If you have [Software Assurance](https://www.microsoft.com/licensing/licensing-programs/software-assurance-default?rtc=1&activetab=software-assurance-default-pivot:primaryr3), you can implement hybrid disaster recovery (DR) plans with SQL Server without incurring additional licensing costs for the passive disaster recovery instance. - -For example, you can have two free passive secondaries when all three replicas are hosted in Azure: - -![Two free passives when everything in Azure](./media/business-continuity-high-availability-disaster-recovery-hadr-overview/failover-with-primary-in-azure.png) - -Or you can configure a hybrid failover environment, with a licensed primary on-premises, one free passive for HA, one free passive for DR on-premises, and one free passive for DR in Azure: - -![Three free passives when environment is hybrid with one primary on-premises replica](./media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-with-primary-on-prem.png) - -For more information, see the [product licensing terms](https://www.microsoft.com/licensing/product-licensing/products). - -To enable this benefit, go to your [SQL Server virtual machine resource](manage-sql-vm-portal.md#access-the-resource). Select **Configure** under **Settings**, and then choose the **Disaster Recovery** option under **SQL Server License**. Select the check box to confirm that this SQL Server VM will be used as a passive replica, and then select **Apply** to save your settings. 
- -![Configure a disaster recovery replica in Azure](./media/business-continuity-high-availability-disaster-recovery-hadr-overview/dr-replica-in-portal.png) - - -## Important considerations for SQL Server HADR in Azure -Azure VMs, storage, and networking have different operational characteristics than an on-premises, non-virtualized IT infrastructure. A successful implementation of an HADR SQL Server solution in Azure requires that you understand these differences and design your solution to accommodate them. - -### High-availability nodes in an availability set -Availability sets in Azure enable you to place the high-availability nodes into separate fault domains and update domains. The Azure platform assigns an update domain and a fault domain to each virtual machine in your availability set. This configuration within a datacenter ensures that during either a planned or unplanned maintenance event, at least one virtual machine is available and meets the Azure SLA of 99.95 percent. - -To configure a high-availability setup, place all participating SQL Server virtual machines in the same availability set to avoid application or data loss during a maintenance event. Only nodes in the same cloud service can participate in the same availability set. For more information, see [Manage the availability of virtual machines](../../../virtual-machines/availability.md?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). - -### High-availability nodes in an availability zone -Availability zones are unique physical locations within an Azure region. Each zone consists of one or more datacenters equipped with independent power, cooling, and networking. The physical separation of availability zones within a region helps protect applications and data from datacenter failures by ensuring that at least one virtual machine is available and meets the Azure SLA of 99.99 percent. 
- -To configure high availability, place participating SQL Server virtual machines spread across availability zones in the region. There will be additional charges for network-to-network transfers between availability zones. For more information, see [Availability zones](../../../availability-zones/az-overview.md). - -### Network latency in hybrid IT -Deploy your HADR solution with the assumption that there might be periods of high network latency between your on-premises network and Azure. When you're deploying replicas to Azure, use asynchronous commit instead of synchronous commit for the synchronization mode. When you're deploying database mirroring servers both on-premises and in Azure, use the high-performance mode instead of the high-safety mode. - -See the [HADR configuration best practices](hadr-cluster-best-practices.md) for cluster and HADR settings that can help accommodate the cloud environment. - -### Geo-replication support -Geo-replication in Azure disks does not support the data file and log file of the same database to be stored on separate disks. GRS replicates changes on each disk independently and asynchronously. This mechanism guarantees the write order within a single disk on the geo-replicated copy, but not across geo-replicated copies of multiple disks. If you configure a database to store its data file and its log file on separate disks, the recovered disks after a disaster might contain a more up-to-date copy of the data file than the log file, which breaks the write-ahead log in SQL Server and the ACID properties (atomicity, consistency, isolation, and durability) of transactions. - -If you don't have the option to disable geo-replication on the storage account, keep all data and log files for a database on the same disk. If you must use more than one disk due to the size of the database, deploy one of the disaster recovery solutions listed earlier to ensure data redundancy. 
- -## Next steps - -Decide if an [availability group](availability-group-overview.md) or a [failover cluster instance](failover-cluster-instance-overview.md) is the best business continuity solution for your business. Then review the [best practices](hadr-cluster-best-practices.md) for configuring your environment for high availability and disaster recovery. diff --git a/articles/azure-sql/virtual-machines/windows/change-sql-server-edition.md b/articles/azure-sql/virtual-machines/windows/change-sql-server-edition.md deleted file mode 100644 index 5594368589efc..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/change-sql-server-edition.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: In-place change of SQL Server edition -description: Learn how to change the edition of your SQL Server virtual machine in Azure to downgrade to reduce cost or upgrade to enable more features. -services: virtual-machines-windows -documentationcenter: na -author: bluefooted -tags: azure-resource-manager -ms.service: virtual-machines-sql -ms.subservice: management - -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 01/14/2020 -ms.author: pamela -ms.reviewer: mathoma -ms.custom: "seo-lt-2019" - ---- -# In-place change of SQL Server edition on Azure VM -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article describes how to change the edition of SQL Server on a Windows virtual machine in Azure. - -The edition of SQL Server is determined by the product key, and is specified during the installation process using the installation media. The edition dictates what [features](/sql/sql-server/editions-and-components-of-sql-server-2017) are available in the SQL Server product. You can change the SQL Server edition with the installation media and either downgrade to reduce cost or upgrade to enable more features. 
- -Once the edition of SQL Server has been changed internally to the SQL Server VM, you must then update the edition property of SQL Server in the Azure portal for billing purposes. - -## Prerequisites - -To do an in-place change of the edition of SQL Server, you need the following: - -- An [Azure subscription](https://azure.microsoft.com/free/). -- A [SQL Server VM on Windows](./create-sql-vm-portal.md) registered with the [SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md). -- Setup media with the **desired edition** of SQL Server. Customers who have [Software Assurance](https://www.microsoft.com/licensing/licensing-programs/software-assurance-default) can obtain their installation media from the [Volume Licensing Center](https://www.microsoft.com/Licensing/servicecenter/default.aspx). Customers who don't have Software Assurance can use the setup media from an Azure Marketplace SQL Server VM image that has their desired edition (typically located in `C:\SQLServerFull`). - - -## Upgrade an edition - -> [!WARNING] -> Upgrading the edition of SQL Server will restart the service for SQL Server, along with any associated services, such as Analysis Services and R Services. - -To upgrade the edition of SQL Server, obtain the SQL Server setup media for the desired edition of SQL Server, and then do the following: - -1. Open Setup.exe from the SQL Server installation media. -1. Go to **Maintenance** and choose the **Edition Upgrade** option. - - ![Selection for upgrading the edition of SQL Server](./media/change-sql-server-edition/edition-upgrade.png) - -1. Select **Next** until you reach the **Ready to upgrade edition** page, and then select **Upgrade**. The setup window might stop responding for a few minutes while the change is taking effect. A **Complete** page will confirm that your edition upgrade is finished. -1. After the SQL Server edition is upgraded, modify the edition property of the SQL Server virtual machine in the Azure portal. 
This will update the metadata and billing associated with this VM. - - - -## Downgrade an edition - -To downgrade the edition of SQL Server, you need to completely uninstall SQL Server, and reinstall it again with the desired edition setup media. - -> [!WARNING] -> Uninstalling SQL Server might incur additional downtime. - -You can downgrade the edition of SQL Server by following these steps: - -1. Back up all databases, including the system databases. -1. Move system databases (master, model, and msdb) to a new location. -1. Completely uninstall SQL Server and all associated services. -1. Restart the virtual machine. -1. Install SQL Server by using the media with the desired edition of SQL Server. -1. Install the latest service packs and cumulative updates. -1. Replace the new system databases that were created during installation with the system databases that you previously moved to a different location. -1. After the SQL Server edition is downgraded, modify the edition property of the SQL Server virtual machine in the Azure portal. This will update the metadata and billing associated with this VM. - - - -## Change edition in portal - -Once you've changed the edition of SQL Server using the installation media, and you've registered your SQL Server VM with the [SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md), you can then use the Azure portal to modify the Edition property of the SQL Server VM for billing purposes. To do so, follow these steps: - -1. Sign in to the [Azure portal](https://portal.azure.com). -1. Go to your SQL Server virtual machine resource. -1. Under **Settings**, select **Configure**. Then select your desired edition of SQL Server from the drop-down list under **Edition**. - - ![Change edition metadata](./media/change-sql-server-edition/edition-change-in-portal.png) - -1. Review the warning that says you must change the SQL Server edition first, and that the edition property must match the SQL Server edition. -1. 
Select **Apply** to apply your edition metadata changes. - - -## Remarks - -- The edition property for the SQL Server VM must match the edition of the SQL Server instance installed for all SQL Server virtual machines, including both pay-as-you-go and bring-your-own-license types of licenses. -- If you drop your SQL Server VM resource, you will go back to the hard-coded edition setting of the image. -- The ability to change the edition is a feature of the SQL IaaS Agent extension. Deploying an Azure Marketplace image through the Azure portal automatically registers a SQL Server VM with the SQL IaaS Agent extension. However, customers who are self-installing SQL Server will need to manually [register their SQL Server VM](sql-agent-extension-manually-register-single-vm.md). -- Adding a SQL Server VM to an availability set requires re-creating the VM. Any VMs added to an availability set will go back to the default edition, and the edition will need to be modified again. - -## Next steps - -For more information, see the following articles: - -* [Overview of SQL Server on a Windows VM](sql-server-on-azure-vm-iaas-what-is-overview.md) -* [FAQ for SQL Server on a Windows VM](frequently-asked-questions-faq.yml) -* [Pricing guidance for SQL Server on a Windows VM](pricing-guidance.md) -* [What's new for SQL Server on Azure VMs](doc-changes-updates-release-notes-whats-new.md) \ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/windows/change-sql-server-version.md b/articles/azure-sql/virtual-machines/windows/change-sql-server-version.md deleted file mode 100644 index d833d6aa2796f..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/change-sql-server-version.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: In-place change of SQL Server version -description: Learn how to change the version of your SQL Server virtual machine in Azure. 
-services: virtual-machines-windows -documentationcenter: na -author: ramakoni1 -manager: ramakoni1 -tags: azure-resource-manager -ms.service: virtual-machines-sql -ms.subservice: management - -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 06/08/2020 -ms.author: RamaKoni -ms.reviewer: sqlblt, daleche, mathoma -ms.custom: "seo-lt-2019" ---- - -# In-place Change of SQL Server Version on Azure VM - -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article describes how to change the version of Microsoft SQL Server on a Windows virtual machine (VM) in Microsoft Azure. - -## Prerequisites - -To do an in-place upgrade of SQL Server, the following conditions apply: - -- The setup media of the desired version of SQL Server is required. Customers who have [Software Assurance](https://www.microsoft.com/licensing/licensing-programs/software-assurance-default) can obtain their installation media from the [Volume Licensing Center](https://www.microsoft.com/Licensing/servicecenter/default.aspx). Customers who don't have Software Assurance can use the setup media from an Azure Marketplace SQL Server VM image that has a later version of SQL Server (typically located in C:\SQLServerFull). -- Version upgrades should follow the [support upgrade paths](/sql/database-engine/install-windows/supported-version-and-edition-upgrades-version-15). - -## Planning for version change - -We recommend that you review the following items before you do the version change: - -1. Check what's new in the version that you are planning to upgrade to: - - - What's new in [SQL 2019](/sql/sql-server/what-s-new-in-sql-server-ver15) - - What's new in [SQL 2017](/sql/sql-server/what-s-new-in-sql-server-2017) - - What's new in [SQL 2016](/sql/sql-server/what-s-new-in-sql-server-2016) - - -1. 
We recommend that you check the [compatibility certification](/sql/database-engine/install-windows/compatibility-certification) for the version that you are going to change to so that you can use the database compatibility modes to minimize the effect of the upgrade. -1. You can review to the following articles to help ensure a successful outcome: - - - [Video: Modernizing SQL Server | Pam Lahoud & Pedro Lopes | 20 Years of PASS](https://www.youtube.com/watch?v=5RPkuQHcxxs&feature=youtu.be) - - [Database Experimentation Assistant for AB testing](/sql/dea/database-experimentation-assistant-overview) - - [Upgrading Databases by using the Query Tuning Assistant](/sql/relational-databases/performance/upgrade-dbcompat-using-qta) - - [Change the Database Compatibility Level and use the Query Store](/sql/database-engine/install-windows/change-the-database-compatibility-mode-and-use-the-query-store) - -## Upgrade SQL Version - -> [!WARNING] -> Upgrading the version of SQL Server will restart the service for SQL Server in addition to any associated services, such as Analysis Services and R Services. - -To upgrade the version of SQL Server, obtain the SQL Server setup media for the later version that would [support the upgrade path](/sql/database-engine/install-windows/supported-version-and-edition-upgrades-version-15) of SQL Server, and do the following steps: - -1. Back up the databases, including system (except tempdb) and user databases, before you start the process. You can also create an application-consistent VM-level backup by using Azure Backup Services. -1. Start Setup.exe from the SQL Server installation media. -1. The Installation Wizard starts the SQL Server Installation Center. To upgrade an existing instance of SQL Server, select **Installation** on the navigation pane, and then select **Upgrade from an earlier version of SQL Server**. 
- - :::image type="content" source="./media/change-sql-server-version/upgrade.png" alt-text="Selection for upgrading the version of SQL Server"::: - -1. On the **Product Key** page, select an option to indicate whether you are upgrading to a free edition of SQL Server or you have a PID key for a production version of the product. For more information, see [Editions and supported features of SQL Server 2019 (15.x)](/sql/sql-server/editions-and-components-of-sql-server-version-15) and [Supported version and edition Upgrades (SQL Server 2016)](/sql/database-engine/install-windows/supported-version-and-edition-upgrades). -1. Select **Next** until you reach the **Ready to upgrade** page, and then select **Upgrade**. The setup window might stop responding for several minutes while the change is taking effect. A **Complete** page will confirm that your upgrade is completed. For a step-by-step procedure to upgrade, see [the complete procedure](/sql/database-engine/install-windows/upgrade-sql-server-using-the-installation-wizard-setup#procedure). - - :::image type="content" source="./media/change-sql-server-version/complete-page.png" alt-text="Complete page"::: - -If you have changed the SQL Server edition in addition to changing the version, also update the edition, and refer to the **Verify Version and Edition in Portal** section to change the SQL VM instance. - - :::image type="content" source="./media/change-sql-server-version/change-portal.png" alt-text="Change version metadata"::: - -## Downgrade the version of SQL Server - -To downgrade the version of SQL Server, you have to completely uninstall SQL Server, and reinstall it again by using the desired version. This is similar to a fresh installation of SQL Server because you will not be able to restore the earlier database from a later version to the newly installed earlier version. The databases will have to be re-created from scratch. 
If you also changed the edition of SQL Server during the upgrade, change the **Edition** property of the SQL Server VM in the Azure portal to the new edition value. This updates the metadata and billing that is associated with this VM. - -> [!WARNING] -> An in-place downgrade of SQL Server is not supported. - -You can downgrade the version of SQL Server by following these steps: - -1. Make sure that you are not using any feature that is [available in the later version only](https://social.technet.microsoft.com/wiki/contents/articles/24222.find-enterprise-only-features-in-your-database.aspx). -1. Back up all databases, including system (except tempdb) and user databases. -1. Export all the necessary server-level objects (such as server triggers, roles, logins, linked servers, jobs, credentials, and certificates). -1. If you do not have scripts to re-create your user databases on the earlier version, you must script out all objects and export all data by using BCP.exe, SSIS, or DACPAC. - - Make sure that you select the correct options when you script such items as the target version, dependent objects, and advanced options. - - :::image type="content" source="./media/change-sql-server-version/scripting-options.png" alt-text="Scripting options"::: - -1. Completely uninstall SQL Server and all associated services. -1. Restart the VM. -1. Install SQL Server by using the media for the desired version of the program. -1. Install the latest service packs and cumulative updates. -1. Import all the necessary server-level objects (that were exported in Step 3). -1. Re-create all the necessary user databases from scratch (by using created scripts or the files from Step 4). - -## Verify the version and edition in the portal - -After you change the version of SQL Server, register your SQL Server VM with the [SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md) again so that you can use the Azure portal to view the version of SQL Server. 
The listed version number should now reflect the newly upgraded version and edition of your SQL Server installation. - -:::image type="content" source="./media/change-sql-server-version/verify-portal.png" alt-text="Verify version"::: - -> [!NOTE] -> If you have already registered with the SQL IaaS Agent extension, [unregister from the RP](sql-agent-extension-manually-register-single-vm.md#unregister-from-extension) and then [Register the SQL VM resource](sql-agent-extension-manually-register-single-vm.md#full-mode) again so that it detects the correct version and edition of SQL Server that is installed on the VM. This updates the metadata and billing information that is associated with this VM. - -## Remarks - -- We recommend that you initiate backups/update statistics/rebuild indexes/check consistency after the upgrade is finished. You can also check the individual database compatibility levels to make sure that they reflect your desired level. -- After SQL Server is updated on the VM, make sure that the **Edition** property of SQL Server in the Azure portal matches the installed edition number for billing. -- The ability to [change the edition](change-sql-server-edition.md#change-edition-in-portal) is a feature of the SQL IaaS Agent extension. Deploying an Azure Marketplace image through the Azure portal automatically registers a SQL Server VM with the extension. However, customers who are self-installing SQL Server will have to manually [register their SQL Server VM](sql-agent-extension-manually-register-single-vm.md). -- If you drop your SQL Server VM resource, the hard-coded edition setting of the image is restored. 
- -## Next steps - -For more information, see the following articles: - -- [Overview of SQL Server on a Windows VM](sql-server-on-azure-vm-iaas-what-is-overview.md) -- [FAQ for SQL Server on a Windows VM](frequently-asked-questions-faq.yml) -- [Pricing guidance for SQL Server on a Windows VM](pricing-guidance.md) -* [What's new for SQL Server on Azure VMs](doc-changes-updates-release-notes-whats-new.md) diff --git a/articles/azure-sql/virtual-machines/windows/create-sql-vm-portal.md b/articles/azure-sql/virtual-machines/windows/create-sql-vm-portal.md deleted file mode 100644 index bc6f27a5acdb3..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/create-sql-vm-portal.md +++ /dev/null @@ -1,290 +0,0 @@ ---- -title: Provision SQL Server on Azure VM (Azure portal) -description: This detailed guide explains available configuration options when deploying your SQL Server on Azure VM by using the Azure portal. -services: virtual-machines-windows -documentationcenter: na -author: bluefooted -tags: azure-resource-manager -ms.assetid: 1aff691f-a40a-4de2-b6a0-def1384e086e -ms.service: virtual-machines-sql -ms.subservice: deployment - -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: infrastructure-services -ms.date: 12/21/2021 -ms.author: pamela -ms.reviewer: mathoma -ms.custom: contperf-fy22q1-portal ---- -# Provision SQL Server on Azure VM (Azure portal) - -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article provides a detailed description of the available configuration options when deploying your SQL Server on Azure Virtual Machines (VMs) by using the Azure portal. For a quick guide, see the [SQL Server VM quickstart](sql-vm-create-portal-quickstart.md) instead. - - -## Prerequisites - -An Azure subscription. Create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) to get started. 
- - -## Choose Marketplace image - -Use the Azure Marketplace to choose one of several pre-configured images from the virtual machine gallery. - -The Developer edition is used in this article because it is a full-featured, free edition of SQL Server for development testing. You pay only for the cost of running the VM. However, you are free to choose any of the images to use in this walkthrough. For a description of available images, see the [SQL Server Windows Virtual Machines overview](sql-server-on-azure-vm-iaas-what-is-overview.md#payasyougo). - -Licensing costs for SQL Server are incorporated into the per-second pricing of the VM you create and varies by edition and cores. However, SQL Server Developer edition is free for development and testing, not production. Also, SQL Express is free for lightweight workloads (less than 1 GB of memory, less than 10 GB of storage). You can also bring-your-own-license (BYOL) and pay only for the VM. Those image names are prefixed with {BYOL}. For more information on these options, see [Pricing guidance for SQL Server Azure VMs](pricing-guidance.md). - -To choose an image, follow these steps: - -1. Select **Azure SQL** in the left-hand menu of the Azure portal. If **Azure SQL** is not in the list, select **All services**, then type *Azure SQL* in the search box. You can select the star next to **Azure SQL** to save it as a favorite to pin it to the left-hand navigation. - -1. Select **+ Create** to open the **Select SQL deployment option** page. Select the **Image** drop-down and then type **2019** in the SQL Server image search box. Choose a SQL Server image, such as **Free SQL Server License: SQL 2019 on Windows Server 2019** from the drop-down. Choose **Show details** for additional information about the image. - - - ![Select SQL VM image](./media/create-sql-vm-portal/select-sql-vm-image-portal.png) - -1. Select **Create**. 
- - -## Basic settings - -The **Basics** tab allows you to select the subscription, resource group, and instance details. - -Using a new resource group is helpful if you are just testing or learning about SQL Server deployments in Azure. After you finish with your test, delete the resource group to automatically delete the VM and all resources associated with that resource group. For more information about resource groups, see [Azure Resource Manager Overview](../../../active-directory-b2c/overview.md). - -On the **Basics** tab, provide the following information: - -* Under **Project Details**, make sure the correct subscription is selected. -* In the **Resource group** section, either select an existing resource group from the list or choose **Create new** to create a new resource group. A resource group is a collection of related resources in Azure (virtual machines, storage accounts, virtual networks, etc.). - - ![Subscription](./media/create-sql-vm-portal/basics-project-details.png) - -* Under **Instance details**: - - 1. Enter a unique **Virtual machine name**. - 1. Choose a location for your **Region**. - 1. For the purpose of this guide, leave **Availability options** set to _No infrastructure redundancy required_. To find out more information about availability options, see [Availability](../../../virtual-machines/availability.md). - 1. In the **Image** list, select _Free SQL Server License: SQL Server 2019 Developer on Windows Server 2019_ if it's not already selected. - 1. Choose **Standard** for **Security type**. - 1. Select **See all sizes** for the **Size** of the virtual machine and search for the **E4ds_v5** offering. This is one of the minimum recommended VM sizes for SQL Server on Azure VMs. If this is for testing purposes, be sure to clean up your resources once you're done with them to prevent any unexpected charges. 
For production workloads, see the recommended machine sizes and configuration in [Performance best practices for SQL Server in Azure Virtual Machines](./performance-guidelines-best-practices-vm-size.md). - - ![Instance details](./media/create-sql-vm-portal/basics-instance-details.png) - -> [!IMPORTANT] -> The estimated monthly cost displayed on the **Choose a size** window does not include SQL Server licensing costs. This estimate is the cost of the VM alone. For the Express and Developer editions of SQL Server, this estimate is the total estimated cost. For other editions, see the [Windows Virtual Machines pricing page](https://azure.microsoft.com/pricing/details/virtual-machines/windows/) and select your target edition of SQL Server. Also see the [Pricing guidance for SQL Server Azure VMs](pricing-guidance.md) and [Sizes for virtual machines](../../../virtual-machines/sizes.md?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). - -* Under **Administrator account**, provide a username and password. The password must be at least 12 characters long and meet the [defined complexity requirements](../../../virtual-machines/windows/faq.yml#what-are-the-password-requirements-when-creating-a-vm-). - - ![Administrator account](./media/create-sql-vm-portal/basics-administrator-account.png) - -* Under **Inbound port rules**, choose **Allow selected ports** and then select **RDP (3389)** from the drop-down. - - ![Inbound port rules](./media/create-sql-vm-portal/basics-inbound-port-rules.png) - -You also have the option to enable the [Azure Hybrid Benefit](../../../virtual-machines/windows/hybrid-use-benefit-licensing.md) to use your own SQL Server license and save on licensing cost. - - -## Disks - -On the **Disks** tab, configure your disk options. - -* Under **OS disk type**, select the type of disk you want for your OS from the drop-down. Premium is recommended for production systems but is not available for a Basic VM. 
To use a Premium SSD, change the virtual machine size. -* Under **Advanced**, select **Yes** under use **Managed Disks**. - -Microsoft recommends Managed Disks for SQL Server. Managed Disks handles storage behind the scenes. In addition, when virtual machines with Managed Disks are in the same availability set, Azure distributes the storage resources to provide appropriate redundancy. For more information, see [Azure Managed Disks Overview](../../../virtual-machines/managed-disks-overview.md). For specifics about managed disks in an availability set, see [Use managed disks for VMs in availability set](../../../virtual-machines/availability.md). - - - - -## Networking - -On the **Networking** tab, configure your networking options. - -* Create a new **virtual network** or use an existing virtual network for your SQL Server VM. Designate a **Subnet** as well. - -* Under **NIC network security group**, select either a basic security group or the advanced security group. Choosing the basic option allows you to select inbound ports for the SQL Server VM which are the same values configured on the **Basic** tab. Selecting the advanced option allows you to choose an existing network security group, or create a new one. - -* You can make other changes to network settings, or keep the default values. - -## Management - -On the **Management** tab, configure monitoring and auto-shutdown. - -* Azure enables **Boot diagnostics** by default with the same storage account designated for the VM. On this tab, you can change these settings and enable **OS guest diagnostics**. -* You can also enable **System assigned managed identity** and **auto-shutdown** on this tab. - -## SQL Server settings - -On the **SQL Server settings** tab, configure specific settings and optimizations for SQL Server. 
You can configure the following settings for SQL Server: - -- [Connectivity](#connectivity) -- [Authentication](#authentication) -- [Azure Key Vault integration](#azure-key-vault-integration) -- [Storage configuration](#storage-configuration) -- [SQL instance settings](#sql-instance-settings) -- [Automated patching](#automated-patching) -- [Automated backup](#automated-backup) -- [Machine Learning Services](#machine-learning-services) - - -### Connectivity - -Under **SQL connectivity**, specify the type of access you want to the SQL Server instance on this VM. For the purposes of this walkthrough, select **Public (internet)** to allow connections to SQL Server from machines or services on the internet. With this option selected, Azure automatically configures the firewall and the network security group to allow traffic on the port selected. - -> [!TIP] -> By default, SQL Server listens on a well-known port, **1433**. For increased security, change the port in the previous dialog to listen on a non-default port, such as 1401. If you change the port, you must connect using that port from any client tools, such as SQL Server Management Studio (SSMS). - -![SQL VM Security](./media/create-sql-vm-portal/azure-sqlvm-security.png) - -To connect to SQL Server via the internet, you also must enable SQL Server Authentication, which is described in the next section. - -If you would prefer to not enable connections to the Database Engine via the internet, choose one of the following options: - -* **Local (inside VM only)** to allow connections to SQL Server only from within the VM. -* **Private (within Virtual Network)** to allow connections to SQL Server from machines or services in the same virtual network. - -In general, improve security by choosing the most restrictive connectivity that your scenario allows. But all the options are securable through network security group (NSG) rules and SQL/Windows Authentication. You can edit the NSG after the VM is created. 
For more information, see [Security Considerations for SQL Server in Azure Virtual Machines](security-considerations-best-practices.md). - -### Authentication - -If you require SQL Server Authentication, select **Enable** under **SQL Authentication** on the **SQL Server settings** tab. - -![SQL Server Authentication](./media/create-sql-vm-portal/azure-sqlvm-authentication.png) - -> [!NOTE] -> If you plan to access SQL Server over the internet (the Public connectivity option), you must enable SQL Authentication here. Public access to the SQL Server requires SQL Authentication. - -If you enable SQL Server Authentication, specify a **Login name** and **Password**. This login name is configured as a SQL Server Authentication login and a member of the **sysadmin** fixed server role. For more information about Authentication Modes, see [Choose an Authentication Mode](/sql/relational-databases/security/choose-an-authentication-mode). - -If you prefer not to enable SQL Server Authentication, you can use the local Administrator account on the VM to connect to the SQL Server instance. - -### Azure Key Vault integration - -To store security secrets in Azure for encryption, select **SQL Server settings**, and scroll down to **Azure key vault integration**. Select **Enable** and fill in the requested information. - -![Azure Key Vault integration](./media/create-sql-vm-portal/azure-sqlvm-akv.png) - -The following table lists the parameters required to configure Azure Key Vault (AKV) Integration. - -| PARAMETER | DESCRIPTION | EXAMPLE | -| --- | --- | --- | -| **Key Vault URL** |The location of the key vault. |`https://contosokeyvault.vault.azure.net/` | -| **Principal name** |Azure Active Directory service principal name. This name is also referred to as the Client ID. |`fde2b411-33d5-4e11-af04eb07b669ccf2` | -| **Principal secret** |Azure Active Directory service principal secret. This secret is also referred to as the Client Secret. 
|`9VTJSQwzlFepD8XODnzy8n2V01Jd8dAjwm/azF1XDKM=` | -| **Credential name** |**Credential name**: AKV Integration creates a credential within SQL Server and allows the VM to access the key vault. Choose a name for this credential. |`mycred1` | - -For more information, see [Configure Azure Key Vault Integration for SQL Server on Azure VMs](azure-key-vault-integration-configure.md). - -### Storage configuration - - -On the **SQL Server settings** tab, under **Storage configuration**, select **Change configuration** to open the **Configure storage** page and specify storage requirements. You can choose to leave the values at default, or you can manually change the storage topology to suit your IOPS needs. For more information, see [storage configuration](storage-configuration.md). - -![Screenshot that highlights where you can change the storage configuration.](./media/create-sql-vm-portal/sql-vm-storage-configuration-provisioning.png) - -Under **Data storage**, choose the location for your data drive, the disk type, and the number of disks. You can also select the checkbox to store your system databases on your data drive instead of the local C:\ drive. - -![Screenshot that shows where you can configure the data files storage for your SQL VM](./media/create-sql-vm-portal/storage-configuration-data-storage.png) - -Under **Log storage**, you can choose to use the same drive as the data drive for your transaction log files, or you can choose to use a separate drive from the drop-down. You can also choose the name of the drive, the disk type, and the number of disks. - -![Screenshot that shows where you can configure the transaction log storage for your SQL VM](./media/create-sql-vm-portal/storage-configuration-log-storage.png) - -Configure your tempdb database settings under **Tempdb storage**, such as the location of the database files, as well as the number of files, initial size, and autogrowth size in MB. Currently, the max number of tempdb files. 
Currently, during deployment, the max number of tempdb files is 8, but more files can be added after the SQL Server VM is deployed. - -![Screenshot that shows where you can configure the tempdb storage for your SQL VM](./media/create-sql-vm-portal/storage-configuration-tempdb-storage.png) - -Select **OK** to save your storage configuration settings. - -### SQL instance settings - -Select **Change SQL instance settings** to modify SQL Server configuration options, such as the server collation, max degree of parallelism (MAXDOP), SQL Server min and max memory limits, and whether you want to optimize for ad-hoc workloads. - -![Screenshot that shows where you can configure the SQL Server settings for your SQL VM instance](./media/create-sql-vm-portal/sql-instance-settings.png). - -### SQL Server license - -If you're a Software Assurance customer, you can use the [Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/) to bring your own SQL Server license and save on resources. Select **Yes** to enable the Azure Hybrid Benefit, and then confirm that you have Software Assurance by selecting the checkbox. - -![SQL VM License](./media/create-sql-vm-portal/azure-sqlvm-license.png) - -If you chose a free license image, such as the developer edition, the **SQL Server license** option is grayed out. - -### Automated patching - -**Automated patching** is enabled by default. Automated patching allows Azure to automatically apply SQL Server and operating system security updates. Specify a day of the week, time, and duration for a maintenance window. Azure performs patching in this maintenance window. The maintenance window schedule uses the VM locale. If you do not want Azure to automatically patch SQL Server and the operating system, select **Disable**. 
- -![SQL VM automated patching](./media/create-sql-vm-portal/azure-sqlvm-automated-patching.png) - -For more information, see [Automated Patching for SQL Server in Azure Virtual Machines](automated-patching.md). - -### Automated backup - -Enable automatic database backups for all databases under **Automated backup**. Automated backup is disabled by default. - -When you enable SQL automated backup, you can configure the following settings: - -* Retention period for backups (up to 90 days) -* Storage account, and storage container, to use for backups -* Encryption option and password for backups -* Backup system databases -* Configure backup schedule - -To encrypt the backup, select **Enable**. Then specify the **Password**. Azure creates a certificate to encrypt the backups and uses the specified password to protect that certificate. - -Choose **Select Storage Container** to specify the container where you want to store your backups. - -By default the schedule is set automatically, but you can create your own schedule by selecting **Manual**, which allows you to configure the backup frequency, backup time window, and the log backup frequency in minutes. - -![SQL VM automated backups](./media/create-sql-vm-portal/automated-backup.png) - -For more information, see [Automated Backup for SQL Server in Azure Virtual Machines](automated-backup-sql-2014.md). - - -### Machine Learning Services - -You have the option to enable [Machine Learning Services](/sql/advanced-analytics/). This option lets you use machine learning with Python and R in SQL Server 2017. Select **Enable** on the **SQL Server Settings** window. - - -## 4. Review + create - -On the **Review + create** tab: -1. Review the summary. -1. Select **Create** to create the SQL Server, resource group, and resources specified for this VM. - -You can monitor the deployment from the Azure portal. The **Notifications** button at the top of the screen shows basic status of the deployment. 
- -> [!NOTE] -> An example of time for Azure to deploy a SQL Server VM: A test SQL Server VM provisioned to the East US region with default settings takes approximately 12 minutes to complete. You might experience faster or slower deployment times based on your region and selected settings. - -## Open the VM with Remote Desktop - -Use the following steps to connect to the SQL Server virtual machine with Remote Desktop Protocol (RDP): - -[!INCLUDE [Connect to SQL Server VM with remote desktop](../../../../includes/virtual-machines-sql-server-remote-desktop-connect.md)] - -After you connect to the SQL Server virtual machine, you can launch SQL Server Management Studio and connect with Windows Authentication using your local administrator credentials. If you enabled SQL Server Authentication, you can also connect with SQL Authentication using the SQL login and password you configured during provisioning. - -Access to the machine enables you to directly change machine and SQL Server settings based on your requirements. For example, you could configure the firewall settings or change SQL Server configuration settings. - -## Connect to SQL Server remotely - -In this walkthrough, you selected **Public** access for the virtual machine and **SQL Server Authentication**. These settings automatically configured the virtual machine to allow SQL Server connections from any client over the internet (assuming they have the correct SQL login). - -> [!NOTE] -> If you did not select Public during provisioning, then you can change your SQL connectivity settings through the portal after provisioning. For more information, see [Change your SQL connectivity settings](ways-to-connect-to-sql.md#change). - -The following sections show how to connect over the internet to your SQL Server VM instance. 
- -[!INCLUDE [Connect to SQL Server in a VM Resource Manager](../../../../includes/virtual-machines-sql-server-connection-steps-resource-manager.md)] - - > [!NOTE] - > This example uses the common port 1433. However, this value will need to be modified if a different port (such as 1401) was specified during the deployment of the SQL Server VM. - - -## Next steps - -For other information about using SQL Server in Azure, see [SQL Server on Azure Virtual Machines](sql-server-on-azure-vm-iaas-what-is-overview.md) and the [Frequently Asked Questions](frequently-asked-questions-faq.yml). \ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/windows/create-sql-vm-powershell.md b/articles/azure-sql/virtual-machines/windows/create-sql-vm-powershell.md deleted file mode 100644 index 77e18c119e762..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/create-sql-vm-powershell.md +++ /dev/null @@ -1,468 +0,0 @@ ---- -title: Guide to use Azure PowerShell to provision SQL Server on Azure VM -description: Provides steps and PowerShell commands for creating an Azure VM with SQL Server virtual machine gallery images. -services: virtual-machines-windows -documentationcenter: na -author: bluefooted -editor: '' -tags: azure-resource-manager -ms.assetid: 98d50dd8-48ad-444f-9031-5378d8270d7b -ms.service: virtual-machines-sql -ms.subservice: deployment - -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 12/21/2018 -ms.author: pamela -ms.reviewer: mathoma -ms.custom: devx-track-azurepowershell ---- -# How to use Azure PowerShell to provision SQL Server on Azure Virtual Machines - -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This guide covers options for using PowerShell to provision SQL Server on Azure Virtual Machines (VMs). For a streamlined Azure PowerShell example that relies on default values, see the [SQL VM Azure PowerShell quickstart](sql-vm-create-powershell-quickstart.md). 
- -If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. - -[!INCLUDE [updated-for-az.md](../../../../includes/updated-for-az.md)] - -## Configure your subscription - -1. Open PowerShell and establish access to your Azure account by running the **Connect-AzAccount** command. - - ```powershell - Connect-AzAccount - ``` - -1. When prompted, enter your credentials. Use the same email and password that you use to sign in to the Azure portal. - -## Define image variables - -To reuse values and simplify script creation, start by defining a number of variables. Change the parameter values as you want, but be aware of naming restrictions related to name lengths and special characters when modifying the values provided. - -### Location and resource group - -Define the data region and the resource group where you want to create the other VM resources. - -Modify as you want and then run these cmdlets to initialize these variables. - -```powershell -$Location = "SouthCentralUS" -$ResourceGroupName = "sqlvm2" -``` - -### Storage properties - -Define the storage account and the type of storage to be used by the virtual machine. - -Modify as you want, and then run the following cmdlet to initialize these variables. We recommend using [premium SSDs](../../../virtual-machines/disks-types.md#premium-ssds) for production workloads. - -```powershell -$StorageName = $ResourceGroupName + "storage" -$StorageSku = "Premium_LRS" -``` - -### Network properties - -Define the properties to be used by the network in the virtual machine. - -- Network interface -- TCP/IP allocation method -- Virtual network name -- Virtual subnet name -- Range of IP addresses for the virtual network -- Range of IP addresses for the subnet -- Public domain name label - -Modify as you want and then run this cmdlet to initialize these variables. 
- -```powershell -$InterfaceName = $ResourceGroupName + "ServerInterface" -$NsgName = $ResourceGroupName + "nsg" -$TCPIPAllocationMethod = "Dynamic" -$VNetName = $ResourceGroupName + "VNet" -$SubnetName = "Default" -$VNetAddressPrefix = "10.0.0.0/16" -$VNetSubnetAddressPrefix = "10.0.0.0/24" -$DomainName = $ResourceGroupName -``` - -### Virtual machine properties - -Define the following properties: - -- Virtual machine name -- Computer name -- Virtual machine size -- Operating system disk name for the virtual machine - -Modify as you want and then run this cmdlet to initialize these variables. - -```powershell -$VMName = $ResourceGroupName + "VM" -$ComputerName = $ResourceGroupName + "Server" -$VMSize = "Standard_DS13" -$OSDiskName = $VMName + "OSDisk" -``` - -### Choose a SQL Server image - -Use the following variables to define the SQL Server image to use for the virtual machine. - -1. First, list all of the SQL Server image offerings with the `Get-AzVMImageOffer` command. This command lists the current images that are available in the Azure portal and also older images that can only be installed with PowerShell: - - ```powershell - Get-AzVMImageOffer -Location $Location -Publisher 'MicrosoftSQLServer' - ``` - -1. For this tutorial, use the following variables to specify SQL Server 2017 on Windows Server 2016. - - ```powershell - $OfferName = "SQL2017-WS2016" - $PublisherName = "MicrosoftSQLServer" - $Version = "latest" - ``` - -1. Next, list the available editions for your offer. - - ```powershell - Get-AzVMImageSku -Location $Location -Publisher 'MicrosoftSQLServer' -Offer $OfferName | Select Skus - ``` - -1. For this tutorial, use the SQL Server 2017 Developer edition (**SQLDEV**). The Developer edition is freely licensed for testing and development, and you only pay for the cost of running the VM. 
- - ```powershell - $Sku = "SQLDEV" - ``` - -## Create a resource group - -With the Resource Manager deployment model, the first object that you create is the resource group. Use the [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) cmdlet to create an Azure resource group and its resources. Specify the variables that you previously initialized for the resource group name and location. - -Run this cmdlet to create your new resource group. - -```powershell -New-AzResourceGroup -Name $ResourceGroupName -Location $Location -``` - -## Create a storage account - -The virtual machine requires storage resources for the operating system disk and for the SQL Server data and log files. For simplicity, you'll create a single disk for both. You can attach additional disks later using the [Add-Azure Disk](/powershell/module/servicemanagement/azure.service/add-azuredisk) cmdlet to place your SQL Server data and log files on dedicated disks. Use the [New-AzStorageAccount](/powershell/module/az.storage/new-azstorageaccount) cmdlet to create a standard storage account in your new resource group. Specify the variables that you previously initialized for the storage account name, storage SKU name, and location. - -Run this cmdlet to create your new storage account. - -```powershell -$StorageAccount = New-AzStorageAccount -ResourceGroupName $ResourceGroupName ` - -Name $StorageName -SkuName $StorageSku ` - -Kind "Storage" -Location $Location -``` - -> [!TIP] -> Creating the storage account can take a few minutes. - -## Create network resources - -The virtual machine requires a number of network resources for network connectivity. - -* Each virtual machine requires a virtual network. -* A virtual network must have at least one subnet defined. -* A network interface must be defined with either a public or a private IP address. - -### Create a virtual network subnet configuration - -Start by creating a subnet configuration for your virtual network. 
For this tutorial, create a default subnet using the [New-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/new-azvirtualnetworksubnetconfig) cmdlet. Specify the variables that you previously initialized for the subnet name and address prefix. - -> [!NOTE] -> You can define additional properties of the virtual network subnet configuration using this cmdlet, but that is beyond the scope of this tutorial. - -Run this cmdlet to create your virtual subnet configuration. - -```powershell -$SubnetConfig = New-AzVirtualNetworkSubnetConfig -Name $SubnetName -AddressPrefix $VNetSubnetAddressPrefix -``` - -### Create a virtual network - -Next, create your virtual network in your new resource group using the [New-AzVirtualNetwork](/powershell/module/az.network/new-azvirtualnetwork) cmdlet. Specify the variables that you previously initialized for the name, location, and address prefix. Use the subnet configuration that you defined in the previous step. - -Run this cmdlet to create your virtual network. - -```powershell -$VNet = New-AzVirtualNetwork -Name $VNetName ` - -ResourceGroupName $ResourceGroupName -Location $Location ` - -AddressPrefix $VNetAddressPrefix -Subnet $SubnetConfig -``` - -### Create the public IP address - -Now that your virtual network is defined, you must configure an IP address for connectivity to the virtual machine. For this tutorial, create a public IP address using dynamic IP addressing to support Internet connectivity. Use the [New-AzPublicIpAddress](/powershell/module/az.network/new-azpublicipaddress) cmdlet to create the public IP address in your new resource group. Specify the variables that you previously initialized for the name, location, allocation method, and DNS domain name label. - -> [!NOTE] -> You can define additional properties of the public IP address using this cmdlet, but that is beyond the scope of this initial tutorial. 
You could also create a private address or an address with a static address, but that is also beyond the scope of this tutorial. - -Run this cmdlet to create your public IP address. - -```powershell -$PublicIp = New-AzPublicIpAddress -Name $InterfaceName ` - -ResourceGroupName $ResourceGroupName -Location $Location ` - -AllocationMethod $TCPIPAllocationMethod -DomainNameLabel $DomainName -``` - -### Create the network security group - -To secure the VM and SQL Server traffic, create a network security group. - -1. First, create a network security group rule for remote desktop (RDP) to allow RDP connections. - - ```powershell - $NsgRuleRDP = New-AzNetworkSecurityRuleConfig -Name "RDPRule" -Protocol Tcp ` - -Direction Inbound -Priority 1000 -SourceAddressPrefix * -SourcePortRange * ` - -DestinationAddressPrefix * -DestinationPortRange 3389 -Access Allow - ``` -1. Configure a network security group rule that allows traffic on TCP port 1433. Doing so enables connections to SQL Server over the internet. - - ```powershell - $NsgRuleSQL = New-AzNetworkSecurityRuleConfig -Name "MSSQLRule" -Protocol Tcp ` - -Direction Inbound -Priority 1001 -SourceAddressPrefix * -SourcePortRange * ` - -DestinationAddressPrefix * -DestinationPortRange 1433 -Access Allow - ``` - -1. Create the network security group. - - ```powershell - $Nsg = New-AzNetworkSecurityGroup -ResourceGroupName $ResourceGroupName ` - -Location $Location -Name $NsgName ` - -SecurityRules $NsgRuleRDP,$NsgRuleSQL - ``` - -### Create the network interface - -Now you're ready to create the network interface for your virtual machine. Use the [New-AzNetworkInterface](/powershell/module/az.network/new-aznetworkinterface) cmdlet to create the network interface in your new resource group. Specify the name, location, subnet, and public IP address previously defined. - -Run this cmdlet to create your network interface. 
- -```powershell -$Interface = New-AzNetworkInterface -Name $InterfaceName ` - -ResourceGroupName $ResourceGroupName -Location $Location ` - -SubnetId $VNet.Subnets[0].Id -PublicIpAddressId $PublicIp.Id ` - -NetworkSecurityGroupId $Nsg.Id -``` - -## Configure a VM object - -Now that storage and network resources are defined, you're ready to define compute resources for the virtual machine. - -- Specify the virtual machine size and various operating system properties. -- Specify the network interface that you previously created. -- Define blob storage. -- Specify the operating system disk. - -### Create the VM object - -Start by specifying the virtual machine size. For this tutorial, specify a DS13. Use the [New-AzVMConfig](/powershell/module/az.compute/new-azvmconfig) cmdlet to create a configurable virtual machine object. Specify the variables that you previously initialized for the name and size. - -Run this cmdlet to create the virtual machine object. - -```powershell -$VirtualMachine = New-AzVMConfig -VMName $VMName -VMSize $VMSize -``` - -### Create a credential object to hold the name and password for the local administrator credentials - -Before you can set the operating system properties for the virtual machine, you must supply the credentials for the local administrator account as a secure string. To accomplish this, use the [Get-Credential](/powershell/module/microsoft.powershell.security/get-credential) cmdlet. - -Run the following cmdlet. You'll need to type the VM's local administrator name and password into the PowerShell credential request window. - -```powershell -$Credential = Get-Credential -Message "Type the name and password of the local administrator account." -``` - -### Set the operating system properties for the virtual machine - -Now you're ready to set the virtual machine's operating system properties with the [Set-AzVMOperatingSystem](/powershell/module/az.compute/set-azvmoperatingsystem) cmdlet. 
- -- Set the type of operating system as Windows. -- Require the [virtual machine agent](../../../virtual-machines/extensions/agent-windows.md) to be installed. -- Specify that the cmdlet enables auto update. -- Specify the variables that you previously initialized for the virtual machine name, the computer name, and the credential. - -Run this cmdlet to set the operating system properties for your virtual machine. - -```powershell -$VirtualMachine = Set-AzVMOperatingSystem -VM $VirtualMachine ` - -Windows -ComputerName $ComputerName -Credential $Credential ` - -ProvisionVMAgent -EnableAutoUpdate -``` - -### Add the network interface to the virtual machine - -Next, use the [Add-AzVMNetworkInterface](/powershell/module/az.compute/add-azvmnetworkinterface) cmdlet to add the network interface using the variable that you defined earlier. - -Run this cmdlet to set the network interface for your virtual machine. - -```powershell -$VirtualMachine = Add-AzVMNetworkInterface -VM $VirtualMachine -Id $Interface.Id -``` - -### Set the blob storage location for the disk to be used by the virtual machine - -Next, set the blob storage location for the VM's disk with the variables that you defined earlier. - -Run this cmdlet to set the blob storage location. - -```powershell -$OSDiskUri = $StorageAccount.PrimaryEndpoints.Blob.ToString() + "vhds/" + $OSDiskName + ".vhd" -``` - -### Set the operating system disk properties for the virtual machine - -Next, set the operating system disk properties for the virtual machine using the [Set-AzVMOSDisk](/powershell/module/az.compute/set-azvmosdisk) cmdlet. - -- Specify that the operating system for the virtual machine will come from an image. -- Set caching to read only (because SQL Server is being installed on the same disk). -- Specify the variables that you previously initialized for the VM name and the operating system disk. - -Run this cmdlet to set the operating system disk properties for your virtual machine. 
- -```powershell -$VirtualMachine = Set-AzVMOSDisk -VM $VirtualMachine -Name ` - $OSDiskName -VhdUri $OSDiskUri -Caching ReadOnly -CreateOption FromImage -``` - -### Specify the platform image for the virtual machine - -The last configuration step is to specify the platform image for your virtual machine. For this tutorial, use the latest SQL Server 2016 CTP image. Use the [Set-AzVMSourceImage](/powershell/module/az.compute/set-azvmsourceimage) cmdlet to use this image with the variables that you defined earlier. - -Run this cmdlet to specify the platform image for your virtual machine. - -```powershell -$VirtualMachine = Set-AzVMSourceImage -VM $VirtualMachine ` - -PublisherName $PublisherName -Offer $OfferName ` - -Skus $Sku -Version $Version -``` - -## Create the SQL VM - -Now that you've finished the configuration steps, you're ready to create the virtual machine. Use the [New-AzVM](/powershell/module/az.compute/new-azvm) cmdlet to create the virtual machine using the variables that you defined. - -> [!TIP] -> Creating the VM can take a few minutes. - -Run this cmdlet to create your virtual machine. - -```powershell -New-AzVM -ResourceGroupName $ResourceGroupName -Location $Location -VM $VirtualMachine -``` - -The virtual machine is created. - -> [!NOTE] -> If you get an error about boot diagnostics, you can ignore it. A standard storage account is created for boot diagnostics because the specified storage account for the virtual machine's disk is a premium storage account. - -## Install the SQL Iaas Agent - -SQL Server virtual machines support automated management features with the [SQL Server IaaS Agent Extension](sql-server-iaas-agent-extension-automate-management.md). To register your SQL Server with the extension run the [New-AzSqlVM](/powershell/module/az.sqlvirtualmachine/new-azsqlvm) command after the virtual machine is created. 
Specify the license type for your SQL Server VM, choosing between either pay-as-you-go or bring-your-own-license via the [Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/). For more information about licensing, see [licensing model](licensing-model-azure-hybrid-benefit-ahb-change.md). - - - ```powershell - New-AzSqlVM -ResourceGroupName $ResourceGroupName -Name $VMName -Location $Location -LicenseType - ``` - -There are three ways to register with the extension: -- [Automatically for all current and future VMs in a subscription](sql-agent-extension-automatic-registration-all-vms.md) -- [Manually for a single VM](sql-agent-extension-manually-register-single-vm.md) -- [Manually for multiple VMs in bulk](sql-agent-extension-manually-register-vms-bulk.md) - - -## Stop or remove a VM - -If you don't need the VM to run continuously, you can avoid unnecessary charges by stopping it when not in use. The following command stops the VM but leaves it available for future use. - -```powershell -Stop-AzVM -Name $VMName -ResourceGroupName $ResourceGroupName -``` - -You can also permanently delete all resources associated with the virtual machine with the **Remove-AzResourceGroup** command. Doing so permanently deletes the virtual machine as well, so use this command with care. - -## Example script - -The following script contains the complete PowerShell script for this tutorial. It assumes that you have already set up the Azure subscription to use with the **Connect-AzAccount** and **Select-AzSubscription** commands. 
- -```powershell -# Variables - -## Global -$Location = "SouthCentralUS" -$ResourceGroupName = "sqlvm2" - -## Storage -$StorageName = $ResourceGroupName + "storage" -$StorageSku = "Premium_LRS" - -## Network -$InterfaceName = $ResourceGroupName + "ServerInterface" -$NsgName = $ResourceGroupName + "nsg" -$VNetName = $ResourceGroupName + "VNet" -$SubnetName = "Default" -$VNetAddressPrefix = "10.0.0.0/16" -$VNetSubnetAddressPrefix = "10.0.0.0/24" -$TCPIPAllocationMethod = "Dynamic" -$DomainName = $ResourceGroupName - -##Compute -$VMName = $ResourceGroupName + "VM" -$ComputerName = $ResourceGroupName + "Server" -$VMSize = "Standard_DS13" -$OSDiskName = $VMName + "OSDisk" - -##Image -$PublisherName = "MicrosoftSQLServer" -$OfferName = "SQL2017-WS2016" -$Sku = "SQLDEV" -$Version = "latest" - -# Resource Group -New-AzResourceGroup -Name $ResourceGroupName -Location $Location - -# Storage -$StorageAccount = New-AzStorageAccount -ResourceGroupName $ResourceGroupName -Name $StorageName -SkuName $StorageSku -Kind "Storage" -Location $Location - -# Network -$SubnetConfig = New-AzVirtualNetworkSubnetConfig -Name $SubnetName -AddressPrefix $VNetSubnetAddressPrefix -$VNet = New-AzVirtualNetwork -Name $VNetName -ResourceGroupName $ResourceGroupName -Location $Location -AddressPrefix $VNetAddressPrefix -Subnet $SubnetConfig -$PublicIp = New-AzPublicIpAddress -Name $InterfaceName -ResourceGroupName $ResourceGroupName -Location $Location -AllocationMethod $TCPIPAllocationMethod -DomainNameLabel $DomainName -$NsgRuleRDP = New-AzNetworkSecurityRuleConfig -Name "RDPRule" -Protocol Tcp -Direction Inbound -Priority 1000 -SourceAddressPrefix * -SourcePortRange * -DestinationAddressPrefix * -DestinationPortRange 3389 -Access Allow -$NsgRuleSQL = New-AzNetworkSecurityRuleConfig -Name "MSSQLRule" -Protocol Tcp -Direction Inbound -Priority 1001 -SourceAddressPrefix * -SourcePortRange * -DestinationAddressPrefix * -DestinationPortRange 1433 -Access Allow -$Nsg = New-AzNetworkSecurityGroup 
-ResourceGroupName $ResourceGroupName -Location $Location -Name $NsgName -SecurityRules $NsgRuleRDP,$NsgRuleSQL -$Interface = New-AzNetworkInterface -Name $InterfaceName -ResourceGroupName $ResourceGroupName -Location $Location -SubnetId $VNet.Subnets[0].Id -PublicIpAddressId $PublicIp.Id -NetworkSecurityGroupId $Nsg.Id - -# Compute -$VirtualMachine = New-AzVMConfig -VMName $VMName -VMSize $VMSize -$Credential = Get-Credential -Message "Type the name and password of the local administrator account." -$VirtualMachine = Set-AzVMOperatingSystem -VM $VirtualMachine -Windows -ComputerName $ComputerName -Credential $Credential -ProvisionVMAgent -EnableAutoUpdate #-TimeZone = $TimeZone -$VirtualMachine = Add-AzVMNetworkInterface -VM $VirtualMachine -Id $Interface.Id -$OSDiskUri = $StorageAccount.PrimaryEndpoints.Blob.ToString() + "vhds/" + $OSDiskName + ".vhd" -$VirtualMachine = Set-AzVMOSDisk -VM $VirtualMachine -Name $OSDiskName -VhdUri $OSDiskUri -Caching ReadOnly -CreateOption FromImage - -# Image -$VirtualMachine = Set-AzVMSourceImage -VM $VirtualMachine -PublisherName $PublisherName -Offer $OfferName -Skus $Sku -Version $Version - -# Create the VM in Azure -New-AzVM -ResourceGroupName $ResourceGroupName -Location $Location -VM $VirtualMachine - -# Add the SQL IaaS Extension, and choose the license type -New-AzSqlVM -ResourceGroupName $ResourceGroupName -Name $VMName -Location $Location -LicenseType -``` - -## Next steps - -After the virtual machine is created, you can: - -- Connect to the virtual machine using RDP -- Configure SQL Server settings in the portal for your VM, including: - - [Storage settings](storage-configuration.md) - - [Automated management tasks](sql-server-iaas-agent-extension-automate-management.md) -- [Configure connectivity](ways-to-connect-to-sql.md) -- Connect clients and applications to the new SQL Server instance \ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/windows/create-sql-vm-resource-manager-template.md 
b/articles/azure-sql/virtual-machines/windows/create-sql-vm-resource-manager-template.md deleted file mode 100644 index 93f4402b3551f..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/create-sql-vm-resource-manager-template.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: Create SQL Server VM using an ARM template -description: Learn how to create a SQL Server on Azure Virtual Machine (VM) by using an Azure Resource Manager template (ARM template). -author: bluefooted -ms.topic: quickstart -ms.custom: subject-armqs, devx-track-azurepowershell, mode-arm -ms.author: pamela -ms.date: 06/29/2020 -ms.service: virtual-machines-sql -ms.subservice: deployment -ms.reviewer: mathoma ---- - -# Quickstart: Create SQL Server VM using an ARM template - -Use this Azure Resource Manager template (ARM template) to deploy a SQL Server on Azure Virtual Machine (VM). - -[!INCLUDE [About Azure Resource Manager](../../../../includes/resource-manager-quickstart-introduction.md)] - -If your environment meets the prerequisites and you're familiar with using ARM templates, select the **Deploy to Azure** button. The template will open in the Azure portal. - -[![Deploy to Azure](../../../media/template-deployments/deploy-to-azure.svg)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-quickstart-templates%2Fmaster%2Fquickstarts%2Fmicrosoft.sqlvirtualmachine%2Fsql-vm-new-storage%2Fazuredeploy.json) - -## Prerequisites - -The SQL Server VM ARM template requires the following: - -- The latest version of the [Azure CLI](/cli/azure/install-azure-cli) and/or [PowerShell](/powershell/scripting/install/installing-powershell). -- A preconfigured [resource group](../../../azure-resource-manager/management/manage-resource-groups-portal.md#create-resource-groups) with a prepared [virtual network](../../../virtual-network/quick-create-portal.md) and [subnet](../../../virtual-network/virtual-network-manage-subnet.md#add-a-subnet). 
-- An Azure subscription. If you don't have one, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. - - -## Review the template - -The template used in this quickstart is from [Azure Quickstart Templates](https://azure.microsoft.com/resources/templates/sql-vm-new-storage/). - -:::code language="json" source="~/quickstart-templates/quickstarts/microsoft.sqlvirtualmachine/sql-vm-new-storage/azuredeploy.json"::: - -Five Azure resources are defined in the template: - -- [Microsoft.Network/publicIpAddresses](/azure/templates/microsoft.network/publicipaddresses): Creates a public IP address. -- [Microsoft.Network/networkSecurityGroups](/azure/templates/microsoft.network/networksecuritygroups): Creates a network security group. -- [Microsoft.Network/networkInterfaces](/azure/templates/microsoft.network/networkinterfaces): Configures the network interface. -- [Microsoft.Compute/virtualMachines](/azure/templates/microsoft.compute/virtualmachines): Creates a virtual machine in Azure. -- [Microsoft.SqlVirtualMachine/SqlVirtualMachines](/azure/templates/microsoft.sqlvirtualmachine/sqlvirtualmachines): registers the virtual machine with the SQL IaaS Agent extension. - -More SQL Server on Azure VM templates can be found in the [quickstart template gallery](https://azure.microsoft.com/resources/templates/?resourceType=Microsoft.Sqlvirtualmachine&pageNumber=1&sort=Popular). - - -## Deploy the template - -1. Select the following image to sign in to Azure and open a template. The template creates a virtual machine with the intended SQL Server version installed to it, and registered with the SQL IaaS Agent extension. - - [![Deploy to Azure](../../../media/template-deployments/deploy-to-azure.svg)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-quickstart-templates%2Fmaster%2Fquickstarts%2Fmicrosoft.sqlvirtualmachine%2Fsql-vm-new-storage%2Fazuredeploy.json) - -2. 
Select or enter the following values. - - * **Subscription**: Select an Azure subscription. - * **Resource group**: The prepared resource group for your SQL Server VM. - * **Region**: Select a region. For example, **Central US**. - * **Virtual Machine Name**: Enter a name for SQL Server virtual machine. - * **Virtual Machine Size**: Choose the appropriate size for your virtual machine from the drop-down. - * **Existing Virtual Network Name**: Enter the name of the prepared virtual network for your SQL Server VM. - * **Existing Vnet Resource Group**: Enter the resource group where your virtual network was prepared. - * **Existing Subnet Name**: The name of your prepared subnet. - * **Image Offer**: Choose the SQL Server and Windows Server image that best suits your business needs. - * **SQL Sku**: Choose the edition of SQL Server SKU that best suits your business needs. - * **Admin Username**: The username for the administrator of the virtual machine. - * **Admin Password**: The password used by the VM administrator account. - * **Storage Workload Type**: The type of storage for the workload that best matches your business. - * **Sql Data Disks Count**: The number of disks SQL Server uses for data files. - * **Data Path**: The path for the SQL Server data files. - * **Sql Log Disks Count**: The number of disks SQL Server uses for log files. - * **Log Path**: The path for the SQL Server log files. - * **Location**: The location for all of the resources, this value should remain the default of `[resourceGroup().location]`. - -3. Select **Review + create**. After the SQL Server VM has been deployed successfully, you get a notification. - -The Azure portal is used to deploy the template. In addition to the Azure portal, you can also use Azure PowerShell, the Azure CLI, and REST API. To learn other deployment methods, see [Deploy templates](../../../azure-resource-manager/templates/deploy-powershell.md). 
- -## Review deployed resources - -You can use the Azure CLI to check deployed resources. - - -```azurecli-interactive -echo "Enter the resource group where your SQL Server VM exists:" && -read resourcegroupName && -az resource list --resource-group $resourcegroupName -``` - -## Clean up resources - -When no longer needed, delete the resource group by using Azure CLI or Azure PowerShell: - -# [CLI](#tab/CLI) - -```azurecli-interactive -echo "Enter the Resource Group name:" && -read resourceGroupName && -az group delete --name $resourceGroupName && -echo "Press [ENTER] to continue ..." -``` - -# [PowerShell](#tab/PowerShell) - -```azurepowershell-interactive -$resourceGroupName = Read-Host -Prompt "Enter the Resource Group name" -Remove-AzResourceGroup -Name $resourceGroupName -Write-Host "Press [ENTER] to continue..." -``` - ---- - -## Next steps - -For a step-by-step tutorial that guides you through the process of creating a template, see: - -> [!div class="nextstepaction"] -> [ Tutorial: Create and deploy your first ARM template](../../../azure-resource-manager/templates/template-tutorial-create-first-template.md) - -For other ways to deploy a SQL Server VM, see: -- [Azure portal](create-sql-vm-portal.md) -- [PowerShell](create-sql-vm-powershell.md) - -To learn more, see [an overview of SQL Server on Azure VMs](sql-server-on-azure-vm-iaas-what-is-overview.md). diff --git a/articles/azure-sql/virtual-machines/windows/dedicated-host.md b/articles/azure-sql/virtual-machines/windows/dedicated-host.md deleted file mode 100644 index 71f79f0a2aee9..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/dedicated-host.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: Run SQL Server VM on an Azure Dedicated Host -description: Learn how to run a SQL Server VM on an Azure Dedicated Host. 
-services: virtual-machines-windows -documentationcenter: na -author: bluefooted -tags: azure-resource-manager -ms.service: virtual-machines-sql -ms.subservice: management -ms.topic: conceptual -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 08/12/2019 -ms.author: pamela -ms.reviewer: mathoma - ---- -# Run SQL Server VM on an Azure Dedicated Host -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article details the specifics of using a SQL Server virtual machine (VM) with [Azure Dedicated Host](../../../virtual-machines/dedicated-hosts.md). Additional information about Azure Dedicated Host can be found in the blog post [Introducing Azure Dedicated Host](https://azure.microsoft.com/blog/introducing-azure-dedicated-host/). - -## Overview -[Azure Dedicated Host](../../../virtual-machines/dedicated-hosts.md) is a service that provides physical servers - able to host one or more virtual machines - dedicated to one Azure subscription. Dedicated hosts are the same physical servers used in Microsoft's data centers, provided as a resource. You can provision dedicated hosts within a region, availability zone, and fault domain. Then, you can place VMs directly into your provisioned hosts, in whatever configuration best meets your needs. - -## Limitations - -- Not all VM series are supported on dedicated hosts, and VM series availability varies by region. For more information, see [Overview of Azure Dedicated Hosts](../../../virtual-machines/dedicated-hosts.md). - -## Licensing - -You can choose between two different licensing options when you place your SQL Server VM in an Azure Dedicated Host. - - - **SQL VM licensing**: This is the existing licensing option, where you pay for each SQL Server VM license individually. - - **Dedicated host licensing**: The new licensing model available for the Azure Dedicated Host, where SQL Server licenses are bundled and paid for at the host level. 
- - -Host-level options for using existing SQL Server licenses: - - SQL Server Enterprise Edition Azure Hybrid Benefit (AHB) - - Available to customers with SA or subscription. - - License all available physical cores and enjoy unlimited virtualization (up to the max vCPUs supported by the host). - - For more information about applying the AHB to Azure Dedicated Host, see [Azure Hybrid Benefit FAQ](https://azure.microsoft.com/pricing/hybrid-benefit/faq/). - - SQL Server licenses acquired before October 1 - - SQL Server Enterprise edition has both host-level and by-VM license options. - - SQL Server Standard edition has only a by-VM license option available. - - For details, see [Microsoft Product Terms](https://www.microsoft.com/licensing/product-licensing/products). - - If no SQL Server dedicated host-level option is selected, you may select SQL Server AHB at the level of individual VMs, just as you would with multi-tenant VMs. - - - -## Provisioning -Provisioning a SQL Server VM to the dedicated host is no different than any other Azure virtual machine. You can do so using [Azure PowerShell](../../../virtual-machines/windows/dedicated-hosts-powershell.md), the [Azure portal](../../../virtual-machines/dedicated-hosts-portal.md), and the [Azure CLI](../../../virtual-machines/linux/dedicated-hosts-cli.md). - -The process of adding an existing SQL Server VM to the dedicated host requires downtime, but will not affect data, and will not have data loss. Nonetheless, all databases, including system databases, should be backed up prior to the move. - -## Virtualization - -One of the benefits of a dedicated host is unlimited virtualization. For example, you can have licenses for 64 vCores, but you can configure the host to have 128 vCores, so you get double the vCores but pay only half of what you would for the SQL Server licenses. - -Since it's your host, you are eligible to set the virtualization with a 1:2 ratio. 
- -## FAQ - -**Q: How does the Azure Hybrid Benefit work for Windows Server/SQL Server licenses on Azure Dedicated Host?** - -A: Customers can use the value of their existing Windows Server and SQL Server licenses with Software Assurance, or qualifying subscription licenses, to pay a reduced rate on Azure Dedicated Host using Azure Hybrid Benefit. Windows Server Datacenter and SQL Server Enterprise Edition customers get unlimited virtualization (deploy as many Windows Server virtual machines as possible on the host subject to the physical capacity of the underlying server) when they license the entire host and use Azure Hybrid Benefit. All Windows Server and SQL Server workloads in Azure Dedicated Host are also eligible for Extended Security Updates for Windows Server and SQL Server 2008/R2 at no additional charge. - -## Next steps - -For more information, see the following articles: - -* [Overview of SQL Server on a Windows VM](sql-server-on-azure-vm-iaas-what-is-overview.md) -* [FAQ for SQL Server on a Windows VM](frequently-asked-questions-faq.yml) -* [Pricing guidance for SQL Server on a Windows VM](pricing-guidance.md) -* [What's new for SQL Server on Azure VMs](doc-changes-updates-release-notes-whats-new.md) \ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/windows/doc-changes-updates-release-notes-whats-new.md b/articles/azure-sql/virtual-machines/windows/doc-changes-updates-release-notes-whats-new.md deleted file mode 100644 index edc0d91947c65..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/doc-changes-updates-release-notes-whats-new.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: Documentation changes for SQL Server on Azure Virtual Machines -description: Learn about the new features and improvements for different releases of SQL Server on Azure Virtual Machines. 
-services: virtual-machines-windows -author: MashaMSFT -ms.author: mathoma -tags: azure-service-management -ms.assetid: 2fa5ee6b-51a6-4237-805f-518e6c57d11b -ms.service: virtual-machines-sql -ms.subservice: service-overview -ms.topic: reference -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 03/02/2022 -ms.custom: ignite-fall-2021 ---- -# Documentation changes for SQL Server on Azure Virtual Machines -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -When you deploy an Azure virtual machine (VM) with SQL Server installed on it, either manually, or through a built-in image, you can leverage Azure features to improve your experience. This article summarizes the documentation changes associated with new features and improvements in the recent releases of [SQL Server on Azure Virtual Machines (VMs)](https://azure.microsoft.com/services/virtual-machines/sql-server/). To learn more about SQL Server on Azure VMs, see the [overview](sql-server-on-azure-vm-iaas-what-is-overview.md). - -## April 2022 - - -| Changes | Details | -| --- | --- | -| **Ebdsv5-series** | The new [Ebdsv5-series](../../../virtual-machines/ebdsv5-ebsv5-series.md#ebdsv5-series) provides the highest I/O throughput-to-vCore ratio in Azure along with a memory-to-vCore ratio of 8. This series offers the best price-performance for SQL Server workloads on Azure VMs. Consider this series first for most SQL Server workloads. To learn more, see the updates in [VM sizes](performance-guidelines-best-practices-vm-size.md). | - - -## March 2022 - -| Changes | Details | -| --- | --- | -| **Security best practices** | The [SQL Server VM security best practices](security-considerations-best-practices.md) have been rewritten and refreshed! 
| - - -## January 2022 - -| Changes | Details | -| --- | --- | -| **Migrate with distributed AG** | It's now possible to migrate your database(s) from a [standalone instance](../../migration-guides/virtual-machines/sql-server-distributed-availability-group-migrate-standalone-instance.md) of SQL Server or an [entire availability group](../../migration-guides/virtual-machines/sql-server-distributed-availability-group-migrate-ag.md) over to SQL Server on Azure VMs using a distributed availability group! See the [prerequisites](../../migration-guides/virtual-machines/sql-server-distributed-availability-group-migrate-prerequisites.md) to get started. | - - - -## 2021 - -| Changes | Details | -| --- | --- | -| **Deployment configuration improvements** | It's now possible to configure the following options when deploying your SQL Server VM from an Azure Marketplace image: System database location, number of tempdb data files, collation, max degree of parallelism, min and max server memory settings, and optimize for ad hoc workloads. Review [Deploy SQL Server VM](create-sql-vm-portal.md) to learn more. | -| **Automated backup improvements** | The possible maximum automated backup retention period has changed from 30 days to 90, and you're now able to choose a specific container within the storage account. Review [automated backup](automated-backup.md) to learn more. | -| **Tempdb configuration** | You can now modify tempdb settings directly from the [SQL virtual machines](manage-sql-vm-portal.md) blade in the Azure portal, such as increasing the size, and adding data files. | -| **Eliminate need for HADR Azure Load Balancer or DNN** | Deploy your SQL Server VMs to multiple subnets to eliminate the dependency on the Azure Load Balancer or distributed network name (DNN) to route traffic to your high availability / disaster recovery (HADR) solution! 
See the [multi-subnet availability group](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md) tutorial, or [prepare SQL Server VM for FCI](failover-cluster-instance-prepare-vm.md#subnets) article to learn more. | -| **SQL Assessment** | It's now possible to assess the health of your SQL Server VM in the Azure portal using [SQL Assessment](sql-assessment-for-sql-vm.md) to surface recommendations that improve performance, and identify missing best practices configurations. This feature is currently in preview. | -| **SQL IaaS extension now supports Ubuntu** | Support has been added to [register](../linux/sql-iaas-agent-extension-register-vm-linux.md) your SQL Server VM running on Ubuntu Linux with the [SQL Server IaaS Extension](../linux/sql-server-iaas-agent-extension-linux.md) for limited functionality. | -| **SQL IaaS extension full mode no longer requires restart** | Restarting the SQL Server service is no longer necessary when registering your SQL Server VM with the [SQL IaaS Agent extension](sql-server-iaas-agent-extension-automate-management.md) in [full mode](sql-agent-extension-manually-register-single-vm.md#full-mode)! | -| **Repair SQL Server IaaS extension in portal** | It's now possible to verify the status of your SQL Server IaaS Agent extension directly from the Azure portal, and [repair](sql-agent-extension-manually-register-single-vm.md#repair-extension) it, if necessary. | -| **Security enhancements in the Azure portal** | Once you've enabled [Azure Defender for SQL](../../../security-center/defender-for-sql-usage.md), you can view Security Center recommendations in the [SQL virtual machines resource in the Azure portal](manage-sql-vm-portal.md#security-center). | -| **HADR content refresh** | We've refreshed and enhanced our high availability and disaster recovery (HADR) content! 
There's now an [Overview of the Windows Server Failover Cluster](hadr-windows-server-failover-cluster-overview.md), as well as a consolidated [how-to configure quorum](hadr-cluster-quorum-configure-how-to.md) for SQL Server VMs. Additionally, we've enhanced the [cluster best practices](hadr-cluster-best-practices.md) with more comprehensive setting recommendations adopted to the cloud.| -| **Migrate high availability to VM** | Azure Migrate brings support to lift and shift your entire high availability solution to SQL Server on Azure VMs! Bring your [availability group](../../migration-guides/virtual-machines/sql-server-availability-group-to-sql-on-azure-vm.md) or your [failover cluster instance](../../migration-guides/virtual-machines/sql-server-failover-cluster-instance-to-sql-on-azure-vm.md) to SQL Server VMs using Azure Migrate today! -| **Performance best practices refresh** | We've rewritten, refreshed, and updated the performance best practices documentation, splitting one article into a series that contain: [a checklist](performance-guidelines-best-practices-checklist.md), [VM size guidance](performance-guidelines-best-practices-vm-size.md), [Storage guidance](performance-guidelines-best-practices-storage.md), and [collecting baseline instructions](performance-guidelines-best-practices-collect-baseline.md). | - - - -## 2020 - -| Changes | Details | -| --- | --- | -| **Azure Government support** | It's now possible to register SQL Server virtual machines with the SQL IaaS Agent extension for virtual machines hosted in the [Azure Government](https://azure.microsoft.com/global-infrastructure/government/) cloud. | -| **Azure SQL family** | SQL Server on Azure Virtual Machines is now a part of the [Azure SQL family of products](../../azure-sql-iaas-vs-paas-what-is-overview.md). Check out our [new look](../index.yml)! Nothing has changed in the product, but the documentation aims to make the Azure SQL product decision easier. 
| -| **Distributed network name (DNN)** | SQL Server 2019 on Windows Server 2016+ is now previewing support for routing traffic to your failover cluster instance (FCI) by using a [distributed network name](./failover-cluster-instance-distributed-network-name-dnn-configure.md) rather than using Azure Load Balancer. This support simplifies and streamlines connecting to your high-availability (HA) solution in Azure. | -| **FCI with Azure shared disks** | It's now possible to deploy your [failover cluster instance (FCI)](failover-cluster-instance-overview.md) by using [Azure shared disks](failover-cluster-instance-azure-shared-disks-manually-configure.md). | -| **Reorganized FCI docs** | The documentation around [failover cluster instances with SQL Server on Azure VMs](failover-cluster-instance-overview.md) has been rewritten and reorganized for clarity. We've separated some of the configuration content, like the [cluster configuration best practices](hadr-cluster-best-practices.md), how to prepare a [virtual machine for a SQL Server FCI](failover-cluster-instance-prepare-vm.md), and how to configure [Azure Load Balancer](./availability-group-vnn-azure-load-balancer-configure.md). | -| **Migrate log to ultra disk** | Learn how you can [migrate your log file to an ultra disk](storage-migrate-to-ultradisk.md) to leverage high performance and low latency. | -| **Create AG using Azure PowerShell** | It's now possible to simplify the creation of an availability group by using [Azure PowerShell](availability-group-az-commandline-configure.md) as well as the Azure CLI. | -| **Configure ag in portal** | It is now possible to [configure your availability group via the Azure portal](availability-group-azure-portal-configure.md). This feature is currently in preview and being deployed so if your desired region is unavailable, check back soon. 
| -| **Automatic extension registration** | You can now enable the [Automatic registration](sql-agent-extension-automatic-registration-all-vms.md) feature to automatically register all SQL Server VMs already deployed to your subscription with the [SQL IaaS Agent extension](sql-server-iaas-agent-extension-automate-management.md). This applies to all existing VMs, and will also automatically register all SQL Server VMs added in the future. | -| **DNN for AG** | You can now configure a [distributed network name (DNN) listener)](availability-group-distributed-network-name-dnn-listener-configure.md) for SQL Server 2019 CU8 and later to replace the traditional [VNN listener](availability-group-overview.md#connectivity), negating the need for an Azure Load Balancer. | - - -## 2019 - -|Changes | Details | - --- | --- | -| **Free DR replica in Azure** | You can host a [free passive instance](business-continuity-high-availability-disaster-recovery-hadr-overview.md#free-dr-replica-in-azure) for disaster recovery in Azure for your on-premises SQL Server instance if you have [Software Assurance](https://www.microsoft.com/licensing/licensing-programs/software-assurance-default?rtc=1&activetab=software-assurance-default-pivot:primaryr3). | -| **Bulk SQL IaaS extension registration** | You can now [bulk register](sql-agent-extension-manually-register-vms-bulk.md) SQL Server virtual machines with the [SQL IaaS Agent extension](sql-server-iaas-agent-extension-automate-management.md). | -|**Performance-optimized storage configuration** | You can now [fully customize your storage configuration](storage-configuration.md#new-vms) when creating a new SQL Server VM. | -|**Premium file share for FCI** | You can now create a failover cluster instance by using a [Premium file share](failover-cluster-instance-premium-file-share-manually-configure.md) instead of the original method of [Storage Spaces Direct](failover-cluster-instance-storage-spaces-direct-manually-configure.md). 
-| **Azure Dedicated Host** | You can run your SQL Server VM on [Azure Dedicated Host](dedicated-host.md). | -| **SQL Server VM migration to a different region** | Use Azure Site Recovery to [migrate your SQL Server VM from one region to another](move-sql-vm-different-region.md). | -| **New SQL IaaS installation modes** | It's now possible to install the SQL Server IaaS extension in [lightweight mode](sql-server-iaas-agent-extension-automate-management.md) to avoid restarting the SQL Server service. | -| **SQL Server edition modification** | You can now change the [edition property](change-sql-server-edition.md) for your SQL Server VM. | -| **Changes to the SQL IaaS Agent extension** | You can [register your SQL Server VM with the SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md) by using the new SQL IaaS modes. This capability includes [Windows Server 2008](sql-server-iaas-agent-extension-automate-management.md#management-modes) images.| -| **Bring-your-own-license images using Azure Hybrid Benefit** | Bring-your-own-license images deployed from Azure Marketplace can now switch their [license type to pay-as-you-go](licensing-model-azure-hybrid-benefit-ahb-change.md#remarks).| -| **New SQL Server VM management in the Azure portal** | There's now a way to manage your SQL Server VM in the Azure portal. For more information, see [Manage SQL Server VMs in the Azure portal](manage-sql-vm-portal.md). | -| **Extended support for SQL Server 2008 and 2008 R2** | [Extend support](sql-server-2008-extend-end-of-support.md) for SQL Server 2008 and SQL Server 2008 R2 by migrating *as is* to an Azure VM. | -| **Custom image supportability** | You can now install the [SQL Server IaaS extension](sql-server-iaas-agent-extension-automate-management.md#installation) to custom OS and SQL Server images, which offers the limited functionality of [flexible licensing](licensing-model-azure-hybrid-benefit-ahb-change.md). 
When you're registering your custom image with the SQL IaaS Agent extension, specify the license type as "AHUB." Otherwise, the registration will fail. | -| **Named instance supportability** | You can now use the [SQL Server IaaS extension](sql-server-iaas-agent-extension-automate-management.md#installation) with a named instance, if the default instance has been uninstalled properly. | -| **Portal enhancement** | The Azure portal experience for deploying a SQL Server VM has been revamped to improve usability. For more information, see the brief [quickstart](sql-vm-create-portal-quickstart.md) and more thorough [how-to guide](create-sql-vm-portal.md) to deploy a SQL Server VM.| -| **Portal improvement** | It's now possible to change the licensing model for a SQL Server VM from pay-as-you-go to bring-your-own-license by using the [Azure portal](licensing-model-azure-hybrid-benefit-ahb-change.md#change-license-model).| -| **Simplification of availability group deployment to a SQL Server VM through the Azure CLI** | It's now easier than ever to deploy an availability group to a SQL Server VM in Azure. You can use the [Azure CLI](/cli/azure/sql/vm?view=azure-cli-2018-03-01-hybrid&preserve-view=true) to create the Windows failover cluster, internal load balancer, and availability group listeners, all from the command line. For more information, see [Use the Azure CLI to configure an Always On availability group for SQL Server on an Azure VM](./availability-group-az-commandline-configure.md). | - -## 2018 - -| Changes | Details | -| --- | --- | -| **New resource provider for a SQL Server cluster** | A new resource provider (Microsoft.SqlVirtualMachine/SqlVirtualMachineGroups) defines the metadata of the Windows failover cluster. Joining a SQL Server VM to *SqlVirtualMachineGroups* bootstraps the Windows Server Failover Cluster (WSFC) service and joins the VM to the cluster. 
| -| **Automated setup of an availability group deployment with Azure quickstart templates** |It's now possible to create the Windows failover cluster, join SQL Server VMs to it, create the listener, and configure the internal load balancer by using two Azure quickstart templates. For more information, see [Use Azure quickstart templates to configure an Always On availability group for SQL Server on an Azure VM](availability-group-quickstart-template-configure.md). | -| **Automatic registration to the SQL IaaS Agent extension** | SQL Server VMs deployed after this month are automatically registered with the new SQL IaaS Agent extension. SQL Server VMs deployed before this month still need to be manually registered. For more information, see [Register a SQL Server virtual machine in Azure with the SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md).| -|**New SQL IaaS Agent extension** | A new resource provider (Microsoft.SqlVirtualMachine) provides better management of your SQL Server VMs. For more information on registering your VMs, see [Register a SQL Server virtual machine in Azure with the SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md). | -|**Switch licensing model** | You can now switch between the pay-per-usage and bring-your-own-license models for your SQL Server VM by using the Azure CLI or PowerShell. For more information, see [How to change the licensing model for a SQL Server virtual machine in Azure](licensing-model-azure-hybrid-benefit-ahb-change.md). 
| - - -## Additional resources - -**Windows VMs**: - -* [Overview of SQL Server on a Windows VM](sql-server-on-azure-vm-iaas-what-is-overview.md) -* [Provision SQL Server on a Windows VM](create-sql-vm-portal.md) -* [Migrate a database to SQL Server on an Azure VM](migrate-to-vm-from-sql-server.md) -* [High availability and disaster recovery for SQL Server on Azure Virtual Machines](business-continuity-high-availability-disaster-recovery-hadr-overview.md) -* [Performance best practices for SQL Server on Azure Virtual Machines](./performance-guidelines-best-practices-checklist.md) -* [Application patterns and development strategies for SQL Server on Azure Virtual Machines](application-patterns-development-strategies.md) - -**Linux VMs**: - -* [Overview of SQL Server on a Linux VM](../linux/sql-server-on-linux-vm-what-is-iaas-overview.md) -* [Provision SQL Server on a Linux virtual machine](../linux/sql-vm-create-portal-quickstart.md) -* [FAQ (Linux)](../linux/frequently-asked-questions-faq.yml) -* [SQL Server on Linux documentation](/sql/linux/sql-server-linux-overview) diff --git a/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-azure-shared-disks-manually-configure.md b/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-azure-shared-disks-manually-configure.md deleted file mode 100644 index d4fe9bfe315d1..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-azure-shared-disks-manually-configure.md +++ /dev/null @@ -1,240 +0,0 @@ ---- -title: Create an FCI with Azure shared disks -description: "Use Azure shared disks to create a failover cluster instance (FCI) with SQL Server on Azure Virtual Machines." 
-services: virtual-machines -documentationCenter: na -author: rajeshsetlem -editor: monicar -tags: azure-service-management -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.custom: na, devx-track-azurepowershell -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.reviewer: mathoma ---- - -# Create an FCI with Azure shared disks (SQL Server on Azure VMs) -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!TIP] -> Eliminate the need for an Azure Load Balancer or distributed network name (DNN) for your failover cluster instance by creating your SQL Server VMs in [multiple subnets](failover-cluster-instance-prepare-vm.md#subnets) within the same Azure virtual network. - -This article explains how to create a failover cluster instance (FCI) by using Azure shared disks with SQL Server on Azure Virtual Machines (VMs). - -To learn more, see an overview of [FCI with SQL Server on Azure VMs](failover-cluster-instance-overview.md) and [cluster best practices](hadr-cluster-best-practices.md). - -> [!NOTE] -> It's now possible to lift and shift your failover cluster instance solution to SQL Server on Azure VMs using Azure Migrate. See [Migrate failover cluster instance](../../migration-guides/virtual-machines/sql-server-failover-cluster-instance-to-sql-on-azure-vm.md) to learn more. - -## Prerequisites - -Before you complete the instructions in this article, you should already have: - -- An Azure subscription. Get started for [free](https://azure.microsoft.com/free/). -- [Two or more prepared Windows Azure virtual machines](failover-cluster-instance-prepare-vm.md) in an availability set, or availability zones. -- An account that has permissions to create objects on both Azure virtual machines and in Active Directory. -- The latest version of [Azure PowerShell](/powershell/azure/install-az-ps). 
- -## Add Azure shared disk - -[Deploy a managed Premium SSD disk with the shared disk feature enabled](../../../virtual-machines/disks-shared-enable.md#deploy-a-premium-ssd-as-a-shared-disk). Set `maxShares` to **align with the number of cluster nodes** to make the disk shareable across all FCI nodes. - -## Attach shared disk to VMs - -Once you've deployed a shared disk with maxShares > 1, you can mount the disk to the VMs that will participate as nodes in the cluster. - -To attach the shared disk to your SQL Server VMs, follow these steps: - -1. Select the VM in the Azure portal that you will attach the shared disk to. -1. Select **Disks** in the **Settings** pane. -1. Select **Attach existing disks** to attach the shared disk to the VM. -1. Choose the shared disk from the **Disk name** drop-down. -1. Select **Save**. -1. Repeat these steps for every cluster node SQL Server VM. - -After a few moments, the shared data disk is attached to the VM and appears in the list of Data disks for that VM. - -## Initialize shared disk - -Once the shared disk is attached on all the VMs, you can initialize the disks of the VMs that will participate as nodes in the cluster. Initialize the disks on **all** of the VMs. - - -To initialize the disks for your SQL Server VM, follow these steps: - -1. Connect to one of the VMs. -2. From inside the VM, open the **Start** menu and type **diskmgmt.msc** in the search box to open the **Disk Management** console. -3. Disk Management recognizes that you have a new, uninitialized disk and the **Initialize Disk** window appears. -4. Verify the new disk is selected and then select **OK** to initialize it. -5. The new disk appears as **unallocated**. Right-click anywhere on the disk and select **New simple volume**. The **New Simple Volume Wizard** window opens. -6. Proceed through the wizard, keeping all of the defaults, and when you're done select **Finish**. -7. Close **Disk Management**. -8. 
A pop-up window appears notifying you that you need to format the new disk before you can use it. Select **Format disk**. -9. In the **Format new disk** window, check the settings, and then select **Start**. -10. A warning appears notifying you that formatting the disks erases all of the data. Select **OK**. -11. When the formatting is complete, select **OK**. -12. Repeat these steps on each SQL Server VM that will participate in the FCI. - -## Create Windows Failover Cluster - -The steps to create your Windows Server Failover cluster vary depending on if you deployed your SQL Server VMs to a single subnet, or multiple subnets. To create your cluster, follow the steps in the tutorial for either a [multi-subnet scenario](availability-group-manually-configure-tutorial-multi-subnet.md#add-failover-cluster-feature) or a [single subnet scenario](availability-group-manually-configure-tutorial-single-subnet.md#create-the-cluster). Though these tutorials are for creating an availability group, the steps to create the cluster are the same. - -## Configure quorum - -Since the disk witness is the most resilient quorum option, and the FCI solution uses Azure shared disks, it's recommended to configure a disk witness as the quorum solution. - -If you have an even number of votes in the cluster, configure the [quorum solution](hadr-cluster-quorum-configure-how-to.md) that best suits your business needs. For more information, see [Quorum with SQL Server VMs](hadr-windows-server-failover-cluster-overview.md#quorum). - -## Validate cluster - -Validate the cluster on one of the virtual machines by using the Failover Cluster Manager UI or PowerShell. - -To validate the cluster using the UI, follow these steps: - -1. Under **Server Manager**, select **Tools**, and then select **Failover Cluster Manager**. -1. Under **Failover Cluster Manager**, select **Action**, and then select **Validate Configuration**. -1. Select **Next**. -1. 
Under **Select Servers or a Cluster**, enter the names of both virtual machines. -1. Under **Testing options**, select **Run only tests I select**. -1. Select **Next**. -1. Under **Test Selection**, select all tests *except* **Storage**. -1. Select **Next**. -1. Under **Confirmation**, select **Next**. The **Validate a Configuration** wizard runs the validation tests. - - -To validate the cluster by using PowerShell, run the following script from an administrator PowerShell session on one of the virtual machines: - -```powershell -Test-Cluster -Node ("<node1>","<node2>") -Include "Inventory", "Network", "System Configuration" -``` - -## Test cluster failover - -Test the failover of your cluster. In **Failover Cluster Manager**, right-click your cluster, select **More Actions** > **Move Core Cluster Resource** > **Select node**, and then select the other node of the cluster. Move the core cluster resource to every node of the cluster, and then move it back to the primary node. Ensure you can successfully move the cluster to each node before installing SQL Server. - -:::image type="content" source="media/failover-cluster-instance-premium-file-share-manually-configure/test-cluster-failover.png" alt-text="Test cluster failover by moving the core resource to the other nodes"::: - -## Add shared disks to cluster - -Use the Failover Cluster Manager to add the attached Azure shared disks to the cluster. - -To add disks to your cluster, follow these steps: - -1. In the **Server Manager** dashboard, select **Tools**, and then select **Failover Cluster Manager**. -1. Select the cluster and expand it in the navigation pane. -1. Select **Storage** and then select **Disks**. -1. Right click **Disks** and select **Add Disk**: - - :::image type="content" source="./media/failover-cluster-instance-azure-shared-disk-manually-configure/cluster-add-disk.png" alt-text="Add Disk"::: - -1. Choose the Azure shared disk in the **Add Disks to a Cluster** window. Select **OK**. 
- - :::image type="content" source="./media/failover-cluster-instance-azure-shared-disk-manually-configure/cluster-select-shared-disk.png" alt-text="Select Disk"::: - -1. After the shared disk is added to the cluster, you will see it in the Failover Cluster Manager. - - :::image type="content" source="./media/failover-cluster-instance-azure-shared-disk-manually-configure/cluster-shared-disk.png" alt-text="Cluster Disk"::: - - - -## Create SQL Server FCI - -After you've configured the failover cluster and all cluster components, including storage, you can create the SQL Server FCI. - -1. Connect to the first virtual machine by using Remote Desktop Protocol (RDP). - -1. In **Failover Cluster Manager**, make sure that all core cluster resources are on the first virtual machine. If necessary, move the disks to that virtual machine. - -1. If the version of the operating system is Windows Server 2019 and the Windows Cluster was created using the default [**Distributed Network Name (DNN)**](https://blogs.windows.com/windows-insider/2018/08/14/announcing-windows-server-2019-insider-preview-build-17733/), then the FCI installation for SQL Server 2017 and below will fail with the error `The given key was not present in the dictionary`. - - During installation, SQL Server setup queries for the existing Virtual Network Name (VNN) and doesn't recognize the Windows Cluster DNN. The issue has been fixed in SQL Server 2019 setup. For SQL Server 2017 and below, follow these steps to avoid the installation error: - - - In Failover Cluster Manager, connect to the cluster, right-click **Roles** and select **Create Empty Role**. - - Right-click the newly created empty role, select **Add Resource** and select **Client Access Point**. - - Enter any name and complete the wizard to create the **Client Access Point**. - - After the SQL Server FCI installation completes, the role containing the temporary **Client Access Point** can be deleted. - -1. Locate the installation media. 
If the virtual machine uses one of the Azure Marketplace images, the media is located at `C:\SQLServer__Full`. - -1. Select **Setup**. - -1. In **SQL Server Installation Center**, select **Installation**. - -1. Select **New SQL Server failover cluster installation**. Follow the instructions in the wizard to install the SQL Server FCI. - -1. On the **Cluster Disk Selection** page, select all the shared disks that were attached to the VM. - - :::image type="content" source="./media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-cluster-disk-selection.png" alt-text="Cluster Disk Selection"::: - -1. On the **Cluster Network Configuration** page, the IP you provide varies depending on if your SQL Server VMs were deployed to a single subnet, or multiple subnets. - - 1. For a **single subnet environment**, provide the IP address that you plan to add to the [Azure Load Balancer](failover-cluster-instance-vnn-azure-load-balancer-configure.md) - 1. For a **multi-subnet environment**, provide the secondary IP address in the subnet of the _first_ SQL Server VM that you previously designated as the [IP address of the failover cluster instance network name](failover-cluster-instance-prepare-vm.md#assign-secondary-ip-addresses): - - :::image type="content" source="./media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-cluster-network-secondary-ip-vm-1.png" alt-text="provide the secondary IP address in the subnet of the first SQL Server VM that you previously designated as the IP address of the failover cluster instance network name"::: - -1. On the **Database Engine Configuration** page, ensure the database directories are on the Azure shared disk(s). - -1. After you complete the instructions in the wizard, setup installs the SQL Server FCI on the first node. - -1. After FCI installation succeeds on the first node, connect to the second node by using RDP. - -1. 
Open the **SQL Server Installation Center**, and then select **Installation**. - -1. Select **Add node to a SQL Server failover cluster**. Follow the instructions in the wizard to install SQL Server and add the node to the FCI. - -1. For a multi-subnet scenario, in **Cluster Network Configuration**, enter the secondary IP address in the subnet of the _second_ SQL Server VM subnet that you previously designated as the [IP address of the failover cluster instance network name](failover-cluster-instance-prepare-vm.md#assign-secondary-ip-addresses) - - :::image type="content" source="./media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-cluster-network-secondary-ip-vm-2.png" alt-text="enter the secondary IP address in the subnet of the second SQL Server VM subnet that you previously designated as the IP address of the failover cluster instance network name"::: - - After selecting **Next** in **Cluster Network Configuration**, setup shows a dialog box indicating that SQL Server Setup detected multiple subnets as in the example image. Select **Yes** to confirm. - - :::image type="content" source="./media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-multi-subnet-confirmation.png" alt-text="Multi Subnet Confirmation"::: - -1. After you complete the instructions in the wizard, setup adds the second SQL Server FCI node. - -1. Repeat these steps on any other SQL Server VMs you want to participate in the SQL Server failover cluster instance. - - ->[!NOTE] -> Azure Marketplace gallery images come with SQL Server Management Studio installed. If you didn't use a marketplace image [Download SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms). 
- - -## Register with SQL IaaS extension - -To manage your SQL Server VM from the portal, register it with the SQL IaaS Agent extension in [lightweight management mode](sql-agent-extension-manually-register-single-vm.md#lightweight-mode), currently the only mode supported with FCI and SQL Server on Azure VMs. - -Register a SQL Server VM in lightweight mode with PowerShell (-LicenseType can be `PAYG` or `AHUB`): - -```powershell-interactive -# Get the existing compute VM -$vm = Get-AzVM -Name -ResourceGroupName - -# Register SQL VM with 'Lightweight' SQL IaaS agent -New-AzSqlVM -Name $vm.Name -ResourceGroupName $vm.ResourceGroupName -Location $vm.Location ` - -LicenseType PAYG -SqlManagementType LightWeight -``` - -## Configure connectivity - -If you deployed your SQL Server VMs in multiple subnets, skip this step. If you deployed your SQL Server VMs to a single subnet, then you'll need to configure an additional component to route traffic to your FCI. You can configure a virtual network name (VNN) with an Azure Load Balancer, or a distributed network name for a failover cluster instance. [Review the differences between the two](hadr-windows-server-failover-cluster-overview.md#virtual-network-name-vnn) and then deploy either a [distributed network name](failover-cluster-instance-distributed-network-name-dnn-configure.md) or a [virtual network name and Azure Load Balancer](failover-cluster-instance-vnn-azure-load-balancer-configure.md) for your failover cluster instance. - -## Limitations - -- Azure virtual machines support Microsoft Distributed Transaction Coordinator (MSDTC) on Windows Server 2019 with storage on CSVs and a [standard load balancer](../../../load-balancer/load-balancer-overview.md). MSDTC is not supported on Windows Server 2016 and earlier. -- Only registering with the SQL IaaS Agent extension in [lightweight management mode](sql-server-iaas-agent-extension-automate-management.md#management-modes) is supported. 
- -## Next steps - -If Azure shared disks are not the appropriate FCI storage solution for you, consider creating your FCI using [premium file shares](failover-cluster-instance-premium-file-share-manually-configure.md) or [Storage Spaces Direct](failover-cluster-instance-storage-spaces-direct-manually-configure.md) instead. - - -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Failover cluster instances with SQL Server on Azure VMs](failover-cluster-instance-overview.md) -- [Failover cluster instance overview](/sql/sql-server/failover-clusters/windows/always-on-failover-cluster-instances-sql-server) -- [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md) diff --git a/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-distributed-network-name-dnn-configure.md b/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-distributed-network-name-dnn-configure.md deleted file mode 100644 index ae25c52c1e978..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-distributed-network-name-dnn-configure.md +++ /dev/null @@ -1,212 +0,0 @@ ---- -title: Configure DNN for failover cluster instance -description: Learn how to configure a distributed network name (DNN) to route traffic to your SQL Server on Azure VM failover cluster instance (FCI). 
-services: virtual-machines-windows -documentationcenter: na -author: rajeshsetlem -tags: azure-resource-manager -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.reviewer: mathoma - ---- -# Configure a DNN for failover cluster instance -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!TIP] -> Eliminate the need for a distributed network name (DNN) for failover cluster instance by creating your SQL Server VMs in multiple subnets within the same Azure virtual network. - -On Azure Virtual Machines, the distributed network name (DNN) routes traffic to the appropriate clustered resource. It provides an easier way to connect to the SQL Server failover cluster instance (FCI) than the virtual network name (VNN), without the need for an Azure Load Balancer. - -This article teaches you to configure a DNN resource to route traffic to your failover cluster instance with SQL Server on Azure VMs for high availability and disaster recovery (HADR). - - -For an alternative connectivity option, consider a [virtual network name and Azure Load Balancer](failover-cluster-instance-vnn-azure-load-balancer-configure.md) instead. - -## Overview - -The distributed network name (DNN) replaces the virtual network name (VNN) as the connection point when used with an [Always On failover cluster instance on SQL Server VMs](failover-cluster-instance-overview.md). This negates the need for an Azure Load Balancer routing traffic to the VNN, simplifying deployment, maintenance, and improving failover. - -With an FCI deployment, the VNN still exists, but the client connects to the DNN DNS name instead of the VNN name. 
- -## Prerequisites - -Before you complete the steps in this article, you should already have: - -- SQL Server starting with either [SQL Server 2019 CU8](https://support.microsoft.com/topic/cumulative-update-8-for-sql-server-2019-ed7f79d9-a3f0-a5c2-0bef-d0b7961d2d72) and later, [SQL Server 2017 CU25](https://support.microsoft.com/topic/kb5003830-cumulative-update-25-for-sql-server-2017-357b80dc-43b5-447c-b544-7503eee189e9) and later, or [SQL Server 2016 SP3](https://support.microsoft.com/topic/kb5003279-sql-server-2016-service-pack-3-release-information-46ab9543-5cf9-464d-bd63-796279591c31) and later on Windows Server 2016 and later. -- Decided that the distributed network name is the appropriate [connectivity option for your HADR solution](hadr-cluster-best-practices.md#connectivity). -- Configured your [failover cluster instances](failover-cluster-instance-overview.md). -- Installed the latest version of [PowerShell](/powershell/azure/install-az-ps). - -## Create DNN resource - -The DNN resource is created in the same cluster group as the SQL Server FCI. Use PowerShell to create the DNN resource inside the FCI cluster group. - -The following PowerShell command adds a DNN resource to the SQL Server FCI cluster group with a resource name of ``. The resource name is used to uniquely identify a resource. Use one that makes sense to you and is unique across the cluster. The resource type must be `Distributed Network Name`. - -The `-Group` value must be the name of the cluster group that corresponds to the SQL Server FCI where you want to add the distributed network name. For a default instance, the typical format is `SQL Server (MSSQLSERVER)`. 
- - -```powershell -Add-ClusterResource -Name ` --ResourceType "Distributed Network Name" -Group "" -``` - -For example, to create your DNN resource `dnn-demo` for a default SQL Server FCI, use the following PowerShell command: - -```powershell -Add-ClusterResource -Name dnn-demo ` --ResourceType "Distributed Network Name" -Group "SQL Server (MSSQLSERVER)" - -``` - -## Set cluster DNN DNS name - -Set the DNS name for the DNN resource in the cluster. The cluster then uses this value to route traffic to the node that's currently hosting the SQL Server FCI. - -Clients use the DNS name to connect to the SQL Server FCI. You can choose a unique value. Or, if you already have an existing FCI and don't want to update client connection strings, you can configure the DNN to use the current VNN that clients are already using. To do so, you need to [rename the VNN](#rename-the-vnn) before setting the DNN in DNS. - -Use this command to set the DNS name for your DNN: - -```powershell -Get-ClusterResource -Name | ` -Set-ClusterParameter -Name DnsName -Value -``` - -The `DNSName` value is what clients use to connect to the SQL Server FCI. For example, for clients to connect to `FCIDNN`, use the following PowerShell command: - -```powershell -Get-ClusterResource -Name dnn-demo | ` -Set-ClusterParameter -Name DnsName -Value FCIDNN -``` - -Clients will now enter `FCIDNN` into their connection string when connecting to the SQL Server FCI. - - > [!WARNING] - > Do not delete the current virtual network name (VNN) as it is a necessary component of the FCI infrastructure. - - -### Rename the VNN - -If you have an existing virtual network name and you want clients to continue using this value to connect to the SQL Server FCI, you must rename the current VNN to a placeholder value. After the current VNN is renamed, you can set the DNS name value for the DNN to the VNN. - -Some restrictions apply for renaming the VNN. 
For more information, see [Renaming an FCI](/sql/sql-server/failover-clusters/install/rename-a-sql-server-failover-cluster-instance). - -If using the current VNN is not necessary for your business, skip this section. After you've renamed the VNN, then [set the cluster DNN DNS name](#set-cluster-dnn-dns-name). - - -## Set DNN resource online - -After your DNN resource is appropriately named, and you've set the DNS name value in the cluster, use PowerShell to set the DNN resource online in the cluster: - -```powershell -Start-ClusterResource -Name -``` - -For example, to start your DNN resource `dnn-demo`, use the following PowerShell command: - -```powershell -Start-ClusterResource -Name dnn-demo -``` - -## Configure possible owners - -By default, the cluster binds the DNN DNS name to all the nodes in the cluster. However, nodes in the cluster that are not part of the SQL Server FCI should be excluded from the list of DNN possible owners. - -To update possible owners, follow these steps: - -1. Go to your DNN resource in Failover Cluster Manager. -1. Right-click the DNN resource and select **Properties**. - - :::image type="content" source="media/hadr-distributed-network-name-dnn-configure/fci-dnn-properties.png" alt-text="Shortcut menu for the DNN resource, with the Properties command highlighted."::: - -1. Clear the check box for any nodes that don't participate in the failover cluster instance. The list of possible owners for the DNN resource should match the list of possible owners for the SQL Server instance resource. For example, assuming that Data3 does not participate in the FCI, the following image is an example of removing Data3 from the list of possible owners for the DNN resource: - - :::image type="content" source="media/hadr-distributed-network-name-dnn-configure/clear-check-for-nodes-not-in-fci.png" alt-text="Clear the check box next to the nodes that do not participate in the FCI for possible owners of the DNN resource"::: - -1. 
Select **OK** to save your settings. - - -## Restart SQL Server instance - -Use Failover Cluster Manager to restart the SQL Server instance. Follow these steps: - -1. Go to your SQL Server resource in Failover Cluster Manager. -1. Right-click the SQL Server resource, and take it offline. -1. After all associated resources are offline, right-click the SQL Server resource and bring it online again. - -## Update connection string - -Update the connection string of any application connecting to the SQL Server FCI DNN, and include `MultiSubnetFailover=True` in the connection string. If your client does not support the MultiSubnetFailover parameter, it is not compatible with a DNN. - -The following is an example connection string for a SQL FCI DNN with the DNS name of **FCIDNN**: - -`Data Source=FCIDNN, MultiSubnetFailover=True` - -Additionally, if the DNN is not using the original VNN, SQL clients that connect to the SQL Server FCI will need to update their connection string to the DNN DNS name. To avoid this requirement, you can update the DNS name value to be the name of the VNN. But you'll need to [replace the existing VNN with a placeholder](#rename-the-vnn) first. - -## Test failover - - -Test failover of the clustered resource to validate cluster functionality. - - -To test failover, follow these steps: - -1. Connect to one of the SQL Server cluster nodes by using RDP. -1. Open **Failover Cluster Manager**. Select **Roles**. Notice which node owns the SQL Server FCI role. -1. Right-click the SQL Server FCI role. -1. Select **Move**, and then select **Best Possible Node**. - -**Failover Cluster Manager** shows the role, and its resources go offline. The resources then move and come back online in the other node. - -## Test connectivity - -To test connectivity, sign in to another virtual machine in the same virtual network. Open **SQL Server Management Studio** and connect to the SQL Server FCI by using the DNN DNS name. 
- -If you need to, you can [download SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms). - - -## Avoid IP conflict - -This is an optional step to prevent the virtual IP (VIP) address used by the FCI resource from being assigned to another resource in Azure as a duplicate. - -Although customers now use the DNN to connect to the SQL Server FCI, the virtual network name (VNN) and virtual IP cannot be deleted as they are necessary components of the FCI infrastructure. However, since there is no longer a load balancer reserving the virtual IP address in Azure, there is a risk that another resource on the virtual network will be assigned the same IP address as the virtual IP address used by the FCI. This can potentially lead to a duplicate IP conflict issue. - -Configure an APIPA address or a dedicated network adapter to reserve the IP address. - -### APIPA address - -To avoid using duplicate IP addresses, configure an APIPA address (also known as a link-local address). To do so, run the following command: - -```powershell -Get-ClusterResource "virtual IP address" | Set-ClusterParameter - -Multiple @{"Address"="169.254.1.1";"SubnetMask"="255.255.0.0";"OverrideAddressMatch"=1;"EnableDhcp"=0} -``` - -In this command, "virtual IP address" is the name of the clustered VIP address resource, and "169.254.1.1" is the APIPA address chosen for the VIP address. Choose the address that best suits your business. Set `OverrideAddressMatch=1` to allow the IP address to be on any network, including the APIPA address space. - -### Dedicated network adapter - -Alternatively, configure a network adapter in Azure to reserve the IP address used by the virtual IP address resource. However, this consumes the address in the subnet address space, and there is the additional overhead of ensuring the network adapter is not used for any other purpose. 
- -## Limitations - - -- The client connecting to the DNN listener must support the `MultiSubnetFailover=True` parameter in the connection string. -- There might be more considerations when you're working with other SQL Server features and an FCI with a DNN. For more information, see [FCI with DNN interoperability](failover-cluster-instance-dnn-interoperability.md). - -## Next steps - -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Failover cluster instances with SQL Server on Azure VMs](failover-cluster-instance-overview.md) -- [Failover cluster instance overview](/sql/sql-server/failover-clusters/windows/always-on-failover-cluster-instances-sql-server) -- [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md) - diff --git a/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-dnn-interoperability.md b/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-dnn-interoperability.md deleted file mode 100644 index b95273b05cc96..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-dnn-interoperability.md +++ /dev/null @@ -1,188 +0,0 @@ ---- -title: Feature interoperability with SQL Server FCI & DNN -description: "Learn about the additional considerations when working with certain SQL Server features and a distributed network name (DNN) resource with a failover cluster instance on SQL Server on Azure VMs. 
" -services: virtual-machines -documentationCenter: na -author: rajeshsetlem -editor: monicar -tags: azure-service-management -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.reviewer: mathoma ---- - -# Feature interoperability with SQL Server FCI & DNN -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!TIP] -> Eliminate the need for a distributed network name (DNN) for failover cluster instance by creating your SQL Server VMs in multiple subnets within the same Azure virtual network. - -There are certain SQL Server features that rely on a hard-coded virtual network name (VNN). As such, when using the distributed network name (DNN) resource with your failover cluster instance and SQL Server on Azure VMs, there are some additional considerations. - -In this article, learn how to configure the network alias when using the DNN resource, as well as which SQL Server features require additional consideration. - -## Create network alias (FCI) - -Some server-side components rely on a hard-coded VNN value, and require a network alias that maps the VNN to the DNN DNS name to function properly. -Follow the steps in [Create a server alias](/sql/database-engine/configure-windows/create-or-delete-a-server-alias-for-use-by-a-client) to create an alias that maps the VNN to the DNN DNS name. - -For a default instance, you can map the VNN to the DNN DNS name directly, such that VNN = DNN DNS name. -For example, if VNN name is `FCI1`, instance name is `MSSQLSERVER`, and the DNN is `FCI1DNN` (clients previously connected to `FCI`, and now they connect to `FCI1DNN`) then map the VNN `FCI1` to the DNN `FCI1DNN`. - -For a named instance the network alias mapping should be done for the full instance, such that `VNN\Instance` = `DNN\Instance`. 
-For example, if VNN name is `FCI1`, instance name is `instA`, and the DNN is `FCI1DNN` (clients previously connected to `FCI1\instA`, and now they connect to `FCI1DNN\instA`) then map the VNN `FCI1\instA` to the DNN `FCI1DNN\instA`. - - - -## Client drivers - -For ODBC, OLEDB, ADO.NET, JDBC, PHP, and Node.js drivers, users need to explicitly specify the DNN DNS name as the server name in the connection string. To ensure rapid connectivity upon failover, add `MultiSubnetFailover=True` to the connection string if the SQL client supports it. - -## Tools - -Users of [SQL Server Management Studio](/sql/ssms/sql-server-management-studio-ssms), [sqlcmd](/sql/tools/sqlcmd-utility), [Azure Data Studio](/sql/azure-data-studio/what-is), and [SQL Server Data Tools](/sql/ssdt/sql-server-data-tools) need to explicitly specify the DNN DNS name as the server name in the connection string. - -## Availability groups and FCI - -You can configure an Always On availability group by using a failover cluster instance as one of the replicas. In this configuration, the mirroring endpoint URL for the FCI replica needs to use the FCI DNN. Likewise, if the FCI is used as a read-only replica, the read-only routing to the FCI replica needs to use the FCI DNN. - -The format for the mirroring endpoint is: `ENDPOINT_URL = 'TCP://<DNN DNS name>:<mirroring endpoint port>'`. - -For example, if your DNN DNS name is `dnnlsnr`, and `5022` is the port of the FCI's mirroring endpoint, the Transact-SQL (T-SQL) code snippet to create the endpoint URL looks like: - -```sql -ENDPOINT_URL = 'TCP://dnnlsnr:5022' -``` - -Likewise, the format for the read-only routing URL is: `TCP://<DNN DNS name>:<read-only routing port>`. - -For example, if your DNN DNS name is `dnnlsnr`, and `1444` is the port used by the read-only target SQL Server FCI, the T-SQL code snippet to create the read-only routing URL looks like: - -```sql -READ_ONLY_ROUTING_URL = 'TCP://dnnlsnr:1444' -``` - 
For a named instance, configure a static port for the named instance and specify it in the read-only routing URL. - -## Replication - -Replication has three components: Publisher, Distributor, Subscriber. Any of these components can be a failover cluster instance. Because the FCI VNN is heavily used in replication configuration, both explicitly and implicitly, a network alias that maps the VNN to the DNN might be necessary for replication to work. - -Keep using the VNN name as the FCI name within replication, but create a network alias in the following remote situations *before you configure replication*: - -| **Replication component (FCI with DNN)** | **Remote component** | **Network alias map** | **Server with network map**| -|---------|---------|---------|-------- | -|Publisher | Distributor | Publisher VNN to Publisher DNN| Distributor| -|Distributor|Subscriber |Distributor VNN to Distributor DNN| Subscriber | -|Distributor|Publisher | Distributor VNN to Distributor DNN | Publisher| -|Subscriber| Distributor| Subscriber VNN to Subscriber DNN | Distributor| - -For example, assume you have a Publisher that's configured as an FCI using DNN in a replication topology, and the Distributor is remote. In this case, create a network alias on the Distributor server to map the Publisher VNN to the Publisher DNN: - -:::image type="content" source="media/failover-cluster-instance-dnn-interoperability/alias-in-configuration-manager.png" alt-text="Configure the DNN DNS name as the network alias using SQL Server Configuration Manager." ::: - -Use the full instance name for a named instance, like the following image example: - -:::image type="content" source="media/failover-cluster-instance-dnn-interoperability/alias-named-instance-configuration-manager.png" alt-text="Use the full instance name when configuring a network alias for a named instance." ::: - -## Database mirroring - -You can configure database mirroring with an FCI as either database mirroring partner. 
Configure it by using [Transact-SQL (T-SQL)](/sql/database-engine/database-mirroring/example-setting-up-database-mirroring-using-windows-authentication-transact-sql) rather than the SQL Server Management Studio GUI. Using T-SQL will ensure that the database mirroring endpoint is created using the DNN instead of the VNN. - -For example, if your DNN DNS name is `dnnlsnr`, and the database mirroring endpoint is 7022, the following T-SQL code snippet configures the database mirroring partner: - -```sql -ALTER DATABASE AdventureWorks - SET PARTNER = - 'TCP://dnnlsnr:7022' -GO -``` - -For client access, the **Failover Partner** property can handle database mirroring failover, but not FCI failover. - -## MSDTC - -The FCI can participate in distributed transactions coordinated by Microsoft Distributed Transaction Coordinator (MSDTC). Clustered MSDTC and local MSDTC are supported with FCI DNN. In Azure, an Azure Load Balancer is necessary for a clustered MSDTC deployment. - -> [!TIP] ->The DNN defined in the FCI does not replace the Azure Load Balancer requirement for the clustered MSDTC. - -## FileStream - -Though FileStream is supported for a database in an FCI, accessing FileStream or FileTable by using File System APIs with DNN is not supported. - -## Linked servers - -Using a linked server with an FCI DNN is supported. Either use the DNN directly to configure a linked server, or use a network alias to map the VNN to the DNN. - - -For example, to create a linked server with DNN DNS name `dnnlsnr` for named instance `insta1`, use the following Transact-SQL (T-SQL) command: - -```sql -USE [master] -GO - -EXEC master.dbo.sp_addlinkedserver - @server = N'dnnlsnr\inst1', - @srvproduct=N'SQL Server' ; -GO -``` - -Alternatively, you can create the linked server using the virtual network name (VNN) instead, but you will then need to define a network alias to map the VNN to the DNN. 
- -For example, for instance name `insta1`, VNN name `vnnname`, and DNN name `dnnlsnr`, use the following Transact-SQL (T-SQL) command to create a linked server using the VNN: - -```sql -USE [master] -GO - -EXEC master.dbo.sp_addlinkedserver - @server = N'vnnname\inst1', - @srvproduct=N'SQL Server' ; -GO - -``` - -Then, create a network alias to map `vnnname\insta1` to `dnnlsnr\insta1`. - - - -## Frequently asked questions - - -- Which SQL Server version brings DNN support? - - SQL Server 2019 CU2 and later. - -- What is the expected failover time when DNN is used? - - For DNN, the failover time will be just the FCI failover time, without any time added (like probe time when you're using Azure Load Balancer). - -- Is there any version requirement for SQL clients to support DNN with OLEDB and ODBC? - - We recommend `MultiSubnetFailover=True` connection string support for DNN. It's available starting with SQL Server 2012 (11.x). - -- Are any SQL Server configuration changes required for me to use DNN? - - SQL Server does not require any configuration change to use DNN, but some SQL Server features might require more consideration. - -- Does DNN support multiple-subnet clusters? - - Yes. The cluster binds the DNN in DNS with the physical IP addresses of all nodes in the cluster regardless of the subnet. The SQL client tries all IP addresses of the DNS name regardless of the subnet. 
- - - -## Next steps - -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Failover cluster instances with SQL Server on Azure VMs](failover-cluster-instance-overview.md) -- [Failover cluster instance overview](/sql/sql-server/failover-clusters/windows/always-on-failover-cluster-instances-sql-server) -- [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md) - diff --git a/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-overview.md b/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-overview.md deleted file mode 100644 index c8c62f9c414dc..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-overview.md +++ /dev/null @@ -1,191 +0,0 @@ ---- -title: Failover cluster instances -description: "Learn about failover cluster instances (FCIs) with SQL Server on Azure Virtual Machines." -services: virtual-machines -documentationCenter: na -author: rajeshsetlem -editor: monicar -tags: azure-service-management -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.topic: overview -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem - ---- - -# Failover cluster instances with SQL Server on Azure Virtual Machines -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article introduces feature differences when you're working with failover cluster instances (FCI) for SQL Server on Azure Virtual Machines (VMs). - -To get started, [prepare your vm](failover-cluster-instance-prepare-vm.md). - -## Overview - -SQL Server on Azure VMs uses [Windows Server Failover Clustering (WSFC)](hadr-windows-server-failover-cluster-overview.md) functionality to provide local high availability through redundancy at the server-instance level: a failover cluster instance. 
An FCI is a single instance of SQL Server that's installed across WSFC (or simply the cluster) nodes and, possibly, across multiple subnets. On the network, an FCI appears to be a single instance of SQL Server running on a single computer. But the FCI provides failover from one WSFC node to another if the current node becomes unavailable. - -The rest of the article focuses on the differences for failover cluster instances when they're used with SQL Server on Azure VMs. To learn more about the failover clustering technology, see: - -- [Windows cluster technologies](/windows-server/failover-clustering/failover-clustering-overview) -- [SQL Server failover cluster instances](/sql/sql-server/failover-clusters/windows/always-on-failover-cluster-instances-sql-server) - -> [!NOTE] -> It's now possible to lift and shift your failover cluster instance solution to SQL Server on Azure VMs using Azure Migrate. See [Migrate failover cluster instance](../../migration-guides/virtual-machines/sql-server-failover-cluster-instance-to-sql-on-azure-vm.md) to learn more. - -## Quorum - -Failover cluster instances with SQL Server on Azure Virtual Machines support using a disk witness, a cloud witness, or a file share witness for cluster quorum. - -To learn more, see [Quorum best practices with SQL Server VMs in Azure](hadr-cluster-best-practices.md#quorum). - - -## Storage - -In traditional on-premises clustered environments, a Windows failover cluster uses a storage area network (SAN) that's accessible by both nodes as the shared storage. SQL Server files are hosted on the shared storage, and only the active node can access the files at one time. 
- -SQL Server on Azure VMs offers various options as a shared storage solution for a deployment of SQL Server failover cluster instances: - -||[Azure shared disks](../../../virtual-machines/disks-shared.md)|[Premium file shares](../../../storage/files/storage-how-to-create-file-share.md) |[Storage Spaces Direct (S2D)](/windows-server/storage/storage-spaces/storage-spaces-direct-overview)| -|---------|---------|---------|---------| -|**Minimum OS version**| All |Windows Server 2012|Windows Server 2016| -|**Minimum SQL Server version**|All|SQL Server 2012|SQL Server 2016| -|**Supported VM availability** |[Premium SSD LRS](../../../virtual-machines/disks-redundancy.md#locally-redundant-storage-for-managed-disks): Availability Sets with or with out [proximity placement group](../../../virtual-machines/windows/proximity-placement-groups-portal.md)
    [Premium SSD ZRS](../../../virtual-machines/disks-redundancy.md#zone-redundant-storage-for-managed-disks): Availability Zones
    [Ultra disks](../../../virtual-machines/disks-enable-ultra-ssd.md): Same availability zone|Availability sets and availability zones|Availability sets | -|**Supports FileStream**|Yes|No|Yes | -|**Azure blob cache**|No|No|Yes| - -The rest of this section lists the benefits and limitations of each storage option available for SQL Server on Azure VMs. - -### Azure shared disks - -[Azure shared disks](../../../virtual-machines/disks-shared.md) are a feature of [Azure managed disks](../../../virtual-machines/managed-disks-overview.md). Windows Server Failover Clustering supports using Azure shared disks with a failover cluster instance. - -**Supported OS**: All -**Supported SQL version**: All - -**Benefits**: -- Useful for applications looking to migrate to Azure while keeping their high-availability and disaster recovery (HADR) architecture as is. -- Can migrate clustered applications to Azure as is because of SCSI Persistent Reservations (SCSI PR) support. -- Supports shared Azure Premium SSD and Azure Ultra Disk storage. -- Can use a single shared disk or stripe multiple shared disks to create a shared storage pool. -- Supports Filestream. -- Premium SSDs support availability sets. -- Premium SSDs Zone Redundant Storage (ZRS) supports Availability Zones. VMs part of FCI can be placed in different availability zones. - -> [!NOTE] -> While Azure shared disks also support [Standard SSD sizes](../../../virtual-machines/disks-shared.md#disk-sizes), we do not recommend using Standard SSDs for SQL Server workloads due to the performance limitations. - -**Limitations**: - -- Premium SSD disk caching is not supported. -- Ultra disks do not support availability sets. 
-- Availability zones are supported for Ultra Disks, but the VMs must be in the same availability zone, which reduces the availability of the virtual machine to 99.9% -- Ultra disks do not support Zone Redundant Storage (ZRS) - - -To get started, see [SQL Server failover cluster instance with Azure shared disks](failover-cluster-instance-azure-shared-disks-manually-configure.md). - -### Storage Spaces Direct - -[Storage Spaces Direct](/windows-server/storage/storage-spaces/storage-spaces-direct-overview) is a Windows Server feature that is supported with failover clustering on Azure Virtual Machines. It provides a software-based virtual SAN. - -**Supported OS**: Windows Server 2016 and later -**Supported SQL version**: SQL Server 2016 and later - - -**Benefits:** - -- Sufficient network bandwidth enables a robust and highly performant shared storage solution. -- Supports Azure blob cache, so reads can be served locally from the cache. (Updates are replicated simultaneously to both nodes.) -- Supports FileStream. - -**Limitations:** - -- Available only for Windows Server 2016 and later. -- Availability zones are not supported. -- Requires the same disk capacity attached to both virtual machines. -- High network bandwidth is required to achieve high performance because of ongoing disk replication. -- Requires a larger VM size and double pay for storage, because storage is attached to each VM. - -To get started, see [SQL Server failover cluster instance with Storage Spaces Direct](failover-cluster-instance-storage-spaces-direct-manually-configure.md). - -### Premium file share - -[Premium file shares](../../../storage/files/storage-how-to-create-file-share.md) are a feature of [Azure Files](../../../storage/files/index.yml). Premium file shares are SSD backed and have consistently low latency. They're fully supported for use with failover cluster instances for SQL Server 2012 or later on Windows Server 2012 or later. 
Premium file shares give you greater flexibility, because you can resize and scale a file share without any downtime. - -**Supported OS**: Windows Server 2012 and later -**Supported SQL version**: SQL Server 2012 and later - -**Benefits:** -- Shared storage solution for virtual machines spread over multiple availability zones. -- Fully managed file system with single-digit latencies and burstable I/O performance. - -**Limitations:** -- Available only for Windows Server 2012 and later. -- FileStream is not supported. - - -To get started, see [SQL Server failover cluster instance with Premium file share](failover-cluster-instance-premium-file-share-manually-configure.md). - -### Partner - -There are partner clustering solutions with supported storage. - -**Supported OS**: All -**Supported SQL version**: All - -One example uses SIOS DataKeeper as the storage. For more information, see the blog entry [Failover clustering and SIOS DataKeeper](https://azure.microsoft.com/blog/high-availability-for-a-file-share-using-wsfc-ilb-and-3rd-party-software-sios-datakeeper/). - -### iSCSI and ExpressRoute - -You can also expose an iSCSI target shared block storage via Azure ExpressRoute. - -**Supported OS**: All -**Supported SQL version**: All - -For example, NetApp Private Storage (NPS) exposes an iSCSI target via ExpressRoute with Equinix to Azure VMs. - -For shared storage and data replication solutions from Microsoft partners, contact the vendor for any issues related to accessing data on failover. - -## Connectivity - -To match the on-premises experience for connecting to your failover cluster instance, deploy your SQL Server VMs to [multiple subnets](failover-cluster-instance-prepare-vm.md#subnets) within the same virtual network. Having multiple subnets negates the need for the extra dependency on an Azure Load Balancer, or a distributed network name (DNN) to route your traffic to your FCI. 
- -If you deploy your SQL Server VMs to a single subnet, you can configure a virtual network name (VNN) and an Azure Load Balancer, or a distributed network name (DNN) to route traffic to your failover cluster instance. [Review the differences between the two](hadr-windows-server-failover-cluster-overview.md#virtual-network-name-vnn) and then deploy either a [distributed network name](failover-cluster-instance-distributed-network-name-dnn-configure.md) or a [virtual network name](failover-cluster-instance-vnn-azure-load-balancer-configure.md) for your failover cluster instance. - -The distributed network name is recommended, if possible, as failover is faster, and the overhead and cost of managing the load balancer is eliminated. - -Most SQL Server features work transparently with FCIs when using the DNN, but there are certain features that may require special consideration. See [FCI and DNN interoperability](failover-cluster-instance-dnn-interoperability.md) to learn more. - -## Limitations - -Consider the following limitations for failover cluster instances with SQL Server on Azure Virtual Machines. - -### Lightweight extension support - -At this time, SQL Server failover cluster instances on Azure virtual machines are supported only with the [lightweight management mode](sql-server-iaas-agent-extension-automate-management.md#management-modes) of the SQL Server IaaS Agent Extension. To change from full extension mode to lightweight, delete the **SQL virtual machine** resource for the corresponding VMs and then register them with the SQL IaaS Agent extension in lightweight mode. When you're deleting the **SQL virtual machine** resource by using the Azure portal, clear the check box next to the correct virtual machine to avoid deleting the virtual machine. - -The full extension supports features such as automated backup, patching, and advanced portal management. These features will not work for SQL Server VMs registered in lightweight management mode. 
- -### MSDTC - -Azure Virtual Machines support Microsoft Distributed Transaction Coordinator (MSDTC) on Windows Server 2019 with storage on Clustered Shared Volumes (CSV) and [Azure Standard Load Balancer](../../../load-balancer/load-balancer-overview.md) or on SQL Server VMs that are using Azure shared disks. - -On Azure Virtual Machines, MSDTC isn't supported for Windows Server 2016 or earlier with Clustered Shared Volumes because: - -- The clustered MSDTC resource can't be configured to use shared storage. On Windows Server 2016, if you create an MSDTC resource, it won't show any shared storage available for use, even if storage is available. This issue has been fixed in Windows Server 2019. -- The basic load balancer doesn't handle RPC ports. - - -## Next steps - -Review [cluster configurations best practices](hadr-cluster-best-practices.md), and then you can [prepare your SQL Server VM for FCI](failover-cluster-instance-prepare-vm.md). - - -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Failover cluster instance overview](/sql/sql-server/failover-clusters/windows/always-on-failover-cluster-instances-sql-server) - diff --git a/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-premium-file-share-manually-configure.md b/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-premium-file-share-manually-configure.md deleted file mode 100644 index 9b5e076029617..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-premium-file-share-manually-configure.md +++ /dev/null @@ -1,213 +0,0 @@ ---- -title: Create an FCI with a premium file share -description: "Use a premium file share (PFS) to create a failover cluster instance (FCI) with SQL Server on Azure virtual machines." 
-services: virtual-machines -documentationCenter: na -author: rajeshsetlem -editor: monicar -tags: azure-service-management -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.custom: na, devx-track-azurepowershell -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.reviewer: mathoma ---- -# Create an FCI with a premium file share (SQL Server on Azure VMs) -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!TIP] -> Eliminate the need for an Azure Load Balancer or distributed network name (DNN) for your failover cluster instance by creating your SQL Server VMs in [multiple subnets](failover-cluster-instance-prepare-vm.md#subnets) within the same Azure virtual network. - - -This article explains how to create a failover cluster instance (FCI) with SQL Server on Azure Virtual Machines (VMs) by using a [premium file share](../../../storage/files/storage-how-to-create-file-share.md). - -Premium file shares are SSD backed and provide consistently low-latency file shares that are fully supported for use with failover cluster instances for SQL Server 2012 or later on Windows Server 2012 or later. Premium file shares give you greater flexibility, allowing you to resize and scale a file share without any downtime. - -To learn more, see an overview of [FCI with SQL Server on Azure VMs](failover-cluster-instance-overview.md) and [cluster best practices](hadr-cluster-best-practices.md). - -> [!NOTE] -> It's now possible to lift and shift your failover cluster instance solution to SQL Server on Azure VMs using Azure Migrate. See [Migrate failover cluster instance](../../migration-guides/virtual-machines/sql-server-failover-cluster-instance-to-sql-on-azure-vm.md) to learn more. - -## Prerequisites - -Before you complete the instructions in this article, you should already have: - -- An Azure subscription. 
-- An account that has permissions to create objects on both Azure virtual machines and in Active Directory. -- [Two or more prepared Windows Azure virtual machines](failover-cluster-instance-prepare-vm.md) in an [availability set](../../../virtual-machines/windows/tutorial-availability-sets.md#create-an-availability-set) or different [availability zones](../../../virtual-machines/windows/create-portal-availability-zone.md#confirm-zone-for-managed-disk-and-ip-address). -- A [premium file share](../../../storage/files/storage-how-to-create-file-share.md) to be used as the clustered drive, based on the storage quota of your database for your data files. -- The latest version of [PowerShell](/powershell/azure/install-az-ps). - -## Mount premium file share - -To mount your premium file share, follow these steps: - -1. Sign in to the [Azure portal](https://portal.azure.com). and go to your storage account. -1. Go to **File shares** under **Data storage**, and then select the premium file share you want to use for your SQL storage. -1. Select **Connect** to bring up the connection string for your file share. -1. In the drop-down list, select the drive letter you want to use, choose **Storage account key** as the authentication method, and then copy the code block to a text editor, such as Notepad. - - :::image type="content" source="media/failover-cluster-instance-premium-file-share-manually-configure/premium-file-storage-commands.png" alt-text="Copy the PowerShell command from the file share connect portal"::: - -1. Use Remote Desktop Protocol (RDP) to connect to the SQL Server VM with the **account that your SQL Server FCI will use for the service account**. -1. Open an administrative PowerShell command console. -1. Run the command that you copied earlier to your text editor from the File share portal. -1. Go to the share by using either File Explorer or the **Run** dialog box (Windows + R on your keyboard). 
Use the network path `\\storageaccountname.file.core.windows.net\filesharename`. For example, `\\sqlvmstorageaccount.file.core.windows.net\sqlpremiumfileshare` -1. Create at least one folder on the newly connected file share to place your SQL data files into. -1. Repeat these steps on each SQL Server VM that will participate in the cluster. - - > [!IMPORTANT] - > Consider using a separate file share for backup files to save the input/output operations per second (IOPS) and space capacity of this share for data and log files. You can use either a Premium or Standard File Share for backup files. - -## Create Windows Failover Cluster - -The steps to create your Windows Server Failover cluster vary depending on if you deployed your SQL Server VMs to a single subnet, or multiple subnets. To create your cluster, follow the steps in the tutorial for either a [multi-subnet scenario](availability-group-manually-configure-tutorial-multi-subnet.md#add-failover-cluster-feature) or a [single subnet scenario](availability-group-manually-configure-tutorial-single-subnet.md#create-the-cluster). Though these tutorials are for creating an availability group, the steps to create the cluster are the same. - - -## Configure quorum - -The cloud witness is the recommended quorum solution for this type of cluster configuration for SQL Server on Azure VMs. - -If you have an even number of votes in the cluster, configure the [quorum solution](hadr-cluster-quorum-configure-how-to.md) that best suits your business needs. For more information, see [Quorum with SQL Server VMs](hadr-windows-server-failover-cluster-overview.md#quorum). - -## Validate cluster - -Validate the cluster on one of the virtual machines by using the Failover Cluster Manager UI or PowerShell. - -To validate the cluster by using the UI, do the following on one of the virtual machines: - -1. Under **Server Manager**, select **Tools**, and then select **Failover Cluster Manager**. -1. 
Under **Failover Cluster Manager**, select **Action**, and then select **Validate Configuration**. -1. Select **Next**. -1. Under **Select Servers or a Cluster**, enter the names of both virtual machines. -1. Under **Testing options**, select **Run only tests I select**. -1. Select **Next**. -1. Under **Test Selection**, select all tests except for **Storage** and **Storage Spaces Direct**, as shown here: - - :::image type="content" source="media/failover-cluster-instance-premium-file-share-manually-configure/cluster-validation.png" alt-text="Select cluster validation tests"::: - -1. Select **Next**. -1. Under **Confirmation**, select **Next**. The **Validate a Configuration** wizard runs the validation tests. - - -To validate the cluster by using PowerShell, run the following script from an administrator PowerShell session on one of the virtual machines: - -```powershell -Test-Cluster –Node ("","") –Include "Inventory", "Network", "System Configuration" -``` - - - -## Test cluster failover - -Test the failover of your cluster. In **Failover Cluster Manager**, right-click your cluster, select **More Actions** > **Move Core Cluster Resource** > **Select node**, and then select the other node of the cluster. Move the core cluster resource to every node of the cluster, and then move it back to the primary node. If you can successfully move the cluster to each node, you're ready to install SQL Server. - -:::image type="content" source="media/failover-cluster-instance-premium-file-share-manually-configure/test-cluster-failover.png" alt-text="Test cluster failover by moving the core resource to the other nodes"::: - - -## Create SQL Server FCI - -After you've configured the failover cluster, you can create the SQL Server FCI. - -1. Connect to the first virtual machine by using RDP. - -1. In **Failover Cluster Manager**, make sure that all the core cluster resources are on the first virtual machine. If necessary, move all resources to this virtual machine. - -1. 
If the version of the operating system is Windows Server 2019 and the Windows Cluster was created using the default [**Distributed Network Name (DNN)**](https://blogs.windows.com/windows-insider/2018/08/14/announcing-windows-server-2019-insider-preview-build-17733/), then the FCI installation for SQL Server 2017 and below will fail with the error `The given key was not present in the dictionary`. - - During installation, SQL Server setup queries for the existing Virtual Network Name (VNN) and doesn't recognize the Windows Cluster DNN. The issue has been fixed in SQL Server 2019 setup. For SQL Server 2017 and below, follow these steps to avoid the installation error: - - - In Failover Cluster Manager, connect to the cluster, right-click **Roles** and select **Create Empty Role**. - - Right-click the newly created empty role, select **Add Resource** and select **Client Access Point**. - - Enter any name and complete the wizard to create the **Client Access Point**. - - After the SQL Server FCI installation completes, the role containing the temporary **Client Access Point** can be deleted. - -1. Locate the installation media. If the virtual machine uses one of the Azure Marketplace images, the media is located at `C:\SQLServer__Full`. - -1. Select **Setup**. - -1. In the **SQL Server Installation Center**, select **Installation**. - -1. Select **New SQL Server failover cluster installation**, and then follow the instructions in the wizard to install the SQL Server FCI. - -1. On the **Cluster Network Configuration** page, the IP you provide varies depending on if your SQL Server VMs were deployed to a single subnet, or multiple subnets. - - 1. For a **single subnet environment**, provide the IP address that you plan to add to the [Azure Load Balancer](failover-cluster-instance-vnn-azure-load-balancer-configure.md) - 1. 
For a **multi-subnet environment**, provide the secondary IP address in the subnet of the _first_ SQL Server VM that you previously designated as the [IP address of the failover cluster instance network name](failover-cluster-instance-prepare-vm.md#assign-secondary-ip-addresses): - - :::image type="content" source="./media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-cluster-network-secondary-ip-vm-1.png" alt-text="provide the secondary IP address in the subnet of the first SQL Server VM that you previously designated as the IP address of the failover cluster instance network name"::: - -1. In **Database Engine Configuration**, the data directories need to be on the premium file share. Enter the full path of the share, in this format: `\\storageaccountname.file.core.windows.net\filesharename\foldername`. A warning appears, telling you that you've specified a file server as the data directory. This warning is expected. Ensure that the user account you used to access the VM via RDP when you persisted the file share is the same account that the SQL Server service uses to avoid possible failures. - - :::image type="content" source="media/failover-cluster-instance-premium-file-share-manually-configure/use-file-share-as-data-directories.png" alt-text="Use file share as SQL data directories"::: - -1. After you complete the steps in the wizard, Setup installs a SQL Server FCI on the first node. - -1. After FCI installation succeeds on the first node, connect to the second node by using RDP. - -1. Open the **SQL Server Installation Center**, and then select **Installation**. - -1. Select **Add node to a SQL Server failover cluster**. Follow the instructions in the wizard to install SQL Server and add the node to the FCI. - -1. 
For a multi-subnet scenario, in **Cluster Network Configuration**, enter the secondary IP address in the subnet of the _second_ SQL Server VM that you previously designated as the [IP address of the failover cluster instance network name](failover-cluster-instance-prepare-vm.md#assign-secondary-ip-addresses) - - :::image type="content" source="./media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-cluster-network-secondary-ip-vm-2.png" alt-text="enter the secondary IP address in the subnet of the second SQL Server VM subnet that you previously designated as the IP address of the failover cluster instance network name"::: - - After selecting **Next** in **Cluster Network Configuration**, setup shows a dialog box indicating that SQL Server Setup detected multiple subnets as in the example image. Select **Yes** to confirm. - - :::image type="content" source="./media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-multi-subnet-confirmation.png" alt-text="Multi Subnet Confirmation"::: - - -1. After you complete the instructions in the wizard, setup adds the second SQL Server FCI node. - -1. Repeat these steps on any other nodes that you want to add to the SQL Server failover cluster instance. - - ->[!NOTE] -> Azure Marketplace gallery images come with SQL Server Management Studio installed. If you didn't use a marketplace image [Download SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms). - - -## Register with SQL IaaS extension - -To manage your SQL Server VM from the portal, register it with the SQL IaaS Agent extension in [lightweight management mode](sql-agent-extension-manually-register-single-vm.md#lightweight-mode), currently the only mode that's supported with FCI and SQL Server on Azure VMs. 
- -Register a SQL Server VM in lightweight mode with PowerShell (-LicenseType can be `PAYG` or `AHUB`): - -```powershell-interactive -# Get the existing compute VM -$vm = Get-AzVM -Name -ResourceGroupName - -# Register SQL VM with 'Lightweight' SQL IaaS agent -New-AzSqlVM -Name $vm.Name -ResourceGroupName $vm.ResourceGroupName -Location $vm.Location ` - -LicenseType ???? -SqlManagementType LightWeight -``` - -## Configure connectivity - -If you deployed your SQL Server VMs in multiple subnets, skip this step. If you deployed your SQL Server VMs to a single subnet, then you'll need to configure an additional component to route traffic to your FCI. You can configure a virtual network name (VNN) with an Azure Load Balancer, or a distributed network name for a failover cluster instance. [Review the differences between the two](hadr-windows-server-failover-cluster-overview.md#virtual-network-name-vnn) and then deploy either a [distributed network name](failover-cluster-instance-distributed-network-name-dnn-configure.md) or a [virtual network name and Azure Load Balancer](failover-cluster-instance-vnn-azure-load-balancer-configure.md) for your failover cluster instance. - -## Limitations - -- Microsoft Distributed Transaction Coordinator (MSDTC) is not supported on Windows Server 2016 and earlier. -- Filestream isn't supported for a failover cluster with a premium file share. To use filestream, deploy your cluster by using [Storage Spaces Direct](failover-cluster-instance-storage-spaces-direct-manually-configure.md) or [Azure shared disks](failover-cluster-instance-azure-shared-disks-manually-configure.md) instead. -- Only registering with the SQL IaaS Agent extension in [lightweight management mode](sql-server-iaas-agent-extension-automate-management.md#management-modes) is supported. -- Database Snapshots are not currently supported with [Azure Files due to sparse files limitations](/rest/api/storageservices/features-not-supported-by-the-azure-file-service). 
-- Since database snapshots are not supported, CHECKDB for user databases falls back to CHECKDB WITH TABLOCK. TABLOCK limits the checks that are performed - DBCC CHECKCATALOG is not run on the database, and Service Broker data is not validated. -- CHECKDB on MASTER and MSDB database is not supported. -- Databases that use the in-memory OLTP feature are not supported on a failover cluster instance deployed with a premium file share. If your business requires in-memory OLTP, consider deploying your FCI with [Azure shared disks](failover-cluster-instance-azure-shared-disks-manually-configure.md) or [Storage Spaces Direct](failover-cluster-instance-storage-spaces-direct-manually-configure.md) instead. - -## Next steps - -If premium file shares are not the appropriate FCI storage solution for you, consider creating your FCI by using [Azure shared disks](failover-cluster-instance-azure-shared-disks-manually-configure.md) or [Storage Spaces Direct](failover-cluster-instance-storage-spaces-direct-manually-configure.md) instead. 
- -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Failover cluster instances with SQL Server on Azure VMs](failover-cluster-instance-overview.md) -- [Failover cluster instance overview](/sql/sql-server/failover-clusters/windows/always-on-failover-cluster-instances-sql-server) -- [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md) - diff --git a/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-prepare-vm.md b/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-prepare-vm.md deleted file mode 100644 index b63214de010fa..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-prepare-vm.md +++ /dev/null @@ -1,279 +0,0 @@ ---- -title: Prepare virtual machines for an FCI -description: "Prepare your Azure virtual machines to use them with a failover cluster instance (FCI) and SQL Server on Azure Virtual Machines." -services: virtual-machines -documentationCenter: na -author: rajeshsetlem -editor: monicar -tags: azure-service-management -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.reviewer: mathoma ---- - -# Prepare virtual machines for an FCI (SQL Server on Azure VMs) -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article describes how to prepare Azure virtual machines (VMs) to use them with a SQL Server failover cluster instance (FCI). Configuration settings vary depending on the FCI storage solution, so validate that you're choosing the correct configuration to suit your environment and business. - -To learn more, see an overview of [FCI with SQL Server on Azure VMs](failover-cluster-instance-overview.md) and [cluster best practices](hadr-cluster-best-practices.md). 
- -> [!NOTE] -> It's now possible to lift and shift your failover cluster instance solution to SQL Server on Azure VMs using Azure Migrate. See [Migrate failover cluster instance](../../migration-guides/virtual-machines/sql-server-failover-cluster-instance-to-sql-on-azure-vm.md) to learn more. - -## Prerequisites - -- A Microsoft Azure subscription. Get started for [free](https://azure.microsoft.com/free/). -- A Windows domain on Azure virtual machines or an on-premises active directory extended to Azure with virtual network pairing. -- An account that has permissions to create objects on Azure virtual machines and in Active Directory. -- An Azure virtual network and one or more subnets with enough IP address space for these components: - - Both virtual machines - - An IP address for the Windows failover cluster - - An IP address for each FCI -- DNS configured on the Azure network, pointing to the domain controllers. - - - -## Choose an FCI storage option - -The configuration settings for your virtual machine vary depending on the storage option you're planning to use for your SQL Server failover cluster instance. Before you prepare the virtual machine, review the [available FCI storage options](failover-cluster-instance-overview.md#storage) and choose the option that best suits your environment and business need. Then carefully select the appropriate VM configuration options throughout this article based on your storage selection. - -## Choose VM availability - -The failover cluster feature requires virtual machines to be placed in an [availability set](../../../virtual-machines/linux/tutorial-availability-sets.md) or an [availability zone](../../../availability-zones/az-overview.md#availability-zones). 
- -Carefully select the VM availability option that matches your intended cluster configuration: - -- **Azure shared disks**: the availability option varies if you're using Premium SSD or UltraDisk: - - **Premium SSD Zone Redundant Storage (ZRS)**: - [Availability Zone](../../../availability-zones/az-overview.md#availability-zones) in different zones. [Premium SSD ZRS](../../../virtual-machines/disks-redundancy.md#zone-redundant-storage-for-managed-disks) replicates your Azure managed disk synchronously across three Azure availability zones in the selected region. VMs part of failover cluster can be placed in different availability zones, helping you achieve a zone-redundant SQL Server FCI that provides a VM availability SLA of 99.99%. Disk latency for ZRS is higher due to the cross-zonal copy of data. - - **Premium SSD Locally Redundant Storage (LRS)**: - [Availability Set](../../../virtual-machines/windows/tutorial-availability-sets.md#create-an-availability-set) in different fault/update domains for [Premium SSD LRS](../../../virtual-machines/disks-redundancy.md#locally-redundant-storage-for-managed-disks). You can also choose to place the VMs inside a [proximity placement group](../../../virtual-machines/windows/proximity-placement-groups-portal.md) to locate them closer to each other. Combining availability set and proximity placement group provides the lowest latency for shared disks as data is replicated locally within one data center and provides VM availability SLA of 99.95%. - - **Ultra Disk Locally Redundant Storage (LRS)**: - [Availability zone](../../../virtual-machines/windows/create-portal-availability-zone.md#confirm-zone-for-managed-disk-and-ip-address) but the VMs must be placed in the same availability zone. [Ultra disks](../../../virtual-machines/disks-enable-ultra-ssd.md) offer the lowest disk latency and are best for IO intensive workloads. Since all VMs part of the FCI have to be in the same availability zone, the VM availability is only 99.9%. 
-- **Premium file shares**: [Availability set](../../../virtual-machines/windows/tutorial-availability-sets.md#create-an-availability-set) or [Availability Zone](../../../virtual-machines/windows/create-portal-availability-zone.md#confirm-zone-for-managed-disk-and-ip-address). -- **Storage Spaces Direct**: [Availability Set](../../../virtual-machines/windows/tutorial-availability-sets.md#create-an-availability-set). - -> [!IMPORTANT] -> You can't set or change the availability set after you've created a virtual machine. - -## Subnets - -For SQL Server on Azure VMs, you have the option to deploy your SQL Server VMs to a single subnet, or to multiple subnets. - -Deploying your VMs to multiple subnets leverages the cluster OR dependency for IP addresses and matches the on-premises experience when connecting to your failover cluster instance. The multi-subnet approach is recommended for SQL Server on Azure VMs for simpler manageability, and faster failover times. - -Deploying your VMs to a single subnet requires an additional dependency on an Azure Load Balancer or distributed network name (DNN) to route traffic to your FCI. - -If you deploy your SQL Server VMs to multiple subnets, follow the steps in this section to create your virtual networks with additional subnets, and then once the SQL Server VMs are created, [assign secondary IP addresses](#assign-secondary-ip-addresses) within those subnets to the VMs. Deploying your SQL Server VMs to a single subnet does not require any additional network configuration. - -# [Single subnet](#tab/single-subnet) - -Place both virtual machines in a single subnet that has enough IP addresses for both virtual machines and all FCIs that you might eventually install to the cluster. This approach requires an extra component to route connections to your FCI, such as an Azure Load Balancer or a distributed network name (DNN). 
- -If you choose to deploy your SQL Server VMs to a single subnet [review the differences between the Azure Load Balancer and DNN connectivity options](hadr-windows-server-failover-cluster-overview.md#distributed-network-name-dnn) and decide which option works best for you before preparing the rest of your environment for your FCI. - -Deploying your SQL Server VMs to a single subnet does not require any additional network configuration. - -# [Multi-subnet](#tab/multi-subnet) - -If you want to route connections directly to your SQL Server FCI, place both virtual machines in separate subnets within a virtual network. Assign a secondary IP address to the SQL Server VM for the failover cluster instance - and, if you're on Windows Server 2016 and below, assign an additional secondary IP address for the Windows Server Failover Cluster as well. Windows Server 2019 and later uses a distributed network name (DNN) for the cluster name so a secondary IP address for the cluster is not necessary. - -This approach eliminates the need for an Azure Load Balancer or a distributed network name (DNN) when connecting to your SQL Server FCI. - -If you choose to deploy your SQL Server VMs to multiple subnets, you'll first need to create the virtual network with two extra subnets, and once your SQL Server VMs are created, [assign secondary IP addresses to the VM](#assign-secondary-ip-addresses). To learn more, see [Virtual network overview](../../../virtual-network/virtual-networks-overview.md). The subnet names and IP addresses in this section are provided as just an example, and may vary in your environment. -To create the virtual network in the Azure portal, follow these steps: - -1. Go to your resource group in the [Azure portal](https://portal.azure.com) and select **+ Create** - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/02-create-resource-rg.png" alt-text="Create new resource in your resource group"::: - -1. 
Search for **virtual network** in the **Marketplace** search box and choose the **virtual network** tile from Microsoft. Select **Create** on the **Virtual network** page. -1. On the **Create virtual network** page, enter the following information on the **Basics** tab: - 1. Under **Project details**, choose the appropriate Azure **Subscription**, and the **Resource group** where you plan to deploy your SQL Server VMs. - 1. Under **Instance details**, provide a name for your virtual network and choose the same region as your resource group from the drop-down. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/03-create-vnet-basics.png" alt-text="Choose the resource group you made previously, then provide a name for your virtual network"::: - -1. On the **IP addresses** tab, select **+ Add subnet** to add an additional subnet for your first SQL Server VM, and fill in the following values: - 1. Provide a value for the **Subnet name**, such as **SQL-subnet-1**. - 1. Provide a unique subnet address range within the virtual network address space. For example, you can iterate the third octet of DC-subnet address range by 1. - - For example, if your **default** range is *10.38.0.0/24*, enter the IP address range `10.38.1.0/24` for **SQL-subnet-1**. - - Likewise, if your **default** IP range is *10.5.0.0/24*, then enter `10.5.1.0/24` for the new subnet. - 1. Select **Add** to add your new subnet. - - :::image type="content" source="./media/failover-cluster-instance-prepare-vm/05-create-vnet-ip-address-add-sql-subnet-1.png" alt-text="Name your first subnet, such as sql-subnet-1, and then iterate the third octet by 1, so that if your DC-subnet IP address is 10.5.0.0, your new subnet should be 10.5.1.0"::: - -1. Repeat the previous step to add an additional unique subnet range for your second SQL Server VM with a name such as **SQL-subnet-2**. You can iterate the third octet by one again. 
- - For example, if your **default** IP range is *10.38.0.0/24*, and your **SQL-subnet-1** is *10.38.1.0/24*, then enter `10.38.2.0/24` for the new subnet - - Likewise, if your **default** IP range is *10.5.0.0/24*, and your **SQL-subnet-1** is *10.5.1.0/24*, then enter the IP address range `10.5.2.0/24` for **SQL-subnet-2** . - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/06-create-vnet-ip-address-add-sql-subnet-2.png" alt-text="Name your second subnet, such as sql-subnet-2, and then iterate the third octet by 2, so that if your DC-subnet IP address is 10.38.0.0/24, your new subnet should be 10.38.2.0/24"::: - -1. After you've added the second subnet, review your subnet names and ranges (your IP address ranges may differ from the image). If everything looks correct, select **Review + create**, then **Create** to create your new virtual network. - - :::image type="content" source="./media/failover-cluster-instance-prepare-vm/07-create-vnet-ip-address.png" alt-text="After you've added the second subnet, review your subnet names and ranges, like the image example (though your IP addresses may be different). If everything looks correct, select Review + create, then Create to create your new virtual network."::: - - Azure returns you to the portal dashboard and notifies you when the new network is created. - ---- - -## Configure DNS - -Configure your virtual network to use your DNS server. First, identify the DNS IP address, and then add it to your virtual network. - -### Identify DNS IP address - -Identify the IP address of the DNS server, and then add it to the virtual network configuration. This section demonstrates how to identify the DNS IP address if the DNS server is on a virtual machine in Azure. - -To identify the IP address of the DNS server VM in the Azure portal, follow these steps: - -1. 
Go to your resource group in the [Azure portal](https://portal.azure.com) and select the DNS server VM. -1. On the VM page, choose **Networking** in the **Settings** pane. -1. Note the **NIC Private IP** address as this is the IP address of the DNS server. In the example image, the private IP address is **10.38.0.4**. - -:::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/12-dc-vm-1-private-ip.png" alt-text="On the DC-VM-1 page, choose Networking in the Settings pane, and then note the NIC private IP address. Use this IP address as the DNS server. "::: - -### Configure virtual network DNS - -Configure the virtual network to use this DNS server IP address. - -To configure your virtual network for DNS, follow these steps: - -1. Go to your resource group in the [Azure portal](https://portal.azure.com), and select your virtual network. -1. Select **DNS servers** under the **Settings** pane and then select **Custom**. -1. Enter the private IP address you identified previously in the **IP Address** field, such as `10.38.0.4`, or provide the internal IP address of your internal DNS server. -1. Select **Save**. - -:::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/12-identify-dns-ip-address.png" alt-text=" Select DNS servers under the Settings pane and then select Custom. Enter the private IP address you identified previously in the IP Address field, such as 10.38.0.4. "::: - -## Create the virtual machines - -After you've configured your VM virtual network and chosen VM availability, you're ready to create your virtual machines. You can choose to use an Azure Marketplace image that does or doesn't have SQL Server already installed on it. However, if you choose an image for SQL Server on Azure VMs, you'll need to uninstall SQL Server from the virtual machine before configuring the failover cluster instance. 
- -### NIC considerations - -On an Azure VM guest failover cluster, we recommend a single NIC per server (cluster node). Azure networking has physical redundancy, which makes additional NICs unnecessary on an Azure IaaS VM guest cluster. Although the cluster validation report will issue a warning that the nodes are only reachable on a single network, this warning can be safely ignored on Azure IaaS VM guest failover clusters. - -Place both virtual machines: - -- In the same Azure resource group as your availability set, if you're using availability sets. -- On the same virtual network as your domain controller and DNS server or on a virtual network that has suitable connectivity to your domain controller. -- In the Azure availability set or availability zone. - -You can create an Azure virtual machine by using an image [with](sql-vm-create-portal-quickstart.md) or [without](../../../virtual-machines/windows/quick-create-portal.md) SQL Server preinstalled to it. If you choose the SQL Server image, you'll need to manually uninstall the SQL Server instance before installing the failover cluster instance. - -### Assign secondary IP addresses - -If you deployed your SQL Server VMs to a single subnet, skip this step. If you deployed your SQL Server VMs to multiple subnets for improved connectivity to your FCI, you need to assign the secondary IP addresses to each VM. - -Assign secondary IP addresses to each SQL Server VM to use for the failover cluster instance network name, and for Windows Server 2016 and earlier, assign secondary IP addresses to each SQL Server VM for the cluster network name as well. Doing this negates the need for an Azure Load Balancer, as is the requirement in a single subnet environment. 
- -On Windows Server 2016 and earlier, you need to assign an additional secondary IP address to each SQL Server VM to use for the windows cluster IP since the cluster uses the **Cluster Network Name** rather than the default distributed network name (DNN) introduced in Windows Server 2019. With a DNN, the cluster name object (CNO) is automatically registered with the IP addresses for all the nodes of the cluster, eliminating the need for a dedicated windows cluster IP address. - -If you're on Windows Server 2016 and prior, follow the steps in this section to assign a secondary IP address to each SQL Server VM for *both* the FCI network name, *and* the cluster. - -If you're on Windows Server 2019 or later, only assign a secondary IP address for the FCI network name, and skip the steps to assign a windows cluster IP, unless you plan to configure your cluster with a virtual network name (VNN), in which case assign both IP addresses to each SQL Server VM as you would for Windows Server 2016. - -To assign additional secondary IPs to the VMs, follow these steps: - -1. Go to your resource group in the [Azure portal](https://portal.azure.com/) and select the first SQL Server VM. -1. Select **Networking** in the **Settings** pane, and then select the **Network Interface**: - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/19-sql-vm-network-interface.png" alt-text="Select Networking in the Settings pane, and then select the Network Interface"::: - -1. On the **Network Interface** page, select **IP configurations** in the **Settings** pane and then choose **+ Add** to add an additional IP address: - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/20-ip-configurations-add.png" alt-text="IP configurations"::: - -1. On the **Add IP configuration** page, do the following: - 1. 
Specify the **Name** for the Windows Cluster IP address, such as **windows-cluster-ip** for Windows 2016 and earlier. Skip this step if you're on Windows Server 2019 or later. - 1. Set the **Allocation** to **Static**. - 1. Enter an unused **IP address** in the same subnet (**SQL-subnet-1**) as the SQL Server VM, such as `10.38.1.10`. - 1. Leave the **Public IP address** at the default of **Disassociate**. - 1. Select **OK** to finish adding the IP configuration. - - :::image type="content" source="./media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/21-add-ip-windows-cluster.png" alt-text="Add Cluster IP by entering an unused IP address in the subnet of the first SQL Server VM"::: - -1. Select **+ Add** again to configure an additional IP address for the FCI network name (with a name such as **FCI-network-name**), again specifying an unused IP address in **SQL-subnet-1** such as `10.38.1.11`: - - :::image type="content" source="./media/failover-cluster-instance-prepare-vm/22-add-fci-ip-address.png" alt-text="Select + Add again to configure an additional IP address for the availability group listener (with a name such as availability-group-listener), again using an unused IP address in SQL-subnet-1 such as 10.31.1.11"::: - -1. Repeat these steps again for the second SQL Server VM. Assign two unused secondary IP addresses within **SQL-subnet-2**. Use the values from the following table to add the IP configuration (though the IP addresses are just examples, yours may vary): - - - | **Field** | Input | Input | - | --- | --- | --- | - | **Name** |windows-cluster-ip | FCI-network-name | - | **Allocation** | Static | Static | - | **IP address** | 10.38.2.10 | 10.38.2.11 | - - - - -## Uninstall SQL Server - -As part of the FCI creation process, you'll install SQL Server as a clustered instance to the failover cluster. 
*If you deployed a virtual machine with an Azure Marketplace image without SQL Server, you can skip this step.* If you deployed an image with SQL Server preinstalled, you'll need to unregister the SQL Server VM from the SQL IaaS Agent extension, and then uninstall SQL Server. - -### Unregister from the SQL IaaS Agent extension - -SQL Server VM images from Azure Marketplace are automatically registered with the SQL IaaS Agent extension. Before you uninstall the preinstalled SQL Server instance, you must first [unregister each SQL Server VM from the SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md#unregister-from-extension). - -### Uninstall SQL Server - -After you've unregistered from the extension, you can uninstall SQL Server. Follow these steps on each virtual machine: - -1. Connect to the virtual machine by using RDP. When you first connect to a virtual machine by using RDP, a prompt asks you if you want to allow the PC to be discoverable on the network. Select **Yes**. -1. Open **Programs and Features** in the **Control Panel**. -1. In **Programs and Features**, right-click **Microsoft SQL Server 201_ (64-bit)** and select **Uninstall/Change**. -1. Select **Remove**. -1. Select the default instance. -1. Remove all features under **Database Engine Services**, **Analysis Services** and **Reporting Services - Native**. Don't remove anything under **SharedFeatures**. You'll see something like the following screenshot: - ![Select features](./media/failover-cluster-instance-prepare-vm/remove-features-updated.png) -1. Select **Next**, and then select **Remove**. -1. After the instance is successfully removed, restart the virtual machine. - -## Open the firewall - -On each virtual machine, open the Windows Firewall TCP port that SQL Server uses. By default SQL Server uses port 1433, but if you changed this in your environment, open the port you've configured your SQL Server instance to use. 
Port 1433 is automatically open on SQL Server images deployed from Azure Marketplace. - -If you use a [load balancer](failover-cluster-instance-vnn-azure-load-balancer-configure.md) for single subnet scenario, you'll also need to open the port that the health probe uses. By default, the health probe uses port 59999, but it can be any TCP port that you specify when you create the load balancer. - -This table details the ports that you might need to open, depending on your FCI configuration: - - | Purpose | Port | Notes - | ------ | ------ | ------ - | SQL Server | TCP 1433 | Normal port for default instances of SQL Server. If you used an image from the gallery, this port is automatically opened.

    **Used by**: All FCI configurations. | - | Health probe | TCP 59999 | Any open TCP port. Configure the load balancer [health probe](failover-cluster-instance-vnn-azure-load-balancer-configure.md#configure-health-probe) and the cluster to use this port.

    **Used by**: FCI with load balancer in single subnet scenario. | - | File share | TCP 445 | Port that the file share service uses.

    **Used by**: FCI with Premium file share. | - -## Join the domain - -You also need to join your virtual machines to the domain. You can do so by using a [quickstart template](../../../active-directory-domain-services/join-windows-vm-template.md#join-an-existing-windows-server-vm-to-a-managed-domain). - -## Review storage configuration - -Virtual machines created from Azure Marketplace come with attached storage. If you plan to configure your FCI storage by using Premium file shares or Azure shared disks, you can remove the attached storage to save on costs because local storage is not used for the failover cluster instance. However, it's possible to use the attached storage for Storage Spaces Direct FCI solutions, so removing them in this case might be unhelpful. Review your FCI storage solution to determine if removing attached storage is optimal for saving costs. - - -## Next steps - -Now that you've prepared your virtual machine environment, you're ready to configure your failover cluster instance. 
- -Choose one of the following guides to configure the FCI environment that's appropriate for your business: -- [Configure FCI with Azure shared disks](failover-cluster-instance-azure-shared-disks-manually-configure.md) -- [Configure FCI with a Premium file share](failover-cluster-instance-premium-file-share-manually-configure.md) -- [Configure FCI with Storage Spaces Direct](failover-cluster-instance-storage-spaces-direct-manually-configure.md) - - -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Failover cluster instances with SQL Server on Azure VMs](failover-cluster-instance-overview.md) -- [Failover cluster instance overview](/sql/sql-server/failover-clusters/windows/always-on-failover-cluster-instances-sql-server) -- [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md) diff --git a/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-storage-spaces-direct-manually-configure.md b/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-storage-spaces-direct-manually-configure.md deleted file mode 100644 index 14ce51cbc8396..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-storage-spaces-direct-manually-configure.md +++ /dev/null @@ -1,235 +0,0 @@ ---- -title: Create an FCI with Storage Spaces Direct -description: "Use Storage Spaces Direct to create a failover cluster instance (FCI) with SQL Server on Azure virtual machines." 
-services: virtual-machines -documentationCenter: na -author: rajeshsetlem -editor: monicar -tags: azure-service-management -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.custom: na, devx-track-azurepowershell -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.reviewer: mathoma ---- - -# Create an FCI with Storage Spaces Direct (SQL Server on Azure VMs) -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!TIP] -> Eliminate the need for an Azure Load Balancer or distributed network name (DNN) for your failover cluster instance by creating your SQL Server VMs in [multiple subnets](failover-cluster-instance-prepare-vm.md#subnets) within the same Azure virtual network. - -This article explains how to create a failover cluster instance (FCI) by using [Storage Spaces Direct](/windows-server/storage/storage-spaces/storage-spaces-direct-overview) with SQL Server on Azure Virtual Machines (VMs). Storage Spaces Direct acts as a software-based virtual storage area network (VSAN) that synchronizes the storage (data disks) between the nodes (Azure VMs) in a Windows cluster. - -To learn more, see an overview of [FCI with SQL Server on Azure VMs](failover-cluster-instance-overview.md) and [cluster best practices](hadr-cluster-best-practices.md). - -> [!NOTE] -> It's now possible to lift and shift your failover cluster instance solution to SQL Server on Azure VMs using Azure Migrate. See [Migrate failover cluster instance](../../migration-guides/virtual-machines/sql-server-failover-cluster-instance-to-sql-on-azure-vm.md) to learn more. - - -## Overview - -[Storage Spaces Direct (S2D)](/windows-server/storage/storage-spaces/storage-spaces-direct-overview) supports two types of architectures: converged and hyperconverged. A hyperconverged infrastructure places the storage on the same servers that host the clustered application, so that storage is on each SQL Server FCI node. 
- -The following diagram shows the complete solution, which uses hyperconverged Storage Spaces Direct with SQL Server on Azure VMs: - -![Diagram of the complete solution, using hyperconverged Storage Spaces Direct](./media/failover-cluster-instance-storage-spaces-direct-manually-configure/00-sql-fci-s2d-complete-solution.png) - -The preceding diagram shows the following resources in the same resource group: - -- Two virtual machines in a Windows Server failover cluster. When a virtual machine is in a failover cluster, it's also called a *cluster node* or *node*. -- Each virtual machine has two or more data disks. -- Storage Spaces Direct synchronizes the data on the data disks and presents the synchronized storage as a storage pool. -- The storage pool presents a Cluster Shared Volume (CSV) to the failover cluster. -- The SQL Server FCI cluster role uses the CSV for the data drives. -- An Azure load balancer to hold the IP address for the SQL Server FCI for a single subnet scenario. -- An Azure availability set holds all the resources. - - > [!NOTE] -> You can create this entire solution in Azure from a template. An example of a template is available on the GitHub [Azure quickstart templates](https://github.com/MSBrett/azure-quickstart-templates/tree/master/sql-server-2016-fci-existing-vnet-and-ad) page. This example isn't designed or tested for any specific workload. You can run the template to create a SQL Server FCI with Storage Spaces Direct storage connected to your domain. You can evaluate the template and modify it for your purposes. - - -## Prerequisites - -Before you complete the instructions in this article, you should already have: - -- An Azure subscription. Get started for [free](https://azure.microsoft.com/free/). -- [Two or more prepared Windows Azure virtual machines](failover-cluster-instance-prepare-vm.md) in an [availability set](../../../virtual-machines/windows/tutorial-availability-sets.md#create-an-availability-set). 
-- An account that has permissions to create objects on both Azure virtual machines and in Active Directory. -- The latest version of [PowerShell](/powershell/azure/install-az-ps). - -## Create Windows Failover Cluster - -The steps to create your Windows Server Failover cluster vary depending on if you deployed your SQL Server VMs to a single subnet, or multiple subnets. To create your cluster, follow the steps in the tutorial for either a [multi-subnet scenario](availability-group-manually-configure-tutorial-multi-subnet.md#add-failover-cluster-feature) or a [single subnet scenario](availability-group-manually-configure-tutorial-single-subnet.md#create-the-cluster). Though these tutorials are for creating an availability group, the steps to create the cluster are the same. - -## Configure quorum - -Although the disk witness is the most resilient quorum option, it's not supported for failover cluster instances configured with Storage Spaces Direct. As such, the cloud witness is the recommended quorum solution for this type of cluster configuration for SQL Server on Azure VMs. - -If you have an even number of votes in the cluster, configure the [quorum solution](hadr-cluster-quorum-configure-how-to.md) that best suits your business needs. For more information, see [Quorum with SQL Server VMs](hadr-windows-server-failover-cluster-overview.md#quorum). - -## Validate the cluster - -Validate the cluster in the Failover Cluster Manager UI or by using PowerShell. - -To validate the cluster by using the UI, do the following on one of the virtual machines: - -1. Under **Server Manager**, select **Tools**, and then select **Failover Cluster Manager**. -1. Under **Failover Cluster Manager**, select **Action**, and then select **Validate Configuration**. -1. Select **Next**. -1. Under **Select Servers or a Cluster**, enter the names of both virtual machines. -1. Under **Testing options**, select **Run only tests I select**. -1. Select **Next**. -1. 
Under **Test Selection**, select all tests except for **Storage**, as shown here: - - ![Select cluster validation tests](./media/failover-cluster-instance-storage-spaces-direct-manually-configure/10-validate-cluster-test.png) - -1. Select **Next**. -1. Under **Confirmation**, select **Next**. - - The **Validate a Configuration** wizard runs the validation tests. - -To validate the cluster by using PowerShell, run the following script from an administrator PowerShell session on one of the virtual machines: - - ```powershell - Test-Cluster –Node ("","") –Include "Storage Spaces Direct", "Inventory", "Network", "System Configuration" - ``` - - -## Add storage - -The disks for Storage Spaces Direct need to be empty. They can't contain partitions or other data. To clean the disks, follow the instructions in [Deploy Storage Spaces Direct](/windows-server/storage/storage-spaces/deploy-storage-spaces-direct#step-31-clean-drives). - -1. [Enable Storage Spaces Direct](/windows-server/storage/storage-spaces/deploy-storage-spaces-direct#step-35-enable-storage-spaces-direct). - - The following PowerShell script enables Storage Spaces Direct: - - ```powershell - Enable-ClusterS2D - ``` - - In **Failover Cluster Manager**, you can now see the storage pool. - -1. [Create a volume](/windows-server/storage/storage-spaces/deploy-storage-spaces-direct#step-36-create-volumes). - - Storage Spaces Direct automatically creates a storage pool when you enable it. You're now ready to create a volume. The PowerShell cmdlet `New-Volume` automates the volume creation process. This process includes formatting, adding the volume to the cluster, and creating a CSV. This example creates an 800 gigabyte (GB) CSV: - - ```powershell - New-Volume -StoragePoolFriendlyName S2D* -FriendlyName VDisk01 -FileSystem CSVFS_REFS -Size 800GB - ``` - - After you've run the preceding command, an 800-GB volume is mounted as a cluster resource. The volume is at `C:\ClusterStorage\Volume1\`. 
- - This screenshot shows a CSV with Storage Spaces Direct: - - ![Screenshot of a Cluster Shared Volume with Storage Spaces Direct](./media/failover-cluster-instance-storage-spaces-direct-manually-configure/15-cluster-shared-volume.png) - - - -## Test cluster failover - -Test the failover of your cluster. In **Failover Cluster Manager**, right-click your cluster, select **More Actions** > **Move Core Cluster Resource** > **Select node**, and then select the other node of the cluster. Move the core cluster resource to every node of the cluster, and then move it back to the primary node. If you can successfully move the cluster to each node, you're ready to install SQL Server. - -:::image type="content" source="media/failover-cluster-instance-premium-file-share-manually-configure/test-cluster-failover.png" alt-text="Test cluster failover by moving the core resource to the other nodes"::: - -## Create SQL Server FCI - -After you've configured the failover cluster and all cluster components, including storage, you can create the SQL Server FCI. - -1. Connect to the first virtual machine by using RDP. - -1. In **Failover Cluster Manager**, make sure all core cluster resources are on the first virtual machine. If necessary, move all resources to that virtual machine. - -1. If the version of the operating system is Windows Server 2019 and the Windows Cluster was created using the default [**Distributed Network Name (DNN)**](https://blogs.windows.com/windows-insider/2018/08/14/announcing-windows-server-2019-insider-preview-build-17733/), then the FCI installation for SQL Server 2017 and below will fail with the error `The given key was not present in the dictionary`. - - During installation, SQL Server setup queries for the existing Virtual Network Name (VNN) and doesn't recognize the Windows Cluster DNN. The issue has been fixed in SQL Server 2019 setup. 
For SQL Server 2017 and below, follow these steps to avoid the installation error: - - - In Failover Cluster Manager, connect to the cluster, right-click **Roles** and select **Create Empty Role**. - - Right-click the newly created empty role, select **Add Resource** and select **Client Access Point**. - - Enter any name and complete the wizard to create the **Client Access Point**. - - After the SQL Server FCI installation completes, the role containing the temporary **Client Access Point** can be deleted. - -1. Locate the installation media. If the virtual machine uses one of the Azure Marketplace images, the media is located at `C:\SQLServer__Full`. Select **Setup**. - -1. In **SQL Server Installation Center**, select **Installation**. - -1. Select **New SQL Server failover cluster installation**. Follow the instructions in the wizard to install the SQL Server FCI. - -1. On the **Cluster Network Configuration** page, the IP you provide varies depending on if your SQL Server VMs were deployed to a single subnet, or multiple subnets. - - 1. For a **single subnet environment**, provide the IP address that you plan to add to the [Azure Load Balancer](failover-cluster-instance-vnn-azure-load-balancer-configure.md) - 1. For a **multi-subnet environment**, provide the secondary IP address in the subnet of the _first_ SQL Server VM that you previously designated as the [IP address of the failover cluster instance network name](failover-cluster-instance-prepare-vm.md#assign-secondary-ip-addresses): - - :::image type="content" source="./media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-cluster-network-secondary-ip-vm-1.png" alt-text="provide the secondary IP address in the subnet of the first SQL Server VM that you previously designated as the IP address of the failover cluster instance network name"::: - -1. In **Database Engine Configuration**, The FCI data directories need to be on clustered storage. 
With Storage Spaces Direct, it's not a shared disk but a mount point to a volume on each server. Storage Spaces Direct synchronizes the volume between both nodes. The volume is presented to the cluster as a CSV. Use the CSV mount point for the data directories. - - ![Data directories](./media/failover-cluster-instance-storage-spaces-direct-manually-configure/20-data-dicrectories.png) - -1. After you complete the instructions in the wizard, Setup installs a SQL Server FCI on the first node. - -1. After FCI installation succeeds on the first node, connect to the second node by using RDP. - -1. Open the **SQL Server Installation Center**. Select **Installation**. - -1. Select **Add node to a SQL Server failover cluster**. Follow the instructions in the wizard to install SQL Server and add the node to the FCI. - -1. For a multi-subnet scenario, in **Cluster Network Configuration**, enter the secondary IP address in the subnet of the _second_ SQL Server VM that you previously designated as the [IP address of the failover cluster instance network name](failover-cluster-instance-prepare-vm.md#assign-secondary-ip-addresses) - - :::image type="content" source="./media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-cluster-network-secondary-ip-vm-2.png" alt-text="enter the secondary IP address in the subnet of the second SQL Server VM subnet that you previously designated as the IP address of the failover cluster instance network name"::: - - After selecting **Next** in **Cluster Network Configuration**, setup shows a dialog box indicating that SQL Server Setup detected multiple subnets as in the example image. Select **Yes** to confirm. - - :::image type="content" source="./media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-multi-subnet-confirmation.png" alt-text="Multi Subnet Confirmation"::: - -1. After you complete the instructions in the wizard, setup adds the second SQL Server FCI node. - -1. 
Repeat these steps on any other nodes that you want to add to the SQL Server failover cluster instance. - - ->[!NOTE] -> Azure Marketplace gallery images come with SQL Server Management Studio installed. If you didn't use a marketplace image [Download SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms). - - -## Register with SQL IaaS extension - -To manage your SQL Server VM from the portal, register it with the SQL IaaS Agent extension in [lightweight management mode](sql-agent-extension-manually-register-single-vm.md#lightweight-mode), currently the only mode that's supported with FCI and SQL Server on Azure VMs. - - -Register a SQL Server VM in lightweight mode with PowerShell (-LicenseType can be `PAYG` or `AHUB`): - -```powershell-interactive -# Get the existing compute VM -$vm = Get-AzVM -Name -ResourceGroupName - -# Register SQL VM with 'Lightweight' SQL IaaS agent -New-AzSqlVM -Name $vm.Name -ResourceGroupName $vm.ResourceGroupName -Location $vm.Location ` - -LicenseType PAYG -SqlManagementType LightWeight -``` - -## Configure connectivity - -If you deployed your SQL Server VMs in multiple subnets, skip this step. If you deployed your SQL Server VMs to a single subnet, then you'll need to configure an additional component to route traffic to your FCI. You can configure a virtual network name (VNN) with an Azure Load Balancer, or a distributed network name for a failover cluster instance. [Review the differences between the two](hadr-windows-server-failover-cluster-overview.md#virtual-network-name-vnn) and then deploy either a [distributed network name](failover-cluster-instance-distributed-network-name-dnn-configure.md) or a [virtual network name and Azure Load Balancer](failover-cluster-instance-vnn-azure-load-balancer-configure.md) for your failover cluster instance. 
- - -## Limitations - -- Azure virtual machines support Microsoft Distributed Transaction Coordinator (MSDTC) on Windows Server 2019 with storage on CSVs and a [standard load balancer](../../../load-balancer/load-balancer-overview.md). MSDTC is not supported on Windows Server 2016 and earlier. -- Disks that have been attached as NTFS-formatted disks can be used with Storage Spaces Direct only if the disk eligibility option is unchecked, or cleared, when storage is being added to the cluster. -- Only registering with the SQL IaaS Agent extension in [lightweight management mode](sql-server-iaas-agent-extension-automate-management.md#management-modes) is supported. -- Failover cluster instances using Storage Spaces Direct as the shared storage do not support using a disk witness for the quorum of the cluster. Use a cloud witness instead. - -## Next steps - -If Storage Spaces Direct isn't the appropriate FCI storage solution for you, consider creating your FCI by using [Azure shared disks](failover-cluster-instance-azure-shared-disks-manually-configure.md) or [Premium File Shares](failover-cluster-instance-premium-file-share-manually-configure.md) instead. 
- -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Failover cluster instances with SQL Server on Azure VMs](failover-cluster-instance-overview.md) -- [Failover cluster instance overview](/sql/sql-server/failover-clusters/windows/always-on-failover-cluster-instances-sql-server) -- [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md) diff --git a/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-vnn-azure-load-balancer-configure.md b/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-vnn-azure-load-balancer-configure.md deleted file mode 100644 index fa98eb2af475d..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/failover-cluster-instance-vnn-azure-load-balancer-configure.md +++ /dev/null @@ -1,277 +0,0 @@ ---- -title: Configure Azure Load Balancer a failover cluster instance VNN -description: Learn to configure an Azure Load Balancer to route traffic to the virtual network name (VNN) for your failover cluster instance (FCI) with SQL Server on Azure VMs for high availability and disaster recovery (HADR). -services: virtual-machines-windows -documentationcenter: na -author: rajeshsetlem -tags: azure-resource-manager -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.reviewer: mathoma - ---- -# Configure Azure Load Balancer for an FCI VNN -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!TIP] -> Eliminate the need for an Azure Load Balancer for failover cluster instance by creating your SQL Server VMs in multiple subnets within the same Azure virtual network. - -On Azure Virtual Machines, clusters use a load balancer to hold an IP address that needs to be on one cluster node at a time. 
In this solution, the load balancer holds the IP address for the virtual network name (VNN) used by the clustered resource in Azure. - -This article teaches you to configure a load balancer by using the Azure Load Balancer service. The load balancer will route traffic to your [failover cluster instance (FCI)](failover-cluster-instance-overview.md) with SQL Server on Azure VMs for high availability and disaster recovery (HADR). - -For an alternative connectivity option for SQL Server 2019 CU2 and later, consider a [distributed network name](failover-cluster-instance-distributed-network-name-dnn-configure.md) instead for simplified configuration and improved failover. - - -## Prerequisites - -Before you complete the steps in this article, you should already have: - -- Determined that Azure Load Balancer is the appropriate [connectivity option for your FCI](hadr-windows-server-failover-cluster-overview.md#virtual-network-name-vnn). -- Configured your [failover cluster instances](failover-cluster-instance-overview.md). -- Installed the latest version of [PowerShell](/powershell/scripting/install/installing-powershell-core-on-windows). - -## Create load balancer - -You can create either an internal load balancer or an external load balancer. An internal load balancer can only be accessed from private resources that are internal to the network. An external load balancer can route traffic from the public to internal resources. When you configure an internal load balancer, use the same IP address as the FCI resource for the frontend IP when configuring the load-balancing rules. When you configure an external load balancer, you cannot use the same IP address, because the FCI IP address cannot be a public IP address. As such, to use an external load balancer, logically allocate an IP address in the same subnet as the FCI that does not conflict with any other IP address, and use this address as the frontend IP address for the load-balancing rules. 
- - -Use the [Azure portal](https://portal.azure.com) to create the load balancer: - -1. In the Azure portal, go to the resource group that contains the virtual machines. - -1. Select **Add**. Search Azure Marketplace for **Load Balancer**. Select **Load Balancer**. - -1. Select **Create**. - -1. Set up the load balancer by using the following values: - - - **Subscription**: Your Azure subscription. - - **Resource group**: The resource group that contains your virtual machines. - - **Name**: A name that identifies the load balancer. - - **Region**: The Azure location that contains your virtual machines. - - **Type**: Either public or private. A private load balancer can be accessed from within the virtual network. Most Azure applications can use a private load balancer. If your application needs access to SQL Server directly over the internet, use a public load balancer. - - **SKU**: Standard. - - **Virtual network**: The same network as the virtual machines. - - **IP address assignment**: Static. - - **Private IP address**: The IP address that you assigned to the clustered network resource. - - The following image shows the **Create load balancer** UI: - - ![Set up the load balancer](./media/failover-cluster-instance-premium-file-share-manually-configure/30-load-balancer-create.png) - - -## Configure backend pool - -1. Return to the Azure resource group that contains the virtual machines and locate the new load balancer. You might need to refresh the view on the resource group. Select the load balancer. - -1. Select **Backend pools**, and then select **Add**. - -1. Associate the backend pool with the availability set that contains the VMs. - -1. Under **Target network IP configurations**, select **VIRTUAL MACHINE** and choose the virtual machines that will participate as cluster nodes. Be sure to include all virtual machines that will host the FCI. Only add the primary IP address of each VM, do not add any secondary IP addresses. - -1. 
Select **OK** to create the backend pool. - -## Configure health probe - -1. On the load balancer pane, select **Health probes**. - -1. Select **Add**. - -1. On the **Add health probe** pane, set the following health probe parameters: - - - **Name**: A name for the health probe. - - **Protocol**: TCP. - - **Port**: The port you created in the firewall for the health probe [when preparing the VM](failover-cluster-instance-prepare-vm.md#uninstall-sql-server-1). In this article, the example uses TCP port `59999`. - - **Interval**: 5 Seconds. - - **Unhealthy threshold**: 2 consecutive failures. - -1. Select **OK**. - -## Set load-balancing rules - -Set the load-balancing rules for the load balancer. - - -# [Private load balancer](#tab/ilb) - -Set the load-balancing rules for the private load balancer by following these steps: - -1. On the load balancer pane, select **Load-balancing rules**. -1. Select **Add**. -1. Set the load-balancing rule parameters: - - - **Name**: A name for the load-balancing rules. - - **Frontend IP address**: The IP address for the clustered network resource of the SQL Server FCI. - - **Port**: The SQL Server TCP port. The default instance port is 1433. - - **Backend port**: The same port as the **Port** value when you enable **Floating IP (direct server return)**. - - **Backend pool**: The backend pool name that you configured earlier. - - **Health probe**: The health probe that you configured earlier. - - **Session persistence**: None. - - **Idle timeout (minutes)**: 4. - - **Floating IP (direct server return)**: Enabled. - -1. Select **OK**. - -# [Public load balancer](#tab/elb) - -Set the load-balancing rules for the public load balancer by following these steps: - -1. On the load balancer pane, select **Load-balancing rules**. -1. Select **Add**. -1. Set the load-balancing rule parameters: - - - **Name**: A name for the load-balancing rules. 
- - **Frontend IP address**: The public IP address that clients use to connect to the public endpoint. - - **Port**: The SQL Server TCP port. The default instance port is 1433. - - **Backend port**: The port used by the FCI instance. The default is 1433. - - **Backend pool**: The backend pool name that you configured earlier. - - **Health probe**: The health probe that you configured earlier. - - **Session persistence**: None. - - **Idle timeout (minutes)**: 4. - - **Floating IP (direct server return)**: Disabled. - -1. Select **OK**. - ---- - - - -## Configure cluster probe - -Set the cluster probe port parameter in PowerShell. - -# [Private load balancer](#tab/ilb) - -To set the cluster probe port parameter, update the variables in the following script with values from your environment. Remove the angle brackets (`<` and `>`) from the script. - -```powershell -$ClusterNetworkName = "" -$IPResourceName = "" -$ILBIP = "" -[int]$ProbePort = - -Import-Module FailoverClusters - -Get-ClusterResource $IPResourceName | Set-ClusterParameter -Multiple @{"Address"="$ILBIP";"ProbePort"=$ProbePort;"SubnetMask"="255.255.255.255";"Network"="$ClusterNetworkName";"EnableDhcp"=0} -``` - -The following table describes the values that you need to update: - - -|**Value**|**Description**| -|---------|---------| -|`Cluster Network Name`| The Windows Server Failover Cluster name for the network. In **Failover Cluster Manager** > **Networks**, right-click the network and select **Properties**. The correct value is under **Name** on the **General** tab.| -|`SQL Server FCI IP Address Resource Name`|The resource name for the SQL Server FCI IP address. In **Failover Cluster Manager** > **Roles**, under the SQL Server FCI role, under **Server Name**, right-click the IP address resource and select **Properties**. The correct value is under **Name** on the **General** tab.| -|`ILBIP`|The IP address of the internal load balancer (ILB). 
This address is configured in the Azure portal as the ILB's frontend address. This is also the SQL Server FCI's IP address. You can find it in **Failover Cluster Manager** on the same properties page where you located the ``.| -|`nnnnn`|The probe port that you configured in the load balancer's health probe. Any unused TCP port is valid.| -|"SubnetMask"| The subnet mask for the cluster parameter. It must be the TCP IP broadcast address: `255.255.255.255`.| - - -After you set the cluster probe, you can see all the cluster parameters in PowerShell. Run this script: - -```powershell -Get-ClusterResource $IPResourceName | Get-ClusterParameter -``` - -# [Public load balancer](#tab/elb) - -To set the cluster probe port parameter, update the variables in the following script with values from your environment. Remove the angle brackets (`<` and `>`) from the script. - -```powershell -$ClusterNetworkName = "" -$IPResourceName = "" -$ELBIP = "" -[int]$ProbePort = - -Import-Module FailoverClusters - -Get-ClusterResource $IPResourceName | Set-ClusterParameter -Multiple @{"Address"="$ELBIP";"ProbePort"=$ProbePort;"SubnetMask"="255.255.255.255";"Network"="$ClusterNetworkName";"EnableDhcp"=0} -``` - -The following table describes the values that you need to update: - - -|**Value**|**Description**| -|---------|---------| -|`Cluster Network Name`| The Windows Server Failover Cluster name for the network. In **Failover Cluster Manager** > **Networks**, right-click the network and select **Properties**. The correct value is under **Name** on the **General** tab.| -|`SQL Server FCI IP Address Resource Name`|The resource name for the IP address of the SQL Server FCI. In **Failover Cluster Manager** > **Roles**, under the SQL Server FCI role, under **Server Name**, right-click the IP address resource and select **Properties**. The correct value is under **Name** on the **General** tab.| -|`ELBIP`|The IP address of the external load balancer (ELB). 
This address is configured in the Azure portal as the frontend address of the ELB and is used to connect to the public load balancer from external resources. | -|`nnnnn`|The probe port that you configured in the health probe of the load balancer. Any unused TCP port is valid.| -|"SubnetMask"| The subnet mask for the cluster parameter. It must be the TCP IP broadcast address: `255.255.255.255`.| - -After you set the cluster probe, you can see all the cluster parameters in PowerShell. Run this script: - -```powershell -Get-ClusterResource $IPResourceName | Get-ClusterParameter -``` - -> [!NOTE] -> Since there is no private IP address for the external load balancer, users cannot directly use the VNN DNS name as it resolves the IP address within the subnet. Use either the public IP address of the public LB or configure another DNS mapping on the DNS server. - ---- - -## Modify connection string - -For clients that support it, add the `MultiSubnetFailover=True` to the connection string. While the MultiSubnetFailover connection option is not required, it does provide the benefit of a faster subnet failover. This is because the client driver will attempt to open up a TCP socket for each IP address in parallel. The client driver will wait for the first IP to respond with success and once it does, will then use it for the connection. - -If your client does not support the MultiSubnetFailover parameter, you can modify the RegisterAllProvidersIP and HostRecordTTL settings to prevent connectivity delays upon failover. - -Use PowerShell to modify the RegisterAllProvidersIp and HostRecordTTL settings: - -```powershell -Get-ClusterResource yourFCIname | Set-ClusterParameter RegisterAllProvidersIP 0 -Get-ClusterResource yourFCIname | Set-ClusterParameter HostRecordTTL 300 -``` - -To learn more, see the SQL Server [listener connection timeout](/troubleshoot/sql/availability-groups/listener-connection-times-out) documentation. 
- -> [!TIP] -> - Set the MultiSubnetFailover parameter = true in the connection string even for HADR solutions that span a single subnet to support future spanning of subnets without the need to update connection strings. -> - By default, clients cache cluster DNS records for 20 minutes. By reducing HostRecordTTL you reduce the Time to Live (TTL) for the cached record, so legacy clients may reconnect more quickly. As such, reducing the HostRecordTTL setting may result in increased traffic to the DNS servers. - - -## Test failover - - -Test failover of the clustered resource to validate cluster functionality. - -Take the following steps: - -1. Connect to one of the SQL Server cluster nodes by using RDP. -1. Open **Failover Cluster Manager**. Select **Roles**. Notice which node owns the SQL Server FCI role. -1. Right-click the SQL Server FCI role. -1. Select **Move**, and then select **Best Possible Node**. - -**Failover Cluster Manager** shows the role, and its resources go offline. The resources then move and come back online on the other node. - - -## Test connectivity - -To test connectivity, sign in to another virtual machine in the same virtual network. Open **SQL Server Management Studio** and connect to the SQL Server FCI name. - -> [!NOTE] -> If you need to, you can [download SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms). 
- - - - - -## Next steps - -To learn more, see: - -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Failover cluster instances with SQL Server on Azure VMs](failover-cluster-instance-overview.md) -- [Failover cluster instance overview](/sql/sql-server/failover-clusters/windows/always-on-failover-cluster-instances-sql-server) -- [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md) - - - - diff --git a/articles/azure-sql/virtual-machines/windows/frequently-asked-questions-faq.yml b/articles/azure-sql/virtual-machines/windows/frequently-asked-questions-faq.yml deleted file mode 100644 index 8b58d961f9c60..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/frequently-asked-questions-faq.yml +++ /dev/null @@ -1,419 +0,0 @@ -### YamlMime:FAQ -metadata: - title: 'SQL Server on Windows Virtual Machines in Azure FAQ | Microsoft Docs' - description: This article provides answers to frequently asked questions about running SQL Server on Azure VMs. - services: virtual-machines-windows - documentationcenter: '' - author: bluefooted - editor: '' - tags: azure-service-management - ms.assetid: 2fa5ee6b-51a6-4237-805f-518e6c57d11b - ms.service: virtual-machines-sql - ms.subservice: service-overview - ms.topic: faq - ms.tgt_pltfrm: vm-windows-sql-server - ms.workload: iaas-sql-server - ms.date: 9/01/2021 - ms.author: pamela -title: Frequently asked questions for SQL Server on Azure VMs -summary: | - [!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - - > [!div class="op_single_selector"] - > * [Windows](frequently-asked-questions-faq.yml) - > * [Linux](../linux/frequently-asked-questions-faq.yml) - - This article provides answers to some of the most common questions about running [SQL Server on Windows Azure Virtual Machines (VMs)](https://azure.microsoft.com/services/virtual-machines/sql-server/). 
- - [!INCLUDE [support-disclaimer](../../../../includes/support-disclaimer.md)] - - -sections: - - name: Images - questions: - - question: | - What SQL Server virtual machine gallery images are available? - answer: | - Azure maintains virtual machine images for all supported major releases of SQL Server on all editions for both Windows and Linux. For more information, see the complete list of [Windows VM images](sql-server-on-azure-vm-iaas-what-is-overview.md#payasyougo) and [Linux VM images](../linux/sql-server-on-linux-vm-what-is-iaas-overview.md#create). - - - question: | - Are existing SQL Server virtual machine gallery images updated? - answer: Every two months, SQL Server images in the virtual machine gallery are updated with the latest Windows and Linux updates. For Windows images, this includes any updates that are marked important in Windows Update, including important SQL Server security updates and service packs. For Linux images, this includes the latest system updates. SQL Server cumulative updates are handled differently for Linux and Windows. For Linux, SQL Server cumulative updates are also included in the refresh. But at this time, Windows VMs are not updated with SQL Server or Windows Server cumulative updates. - - - question: | - Can SQL Server virtual machine images get removed from the gallery? - answer: | - Yes. Azure only maintains one image per major version and edition. For example, when a new SQL Server service pack is released, Azure adds a new image to the gallery for that service pack. The SQL Server image for the previous service pack is immediately removed from the Azure portal. However, it is still available for provisioning from PowerShell for the next three months. After three months, the previous service pack image is no longer available. This removal policy would also apply if a SQL Server version becomes unsupported when it reaches the end of its lifecycle. 
- - - - question: | - Is it possible to deploy an older image of SQL Server that is not visible in the Azure portal? - answer: | - Yes, by using PowerShell. For more information about deploying SQL Server VMs using PowerShell, see [How to provision SQL Server virtual machines with Azure PowerShell](create-sql-vm-powershell.md). - - - question: | - Is it possible to create a generalized Azure Marketplace SQL Server image of my SQL Server VM and use it to deploy VMs? - answer: | - Yes, but you must then [register each SQL Server VM with the SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md) to manage your SQL Server VM in the portal, as well as utilize features such as automated patching and automatic backups. When registering with the extension, you will also need to specify the license type for each SQL Server VM. - - - question: | - How do I generalize SQL Server on Azure VM and use it to deploy new VMs? - answer: | - You can deploy a Windows Server VM (without SQL Server installed on it) and use the [SQL sysprep](/sql/database-engine/install-windows/install-sql-server-using-sysprep) process to generalize SQL Server on Azure VM (Windows) with the SQL Server installation media. Customers who have [Software Assurance](https://www.microsoft.com/licensing/licensing-programs/software-assurance-default?rtc=1&activetab=software-assurance-default-pivot%3aprimaryr3) can obtain their installation media from the [Volume Licensing Center](https://www.microsoft.com/Licensing/servicecenter/default.aspx). Customers who don't have Software Assurance can use the setup media from an Azure Marketplace SQL Server VM image that has the desired edition. - - Alternatively, use one of the SQL Server images from Azure Marketplace to generalize SQL Server on Azure VM. Note that you must delete the following registry key in the source image before creating your own image. 
Failure to do so can result in the bloating of the SQL Server setup bootstrap folder and/or SQL IaaS extension in failed state. - - Registry Key path: - `Computer\HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Setup\SysPrepExternal\Specialize` - - > [!NOTE] - > SQL Server on Azure VMs, including those deployed from custom generalized images, should be [registered with the SQL IaaS Agent extension](./sql-agent-extension-manually-register-single-vm.md?tabs=azure-cli%252cbash) to meet compliance requirements and to utilize optional features such as automated patching and automatic backups. The extension also allows you to [specify the license type](./licensing-model-azure-hybrid-benefit-ahb-change.md?tabs=azure-portal) for each SQL Server VM. - - - question: | - Can I use my own VHD to deploy a SQL Server VM? - answer: | - Yes, but you must then [register each SQL Server VM with the SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md) to manage your SQL Server VM in the portal, as well as utilize features such as automated patching and automatic backups. - - - question: | - Is it possible to set up configurations not shown in the virtual machine gallery (for example Windows 2008 R2 + SQL Server 2012)? - answer: | - No. For virtual machine gallery images that include SQL Server, you must select one of the provided images either through the Azure portal or via [PowerShell](create-sql-vm-powershell.md). However, you have the ability to deploy a Windows VM and self-install SQL Server to it. You must then [register your SQL Server VM with the SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md) to manage your SQL Server VM in the Azure portal, as well as utilize features such as automated patching and automatic backups. - - - - name: Creation - questions: - - question: | - How do I create an Azure virtual machine with SQL Server? 
- answer: | - The easiest method is to create a virtual machine that includes SQL Server. For a tutorial on signing up for Azure and creating a SQL Server VM from the portal, see [Provision a SQL Server virtual machine in the Azure portal](create-sql-vm-portal.md). You can select a virtual machine image that uses pay-per-second SQL Server licensing, or you can use an image that allows you to bring your own SQL Server license. You also have the option of manually installing SQL Server on a VM with either a freely licensed edition (Developer or Express) or by reusing an on-premises license. Be sure to [register your SQL Server VM with the SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md) to manage your SQL Server VM in the portal, as well as utilize features such as automated patching and automatic backups. If you bring your own license, you must have [License Mobility through Software Assurance on Azure](https://azure.microsoft.com/pricing/license-mobility/). For more information, see [Pricing guidance for SQL Server Azure VMs](pricing-guidance.md). - - - question: | - How can I migrate my on-premises SQL Server database to the cloud? - answer: | - First create an Azure virtual machine with a SQL Server instance. Then migrate your on-premises databases to that instance. For data migration strategies, see [Migrate a SQL Server database to SQL Server in an Azure VM](migrate-to-vm-from-sql-server.md). - - - name: Licensing - questions: - - question: | - How can I install my licensed copy of SQL Server on an Azure VM? - answer: | - There are three ways to do this. If you're an Enterprise Agreement (EA) customer, you can provision one of the [virtual machine images that support licenses](sql-server-on-azure-vm-iaas-what-is-overview.md#BYOL), which is also known as bring-your-own-license (BYOL). 
If you have [Software Assurance](https://www.microsoft.com/en-us/licensing/licensing-programs/software-assurance-default), you can enable the [Azure Hybrid Benefit](licensing-model-azure-hybrid-benefit-ahb-change.md) on an existing pay-as-you-go (PAYG) image. Or you can copy the SQL Server installation media to a Windows Server VM, and then install SQL Server on the VM. Be sure to register your SQL Server VM with the [extension](sql-agent-extension-manually-register-single-vm.md) for features such as portal management, automated backup and automated patching. - - - - question: | - Does a customer need SQL Server Client Access Licenses (CALs) to connect to a SQL Server pay-as-you-go image that is running on Azure Virtual Machines? - answer: No. Customers need CALs when they use bring-your-own-license and move their SQL Server SA server / CAL VM to Azure VMs. - - - question: | - Can I change a VM to use my own SQL Server license if it was created from one of the pay-as-you-go gallery images? - answer: | - Yes. You can easily switch a pay-as-you-go (PAYG) gallery image to bring-your-own-license (BYOL) by enabling the [Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/faq/). For more information, see [How to change the licensing model for a SQL Server VM](licensing-model-azure-hybrid-benefit-ahb-change.md). Currently, this facility is only available for public and Azure Government cloud customers. - - - - question: | - Will switching licensing models require any downtime for SQL Server? - answer: | - No. [Changing the licensing model](licensing-model-azure-hybrid-benefit-ahb-change.md) does not require any downtime for SQL Server as the change is effective immediately and does not require a restart of the VM. - - - question: | - Is it possible to switch licensing models on a SQL Server VM deployed using classic model? - answer: No. Changing licensing models is not supported on a classic VM. 
You may migrate your VM to the Azure Resource Manager model and register with the SQL IaaS Agent extension. Once the VM is registered with the SQL IaaS Agent extension, licensing model changes will be available on the VM. - - - question: | - Can I use the Azure portal to manage multiple instances on the same VM? - answer: | - No. Portal management is a feature provided by the SQL IaaS Agent extension, which relies on the SQL Server IaaS Agent extension. As such, the same limitations apply to the portal as to the extension. The portal can either only manage one default instance, or one named instance, as long as it was configured correctly. For more information on these limitations, see [SQL Server IaaS agent extension](sql-server-iaas-agent-extension-automate-management.md). - - - question: | - Can CSP subscriptions activate the Azure Hybrid Benefit? - answer: | - Yes, the Azure Hybrid Benefit is available for CSP subscriptions. CSP customers should first deploy a pay-as-you-go image, and then [change the licensing model](licensing-model-azure-hybrid-benefit-ahb-change.md) to bring-your-own-license. - - - - question: | - Do I have to pay to license SQL Server on an Azure VM if it is only being used for standby/failover? - answer: | - To have a free passive license for a standby secondary availability group or failover clustered instance, you must meet all of the following criteria as outlined by the [Product Licensing Terms](https://www.microsoft.com/licensing/product-licensing/products): - - 1. You have [license mobility](https://www.microsoft.com/licensing/licensing-programs/software-assurance-license-mobility?activetab=software-assurance-license-mobility-pivot:primaryr2) through [Software Assurance](https://www.microsoft.com/licensing/licensing-programs/software-assurance-default?activetab=software-assurance-default-pivot%3aprimaryr3). - 1. The passive SQL Server instance does not serve SQL Server data to clients or run active SQL Server workloads. 
It is only used to synchronize with the primary server and otherwise maintain the passive database in a warm standby state. If it is serving data, such as reports to clients running active SQL Server workloads, or performing any work other than what is specified in the product terms, it must be a paid licensed SQL Server instance. The following activity is permitted on the secondary instance: database consistency checks or CheckDB, full backups, transaction log backups, and monitoring resource usage data. You may also run the primary and corresponding disaster recovery instance simultaneously for brief periods of disaster recovery testing every 90 days. - 1. The active SQL Server license is covered by Software Assurance and allows for **one** passive secondary SQL Server instance, with up to the same amount of compute as the licensed active server, only. - 1. The secondary SQL Server VM utilizes the [Disaster Recovery](business-continuity-high-availability-disaster-recovery-hadr-overview.md#free-dr-replica-in-azure) license in the Azure portal. - - - question: | - What is considered a passive instance? - answer: | - The passive SQL Server instance does not serve SQL Server data to clients or run active SQL Server workloads. It is only used to synchronize with the primary server and otherwise maintain the passive database in a warm standby state. If it is serving data, such as reports to clients running active SQL Server workloads, or performing any work other than what is specified in the product terms, it must be a paid licensed SQL Server instance. The following activity is permitted on the secondary instance: database consistency checks or CheckDB, full backups, transaction log backups, and monitoring resource usage data. You may also run the primary and corresponding disaster recovery instance simultaneously for brief periods of disaster recovery testing every 90 days. - - - - question: | - What scenarios can utilize the Disaster Recovery (DR) benefit? 
- answer: | - The [licensing guide](https://aka.ms/sql2019licenseguide) provides scenarios in which the Disaster Recovery Benefit can be utilized. Refer to your Product Terms and talk to your licensing contacts or account manager for more information. - - - question: | - Which subscriptions support the Disaster Recovery (DR) benefit? - answer: | - Comprehensive programs that offer Software Assurance equivalent subscription rights as a fixed benefit support the DR benefit. This includes, but is not limited to, the Open Value (OV), Open Value Subscription (OVS), Enterprise Agreement (EA), Enterprise Agreement Subscription (EAS), and the Server and Cloud Enrollment (SCE). Refer to the [product terms](https://www.microsoft.com/licensing/product-licensing/products) and talk to your licensing contacts or account manager for more information. - - - - name: Administration - questions: - - question: | - Can I install a second instance of SQL Server on the same VM? Can I change installed features of the default instance? - answer: | - Yes. The SQL Server installation media is located in a folder on the **C** drive. Run **Setup.exe** from that location to add new SQL Server instances or to change other installed features of SQL Server on the machine. Note that some features, such as Automated Backup, Automated Patching, and Azure Key Vault Integration, only operate against the default instance, or a named instance that was configured properly (See Question 3). Customers using [Software Assurance through the Azure Hybrid Benefit](licensing-model-azure-hybrid-benefit-ahb-change.md) or the **pay-as-you-go** licensing model can install multiple instances of SQL Server on the virtual machine without incurring extra licensing costs. Additional SQL Server instances may strain system resources unless configured correctly. - - - question: | - What is the maximum number of instances on a VM? 
- answer: | - SQL Server 2012 to SQL Server 2019 can support [50 instances](/sql/sql-server/editions-and-components-of-sql-server-version-15#RDBMSSP) on a stand-alone server. This is the same limit regardless of whether the server is in Azure or on-premises. See [best practices](./performance-guidelines-best-practices-checklist.md) to learn how to better prepare your environment. - - - question: | - Can I uninstall the default instance of SQL Server? - answer: | - Yes, but there are some considerations. First, SQL Server-associated billing may continue to occur depending on the license model for the VM. Second, as stated in the previous answer, there are features that rely on the [SQL Server IaaS Agent Extension](sql-server-iaas-agent-extension-automate-management.md). If you uninstall the default instance without removing the IaaS extension also, the extension continues to look for the default instance and may generate event log errors. These errors are from the following two sources: **Microsoft SQL Server Credential Management** and **Microsoft SQL Server IaaS Agent**. One of the errors might be similar to the following: - - A network-related or instance-specific error occurred while establishing a connection to SQL Server. The server was not found or was not accessible. - - If you do decide to uninstall the default instance, also uninstall the [SQL Server IaaS Agent Extension](sql-server-iaas-agent-extension-automate-management.md) as well. - - - question: | - Can I use a named instance of SQL Server with the IaaS extension? - answer: | - Yes, if the named instance is the only instance on the SQL Server, and if the original default instance was [uninstalled properly](sql-server-iaas-agent-extension-automate-management.md#named-instance-support). If there is no default instance and there are multiple named instances on a single SQL Server VM, the SQL Server IaaS agent extension will fail to install. 
- - - question: | - Can I remove SQL Server and the associated license billing from a SQL Server VM? - answer: | - Yes, but you'll need to take additional steps to avoid being charged for your SQL Server instance as described in [Pricing guidance](pricing-guidance.md). If you want to completely remove the SQL Server instance, you can migrate to another Azure VM without SQL Server pre-installed on the VM and delete the current SQL Server VM. If you want to keep the VM but stop SQL Server billing, follow these steps: - - 1. Back up all of your data, including system databases, if necessary. - 1. Uninstall SQL Server completely, including the SQL IaaS extension (if present). - 1. Install the free [SQL Express edition](https://www.microsoft.com/sql-server/sql-server-downloads). - 1. Register with the SQL IaaS Agent extension in [lightweight mode](sql-agent-extension-manually-register-single-vm.md). - 1. [Change the edition of SQL Server](change-sql-server-edition.md#change-edition-in-portal) in the [Azure portal](https://portal.azure.com) to Express to stop billing. - 1. (optional) Disable the Express SQL Server service by disabling service startup. - - - question: | - Can I use the Azure portal to manage multiple instances on the same VM? - answer: | - No. Portal management is provided by the SQL IaaS Agent extension, which relies on the SQL Server IaaS Agent extension. As such, the same limitations apply to the portal as the extension. The portal can either only manage one default instance, or one named instance as long as it's configured correctly. For more information, see [SQL Server IaaS Agent extension](sql-server-iaas-agent-extension-automate-management.md) - - - question: | - Is Azure Active Directory Domain Services (Azure AD DS) supported with SQL Server on Azure VMs? - answer: | - No. Using Azure Active Directory Domain Services (Azure AD DS) is not currently supported with SQL Server on Azure VMs. Use an Active Directory domain account instead. 
- - - name: Updating and patching - questions: - - question: | - How do I change to a different version/edition of SQL Server in an Azure VM? - answer: | - Customers can change their version/edition of SQL Server by using setup media that contains their desired version or edition of SQL Server. Once the edition has been changed, use the Azure portal to modify the edition property of the VM to accurately reflect billing for the VM. For more information, see [change edition of a SQL Server VM](change-sql-server-edition.md). There is no billing difference for different versions of SQL Server, so once the version of SQL Server has been changed, no further action is needed. - - - question: | - Where can I get the setup media to change the edition or version of SQL Server? - answer: | - Customers who have [Software Assurance](https://www.microsoft.com/licensing/licensing-programs/software-assurance-default) can obtain their installation media from the [Volume Licensing Center](https://www.microsoft.com/Licensing/servicecenter/default.aspx). Customers that do not have Software Assurance can use the setup media from an Azure Marketplace SQL Server VM image that has their desired edition. - - - question: | - How are updates and service packs applied on a SQL Server VM? - answer: | - Virtual machines give you control over the host machine, including when and how you apply updates. For the operating system, you can manually apply windows updates, or you can enable a scheduling service called [Automated Patching](automated-patching.md). Automated Patching installs any updates that are marked important, including SQL Server updates in that category. Other optional updates to SQL Server must be installed manually. - - - question: | - Can I upgrade my SQL Server 2008 / 2008 R2 instance after registering it with the SQL IaaS Agent extension? - answer: | - If the OS is Windows Server 2008 R2 or later, yes. 
You can use any setup media to upgrade the version and edition of SQL Server, and then you can upgrade your [SQL IaaS extension mode](sql-server-iaas-agent-extension-automate-management.md#management-modes) from _no agent_ to _full_. Doing so will give you access to all the benefits of the SQL IaaS extension such as portal manageability, automated backups, and automated patching. If the OS version is Windows Server 2008, only NoAgent mode is supported. - - - question: | - How can I get free extended security updates for my end of support SQL Server 2008 and SQL Server 2008 R2 instances? - answer: | - You can get [free extended security updates](sql-server-2008-extend-end-of-support.md) by moving your SQL Server as-is to an Azure virtual machine. For more information, see [end of support options](/sql/sql-server/end-of-support/sql-server-end-of-life-overview). - - - - - name: General - questions: - - question: | - Are SQL Server failover cluster instances (FCI) supported on Azure VMs? - answer: | - Yes. You can configure a [failover cluster instance](failover-cluster-instance-overview.md) using [Azure shared disks](failover-cluster-instance-azure-shared-disks-manually-configure.md), [premium file shares (PFS)](failover-cluster-instance-premium-file-share-manually-configure.md), or [storage spaces direct (S2D)](failover-cluster-instance-storage-spaces-direct-manually-configure.md) for the storage subsystem. Premium file shares provide IOPS and throughput capacities that meet the needs of many workloads. For IO-intensive workloads, consider using storage spaces direct based on managed premium or ultra-disks. Alternatively, you can use third-party clustering or storage solutions as described in [High availability and disaster recovery for SQL Server on Azure Virtual Machines](business-continuity-high-availability-disaster-recovery-hadr-overview.md#azure-only-high-availability-solutions). 
- - > [!IMPORTANT] - > At this time, the _full_ [SQL Server IaaS Agent Extension](sql-server-iaas-agent-extension-automate-management.md) is not supported for SQL Server FCI on Azure. We recommend that you uninstall the _full_ extension from VMs that participate in the FCI, and install the extension in _lightweight_ mode instead. This extension supports features, such as Automated Backup and Patching and some portal features for SQL Server. These features will not work for SQL Server VMs after the _full_ agent is uninstalled. - - - question: | - What is the difference between SQL Server VMs and the SQL Database service? - answer: | - Conceptually, running SQL Server on an Azure virtual machine is not that different from running SQL Server in a remote datacenter. In contrast, [Azure SQL Database](../../database/sql-database-paas-overview.md) offers database-as-a-service. With SQL Database, you do not have access to the machines that host your databases. For a full comparison, see [Choose a cloud SQL Server option: Azure SQL (PaaS) Database or SQL Server on Azure VMs (IaaS)](../../azure-sql-iaas-vs-paas-what-is-overview.md). - - - question: | - How do I install SQL Data tools on my Azure VM? - answer: | - Download and install the SQL Data tools from [Microsoft SQL Server Data Tools - Business Intelligence for Visual Studio 2013](https://www.microsoft.com/download/details.aspx?id=42313). - - - question: | - Are distributed transactions with MSDTC supported on SQL Server VMs? - answer: Yes. Local DTC is supported for SQL Server 2016 SP2 and greater. However, applications must be tested when utilizing Always On availability groups, as transactions in-flight during a failover will fail and must be retried. Clustered DTC is available starting with Windows Server 2019. - - - question: | - Does Azure SQL virtual machine move or store customer data out of region? - answer: No. In fact, Azure SQL virtual machine and the SQL IaaS Agent Extension do not store any customer data. 
- - - question: | - What Azure Load Balancer SKU should be used for a cross-cluster migration of an availability group? - answer: To perform a [cross-cluster migration of an availability group](http://download.microsoft.com/download/d/2/0/d20e1c5f-72ea-4505-9f26-fef9550efd44/alwayson%20ag%20os%20upgrade.docx) on SQL Server on Azure VMs, use the standard [Azure Load Balancer SKU](../../../load-balancer/skus.md). - - - question: | - Can I use Azure premium file share to host my database files on a standalone instance of SQL Server? - answer: Yes. Azure premium file shares are supported for both failover cluster instances and standalone instances of SQL Server using the [SMB protocol](/sql/database-engine/install-windows/install-sql-server-with-smb-fileshare-as-a-storage-option). - - - name: SQL Server IaaS Agent extension - questions: - - question: | - Should I register my SQL Server VM provisioned from a SQL Server image in Azure Marketplace? - answer: No. Microsoft automatically registers VMs provisioned from the SQL Server images in Azure Marketplace. Registering with the extension is required only if the VM was *not* provisioned from the SQL Server images in Azure Marketplace and SQL Server was self-installed. - - - question: | - Is the SQL IaaS Agent extension available for all customers? - answer: Yes. Customers should register their SQL Server VMs with the extension if they did not use a SQL Server image from Azure Marketplace and instead self-installed SQL Server, or if they brought their custom VHD. VMs owned by all types of subscriptions (Direct, Enterprise Agreement, and Cloud Solution Provider) can register with the SQL IaaS Agent extension. - - - question: | - What is the default management mode when registering with the SQL IaaS Agent extension? - answer: | - The default management mode when you register with the SQL IaaS Agent extension is *lightweight*. 
If the SQL Server management property isn't set when you register with the extension, the mode will be set as lightweight. The default management mode when using the [automatic registration feature](sql-agent-extension-automatic-registration-all-vms.md) is also lightweight mode. - - - question: | - What are the prerequisites to register with the SQL IaaS Agent extension? - answer: There are no prerequisites to registering with the SQL IaaS Agent extension other than having SQL Server installed on the VM. - - - question: | - Will registering with the SQL IaaS Agent extension install an agent on my VM? - answer: | - Yes, registering with the SQL IaaS Agent extension in full manageability mode installs an agent to the VM. Registering in lightweight, or NoAgent mode does not. - - Registering with the SQL IaaS Agent extension in lightweight mode only copies the SQL IaaS Agent extension *binaries* to the VM, it does not install the agent. These binaries are then used to install the agent when the management mode is upgraded to full. - - - - question: | - Will registering with the SQL IaaS Agent extension restart SQL Server on my VM? - answer: Starting in September 2021, restarting the SQL Server service is no longer required when registering with the [SQL IaaS extension](sql-server-iaas-agent-extension-automate-management.md) in [full management mode](sql-agent-extension-manually-register-single-vm.md). - - - question: | - What is the difference between lightweight and NoAgent management modes when registering with the SQL IaaS Agent extension? - answer: | - NoAgent management mode is the only available management mode for SQL Server 2008 and SQL Server 2008 R2 on Windows Server 2008. For all later versions of Windows Server, the two available manageability modes are lightweight and full. - - NoAgent mode requires SQL Server version and edition properties to be set by the customer. 
Lightweight mode queries the VM to find the version and edition of the SQL Server instance. - - - question: | - Can I register with the SQL IaaS Agent extension without specifying the SQL Server license type? - answer: No. The SQL Server license type is not an optional property when you're registering with the SQL IaaS Agent extension. You have to set the SQL Server license type as pay-as-you-go or Azure Hybrid Benefit when registering with the SQL IaaS Agent extension in all manageability modes (NoAgent, lightweight, and full). If you have any of the free versions of SQL Server installed, such as Developer or Evaluation edition, you must register with pay-as-you-go licensing. Azure Hybrid Benefit is only available for paid versions of SQL Server such as Enterprise and Standard editions. - - - question: | - What is the default license type when using the automatic registration feature? - answer: The license type automatically defaults to that of the VM image. If you use a pay-as-you-go image for your VM, then your license type will be `PAYG`, otherwise your license type will be `AHUB` by default. - - - question: | - Can I upgrade the SQL Server IaaS extension from NoAgent mode to full mode? - answer: No. Upgrading the manageability mode to full or lightweight is not available for NoAgent mode. This is a technical limitation of Windows Server 2008. You will need to upgrade the OS first to Windows Server 2008 R2 or greater, and then you will be able to upgrade to full management mode. - - - question: | - Can I upgrade the SQL Server IaaS extension from lightweight mode to full mode? - answer: Yes. Upgrading the manageability mode from lightweight to full is supported via Azure PowerShell or the Azure portal. - - - question: | - Can I downgrade the SQL Server IaaS extension from full mode to NoAgent or lightweight management mode? - answer: | - No. Downgrading the SQL Server IaaS extension manageability mode is not supported. 
The manageability mode can't be downgraded from full mode to lightweight or NoAgent mode, and it can't be downgraded from lightweight mode to NoAgent mode. - - To change the manageability mode from full manageability, [unregister](sql-agent-extension-manually-register-single-vm.md#unregister-from-extension) the SQL Server VM from the SQL IaaS Agent extension by dropping the SQL virtual machine _resource_ and re-register the SQL Server VM with the SQL IaaS Agent extension again in a different management mode. - - - question: | - Can I register with the SQL IaaS Agent extension from the Azure portal? - answer: No. Registering with the SQL IaaS Agent extension is not available in the Azure portal. Registering with the SQL IaaS Agent extension is only supported with the Azure CLI or Azure PowerShell. - - - question: | - Can I register a VM with the SQL IaaS Agent extension before SQL Server is installed? - answer: No. A VM must have at least one SQL Server (Database Engine) instance to successfully register with the SQL IaaS Agent extension. If there is no SQL Server instance on the VM, the new Microsoft.SqlVirtualMachine resource will be in a failed state. - - - question: | - Can I register a VM with the SQL IaaS Agent extension if there are multiple SQL Server instances? - answer: Yes, provided there is a default instance on the VM. The SQL IaaS Agent extension will register only one SQL Server (Database Engine) instance. The SQL IaaS Agent extension will register the default SQL Server instance in the case of multiple instances. - - - question: | - Can I register a SQL Server failover cluster instance with the SQL IaaS Agent extension? - answer: Yes. SQL Server failover cluster instances on an Azure VM can be registered with the SQL IaaS Agent extension in lightweight mode. However, SQL Server failover cluster instances can't be upgraded to full manageability mode. 
- - - question: | - Can I register my VM with the SQL IaaS Agent extension if an Always On availability group is configured? - answer: Yes. There are no restrictions to registering a SQL Server instance on an Azure VM with the SQL IaaS Agent extension if you're participating in an Always On availability group configuration. - - - question: | - What is the cost for registering with the SQL IaaS Agent extension, or with upgrading to full manageability mode? - answer: None. There is no fee associated with registering with the SQL IaaS Agent extension, or with using any of the three manageability modes. Managing your SQL Server VM with the extension is completely free. - - - question: | - What is the performance impact of using the different manageability modes? - answer: | - There is no impact when using the *NoAgent* and *lightweight* manageability modes. There is minimal impact when using the *full* manageability mode from two services that are installed to the OS. These can be monitored via task manager and seen in the built-in Windows Services console. - - The two service names are: - - `SqlIaaSExtensionQuery` (Display name - `Microsoft SQL Server IaaS Query Service`) - - `SQLIaaSExtension` (Display name - `Microsoft SQL Server IaaS Agent`) - - - question: | - How do I remove the extension? - answer: | - Remove the extension by [unregistering](sql-agent-extension-manually-register-single-vm.md#unregister-from-extension) the SQL Server VM from the SQL IaaS Agent extension. - - question: | - Will registering my VM with the new SQL IaaS Agent extension bring additional costs? - answer: No. The SQL IaaS Agent extension just enables additional manageability for SQL Server on Azure VM with no additional charges. - - - question: | - Is the SQL IaaS Agent extension available for all customers? - answer: | - Yes, as long as the SQL Server VM was deployed on the public cloud using the Resource Manager model, and not the classic model. 
All other customers are able to register with the new SQL IaaS Agent extension. However, only customers with the [Software Assurance](https://www.microsoft.com/licensing/licensing-programs/software-assurance-default?activetab=software-assurance-default-pivot%3aprimaryr3) benefit can use their own license by activating the [Azure Hybrid Benefit (AHB)](https://azure.microsoft.com/pricing/hybrid-benefit/) on a SQL Server VM. - - - question: | - What happens to the extension ('Microsoft.SqlVirtualMachine') resource if the VM resource is moved or dropped? - answer: When the Microsoft.Compute/VirtualMachine resource is dropped or moved, then the associated Microsoft.SqlVirtualMachine resource is notified to asynchronously replicate the operation. - - - question: | - What happens to the VM if the extension ('Microsoft.SqlVirtualMachine') resource is dropped? - answer: The Microsoft.Compute/VirtualMachine resource is not impacted when the Microsoft.SqlVirtualMachine resource is dropped. However, the licensing changes will default back to the original image source. - - - question: | - Is it possible to register self-deployed SQL Server VMs with the SQL IaaS Agent extension? - answer: | - Yes. If you deployed SQL Server from your own media, and installed the SQL IaaS extension you can register your SQL Server VM with the extension to get the manageability benefits provided by the SQL IaaS extension. - - question: | - Is it possible to repair the SQL IaaS agent extension? - answer: | - Yes. Navigate to the **SQL virtual machines** resource for your SQL Server VM, and choose **Repair** under **Support & troubleshooting** to open the [repair](sql-agent-extension-manually-register-single-vm.md#repair-extension) page and repair the extension. 
- - - - - -additionalContent: | - - ## Resources - - **Windows VMs**: - - * [Overview of SQL Server on a Windows VM](sql-server-on-azure-vm-iaas-what-is-overview.md) - * [Provision SQL Server on a Windows VM](create-sql-vm-portal.md) - * [Migrating a Database to SQL Server on an Azure VM](migrate-to-vm-from-sql-server.md) - * [High Availability and Disaster Recovery for SQL Server on Azure Virtual Machines](business-continuity-high-availability-disaster-recovery-hadr-overview.md) - * [Performance best practices for SQL Server on Azure Virtual Machines](./performance-guidelines-best-practices-checklist.md) - * [Application Patterns and Development Strategies for SQL Server on Azure Virtual Machines](application-patterns-development-strategies.md) - - **Linux VMs**: - - * [Overview of SQL Server on a Linux VM](../linux/sql-server-on-linux-vm-what-is-iaas-overview.md) - * [Provision SQL Server on a Linux VM](../linux/sql-vm-create-portal-quickstart.md) - * [FAQ (Linux)](../linux/frequently-asked-questions-faq.yml) - * [SQL Server on Linux documentation](/sql/linux/sql-server-linux-overview) diff --git a/articles/azure-sql/virtual-machines/windows/hadr-cluster-best-practices.md b/articles/azure-sql/virtual-machines/windows/hadr-cluster-best-practices.md deleted file mode 100644 index e962b02b63cd7..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/hadr-cluster-best-practices.md +++ /dev/null @@ -1,386 +0,0 @@ ---- -title: HADR configuration best practices -description: "Learn about the supported cluster configurations when you configure high availability and disaster recovery (HADR) for SQL Server on Azure Virtual Machines, such as supported quorums or connection routing options." 
-services: virtual-machines -documentationCenter: na -author: rajeshsetlem -editor: monicar -tags: azure-service-management -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.topic: conceptual -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.reviewer: mathoma ---- - -# HADR configuration best practices (SQL Server on Azure VMs) -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -A [Windows Server Failover Cluster](hadr-windows-server-failover-cluster-overview.md) is used for high availability and disaster recovery (HADR) with SQL Server on Azure Virtual Machines (VMs). - -This article provides cluster configuration best practices for both [failover cluster instances (FCIs)](failover-cluster-instance-overview.md) and [availability groups](availability-group-overview.md) when you use them with SQL Server on Azure VMs. - -To learn more, see the other articles in this series: [Checklist](performance-guidelines-best-practices-checklist.md), [VM size](performance-guidelines-best-practices-vm-size.md), [Storage](performance-guidelines-best-practices-storage.md), [Security](security-considerations-best-practices.md), [HADR configuration](hadr-cluster-best-practices.md), [Collect baseline](performance-guidelines-best-practices-collect-baseline.md). - -## Checklist - -Review the following checklist for a brief overview of the HADR best practices that the rest of the article covers in greater detail. - -For your Windows cluster, consider these best practices: - -* Deploy your SQL Server VMs to multiple subnets whenever possible to avoid the dependency on an Azure Load Balancer or a distributed network name (DNN) to route traffic to your HADR solution. -* Change the cluster to less aggressive parameters to avoid unexpected outages from transient network failures or Azure platform maintenance. To learn more, see [heartbeat and threshold settings](#heartbeat-and-threshold). 
For Windows Server 2012 and later, use the following recommended values: - - **SameSubnetDelay**: 1 second - - **SameSubnetThreshold**: 40 heartbeats - - **CrossSubnetDelay**: 1 second - - **CrossSubnetThreshold**: 40 heartbeats -* Place your VMs in an availability set or different availability zones. To learn more, see [VM availability settings](#vm-availability-settings). -* Use a single NIC per cluster node. -* Configure cluster [quorum voting](#quorum-voting) to use 3 or more odd number of votes. Do not assign votes to DR regions. -* Carefully monitor [resource limits](#resource-limits) to avoid unexpected restarts or failovers due to resource constraints. - - Ensure your OS, drivers, and SQL Server are at the latest builds. - - Optimize performance for SQL Server on Azure VMs. Review the other sections in this article to learn more. - - Reduce or spread out workload to avoid resource limits. - - Move to a VM or disk that has higher limits to avoid constraints. - -For your SQL Server availability group or failover cluster instance, consider these best practices: - -* If you're experiencing frequent unexpected failures, follow the performance best practices outlined in the rest of this article. -* If optimizing SQL Server VM performance does not resolve your unexpected failovers, consider [relaxing the monitoring](#relaxed-monitoring) for the availability group or failover cluster instance. However, doing so may not address the underlying source of the issue and could mask symptoms by reducing the likelihood of failure. You may still need to investigate and address the underlying root cause. For Windows Server 2012 or higher, use the following recommended values: - - **Lease timeout**: Use this equation to calculate the maximum lease time out value: - `Lease timeout < (2 * SameSubnetThreshold * SameSubnetDelay)`. - Start with 40 seconds. 
If you're using the relaxed `SameSubnetThreshold` and `SameSubnetDelay` values recommended previously, do not exceed 80 seconds for the lease timeout value. - - **Max failures in a specified period**: Set this value to 6. -* When using the virtual network name (VNN) and an Azure Load Balancer to connect to your HADR solution, specify `MultiSubnetFailover = true` in the connection string, even if your cluster only spans one subnet. - - If the client does not support `MultiSubnetFailover = True` you may need to set `RegisterAllProvidersIP = 0` and `HostRecordTTL = 300` to cache client credentials for shorter durations. However, doing so may cause additional queries to the DNS server. -- To connect to your HADR solution using the distributed network name (DNN), consider the following: - - You must use a client driver that supports `MultiSubnetFailover = True`, and this parameter must be in the connection string. - - Use a unique DNN port in the connection string when connecting to the DNN listener for an availability group. -- Use a database mirroring connection string for a basic availability group to bypass the need for a load balancer or DNN. -- Validate the sector size of your VHDs before deploying your high availability solution to avoid having misaligned I/Os. See [KB3009974](https://support.microsoft.com/topic/kb3009974-fix-slow-synchronization-when-disks-have-different-sector-sizes-for-primary-and-secondary-replica-log-files-in-sql-server-ag-and-logshipping-environments-ed181bf3-ce80-b6d0-f268-34135711043c) to learn more. -- If the SQL Server database engine, Always On availability group listener, or failover cluster instance health probe are configured to use a port between 49,152 and 65,536 (the [default dynamic port range for TCP/IP](/windows/client-management/troubleshoot-tcpip-port-exhaust#default-dynamic-port-range-for-tcpip)), add an exclusion for each port. Doing so will prevent other systems from being dynamically assigned the same port. 
The following example creates an exclusion for port 59999: -`netsh int ipv4 add excludedportrange tcp startport=59999 numberofports=1 store=persistent` - -## VM availability settings - -To reduce the impact of downtime, consider the following best VM availability settings: - -* Use proximity placement groups together with accelerated networking for lowest latency. -* Place virtual machine cluster nodes in separate availability zones to protect from datacenter-level failures or in a single availability set for lower-latency redundancy within the same datacenter. -* Use premium-managed OS and data disks for VMs in an availability set. -* Configure each application tier into separate availability sets. - -## Quorum - -Although a two-node cluster will function without a [quorum resource](/windows-server/storage/storage-spaces/understand-quorum), customers are strictly required to use a quorum resource to have production support. Cluster validation won't pass any cluster without a quorum resource. - -Technically, a three-node cluster can survive a single node loss (down to two nodes) without a quorum resource. But after the cluster is down to two nodes, there's a risk that the clustered resources will go offline in the event of a node loss or communication failure, to prevent a split-brain scenario. Configuring a quorum resource will allow the cluster to continue online with only one node online. - -The disk witness is the most resilient quorum option, but to use a disk witness on a SQL Server on Azure VM, you must use an Azure Shared Disk which imposes some limitations to the high availability solution. As such, use a disk witness when you're configuring your failover cluster instance with Azure Shared Disks, otherwise use a cloud witness whenever possible. 
- -The following table lists the quorum options available for SQL Server on Azure VMs: - -| |[Cloud witness](/windows-server/failover-clustering/deploy-cloud-witness) |[Disk witness](/windows-server/failover-clustering/manage-cluster-quorum#configure-the-cluster-quorum) |[File share witness](/windows-server/failover-clustering/manage-cluster-quorum#configure-the-cluster-quorum) | -|---------|---------|---------|---------| -|**Supported OS**| Windows Server 2016+ |All | All| - -- The **cloud witness** is ideal for deployments in multiple sites, multiple zones, and multiple regions. Use a cloud witness whenever possible, unless you're using a shared-storage cluster solution. -- The **disk witness** is the most resilient quorum option and is preferred for any cluster that uses Azure Shared Disks (or any shared-disk solution like shared SCSI, iSCSI, or fiber channel SAN). A Clustered Shared Volume cannot be used as a disk witness. -- The **fileshare witness** is suitable for when the disk witness and cloud witness are unavailable options. - -To get started, see [Configure cluster quorum](hadr-cluster-quorum-configure-how-to.md). - -## Quorum Voting - -It's possible to change the quorum vote of a node participating in a Windows Server Failover Cluster. - -When modifying the node vote settings, follow these guidelines: - -| Quorum voting guidelines | -|-| -| Start with each node having no vote by default. Each node should only have a vote with explicit justification.| -| Enable votes for cluster nodes that host the primary replica of an availability group, or the preferred owners of a failover cluster instance. | -| Enable votes for automatic failover owners. Each node that may host a primary replica or FCI as a result of an automatic failover should have a vote. | -| If an availability group has more than one secondary replica, only enable votes for the replicas that have automatic failover. | -| Disable votes for nodes that are in secondary disaster recovery sites. 
Nodes in secondary sites should not contribute to the decision of taking a cluster offline if there's nothing wrong with the primary site. | -| Have an odd number of votes, with three quorum votes minimum. Add a [quorum witness](hadr-cluster-quorum-configure-how-to.md) for an additional vote if necessary in a two-node cluster. | -| Reassess vote assignments post-failover. You don't want to fail over into a cluster configuration that doesn't support a healthy quorum. | - - -## Connectivity - -To match the on-premises experience for connecting to your availability group listener or failover cluster instance, deploy your SQL Server VMs to multiple subnets within the same virtual network. Having multiple subnets negates the need for the extra dependency on an Azure Load Balancer, or a distributed network name to route your traffic to your listener. - -To simplify your HADR solution, deploy your SQL Server VMs to multiple subnets whenever possible. To learn more, see [Multi-subnet AG](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md), and [Multi-subnet FCI](failover-cluster-instance-prepare-vm.md#subnets). - -If your SQL Server VMs are in a single subnet, it's possible to configure either a virtual network name (VNN) and an Azure Load Balancer, or a distributed network name (DNN) for both failover cluster instances and availability group listeners. - -The distributed network name is the recommended connectivity option, when available: -- The end-to-end solution is more robust since you no longer have to maintain the load balancer resource. -- Eliminating the load balancer probes minimizes failover duration. -- The DNN simplifies provisioning and management of the failover cluster instance or availability group listener with SQL Server on Azure VMs. - -Consider the following limitations: -- The client driver must support the `MultiSubnetFailover=True` parameter. 
-- The DNN feature is available starting with [SQL Server 2016 SP3](https://support.microsoft.com/topic/kb5003279-sql-server-2016-service-pack-3-release-information-46ab9543-5cf9-464d-bd63-796279591c31), [SQL Server 2017 CU25](https://support.microsoft.com/topic/kb5003830-cumulative-update-25-for-sql-server-2017-357b80dc-43b5-447c-b544-7503eee189e9), and [SQL Server 2019 CU8](https://support.microsoft.com/topic/cumulative-update-8-for-sql-server-2019-ed7f79d9-a3f0-a5c2-0bef-d0b7961d2d72) on Windows Server 2016 and later. - -To learn more, see the [Windows Server Failover Cluster overview](hadr-windows-server-failover-cluster-overview.md#virtual-network-name-vnn). - -To configure connectivity, see the following articles: -- Availability group: [Configure DNN](availability-group-distributed-network-name-dnn-listener-configure.md), [Configure VNN](availability-group-vnn-azure-load-balancer-configure.md) -- Failover cluster instance: [Configure DNN](failover-cluster-instance-distributed-network-name-dnn-configure.md), [Configure VNN](failover-cluster-instance-vnn-azure-load-balancer-configure.md). - -Most SQL Server features work transparently with FCI and availability groups when using the DNN, but there are certain features that may require special consideration. See [FCI and DNN interoperability](failover-cluster-instance-dnn-interoperability.md) and [AG and DNN interoperability](availability-group-dnn-interoperability.md) to learn more. - ->[!TIP] -> Set the MultiSubnetFailover parameter = true in the connection string even for HADR solutions that span a single subnet to support future spanning of subnets without needing to update connection strings. - -## Heartbeat and threshold - -Change the cluster heartbeat and threshold settings to relaxed settings. The default heartbeat and threshold cluster settings are designed for highly tuned on-premises networks and do not consider the possibility of increased latency in a cloud environment. 
The heartbeat network is maintained with UDP 3343, which is traditionally far less reliable than TCP and more prone to incomplete conversations. - -Therefore, when running cluster nodes for SQL Server on Azure VM high availability solutions, change the cluster settings to a more relaxed monitoring state to avoid transient failures due to the increased possibility of network latency or failure, Azure maintenance, or hitting resource bottlenecks. - -The delay and threshold settings have a cumulative effect to total health detection. For example, setting *CrossSubnetDelay* to send a heartbeat every 2 seconds and setting the *CrossSubnetThreshold* to 10 missed heartbeats before taking recovery means the cluster can have a total network tolerance of 20 seconds before recovery action is taken. In general, continuing to send frequent heartbeats but having greater thresholds is preferred. - -To ensure recovery during legitimate outages while providing greater tolerance for transient issues, relax your delay and threshold settings to the recommended values detailed in the following table: - -| Setting | Windows Server 2012 or later | Windows Server 2008R2 | -|:---------------------|:----------------------------|:-----------------------| -| SameSubnetDelay | 1 second | 2 second | -| SameSubnetThreshold | 40 heartbeats | 10 heartbeats (max) | -| CrossSubnetDelay | 1 second | 2 second | -| CrossSubnetThreshold | 40 heartbeats | 20 heartbeats (max) | - - -Use PowerShell to change your cluster parameters: - -# [Windows Server 2012-2019](#tab/windows2012) - - -```powershell -(get-cluster).SameSubnetThreshold = 40 -(get-cluster).CrossSubnetThreshold = 40 -``` - -# [Windows Server 2008/R2](#tab/windows2008) - - -```powershell -(get-cluster).SameSubnetThreshold = 10 -(get-cluster).CrossSubnetThreshold = 20 -(get-cluster).SameSubnetDelay = 2000 -(get-cluster).CrossSubnetDelay = 2000 -``` - ---- - -Use PowerShell to verify your changes: - -```powershell -get-cluster | fl *subnet* -``` 
- -Consider the following: - -* This change is immediate, restarting the cluster or any resources is not required. -* Same subnet values should not be greater than cross subnet values. -* SameSubnetThreshold <= CrossSubnetThreshold -* SameSubnetDelay <= CrossSubnetDelay - -Choose relaxed values based on how much down time is tolerable and how long before a corrective action should occur depending on your application, business needs, and your environment. If you're not able to exceed the default Windows Server 2019 values, then at least try to match them, if possible: - -For reference, the following table details the default values: - - -| Setting | Windows Server 2019 | Windows Server 2016 | Windows Server 2008 - 2012 R2 | -|:---------------------|:----------------| ------------|:----------------------------| -| SameSubnetDelay | 1 second | 1 second | 1 second | -| SameSubnetThreshold | 20 heartbeats | 10 heartbeats | 5 heartbeats | -| CrossSubnetDelay | 1 second | 1 second | 1 second | -| CrossSubnetThreshold | 20 heartbeats | 10 heartbeats | 5 heartbeats | - - -To learn more, see [Tuning Failover Cluster Network Thresholds](/windows-server/troubleshoot/iaas-sql-failover-cluster). - -## Relaxed monitoring - -If tuning your cluster heartbeat and threshold settings as recommended is insufficient tolerance and you're still seeing failures due to transient issues rather than true outages, you can configure your AG or FCI monitoring to be more relaxed. In some scenarios, it may be beneficial to temporarily relax the monitoring for a period of time given the level of activity. For example, you may want to relax the monitoring when you're doing IO intensive workloads such as database backups, index maintenance, DBCC CHECKDB, etc. Once the activity is complete, set your monitoring to less relaxed values. 
- -> [!WARNING] -> Changing these settings may mask an underlying problem, and should be used as a temporary solution to reduce, rather than eliminate, the likelihood of failure. Underlying issues should still be investigated and addressed. - -Start by increasing the following parameters from their default values for relaxed monitoring, and adjust as necessary: - - -|Parameter |Default value |Relaxed Value |Description | -|---------|---------|---------|---------| -|**Healthcheck timeout**|30000 |60000 |Determines health of the primary replica or node. The cluster resource DLL sp_server_diagnostics returns results at an interval that equals 1/3 of the health-check timeout threshold. If sp_server_diagnostics is slow or is not returning information, the resource DLL will wait for the full interval of the health-check timeout threshold before determining that the resource is unresponsive, and initiating an automatic failover, if configured to do so. | -|**Failure-Condition Level** | 3 | 2 |Conditions that trigger an automatic failover. There are five failure-condition levels, which range from the least restrictive (level one) to the most restrictive (level five) | - -Use Transact-SQL (T-SQL) to modify the health check and failure conditions for both AGs and FCIs. - -For availability groups: - -```sql -ALTER AVAILABILITY GROUP AG1 SET (HEALTH_CHECK_TIMEOUT =60000); -ALTER AVAILABILITY GROUP AG1 SET (FAILURE_CONDITION_LEVEL = 2); -``` - -For failover cluster instances: - -```sql -ALTER SERVER CONFIGURATION SET FAILOVER CLUSTER PROPERTY HealthCheckTimeout = 60000; -ALTER SERVER CONFIGURATION SET FAILOVER CLUSTER PROPERTY FailureConditionLevel = 2; -``` - -Specific to **availability groups**, start with the following recommended parameters, and adjust as necessary: - -|Parameter |Default value |Relaxed Value |Description | -|---------|---------|---------|---------| -|**Lease timeout**|20000|40000|Prevents split-brain. 
| -|**Session timeout**|10000 |20000|Checks communication issues between replicas. The session-timeout period is a replica property that controls how long (in seconds) that an availability replica waits for a ping response from a connected replica before considering the connection to have failed. By default, a replica waits 10 seconds for a ping response. This replica property applies to only the connection between a given secondary replica and the primary replica of the availability group. | -| **Max failures in specified period** | 2 | 6 |Used to avoid indefinite movement of a clustered resource within multiple node failures. Too low of a value can lead to the availability group being in a failed state. Increase the value to prevent short disruptions from performance issues as too low a value can lead to the AG being in a failed state. | - -Before making any changes, consider the following: -- Do not lower any timeout values below their default values. -- Use this equation to calculate the maximum lease time out value: - `Lease timeout < (2 * SameSubnetThreshold * SameSubnetDelay)`. - Start with 40 seconds. If you're using the relaxed `SameSubnetThreshold` and `SameSubnetDelay` values recommended previously, do not exceed 80 seconds for the lease timeout value. -- For synchronous-commit replicas, changing session-timeout to a high value can increase HADR_sync_commit waits. - -**Lease timeout** - -Use the **Failover Cluster Manager** to modify the **lease timeout** settings for your availability group. See the SQL Server [availability group lease health check](/sql/database-engine/availability-groups/windows/availability-group-lease-healthcheck-timeout#lease-timeout) documentation for detailed steps. 
- -**Session timeout** - -Use Transact-SQL (T-SQL) to modify the **session timeout** for an availability group: - -```sql -ALTER AVAILABILITY GROUP AG1 -MODIFY REPLICA ON 'INSTANCE01' WITH (SESSION_TIMEOUT = 15); -``` - -**Max failures in specified period** - -Use the Failover Cluster Manager to modify the **Max failures in specified period** value: -1. Select **Roles** in the navigation pane. -1. Under **Roles**, right-click the clustered resource and choose **Properties**. -1. Select the **Failover** tab, and increase the **Max failures in specified period** value as desired. - -## Resource limits - -VM or disk limits could result in a resource bottleneck that impacts the health of the cluster, and impedes the health check. If you're experiencing issues with resource limits, consider the following: - -* Ensure your OS, drivers, and SQL Server are at the latest builds. -* Optimize SQL Server on Azure VM environment as described in the [performance guidelines](performance-guidelines-best-practices-checklist.md) for SQL Server on Azure Virtual Machines -* Reduce or spread out the workload to reduce utilization without exceeding resource limits -* Tune the SQL Server workload if there is any opportunity, such as - * Add/optimize indexes - * Update statistics if needed and if possible, with Full scan - * Use features like resource governor (starting with SQL Server 2014, enterprise only) to limit resource utilization during specific workloads, such as backups or index maintenance. -* Move to a VM or disk that has higher limits to meet or exceed the demands of your workload. - -## Networking - -Deploy your SQL Server VMs to multiple subnets whenever possible to avoid the dependency on an Azure Load Balancer or a distributed network name (DNN) to route traffic to your HADR solution. - -Use a single NIC per server (cluster node). Azure networking has physical redundancy, which makes additional NICs unnecessary on an Azure virtual machine guest cluster. 
The cluster validation report will warn you that the nodes are reachable only on a single network. You can ignore this warning on Azure virtual machine guest failover clusters. - -Bandwidth limits for a particular VM are shared across NICs and adding an additional NIC does not improve availability group performance for SQL Server on Azure VMs. As such, there is no need to add a second NIC. - -The non-RFC-compliant DHCP service in Azure can cause the creation of certain failover cluster configurations to fail. This failure happens because the cluster network name is assigned a duplicate IP address, such as the same IP address as one of the cluster nodes. This is an issue when you use availability groups, which depend on the Windows failover cluster feature. - -Consider the scenario when a two-node cluster is created and brought online: - -1. The cluster comes online, and then NODE1 requests a dynamically assigned IP address for the cluster network name. -2. The DHCP service doesn't give any IP address other than NODE1's own IP address, because the DHCP service recognizes that the request comes from NODE1 itself. -3. Windows detects that a duplicate address is assigned both to NODE1 and to the failover cluster's network name, and the default cluster group fails to come online. -4. The default cluster group moves to NODE2. NODE2 treats NODE1's IP address as the cluster IP address and brings the default cluster group online. -5. When NODE2 tries to establish connectivity with NODE1, packets directed at NODE1 never leave NODE2 because it resolves NODE1's IP address to itself. NODE2 can't establish connectivity with NODE1, and then loses quorum and shuts down the cluster. -6. NODE1 can send packets to NODE2, but NODE2 can't reply. NODE1 loses quorum and shuts down the cluster. 
- -You can avoid this scenario by assigning an unused static IP address to the cluster network name in order to bring the cluster network name online and add the IP address to [Azure Load Balancer](availability-group-load-balancer-portal-configure.md). - -## Known issues - -Review the resolutions for some commonly known issues and errors: - -**Cluster node removed from membership** - - -If the [Windows Cluster heartbeat and threshold settings](#heartbeat-and-threshold) are too aggressive for your environment, you may see the following message in the system event log frequently. - -``` -Error 1135 -Cluster node 'Node1' was removed from the active failover cluster membership. -The Cluster service on this node may have stopped. This could also be due to the node having -lost communication with other active nodes in the failover cluster. Run the Validate a -Configuration Wizard to check your network configuration. If the condition persists, check -for hardware or software errors related to the network adapters on this node. Also check for -failures in any other network components to which the node is connected such as hubs, switches, or bridges. -``` - - -For more information, review [Troubleshooting cluster issue with Event ID 1135.](/windows-server/troubleshoot/troubleshooting-cluster-event-id-1135) - - -**Lease has expired** / **Lease is no longer valid** - - -If [monitoring](#relaxed-monitoring) is too aggressive for your environment, you may see frequent AG or FCI restarts, failures, or failovers. Additionally for availability groups, you may see the following messages in the SQL Server error log: - -``` -Error 19407: The lease between availability group 'PRODAG' and the Windows Server Failover Cluster has expired. -A connectivity issue occurred between the instance of SQL Server and the Windows Server Failover Cluster. 
-To determine whether the availability group is failing over correctly, check the corresponding availability group -resource in the Windows Server Failover Cluster -``` - -``` -Error 19419: The renewal of the lease between availability group '%.*ls' and the Windows Server Failover Cluster -failed because the existing lease is no longer valid. -``` - -**Connection timeout** - -If the **session timeout** is too aggressive for your availability group environment, you may see the following messages frequently: - -``` -Error 35201: A connection timeout has occurred while attempting to establish a connection to availability -replica 'replicaname' with ID [availability_group_id]. Either a networking or firewall issue exists, -or the endpoint address provided for the replica is not the database mirroring endpoint of the host server instance. -``` - -``` -Error 35206 -A connection timeout has occurred on a previously established connection to availability -replica 'replicaname' with ID [availability_group_id]. Either a networking or a firewall issue -exists, or the availability replica has transitioned to the resolving role. -``` - -**Not failing over group** - - - -If the **Maximum Failures in the Specified Period** value is too low and you're experiencing intermittent failures due to transient issues, your availability group could end in a failed state. Increase this value to tolerate more transient failures. - -``` -Not failing over group , failoverCount 3, failoverThresholdSetting , computedFailoverThreshold 2. 
-``` - - -## Next steps - -To learn more, see: - -- [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md) -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Always On availability groups with SQL Server on Azure VMs](availability-group-overview.md) -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Failover cluster instances with SQL Server on Azure VMs](failover-cluster-instance-overview.md) -- [Failover cluster instance overview](/sql/sql-server/failover-clusters/windows/always-on-failover-cluster-instances-sql-server) diff --git a/articles/azure-sql/virtual-machines/windows/hadr-cluster-quorum-configure-how-to.md b/articles/azure-sql/virtual-machines/windows/hadr-cluster-quorum-configure-how-to.md deleted file mode 100644 index a56809a9d93fb..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/hadr-cluster-quorum-configure-how-to.md +++ /dev/null @@ -1,214 +0,0 @@ ---- -title: Configure cluster quorum -description: "Learn how to configure a disk witness, cloud witness, or a file share witness as quorum for a Windows Server Failover Cluster on SQL Server on Azure VMs. " -services: virtual-machines -documentationCenter: na -author: rajeshsetlem -editor: -tags: azure-service-management -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.topic: conceptual -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: "06/01/2021" -ms.author: rsetlem -ms.reviewer: mathoma ---- - -# Configure cluster quorum for SQL Server on Azure VMs -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article teaches you to configure one of the three quorum options for a Windows Server Failover Cluster running on SQL Server on Azure Virtual Machines (VMs) - a disk witness, a cloud witness, and a file share witness. 
- - -## Overview - -The quorum for a cluster is determined by the number of voting elements that must be part of active cluster membership for the cluster to start properly or continue running. Configuring a quorum resource allows a two-node cluster to continue with only one node online. The Windows Server Failover Cluster is the underlying technology for the SQL Server on Azure VMs high availability options: [failover cluster instances (FCIs)](failover-cluster-instance-overview.md) and [availability groups (AGs)](availability-group-overview.md). - -The disk witness is the most resilient quorum option, but to use a disk witness on a SQL Server on Azure VM, you must use an Azure shared disk which imposes some limitations to the high availability solution. As such, use a disk witness when you're configuring your failover cluster instance with Azure shared disks, otherwise use a cloud witness whenever possible. If you are using Windows Server 2012 R2 or older which does not support cloud witness, you can use a file share witness. - -The following quorum options are available to use for SQL Server on Azure VMs: - -| |[Cloud witness](/windows-server/failover-clustering/deploy-cloud-witness) |[Disk witness](/windows-server/failover-clustering/manage-cluster-quorum#configure-the-cluster-quorum) |[File share witness](/windows-server/failover-clustering/manage-cluster-quorum#configure-the-cluster-quorum) | -|---------|---------|---------|---------| -|**Supported OS**| Windows Server 2016+ |All | All| - -To learn more about quorum, see the [Windows Server Failover Cluster overview](hadr-windows-server-failover-cluster-overview.md). - -## Cloud witness - -A cloud witness is a type of failover cluster quorum witness that uses Microsoft Azure storage to provide a vote on cluster quorum. 
- - -The following table provides additional information and considerations about the cloud witness: - -| Witness type | Description | Requirements and recommendations | -| --------- |--------- |--------- | -| Cloud witness |
    • Uses Azure storage as the cloud witness, contains just the time stamp.
    • Ideal for deployments in multiple sites, multiple zones, and multiple regions.
    • Creates well-known container `msft-cloud-witness` under the Microsoft Storage Account.
    • Writes a single blob file with corresponding cluster's unique ID used as the file name of the blob file under the container
    • |
      • Default size is 1 MB.
      • Use **General Purpose** for the account kind. Blob storage is not supported.
      • Use Standard storage. Azure Premium Storage is not supported.
      • Failover Clustering uses the blob file as the arbitration point, which requires some consistency guarantees when reading the data. Therefore you must select **Locally redundant storage** for **Replication** type.
      • Should be excluded from backups and antivirus scanning
      • A Disk witness isn't supported with Storage Spaces Direct
      • Cloud Witness uses HTTPS (default port 443) to establish communication with Azure blob service. Ensure that HTTPS port is accessible via network Proxy.
      • | - -When configuring a Cloud Witness quorum resource for your Failover Cluster, consider: -- Instead of storing the Access Key, your Failover Cluster will generate and securely store a Shared Access Security (SAS) token. -- The generated SAS token is valid as long as the Access Key remains valid. When rotating the Primary Access Key, it is important to first update the Cloud Witness (on all your clusters that are using that Storage Account) with the Secondary Access Key before regenerating the Primary Access Key. -- Cloud Witness uses HTTPS REST interface of the Azure Storage Account service. This means it requires the HTTPS port to be open on all cluster nodes. - - -A cloud witness requires an Azure Storage Account. To configure a storage account, follow these steps: - -1. Sign in to the [Azure portal](https://portal.azure.com). -2. On the Hub menu, select New -> Data + Storage -> Storage account. -3. In the Create a storage account page, do the following: - 1. Enter a name for your storage account. Storage account names must be between 3 and 24 characters in length and may contain numbers and lowercase letters only. The storage account name must also be unique within Azure. - 2. For **Account kind**, select **General purpose**. - 3. For **Performance**, select **Standard**. - 2. For **Replication**, select **Local-redundant storage (LRS)**. - - -Once your storage account is created, follow these steps to configure your cloud witness quorum resource for your failover cluster: - - -# [PowerShell](#tab/powershell) - -The existing Set-ClusterQuorum PowerShell command has new parameters corresponding to Cloud Witness. 
- -You can configure cloud witness with the cmdlet [`Set-ClusterQuorum`](/powershell/module/failoverclusters/set-clusterquorum) using the PowerShell command: - -```PowerShell -Set-ClusterQuorum -CloudWitness -AccountName -AccessKey -``` - -In the rare instance you need to use a different endpoint, use this PowerShell command: - -```PowerShell -Set-ClusterQuorum -CloudWitness -AccountName -AccessKey -Endpoint -``` - -See the [cloud witness documentation](/windows-server/failover-clustering/deploy-cloud-witness) for help for finding the Storage Account AccessKey. - - -# [Failover Cluster Manager](#tab/fcm-gui) - -Use the Quorum Configuration Wizard built into Failover Cluster Manager to configure your cloud witness. To do so, follow these steps: - -1. Open Failover Cluster Manager. - -2. Right-click the cluster -> **More Actions** -> **Configure Cluster Quorum Settings**. This launches the Configure Cluster Quorum wizard. - - ![Snapshot of the menu path to Configure Cluster Quorum Settings in the Failover Cluster Manager UI](./media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-7.png) - -3. On the **Select Quorum Configurations** page, select **Select the quorum witness**. - - ![Snapshot of the 'select the quorum witness' radio button in the Cluster Quorum wizard](./media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-8.png) - -4. On the **Select Quorum Witness** page, select **Configure a cloud witness**. - - ![Snapshot of the appropriate radio button to select a cloud witness](./media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-9.png) - -5. On the **Configure Cloud Witness** page, enter the Azure Storage Account information. For help with finding this information, see the [cloud witness documentation](/windows-server/failover-clustering/deploy-cloud-witness). - 1. (Required parameter) Azure Storage Account Name. - 2. (Required parameter) Access Key corresponding to the Storage Account. - 1. 
When creating for the first time, use Primary Access Key - 2. When rotating the Primary Access Key, use Secondary Access Key - 3. (Optional parameter) If you intend to use a different Azure service endpoint (for example the Microsoft Azure service in China), then update the endpoint server name. - - ![Snapshot of the Cloud Witness configuration pane in the Cluster Quorum wizard](./media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-10.png) - - -6. Upon successful configuration of the cloud witness, you can view the newly created witness resource in the Failover Cluster Manager snap-in. - - ![Successful configuration of Cloud Witness](./media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-11.png) - - - ---- - - -## Disk witness - -A disk witness is a small clustered disk in the Cluster Available Storage group. This disk is highly available and can fail over between nodes. - -The disk witness is the recommended quorum option when used with a shared storage high availability solution, such as the failover cluster instance with Azure shared disks. - -The following table provides additional information and considerations about the quorum disk witness: - -| Witness type | Description | Requirements and recommendations | -| --------- |--------- |--------- | -| Disk witness |
        • Dedicated LUN that stores a copy of the cluster database
        • Most useful for clusters with shared (not replicated) storage
        • |
          • Size of LUN must be at least 512 MB
          • Must be dedicated to cluster use and not assigned to a clustered role
          • Must be included in clustered storage and pass storage validation tests
          • Can't be a disk that is a Cluster Shared Volume (CSV)
          • Basic disk with a single volume
          • Doesn't need to have a drive letter
          • Can be formatted with NTFS or ReFS
          • Can be optionally configured with hardware RAID for fault tolerance
          • Should be excluded from backups and antivirus scanning
          • A disk witness isn't supported with Storage Spaces Direct
          • | - -To use an Azure shared disk for the disk witness, you must first create the disk and mount it. To do so, follow the steps in the [Mount disk](failover-cluster-instance-azure-shared-disks-manually-configure.md#add-azure-shared-disk) section of the Azure shared disk failover cluster instance guide. The disk does not need to be premium. - -After your disk has been mounted, add it to the cluster storage with the following steps: - -1. Open Failover Cluster Manager. -1. Select **Disks** under **Storage** on the left navigation pane. -1. Select **Add Disk** under **Actions** on the right navigation pane. -1. Select the Azure shared drive you just mounted and note the name, such as `Cluster Disk 3`. - -After your disk has been added as clustered storage, configure it as the disk witness using PowerShell: - - -The existing Set-ClusterQuorum PowerShell command has parameters corresponding to the disk witness. - -Use the name of the clustered disk as the parameter for the disk witness when using the PowerShell cmdlet [`Set-ClusterQuorum`](/powershell/module/failoverclusters/set-clusterquorum): - -```PowerShell -Set-ClusterQuorum -NodeAndDiskMajority "Cluster Disk 3" -``` - -You can also use the Failover Cluster manager; follow the same steps as for the cloud witness, but choose the disk witness as the quorum option instead. - - -## File share witness - -A file share witness is an SMB file share that's typically configured on a file server running Windows Server. It maintains clustering information in a witness.log file, but doesn't store a copy of the cluster database. In Azure, you can configure a file share on a separate virtual machine. - -Configure a file share witness if a disk witness or a cloud witness is unavailable or unsupported in your environment. 
- -The following table provides additional information and considerations about the quorum file share witness: - -| Witness type | Description | Requirements and recommendations | -| --------- |--------- |--------- | -| File share witness |
            • SMB file share that is configured on a file server running Windows Server
            • Does not store a copy of the cluster database
            • Maintains cluster information only in a witness.log file
            • Most useful for multisite clusters with replicated storage
            • |
              • Must have a minimum of 5 MB of free space
              • Must be dedicated to the single cluster and not used to store user or application data
              • Must have write permissions enabled for the computer object for the cluster name

              The following are additional considerations for a file server that hosts the file share witness:
              • A single file server can be configured with file share witnesses for multiple clusters.
              • The file server must be on a site that is separate from the cluster workload. This allows equal opportunity for any cluster site to survive if site-to-site network communication is lost. If the file server is on the same site, that site becomes the primary site, and it is the only site that can reach the file share.
              • The file server can run on a virtual machine if the virtual machine is not hosted on the same cluster that uses the file share witness.
              • For high availability, the file server can be configured on a separate failover cluster.
              • | - -Once you have created your file share and properly configured permissions, mount the file share to your clustered nodes. You can follow the same general steps to mount the file share as described in the [mount file share](failover-cluster-instance-premium-file-share-manually-configure.md) section of the premium file share failover cluster instance how-to guide. - -After your file share has been properly configured and mounted, use PowerShell to add the file share as the quorum witness resource: - -```powershell -Set-ClusterQuorum -FileShareWitness -Credential $(Get-Credential) -``` - -You will be prompted for an account and password for a local (to the file share) non-admin account that has full admin rights to the share. The cluster will keep the name and password encrypted and not accessible by anyone. - -You can also use the Failover Cluster manager; follow the same steps as for the cloud witness, but choose the file share witness as the quorum option instead. - -## Change quorum voting - - -It's possible to change the quorum vote of a node participating in a Windows Server Failover Cluster. - -When modifying the node vote settings, follow these guidelines: - -| Quorum voting guidelines | -|-| -| Start with each node having no vote by default. Each node should only have a vote with explicit justification.| -| Enable votes for cluster nodes that host the primary replica of an availability group, or the preferred owners of a failover cluster instance. | -| Enable votes for automatic failover owners. Each node that may host a primary replica or FCI as a result of an automatic failover should have a vote. | -| If an availability group has more than one secondary replica, only enable votes for the replicas that have automatic failover. | -| Disable votes for nodes that are in secondary disaster recovery sites. Nodes in secondary sites should not contribute to the decision of taking a cluster offline if there's nothing wrong with the primary site. 
| -| Have an odd number of votes, with three quorum votes minimum. Add a [quorum witness](hadr-cluster-quorum-configure-how-to.md) for an additional vote if necessary in a two-node cluster. | -| Reassess vote assignments post-failover. You don't want to fail over into a cluster configuration that doesn't support a healthy quorum. | - - - - -## Next Steps - -To learn more, see: - -- [HADR settings for SQL Server on Azure VMs](hadr-cluster-best-practices.md) -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Always On availability groups with SQL Server on Azure VMs](availability-group-overview.md) -- [Windows Server Failover Cluster with SQL Server on Azure VMs](hadr-windows-server-failover-cluster-overview.md) -- [Failover cluster instances with SQL Server on Azure VMs](failover-cluster-instance-overview.md) -- [Failover cluster instance overview](/sql/sql-server/failover-clusters/windows/always-on-failover-cluster-instances-sql-server) diff --git a/articles/azure-sql/virtual-machines/windows/hadr-windows-server-failover-cluster-overview.md b/articles/azure-sql/virtual-machines/windows/hadr-windows-server-failover-cluster-overview.md deleted file mode 100644 index d0a2a8d18e487..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/hadr-windows-server-failover-cluster-overview.md +++ /dev/null @@ -1,174 +0,0 @@ ---- -title: Windows Server Failover Cluster overview -description: "Learn about the differences with the Windows Server Failover Cluster technology when used with SQL Server on Azure VMs, such as availability groups, and failover cluster instances. 
" -services: virtual-machines -documentationCenter: na -author: rajeshsetlem -editor: monicar -tags: azure-service-management -ms.service: virtual-machines-sql -ms.subservice: hadr -ms.topic: conceptual -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 11/10/2021 -ms.author: rsetlem -ms.reviewer: mathoma ---- - -# Windows Server Failover Cluster with SQL Server on Azure VMs -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article describes the differences when using the Windows Server Failover Cluster feature with SQL Server on Azure VMs for high availability and disaster recovery (HADR), such as for Always On availability groups (AG) or failover cluster instances (FCI). - -To learn more about the Windows feature itself, see the [Windows Server Failover Cluster documentation](/windows-server/failover-clustering/failover-clustering-overview). - -## Overview - -SQL Server high availability solutions on Windows, such as Always On availability groups (AG) or failover cluster instances (FCI) rely on the underlying Windows Server Failover Clustering (WSFC) service. - -The cluster service monitors network connections and the health of nodes in the cluster. This monitoring is in addition to the health checks that SQL Server does as part of the availability group or failover cluster instance feature. If the cluster service is unable to reach the node, or if the AG or FCI role in the cluster becomes unhealthy, then the cluster service initiates appropriate recovery actions to recover and bring applications and services online, either on the same or on another node in the cluster. - -## Cluster health monitoring - -In order to provide high availability, the cluster must ensure the health of the different components that make up the clustered solution. The cluster service monitors the health of the cluster based on a number of system and network parameters in order to detect and respond to failures. 
- -Setting the threshold for declaring a failure is important in order to achieve a balance between promptly responding to a failure, and avoiding false failures. - -There are two strategies for monitoring: - -| Monitoring | Description | -|-|-| -| Aggressive | Provides rapid failure detection and recovery of hard failures, which delivers the highest levels of availability. The cluster service and SQL Server are both less forgiving of transient failure and in some situations may prematurely fail over resources when there are transient outages. Once failure is detected, the corrective action that follows may take extra time. | -| Relaxed | Provides more forgiving failure detection with a greater tolerance for brief transient network issues. Avoids transient failures, but also introduces the risk of delaying the detection of a true failure. | - -Aggressive settings in a cluster environment in the cloud may lead to premature failures and longer outages, therefore a relaxed monitoring strategy is recommended for failover clusters on Azure VMs. To adjust threshold settings, see [cluster best practices](hadr-cluster-best-practices.md#relaxed-monitoring) for more detail. - -## Cluster heartbeat - -The primary settings that affect cluster heart beating and health detection between nodes: - -| Setting | Description | -|-|-| -| Delay | This defines the frequency at which cluster heartbeats are sent between nodes. The delay is the number of seconds before the next heartbeat is sent. Within the same cluster there can be different delay settings configured between nodes on the same subnet, and between nodes that are on different subnets. | -| Threshold | The threshold is the number of heartbeats that can be missed before the cluster takes recovery action. Within the same cluster there can be different threshold settings configured between nodes on the same subnet, and between nodes that are on different subnets. 
| - -The default values for these settings may be too low for cloud environments, and could result in unnecessary failures due to transient network issues. To be more tolerant, use relaxed threshold settings for failover clusters in Azure VMs. See [cluster best practices](hadr-cluster-best-practices.md#heartbeat-and-threshold) for more detail. - -## Quorum - -Although a two-node cluster will function without a [quorum resource](/windows-server/storage/storage-spaces/understand-quorum), customers are strictly required to use a quorum resource to have production support. Cluster validation won't pass any cluster without a quorum resource. - -Technically, a three-node cluster can survive a single node loss (down to two nodes) without a quorum resource. But after the cluster is down to two nodes, there's a risk that the clustered resources will go offline to prevent a split-brain scenario if a node is lost or there's a communication failure between the nodes. Configuring a quorum resource will allow the cluster resources to remain online with only one node online. - -The disk witness is the most resilient quorum option, but to use a disk witness on a SQL Server on Azure VM, you must use an Azure Shared Disk which imposes some limitations to the high availability solution. As such, use a disk witness when you're configuring your failover cluster instance with Azure Shared Disks, otherwise use a cloud witness whenever possible. 
- -The following table lists the quorum options available for SQL Server on Azure VMs: - -| |[Cloud witness](/windows-server/failover-clustering/deploy-cloud-witness) |[Disk witness](/windows-server/failover-clustering/manage-cluster-quorum#configure-the-cluster-quorum) |[File share witness](/windows-server/failover-clustering/manage-cluster-quorum#configure-the-cluster-quorum) | -|---------|---------|---------|---------| -|**Supported OS**| Windows Server 2016+ |All | All| -| **Description** | A cloud witness is a type of failover cluster quorum witness that uses Microsoft Azure to provide a vote on cluster quorum. The default size is about 1 MB and contains just the time stamp. A cloud witness is ideal for deployments in multiple sites, multiple zones, and multiple regions. Use a cloud witness whenever possible, unless you have a failover cluster solution with shared storage. | A disk witness is a small clustered disk in the Cluster Available Storage group. This disk is highly available and can fail over between nodes. It contains a copy of the cluster database, with a default size that's less than 1 GB. The disk witness is the preferred quorum option for any cluster that uses Azure Shared Disks (or any shared-disk solution like shared SCSI, iSCSI, or fiber channel SAN). A Clustered Shared Volume cannot be used as a disk witness. Configure an Azure shared disk as the disk witness. | A file share witness is an SMB file share that's typically configured on a file server running Windows Server. It maintains clustering information in a witness.log file, but doesn't store a copy of the cluster database. In Azure, you can configure a file share on a separate virtual machine within the same virtual network. Use a file share witness if a disk witness or cloud witness is unavailable in your environment. | - -To get started, see [Configure cluster quorum](hadr-cluster-quorum-configure-how-to.md). 
- - -## Virtual network name (VNN) - -To match the on-premises experience for connecting to your availability group listener or failover cluster instance, deploy your SQL Server VMs to multiple subnets within the same virtual network. Having multiple subnets negates the need for the extra dependency on an Azure Load Balancer to route traffic to your HADR solution. To learn more, see [Multi-subnet AG](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md), and [Multi-subnet FCI](failover-cluster-instance-prepare-vm.md#subnets). - -In a traditional on-premises environment, clustered resources such as failover cluster instances or Always On availability groups rely on the Virtual Network Name to route traffic to the appropriate target - either the failover cluster instance, or the listener of the Always On availability group. The virtual name binds the IP address in DNS, and clients can use either the virtual name or the IP address to connect to their high availability target, regardless of which node currently owns the resource. The VNN is a network name and address managed by the cluster, and the cluster service moves the network address from node to node during a failover event. During a failure, the address is taken offline on the original primary replica, and brought online on the new primary replica. - -On Azure Virtual Machines in a single subnet, an additional component is necessary to route traffic from the client to the Virtual Network Name of the clustered resource (failover cluster instance, or the listener of an availability group). In Azure, a load balancer holds the IP address for the VNN that the clustered SQL Server resources rely on and is necessary to route traffic to the appropriate high availability target. The load balancer also detects failures with the networking components and moves the address to a new host. 
- -The load balancer distributes inbound flows that arrive at the front end, and then routes that traffic to the instances defined by the back-end pool. You configure traffic flow by using load-balancing rules and health probes. With SQL Server FCI, the back-end pool instances are the Azure virtual machines running SQL Server, and with availability groups, the back-end pool is the listener. There is a slight failover delay when you're using the load balancer, because the health probe conducts alive checks every 10 seconds by default. - -To get started, learn how to configure Azure Load Balancer for a [failover cluster instance](failover-cluster-instance-vnn-azure-load-balancer-configure.md) or an [availability group](availability-group-vnn-azure-load-balancer-configure.md). - -**Supported OS**: All -**Supported SQL version**: All -**Supported HADR solution**: Failover cluster instance, and availability group - -Configuration of the VNN can be cumbersome, it's an additional source of failure, it can cause a delay in failure detection, and there is an overhead and cost associated with managing the additional resource. To address some of these limitations, SQL Server introduced support for the Distributed Network Name feature. - -## Distributed network name (DNN) - -To match the on-premises experience for connecting to your availability group listener or failover cluster instance, deploy your SQL Server VMs to multiple subnets within the same virtual network. Having multiple subnets negates the need for the extra dependency on a DNN to route traffic to your HADR solution. To learn more, see [Multi-subnet AG](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md), and [Multi-subnet FCI](failover-cluster-instance-prepare-vm.md#subnets). 
- -For SQL Server VMs deployed to a single subnet, the distributed network name feature provides an alternative way for SQL Server clients to connect to the SQL Server failover cluster instance or availability group listener without using a load balancer. The DNN feature is available starting with [SQL Server 2016 SP3](https://support.microsoft.com/topic/kb5003279-sql-server-2016-service-pack-3-release-information-46ab9543-5cf9-464d-bd63-796279591c31), [SQL Server 2017 CU25](https://support.microsoft.com/topic/kb5003830-cumulative-update-25-for-sql-server-2017-357b80dc-43b5-447c-b544-7503eee189e9), [SQL Server 2019 CU8](https://support.microsoft.com/topic/cumulative-update-8-for-sql-server-2019-ed7f79d9-a3f0-a5c2-0bef-d0b7961d2d72), on Windows Server 2016 and later. - -When a DNN resource is created, the cluster binds the DNS name with the IP addresses of all the nodes in the cluster. The client will try to connect to each IP address in this list to find which resource to connect to. You can accelerate this process by specifying `MultiSubnetFailover=True` in the connection string. This setting tells the provider to try all IP addresses in parallel, so the client can connect to the FCI or listener instantly. - -A distributed network name is recommended over a load balancer when possible because: -- The end-to-end solution is more robust since you no longer have to maintain the load balancer resource. -- Eliminating the load balancer probes minimizes failover duration. -- The DNN simplifies provisioning and management of the failover cluster instance or availability group listener with SQL Server on Azure VMs. - -Most SQL Server features work transparently with FCI and availability groups when using the DNN, but there are certain features that may require special consideration. 
- -**Supported OS**: Windows Server 2016 and later -**Supported SQL version**: SQL Server 2019 CU2 (FCI) and SQL Server 2019 CU8 (AG) -**Supported HADR solution**: Failover cluster instance, and availability group - -To get started, learn to configure a distributed network name resource for [a failover cluster instance](failover-cluster-instance-distributed-network-name-dnn-configure.md) or an [availability group](availability-group-distributed-network-name-dnn-listener-configure.md). - -There are additional considerations when using the DNN with other SQL Server features. See [FCI and DNN interoperability](failover-cluster-instance-dnn-interoperability.md) and [AG and DNN interoperability](availability-group-dnn-interoperability.md) to learn more. - -## Recovery actions - -The cluster service takes corrective action when a failure is detected. This could restart the resource on the existing node, or fail the resource over to another node. Once corrective measures are initiated, they may take some time to complete. - -For example, a restarted availability group comes online per the following sequence: - -1. Listener IP comes online -1. Listener network name comes online -1. Availability group comes online -1. Individual databases go through recovery, which can take some time depending on a number of factors, such as the length of the redo log. Connections are routed by the listener only once the database is fully recovered. To learn more, see [Estimating failover time (RTO)](/sql/database-engine/availability-groups/windows/monitor-performance-for-always-on-availability-groups). - -Since recovery could take some time, aggressive monitoring set to detect a failure in 20 seconds could result in an outage of minutes if a transient event occurs (such as memory-preserving [Azure VM maintenance](#azure-platform-maintenance)). Setting the monitoring to a more relaxed value of 40 seconds can help avoid a longer interruption of service. 
- -To adjust threshold settings, see [cluster best practices](hadr-cluster-best-practices.md) for more detail. - - -## Node location - -Nodes in a Windows cluster on virtual machines in Azure may be physically separated within the same Azure region, or they can be in different regions. The distance may introduce network latency, much like having cluster nodes spread between locations in your own facilities would. In cloud environments, the difference is that within a region you may not be aware of the distance between nodes. Moreover, some other factors like physical and virtual components, number of hops, etc. can also contribute to increased latency. If latency between the nodes is a concern, consider placing the nodes of the cluster within a [proximity placement group](../../../virtual-machines/co-location.md) to guarantee network proximity. - -## Resource limits - -When you configure an Azure VM, you determine the computing resources limits for the CPU, memory, and IO. Workloads that require more resources than the purchased Azure VM, or disk limits may cause VM performance issues. Performance degradation may result in a failed health check for either the cluster service, or for the SQL Server high availability feature. Resource bottlenecks may make the node or resource appear down to the cluster or SQL Server. - -Intensive SQL IO operations or maintenance operations such as backups, index, or statistics maintenance could cause the VM or disk to reach *IOPS* or *MBPS* throughput limits, which could make SQL Server unresponsive to an *IsAlive/LooksAlive* check. - -If your SQL Server is experiencing unexpected failovers, check to make sure you are following all [performance best practices](performance-guidelines-best-practices-checklist.md) and monitor the server for disk or VM-level capping. 
- -## Azure platform maintenance - -Like any other cloud service, Azure periodically updates its platform to improve the reliability, performance, and security of the host infrastructure for virtual machines. The purpose of these updates ranges from patching software components in the hosting environment to upgrading networking components or decommissioning hardware. - -Most platform updates don't affect customer VMs. When a no-impact update isn't possible, Azure chooses the update mechanism that's least impactful to customer VMs. Most nonzero-impact maintenance pauses the VM for less than 10 seconds. In certain cases, Azure uses memory-preserving maintenance mechanisms. These mechanisms pause the VM for up to 30 seconds and preserve the memory in RAM. The VM is then resumed, and its clock is automatically synchronized. - -Memory-preserving maintenance works for more than 90 percent of Azure VMs. It doesn't work for G, M, N, and H series. Azure increasingly uses live-migration technologies and improves memory-preserving maintenance mechanisms to reduce the pause durations. When the VM is live-migrated to a different host, some sensitive workloads like SQL Server, might show a slight performance degradation in the few minutes leading up to the VM pause. - -A resource bottleneck during platform maintenance may make the AG or FCI appear down to the cluster service. See the [resource limits](#resource-limits) section of this article to learn more. - -If you are using aggressive cluster monitoring, an extended VM pause may trigger a failover. A failover will often cause more downtime than the maintenance pause, so it is recommended to use relaxed monitoring to avoid triggering a failover while the VM is paused for maintenance. See the [cluster best practices](hadr-cluster-best-practices.md) for more information on setting cluster thresholds in Azure VMs. 
- -## Limitations - -Consider the following limitations when you're working with FCI or availability groups and SQL Server on Azure Virtual Machines. - -### MSDTC - -Azure Virtual Machines support Microsoft Distributed Transaction Coordinator (MSDTC) on Windows Server 2019 with storage on Clustered Shared Volumes (CSV) and [Azure Standard Load Balancer](../../../load-balancer/load-balancer-overview.md) or on SQL Server VMs that are using Azure shared disks. - -On Azure Virtual Machines, MSDTC isn't supported for Windows Server 2016 or earlier with Clustered Shared Volumes because: - -- The clustered MSDTC resource can't be configured to use shared storage. On Windows Server 2016, if you create an MSDTC resource, it won't show any shared storage available for use, even if storage is available. This issue has been fixed in Windows Server 2019. -- The basic load balancer doesn't handle RPC ports. - - - -## Next steps - -Now that you've familiarized yourself with the differences when using a Windows Failover Cluster with SQL Server on Azure VMs, learn about the high availability features [availability groups](availability-group-overview.md) or [failover cluster instances](failover-cluster-instance-overview.md). If you're ready to get started, be sure to review the [best practices](hadr-cluster-best-practices.md) for configuration recommendations. diff --git a/articles/azure-sql/virtual-machines/windows/licensing-model-azure-hybrid-benefit-ahb-change.md b/articles/azure-sql/virtual-machines/windows/licensing-model-azure-hybrid-benefit-ahb-change.md deleted file mode 100644 index ba3920521947f..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/licensing-model-azure-hybrid-benefit-ahb-change.md +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: Change the license model for a SQL VM in Azure -description: Learn how to switch licensing for a SQL Server VM in Azure from pay-as-you-go to bring-your-own-license by using the Azure Hybrid Benefit. 
-services: virtual-machines-windows -documentationcenter: na -author: bluefooted -tags: azure-resource-manager -ms.service: virtual-machines-sql -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.subservice: management -ms.workload: iaas-sql-server -ms.date: 11/13/2019 -ms.author: pamela -ms.reviewer: mathoma -ms.custom: devx-track-azurepowershell - ---- -# Change the license model for a SQL virtual machine in Azure -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - - -This article describes how to change the license model for a SQL Server virtual machine (VM) in Azure by using the [SQL IaaS Agent Extension](./sql-server-iaas-agent-extension-automate-management.md). - -## Overview - -There are three license models for an Azure VM that's hosting SQL Server: pay-as-you-go, Azure Hybrid Benefit (AHB), and High Availability/Disaster Recovery(HA/DR). You can modify the license model of your SQL Server VM by using the Azure portal, the Azure CLI, or PowerShell. - -- The **pay-as-you-go** model means that the per-second cost of running the Azure VM includes the cost of the SQL Server license. -- [Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/) allows you to use your own SQL Server license with a VM that's running SQL Server. -- The **HA/DR** license type is used for the [free HA/DR replica](business-continuity-high-availability-disaster-recovery-hadr-overview.md#free-dr-replica-in-azure) in Azure. - -Azure Hybrid Benefit allows the use of SQL Server licenses with Software Assurance ("Qualified License") on Azure virtual machines. With Azure Hybrid Benefit, customers aren't charged for the use of a SQL Server license on a VM. But they must still pay for the cost of the underlying cloud compute (that is, the base rate), storage, and backups. They must also pay for I/O associated with their use of the services (as applicable). 
- -According to the Microsoft [Product Terms](https://www.microsoft.com/licensing/terms/productoffering/MicrosoftAzureServices/EAEAS): "Customers must indicate that they are using Azure SQL Database (Managed Instance, Elastic Pool, and Single Database), Azure Data Factory, SQL Server Integration Services, or SQL Server Virtual Machines under Azure Hybrid Benefit for SQL Server when configuring workloads on Azure." - -To indicate the use of Azure Hybrid Benefit for SQL Server on Azure VM and be compliant, you have three options: - -- Provision a virtual machine by using a bring-your-own-license SQL Server image from Azure Marketplace. This option is available only for customers who have an Enterprise Agreement. -- Provision a virtual machine by using a pay-as-you-go SQL Server image from Azure Marketplace and activate the Azure Hybrid Benefit. -- Self-install SQL Server on Azure VM, manually [register with the SQL IaaS Agent Extension](sql-agent-extension-manually-register-single-vm.md), and activate Azure Hybrid Benefit. - -The license type of SQL Server can be configured when the VM is provisioned, or anytime afterward. Switching between license models incurs no downtime, does not restart the VM or the SQL Server service, doesn't add any additional costs, and is effective immediately. In fact, activating Azure Hybrid Benefit *reduces* cost. - -## Prerequisites - -Changing the licensing model of your SQL Server VM has the following requirements: - -- An [Azure subscription](https://azure.microsoft.com/free/). -- A [SQL Server VM](./create-sql-vm-portal.md) registered with the [SQL IaaS Agent Extension](./sql-server-iaas-agent-extension-automate-management.md). -- [Software Assurance](https://www.microsoft.com/licensing/licensing-programs/software-assurance-default) is a requirement to utilize the [Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/). 
- - -## Change license model - -# [Azure portal](#tab/azure-portal) - -You can modify the license model directly from the portal: - -1. Open the [Azure portal](https://portal.azure.com) and open the [SQL virtual machines resource](manage-sql-vm-portal.md#access-the-resource) for your SQL Server VM. -1. Select **Configure** under **Settings**. -1. Select the **Azure Hybrid Benefit** option, and select the check box to confirm that you have a SQL Server license with Software Assurance. -1. Select **Apply** at the bottom of the **Configure** page. - -![Azure Hybrid Benefit in the portal](./media/licensing-model-azure-hybrid-benefit-ahb-change/ahb-in-portal.png) - - -# [Azure CLI](#tab/azure-cli) - -You can use the Azure CLI to change your license model. - -Specify the following values for **license-type**: -- `AHUB` for the Azure Hybrid Benefit -- `PAYG` for pay as you go -- `DR` to activate the free HA/DR replica - - -```azurecli-interactive -# example: az sql vm update -n AHBTest -g AHBTest --license-type AHUB - -az sql vm update -n -g --license-type -``` - -# [PowerShell](#tab/azure-powershell) - -You can use PowerShell to change your license model. - -Specify the following values for **license-type**: -- `AHUB` for the Azure Hybrid Benefit -- `PAYG` for pay as you go -- `DR` to activate the free HA/DR replica - - -```powershell-interactive -Update-AzSqlVM -ResourceGroupName -Name -LicenseType -``` - ---- - -## Remarks - -- Azure Cloud Solution Provider (CSP) customers can use the Azure Hybrid Benefit by first deploying a pay-as-you-go VM and then converting it to bring-your-own-license, if they have active Software Assurance. -- If you drop your SQL virtual machine resource, you will go back to the hard-coded license setting of the image. -- The ability to change the license model is a feature of the SQL IaaS Agent Extension. Deploying an Azure Marketplace image through the Azure portal automatically registers a SQL Server VM with the extension. 
But customers who are self-installing SQL Server will need to manually [register their SQL Server VM](sql-agent-extension-manually-register-single-vm.md). -- Adding a SQL Server VM to an availability set requires re-creating the VM. As such, any VMs added to an availability set will go back to the default pay-as-you-go license type. Azure Hybrid Benefit will need to be enabled again. - - -## Limitations - -Changing the license model is: - - Only available to customers with [Software Assurance](https://www.microsoft.com/en-us/licensing/licensing-programs/software-assurance-overview). - - Only supported for the Standard and Enterprise editions of SQL Server. License changes for Express, Web, and Developer are not supported. - - Only supported for virtual machines deployed through the Azure Resource Manager model. Virtual machines deployed through the classic model are not supported. - - Available only for the public or Azure Government clouds. Currently unavailable for the Azure China region. - -> [!Note] -> Only SQL Server core-based licensing with Software Assurance or subscription licenses are eligible for Azure Hybrid Benefit. If you are using Server + CAL licensing for SQL Server and you have Software Assurance, you can use bring-your-own-license to an Azure SQL Server virtual machine image to leverage license mobility for these servers, but you cannot leverage the other features of Azure Hybrid Benefit. - -## Known errors - -Review the commonly known errors and their resolutions. - -**The Resource 'Microsoft.SqlVirtualMachine/SqlVirtualMachines/\' under resource group '\' was not found.** - -This error occurs when you try to change the license model on a SQL Server VM that has not been registered with the SQL Server IaaS Agent Extension: - -`The Resource 'Microsoft.SqlVirtualMachine/SqlVirtualMachines/\' under resource group '\' was not found. The property 'sqlServerLicenseType' cannot be found on this object. 
Verify that the property exists and can be set.` - -You'll need to register your subscription with the resource provider, and then [register your SQL Server VM with the SQL IaaS Agent Extension](sql-agent-extension-manually-register-single-vm.md). - - - -## Next steps - -For more information, see the following articles: - -* [Overview of SQL Server on a Windows VM](sql-server-on-azure-vm-iaas-what-is-overview.md) -* [FAQ for SQL Server on a Windows VM](frequently-asked-questions-faq.yml) -* [Pricing guidance for SQL Server on a Windows VM](pricing-guidance.md) -* [What's new for SQL Server on Azure VMs](doc-changes-updates-release-notes-whats-new.md) -* [Overview of SQL IaaS Agent Extension](./sql-server-iaas-agent-extension-automate-management.md) diff --git a/articles/azure-sql/virtual-machines/windows/manage-sql-vm-portal.md b/articles/azure-sql/virtual-machines/windows/manage-sql-vm-portal.md deleted file mode 100644 index 5a602665af8ad..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/manage-sql-vm-portal.md +++ /dev/null @@ -1,136 +0,0 @@ ---- -title: Manage SQL Server virtual machines in Azure by using the Azure portal | Microsoft Docs -description: Learn how to access the SQL virtual machine resource in the Azure portal for a SQL Server VM hosted on Azure to modify SQL Server settings. 
-services: virtual-machines-windows -documentationcenter: na -author: bluefooted -tags: azure-resource-manager -ms.service: virtual-machines-sql -ms.subservice: management -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 12/21/2021 -ms.author: pamela -ms.reviewer: mathoma -ms.custom: ignite-fall-2021 ---- -# Manage SQL Server VMs by using the Azure portal -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -In the [Azure portal](https://portal.azure.com), the [**SQL virtual machines**](https://portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/Microsoft.SqlVirtualMachine%2FSqlVirtualMachines) resource is an independent management service to manage SQL Server on Azure Virtual Machines (VMs) that have been registered with the SQL Server IaaS Agent extension. You can use the resource to view all of your SQL Server VMs simultaneously and modify settings dedicated to SQL Server: - -![SQL virtual machines resource](./media/manage-sql-vm-portal/sql-vm-manage.png) - -The **SQL virtual machines** resource management point is different to the **Virtual machine** resource used to manage the VM such as start it, stop it, or restart it. - - -## Prerequisite - -The **SQL virtual machines** resource is only available to SQL Server VMs that have been [registered with the SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md). - - -## Access the resource - -To access the **SQL virtual machines** resource, do the following: - -1. Open the [Azure portal](https://portal.azure.com). -1. Select **All Services**. -1. Enter **SQL virtual machines** in the search box. -1. (Optional): Select the star next to **SQL virtual machines** to add this option to your **Favorites** menu. -1. Select **SQL virtual machines**. - - ![Find SQL Server virtual machines in all services](./media/manage-sql-vm-portal/sql-vm-search.png) - -1. The portal lists all SQL Server VMs available within the subscription. 
Select the one that you want to manage to open the **SQL virtual machines** resource. Use the search box if your SQL Server VM isn't appearing. - - ![All available SQL Server VMs](./media/manage-sql-vm-portal/all-sql-vms.png) - - Selecting your SQL Server VM opens the **SQL virtual machines** resource: - - - ![View the SQL virtual machines resource](./media/manage-sql-vm-portal/sql-vm-resource.png) - -> [!TIP] -> The **SQL virtual machines** resource is for dedicated SQL Server settings. Select the name of the VM in the **Virtual machine** box to open settings that are specific to the VM, but not exclusive to SQL Server. - - -## License and edition - -Use the **Configure** page of the SQL virtual machine resource to change your SQL Server licensing metadata to **Pay as you go**, **Azure Hybrid Benefit**, or **HA/DR** for your [free Azure replica for disaster recovery](business-continuity-high-availability-disaster-recovery-hadr-overview.md#free-dr-replica-in-azure). - - - -![Change the version and edition of SQL Server VM metadata in the Azure portal using the SQL virtual machines resource](./media/manage-sql-vm-portal/sql-vm-license-edition.png) - -You can also modify the edition of SQL Server from the **Configure** page as well, such as **Enterprise**, **Standard**, or **Developer**. - -Changing the license and edition metadata in the Azure portal is only supported once the version and edition of SQL Server has been modified internally to the VM. To learn more see, change the [version](change-sql-server-version.md) and [edition](change-sql-server-edition.md) of SQL Server on Azure VMs. - -## Storage - -Use the **Storage Configuration** page of the SQL virtual machines resource to extend your data, log, and tempdb drives. Review [storage configuration](storage-configuration.md) to learn more. 
- -![Extend storage in the Azure portal using the SQL virtual machines resource](./media/manage-sql-vm-portal/sql-vm-storage-configuration.png) - -## Patching - -Use the **Patching** page of the SQL virtual machines resource to enable auto patching of your VM and automatically install Windows and SQL Server updates marked as Important. You can also configure a maintenance schedule here, such as running patching daily, as well as a local start time for maintenance, and a maintenance window. - - -![Configure automated patching and schedule in the Azure portal using the SQL virtual machines resource](./media/manage-sql-vm-portal/sql-vm-automated-patching.png) - - -To learn more, see, [Automated patching](automated-patching.md). - - - -## Backups - -Use the **Backups** page of the SQL virtual machines resource to configure your automated backup settings, such as the retention period, which storage account to use, encryption, whether or not to back up system databases, and a backup schedule. - -![Configure automated backup and schedule in the Azure portal using the SQL virtual machines resource](./media/manage-sql-vm-portal/sql-vm-automated-backup.png) - -To learn more, see, [Automated patching](automated-backup.md). - - -## High availability (Preview) - -Use the **High Availability** page of the SQL virtual machines resource to create a Windows Server Failover Cluster, and configure an Always On availability group, availability group listener, and Azure Load Balancer. Configuring high availability using Azure portal is currently in preview. - - -![Configure a Windows Server Failover Cluster and an Always On availability group in the Azure portal using the SQL virtual machines resource](./media/manage-sql-vm-portal/sql-vm-high-availability.png) - - -To learn more, see [Configure availability group by using the Azure portal](availability-group-azure-portal-configure.md). 
- -## Security Configuration - -Use the **Security Configuration** page of the SQL virtual machines resource to configure SQL Server security settings such as which port to use, whether or not SQL Authentication is enabled, and to enable Azure Key Vault integration. - -![Configure SQL Server security in the Azure portal using the SQL virtual machines resource](./media/manage-sql-vm-portal/sql-vm-security-configuration.png) - -To learn more, see the [Security best practices](security-considerations-best-practices.md). - - - -## Defender for Cloud - -Use the **Defender for SQL** page of the SQL virtual machine's resource to view Defender for Cloud recommendations directly in the SQL virtual machine blade. Enable [Microsoft Defender for SQL](../../../security-center/defender-for-sql-usage.md) to leverage this feature. - -![Configure SQL Server Defender for Cloud settings in the Azure portal using the SQL virtual machines resource](./media/manage-sql-vm-portal/sql-vm-security-center.png) - -## SQL best practices assessment - -Use the **SQL best practices assessment** page of the SQL virtual machines resource to assess the health of your SQL Server VM. Once the feature is enabled, your SQL Server instances and databases are scanned and recommendations are surfaced to improve performance (indexes, statistics, trace flags, and so on) and identify missing best practices configurations. - -To learn more, see [SQL best practices assessment for SQL Server on Azure VMs](sql-assessment-for-sql-vm.md). 
- -## Next steps - -For more information, see the following articles: - -* [Overview of SQL Server on a Windows VM](sql-server-on-azure-vm-iaas-what-is-overview.md) -* [FAQ for SQL Server on a Windows VM](frequently-asked-questions-faq.yml) -* [Pricing guidance for SQL Server on a Windows VM](pricing-guidance.md) -* [What's new for SQL Server on Azure VMs](doc-changes-updates-release-notes-whats-new.md) diff --git a/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728008.png b/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728008.png deleted file mode 100644 index 7bd89c7aae289..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728008.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728009.png b/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728009.png deleted file mode 100644 index b920781cc7a4d..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728009.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728010.png b/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728010.png deleted file mode 100644 index 05b4b2ade1d99..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728010.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728011.png b/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728011.png deleted file mode 100644 index 437f42408b792..0000000000000 Binary 
files a/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728011.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728012.png b/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728012.png deleted file mode 100644 index eaa8350e94480..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728012.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728013.png b/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728013.png deleted file mode 100644 index fc14cb32bab3c..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728013.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728014.png b/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728014.png deleted file mode 100644 index 81f19dc45126f..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728014.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728015.png b/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728015.png deleted file mode 100644 index 1c684454e2753..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728015.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728016.png 
b/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728016.png deleted file mode 100644 index 0939e2bfbef69..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/application-patterns-development-strategies/ic728016.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/automated-backup-sql-2014/azure-sql-arm-autobackup.png b/articles/azure-sql/virtual-machines/windows/media/automated-backup-sql-2014/azure-sql-arm-autobackup.png deleted file mode 100644 index 9de42c174b6fe..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/automated-backup-sql-2014/azure-sql-arm-autobackup.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/automated-backup-sql-2014/azure-sql-rm-autobackup-existing-vms.png b/articles/azure-sql/virtual-machines/windows/media/automated-backup-sql-2014/azure-sql-rm-autobackup-existing-vms.png deleted file mode 100644 index 7b2679a08c67e..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/automated-backup-sql-2014/azure-sql-rm-autobackup-existing-vms.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/automated-backup/automated-backup-blade.png b/articles/azure-sql/virtual-machines/windows/media/automated-backup/automated-backup-blade.png deleted file mode 100644 index 21fa9affad7e6..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/automated-backup/automated-backup-blade.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/automated-backup/configure-manual-backup-schedule.png b/articles/azure-sql/virtual-machines/windows/media/automated-backup/configure-manual-backup-schedule.png deleted file mode 100644 index 247754ed67753..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/automated-backup/configure-manual-backup-schedule.png and 
/dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/automated-backup/sql-server-configuration.png b/articles/azure-sql/virtual-machines/windows/media/automated-backup/sql-server-configuration.png deleted file mode 100644 index b8bdbae4219a7..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/automated-backup/sql-server-configuration.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/automated-patching/azure-sql-arm-patching.png b/articles/azure-sql/virtual-machines/windows/media/automated-patching/azure-sql-arm-patching.png deleted file mode 100644 index c4df20864c744..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/automated-patching/azure-sql-arm-patching.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/automated-patching/azure-sql-rm-patching-existing-vms.png b/articles/azure-sql/virtual-machines/windows/media/automated-patching/azure-sql-rm-patching-existing-vms.png deleted file mode 100644 index 77984f8d839fe..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/automated-patching/azure-sql-rm-patching-existing-vms.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/add-database.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/add-database.png deleted file mode 100644 index 5ffe1c0545181..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/add-database.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/add-replicas.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/add-replicas.png deleted file mode 100644 index 749bdf1c5e942..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/add-replicas.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/configure-existing-cluster.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/configure-existing-cluster.png deleted file mode 100644 index 99384cb3edffb..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/configure-existing-cluster.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/configure-listener.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/configure-listener.png deleted file mode 100644 index 393b9d0b3128f..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/configure-listener.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/configure-new-cluster-1.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/configure-new-cluster-1.png deleted file mode 100644 index 7bfe9d4269b60..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/configure-new-cluster-1.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/configure-new-cluster-2.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/configure-new-cluster-2.png deleted file mode 100644 index 5146074389c3c..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/configure-new-cluster-2.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/configure-new-listener.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/configure-new-listener.png deleted file mode 100644 index 0922b2117ec03..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/configure-new-listener.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/create-availability-group.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/create-availability-group.png deleted file mode 100644 index 4dc048058c1ab..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/create-availability-group.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/create-new-availability-group.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/create-new-availability-group.png deleted file mode 100644 index dc0c7fb737d05..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/create-new-availability-group.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/create-new-cluster.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/create-new-cluster.png deleted file mode 100644 index 26218c4f491be..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/create-new-cluster.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/failed-deployment.png 
b/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/failed-deployment.png deleted file mode 100644 index 6d740d360bee5..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/failed-deployment.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/healthy-availability-group.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/healthy-availability-group.png deleted file mode 100644 index c45bae7975e97..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/healthy-availability-group.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/onboard-existing-cluster.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/onboard-existing-cluster.png deleted file mode 100644 index 248e29ad199fe..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-az-portal-configure/onboard-existing-cluster.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/0-endstatesample.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/0-endstatesample.png deleted file mode 100644 index a3172a8423040..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/0-endstatesample.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/1-basics.png 
b/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/1-basics.png deleted file mode 100644 index 06c94d83a0d2e..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/1-basics.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/11-deploydashboard.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/11-deploydashboard.png deleted file mode 100644 index 764c92646c1d3..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/11-deploydashboard.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/2-domain.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/2-domain.png deleted file mode 100644 index 59c9fcc07cf19..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/2-domain.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/3-availabilitygroup.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/3-availabilitygroup.png deleted file mode 100644 index 37eb76c98b9e0..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/3-availabilitygroup.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/4-vm.png 
b/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/4-vm.png deleted file mode 100644 index 1249c8f543de1..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/4-vm.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/5-sql.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/5-sql.png deleted file mode 100644 index c738c028a3e99..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-azure-marketplace-template-configure/5-sql.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/1-change-workgroup-name.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/1-change-workgroup-name.png deleted file mode 100644 index b647170e78b1a..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/1-change-workgroup-name.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/2-add-dns-suffix.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/2-add-dns-suffix.png deleted file mode 100644 index ef4038558675b..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/2-add-dns-suffix.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/3-confirm-full-computer-name.png 
b/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/3-confirm-full-computer-name.png deleted file mode 100644 index 4eb68cf7aabb0..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/3-confirm-full-computer-name.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/4-host-file.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/4-host-file.png deleted file mode 100644 index 865465dd092fc..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/4-host-file.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/5-launch-cluster-name-properties.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/5-launch-cluster-name-properties.png deleted file mode 100644 index 2a2598f33e1cc..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/5-launch-cluster-name-properties.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/6-provide-static-ip-for-cluster.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/6-provide-static-ip-for-cluster.png deleted file mode 100644 index 9461c87a7ddf7..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/6-provide-static-ip-for-cluster.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/7-verify-cluster-properties.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/7-verify-cluster-properties.png deleted file mode 100644 index 80d44bfb6a235..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-clusterless-workgroup-configure/7-verify-cluster-properties.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-distributed-network-name-dnn-listener-configure/dnn-listener-in-ssms.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-distributed-network-name-dnn-listener-configure/dnn-listener-in-ssms.png deleted file mode 100644 index 659f0a9f00d08..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-distributed-network-name-dnn-listener-configure/dnn-listener-in-ssms.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-distributed-network-name-dnn-listener-configure/dnn-listener-tsql.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-distributed-network-name-dnn-listener-configure/dnn-listener-tsql.png deleted file mode 100644 index 917dcd06f8aea..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-distributed-network-name-dnn-listener-configure/dnn-listener-tsql.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/00-availability-group-basic-dr.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/00-availability-group-basic-dr.png deleted file mode 100644 index 1d11c869d0fac..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/00-availability-group-basic-dr.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/00-availability-group-basic.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/00-availability-group-basic.png deleted file mode 100644 index ba1b146006247..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/00-availability-group-basic.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/01-vpngateway-example.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/01-vpngateway-example.png deleted file mode 100644 index 69f514853b64f..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/01-vpngateway-example.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/20-add-ip-resource.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/20-add-ip-resource.png deleted file mode 100644 index 3ffa6d0eed690..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/20-add-ip-resource.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/50-configure-dependency-multiple-ip.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/50-configure-dependency-multiple-ip.png deleted 
file mode 100644 index 8a48d6c3c4e05..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/50-configure-dependency-multiple-ip.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/add-cluster-ip-address.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/add-cluster-ip-address.png deleted file mode 100644 index 4916c30d9d807..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/add-cluster-ip-address.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/cluster-ip-dependencies.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/cluster-ip-dependencies.png deleted file mode 100644 index 6e8ea6d1304d6..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/cluster-ip-dependencies.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/cluster-name-properties.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/cluster-name-properties.png deleted file mode 100644 index 36e2f5c78ee86..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-multiple-regions/cluster-name-properties.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/01-portal-plus.png 
b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/01-portal-plus.png deleted file mode 100644 index 0c73dd814f327..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/01-portal-plus.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/01-resource-group-create-complete.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/01-resource-group-create-complete.png deleted file mode 100644 index 46e2b1ad86f0a..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/01-resource-group-create-complete.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/01-resource-group-create.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/01-resource-group-create.png deleted file mode 100644 index 1662a5c01d889..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/01-resource-group-create.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/01-resource-group-search.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/01-resource-group-search.png deleted file mode 100644 index aad5689935f82..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/01-resource-group-search.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/02-create-resource-rg.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/02-create-resource-rg.png deleted file mode 100644 index 1143cf0a02fd3..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/02-create-resource-rg.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/03-create-vnet-basics.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/03-create-vnet-basics.png deleted file mode 100644 index ecddffe7e91f3..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/03-create-vnet-basics.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/04-create-vnet-ip-address-rename-default-subnet.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/04-create-vnet-ip-address-rename-default-subnet.png deleted file mode 100644 index 470cd0a6a1ede..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/04-create-vnet-ip-address-rename-default-subnet.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/05-create-vnet-ip-address-add-sql-subnet-1.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/05-create-vnet-ip-address-add-sql-subnet-1.png deleted file mode 100644 index ae26c712e1424..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/05-create-vnet-ip-address-add-sql-subnet-1.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/06-create-vnet-ip-address-add-sql-subnet-2.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/06-create-vnet-ip-address-add-sql-subnet-2.png deleted file mode 100644 index c3f89f3a33298..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/06-create-vnet-ip-address-add-sql-subnet-2.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/07-create-vnet-ip-address.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/07-create-vnet-ip-address.png deleted file mode 100644 index 886f3556a9280..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/07-create-vnet-ip-address.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/08-dc-vm-1-rdp-connect.png 
b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/08-dc-vm-1-rdp-connect.png deleted file mode 100644 index 51d7d94b37424..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/08-dc-vm-1-rdp-connect.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/09-add-features.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/09-add-features.png deleted file mode 100644 index 72aa6694da672..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/09-add-features.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/10-add-roles.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/10-add-roles.png deleted file mode 100644 index 9278a38467ea6..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/10-add-roles.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/11-ad-ds-more.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/11-ad-ds-more.png deleted file mode 100644 index cdfb38cfcb46b..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/11-ad-ds-more.png and 
/dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/12-dc-vm-1-private-ip.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/12-dc-vm-1-private-ip.png deleted file mode 100644 index ebad1a6adf8b2..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/12-dc-vm-1-private-ip.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/12-identify-dns-ip-address.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/12-identify-dns-ip-address.png deleted file mode 100644 index 3aba0aa69bd86..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/12-identify-dns-ip-address.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/13-network-interface.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/13-network-interface.png deleted file mode 100644 index 404b99dd0e26b..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/13-network-interface.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/14-ad-dc-new-user.png 
b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/14-ad-dc-new-user.png deleted file mode 100644 index 919d75345069b..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/14-ad-dc-new-user.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/15-ad-dc-properties.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/15-ad-dc-properties.png deleted file mode 100644 index 8ef6ea06bf33d..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/15-ad-dc-properties.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/16-add-permissions.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/16-add-permissions.png deleted file mode 100644 index 661a2108b12dc..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/16-add-permissions.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/17-firewall-tcp-ports.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/17-firewall-tcp-ports.png deleted file mode 100644 index 687646a3ed3e9..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/17-firewall-tcp-ports.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/18-select-sql-vm-image.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/18-select-sql-vm-image.png deleted file mode 100644 index 86d24cc09cb61..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/18-select-sql-vm-image.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/19-sql-vm-network-interface.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/19-sql-vm-network-interface.png deleted file mode 100644 index f4e3278953417..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/19-sql-vm-network-interface.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/20-ip-configurations-add.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/20-ip-configurations-add.png deleted file mode 100644 index 36949d7615938..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/20-ip-configurations-add.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/21-add-ip-windows-cluster.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/21-add-ip-windows-cluster.png deleted file mode 100644 index 044600aea7a3f..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/21-add-ip-windows-cluster.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/22-add-ip-ag-listener.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/22-add-ip-ag-listener.png deleted file mode 100644 index d28c6276e402a..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/22-add-ip-ag-listener.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/multi-subnet-availability-group-diagram.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/multi-subnet-availability-group-diagram.png deleted file mode 100644 index a2343496c7aad..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-multi-subnet/multi-subnet-availability-group-diagram.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/00-endstatesamplenoelb.png 
b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/00-endstatesamplenoelb.png deleted file mode 100644 index 99e786f3e71a1..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/00-endstatesamplenoelb.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/01-portalplus.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/01-portalplus.png deleted file mode 100644 index e3120373b2aa8..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/01-portalplus.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/01-resourcegroup.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/01-resourcegroup.png deleted file mode 100644 index c3099d32da441..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/01-resourcegroup.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/01-resourcegroupsymbol.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/01-resourcegroupsymbol.png deleted file mode 100644 index 32cd4d8920789..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/01-resourcegroupsymbol.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/02-newiteminrg.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/02-newiteminrg.png deleted file mode 100644 index fb278d37ee6e6..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/02-newiteminrg.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/04-findvirtualnetwork.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/04-findvirtualnetwork.png deleted file mode 100644 index 50482af0822d0..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/04-findvirtualnetwork.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/06-configurevirtualnetwork.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/06-configurevirtualnetwork.png deleted file mode 100644 index a41e542406fee..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/06-configurevirtualnetwork.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/07-addsubnet.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/07-addsubnet.png deleted file mode 100644 index 67fd1a1d959a4..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/07-addsubnet.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/08-configuresubnet.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/08-configuresubnet.png deleted file mode 100644 index a017313793ada..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/08-configuresubnet.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/20-connectrdp.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/20-connectrdp.png deleted file mode 100644 index 89892320e5839..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/20-connectrdp.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/22-add-features.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/22-add-features.png deleted file mode 100644 index 72aa6694da672..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/22-add-features.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/23-add-roles.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/23-add-roles.png deleted file mode 100644 index 9278a38467ea6..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/23-add-roles.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/24-addsmore.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/24-addsmore.png deleted file mode 100644 index fe64e6337dc60..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/24-addsmore.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/25-primarydcip.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/25-primarydcip.png deleted file mode 100644 index 33973c15f460e..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/25-primarydcip.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/26-networkinterface.png 
b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/26-networkinterface.png deleted file mode 100644 index e07d68c9e85df..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/26-networkinterface.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/28-deploymentconfig.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/28-deploymentconfig.png deleted file mode 100644 index 09cce07060b49..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/28-deploymentconfig.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/29-addcnewuser.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/29-addcnewuser.png deleted file mode 100644 index 919d75345069b..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/29-addcnewuser.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/31-addcproperties.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/31-addcproperties.png deleted file mode 100644 index 94e1f4af3580d..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/31-addcproperties.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/33-addpermissions.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/33-addpermissions.png deleted file mode 100644 index 4bcab3230c6c3..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/33-addpermissions.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/35-tcpports.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/35-tcpports.png deleted file mode 100644 index bab39dd3c04ae..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-prerequisites-tutorial-single-subnet/35-tcpports.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/01-create-cluster.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/01-create-cluster.png deleted file mode 100644 index 66f7ac17a2e39..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/01-create-cluster.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/02-failed-ip-address.png 
b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/02-failed-ip-address.png deleted file mode 100644 index 0762999623909..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/02-failed-ip-address.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/03-first-static-ip-address.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/03-first-static-ip-address.png deleted file mode 100644 index 6c45f527eb2cb..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/03-first-static-ip-address.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/04-second-static-ip-address.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/04-second-static-ip-address.png deleted file mode 100644 index c667d43bf39d6..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/04-second-static-ip-address.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/05-storage-account-keys.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/05-storage-account-keys.png deleted file mode 100644 index a2849ed27f7b0..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/05-storage-account-keys.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/06-configure-quorum.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/06-configure-quorum.png deleted file mode 100644 index 6e5e48f230786..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/06-configure-quorum.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/07-tls-version-error.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/07-tls-version-error.png deleted file mode 100644 index 082394996c4cd..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/07-tls-version-error.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/08-enable-always-on.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/08-enable-always-on.png deleted file mode 100644 index 0d4e28653b635..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/08-enable-always-on.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/09-new-share.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/09-new-share.png deleted file mode 100644 index d825fcd3fbc63..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/09-new-share.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/10-backup-share-permission.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/10-backup-share-permission.png deleted file mode 100644 index e47d4ea01e0ae..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/10-backup-share-permission.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/11-new-ag-wizard.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/11-new-ag-wizard.png deleted file mode 100644 index ca271d95361a8..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/11-new-ag-wizard.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/12-new-ag-name.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/12-new-ag-name.png deleted file mode 100644 index 7334d51efccc0..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/12-new-ag-name.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/13-new-ag-select-database.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/13-new-ag-select-database.png deleted file mode 
100644 index 8e86aec0a4ed2..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/13-new-ag-select-database.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/14-new-ag-add-replica.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/14-new-ag-add-replica.png deleted file mode 100644 index daa1fe6c40852..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/14-new-ag-add-replica.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/15-new-ag-replica.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/15-new-ag-replica.png deleted file mode 100644 index 99c6878b724fe..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/15-new-ag-replica.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/16-endpoint.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/16-endpoint.png deleted file mode 100644 index c21bc5511382e..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/16-endpoint.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/17-data-synchronization.png 
b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/17-data-synchronization.png deleted file mode 100644 index e34f54d217335..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/17-data-synchronization.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/18-add-listener-ip-subnet-1.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/18-add-listener-ip-subnet-1.png deleted file mode 100644 index ed7b20767c79f..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/18-add-listener-ip-subnet-1.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/18-create-listener.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/18-create-listener.png deleted file mode 100644 index c8c2376e8d956..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/18-create-listener.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/19-add-listener-ip-subnet-2.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/19-add-listener-ip-subnet-2.png deleted file mode 100644 index d8af61241698d..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/19-add-listener-ip-subnet-2.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/20-listener.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/20-listener.png deleted file mode 100644 index 7af9b41ba7243..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/20-listener.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/21-full-data-sync.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/21-full-data-sync.png deleted file mode 100644 index 4081c991fce08..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/21-full-data-sync.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/22-validation.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/22-validation.png deleted file mode 100644 index 67a110626f20a..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/22-validation.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/23-results.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/23-results.png deleted file mode 100644 index 7f2d576595491..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/23-results.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/24-show-dashboard.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/24-show-dashboard.png deleted file mode 100644 index d4b134ba44339..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/24-show-dashboard.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/25-ag-dashboard.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/25-ag-dashboard.png deleted file mode 100644 index 3c20504199e51..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/25-ag-dashboard.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/26-cluster-manager.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/26-cluster-manager.png deleted file mode 100644 index e6baaf863b69c..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/26-cluster-manager.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/27-ssms-listener-connect.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/27-ssms-listener-connect.png deleted file mode 100644 index e411c386071f6..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/27-ssms-listener-connect.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/28-ssms-connection-parameters.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/28-ssms-connection-parameters.png deleted file mode 100644 index bb9ea655066c0..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/28-ssms-connection-parameters.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/square.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/square.png deleted file mode 100644 index 7ceba01ac94ff..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-multi-subnet/square.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/00-endstatesamplenoelb.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/00-endstatesamplenoelb.png deleted file mode 100644 index 99e786f3e71a1..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/00-endstatesamplenoelb.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/40-createcluster.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/40-createcluster.png deleted file 
mode 100644 index 2dd9299f9b192..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/40-createcluster.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/42_ipproperties.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/42_ipproperties.png deleted file mode 100644 index 5be2193e3c6c9..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/42_ipproperties.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/44-addnode.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/44-addnode.png deleted file mode 100644 index 6b8e46f29ce0c..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/44-addnode.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/46-addnodeconfirmation.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/46-addnodeconfirmation.png deleted file mode 100644 index 5a885e07c2f1a..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/46-addnodeconfirmation.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/48-newshare.png 
b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/48-newshare.png deleted file mode 100644 index f9173dea1a3f7..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/48-newshare.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/50-filesharepermissions.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/50-filesharepermissions.png deleted file mode 100644 index 9487718269bad..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/50-filesharepermissions.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/52-configurequorum.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/52-configurequorum.png deleted file mode 100644 index 99bc74d955e2f..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/52-configurequorum.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/54-enablealwayson.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/54-enablealwayson.png deleted file mode 100644 index c77853fa6af12..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/54-enablealwayson.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/56-newagwiz.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/56-newagwiz.png deleted file mode 100644 index e685150e65877..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/56-newagwiz.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/58-newagname.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/58-newagname.png deleted file mode 100644 index b1ce746e1a146..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/58-newagname.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/60-newagselectdatabase.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/60-newagselectdatabase.png deleted file mode 100644 index d0a860413a63a..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/60-newagselectdatabase.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/62-newagaddreplica.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/62-newagaddreplica.png deleted file mode 100644 index 1af1b49657fa5..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/62-newagaddreplica.png and 
/dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/64-newagreplica.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/64-newagreplica.png deleted file mode 100644 index f75a4a12fecd6..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/64-newagreplica.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/66-endpoint.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/66-endpoint.png deleted file mode 100644 index 0c0f4ac4c07fd..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/66-endpoint.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/68-backupsharepermission.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/68-backupsharepermission.png deleted file mode 100644 index 41e994c79812a..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/68-backupsharepermission.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/70-datasynchronization.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/70-datasynchronization.png deleted file mode 100644 index 6e98aeac44bbd..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/70-datasynchronization.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/72-validation.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/72-validation.png deleted file mode 100644 index 80bd9e1e5dcf6..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/72-validation.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/74-results.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/74-results.png deleted file mode 100644 index 51b0b5e61a49e..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/74-results.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/76-showdashboard.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/76-showdashboard.png deleted file mode 100644 index 5651ec7480cb3..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/76-showdashboard.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/78-agdashboard.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/78-agdashboard.png deleted file mode 100644 index 28812ade45f1d..0000000000000 Binary 
files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/78-agdashboard.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/80-clustermanager.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/80-clustermanager.png deleted file mode 100644 index 4252de983f56f..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/80-clustermanager.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/82-azureloadbalancer.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/82-azureloadbalancer.png deleted file mode 100644 index 7123c2dc65204..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/82-azureloadbalancer.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/84-createloadbalancer.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/84-createloadbalancer.png deleted file mode 100644 index 40219f39fd11e..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/84-createloadbalancer.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/86-findloadbalancer.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/86-findloadbalancer.png 
deleted file mode 100644 index 60befe89e3fa2..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/86-findloadbalancer.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/square.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/square.png deleted file mode 100644 index 7ceba01ac94ff..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-manually-configure-tutorial-single-subnet/square.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-overview/00-endstatesamplenoelb.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-overview/00-endstatesamplenoelb.png deleted file mode 100644 index 99e786f3e71a1..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-overview/00-endstatesamplenoelb.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/availability-group-quickstart-template-configure/account-missing-upn.png b/articles/azure-sql/virtual-machines/windows/media/availability-group-quickstart-template-configure/account-missing-upn.png deleted file mode 100644 index 0584f16e56775..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/availability-group-quickstart-template-configure/account-missing-upn.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/azure-key-vault-integration-configure/azure-sql-arm-akv.png b/articles/azure-sql/virtual-machines/windows/media/azure-key-vault-integration-configure/azure-sql-arm-akv.png deleted file mode 100644 index c60eb85b38c66..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/azure-key-vault-integration-configure/azure-sql-arm-akv.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/azure-key-vault-integration-configure/azure-sql-rm-akv-existing-vms.png b/articles/azure-sql/virtual-machines/windows/media/azure-key-vault-integration-configure/azure-sql-rm-akv-existing-vms.png deleted file mode 100644 index e6c118c3216d7..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/azure-key-vault-integration-configure/azure-sql-rm-akv-existing-vms.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/backup-restore/yes.png b/articles/azure-sql/virtual-machines/windows/media/backup-restore/yes.png deleted file mode 100644 index dd2030fe2cb27..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/backup-restore/yes.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-dr-alwayson.png b/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-dr-alwayson.png deleted file mode 100644 index 7f549a6ebee44..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-dr-alwayson.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-dr-backup-restore.png b/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-dr-backup-restore.png deleted file mode 100644 index 1db4439465428..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-dr-backup-restore.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-dr-dbmirroring.png b/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-dr-dbmirroring.png deleted file mode 100644 index 015987dfdebc3..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-dr-dbmirroring.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-dr-standalone-sqlserver-asr.png b/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-dr-standalone-sqlserver-asr.png deleted file mode 100644 index 1806d15ff3124..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-dr-standalone-sqlserver-asr.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-ha-always-on.png b/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-ha-always-on.png deleted file mode 100644 index ba752617d4e97..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/azure-only-ha-always-on.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/dr-replica-in-portal.png b/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/dr-replica-in-portal.png deleted file mode 100644 index 38e478b7c3bc1..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/dr-replica-in-portal.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/failover-with-primary-in-azure.png b/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/failover-with-primary-in-azure.png deleted file mode 100644 index 8e47fc7ec2246..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/failover-with-primary-in-azure.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/failover-with-secondary-in-azure.png b/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/failover-with-secondary-in-azure.png deleted file mode 100644 index 0f3f78173bc10..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/failover-with-secondary-in-azure.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-alwayson.png b/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-alwayson.png 
deleted file mode 100644 index eeacaa0424d47..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-alwayson.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-backup-restore.png b/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-backup-restore.png deleted file mode 100644 index 774d731055573..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-backup-restore.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-dbmirroring.png b/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-dbmirroring.png deleted file mode 100644 index e8ade084f768c..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-dbmirroring.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-log-shipping.png b/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-log-shipping.png deleted file mode 100644 index fa44c22ec5809..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-log-shipping.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-standalone-sqlserver-asr.png b/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-standalone-sqlserver-asr.png deleted file mode 100644 index 0e043a92509fc..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-dr-standalone-sqlserver-asr.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-with-primary-on-prem.png b/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-with-primary-on-prem.png deleted file mode 100644 index 8a7c50df82765..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/business-continuity-high-availability-disaster-recovery-hadr-overview/hybrid-with-primary-on-prem.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/change-sql-server-edition/edition-change-in-portal.png b/articles/azure-sql/virtual-machines/windows/media/change-sql-server-edition/edition-change-in-portal.png deleted file mode 100644 index a707560f453cd..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/change-sql-server-edition/edition-change-in-portal.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/change-sql-server-edition/edition-upgrade.png b/articles/azure-sql/virtual-machines/windows/media/change-sql-server-edition/edition-upgrade.png deleted file mode 100644 index 29b6cca0a7816..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/change-sql-server-edition/edition-upgrade.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/windows/media/change-sql-server-version/change-portal.png b/articles/azure-sql/virtual-machines/windows/media/change-sql-server-version/change-portal.png deleted file mode 100644 index 4048f5d9ddd40..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/change-sql-server-version/change-portal.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/change-sql-server-version/complete-page.png b/articles/azure-sql/virtual-machines/windows/media/change-sql-server-version/complete-page.png deleted file mode 100644 index 15566ba50b9e3..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/change-sql-server-version/complete-page.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/change-sql-server-version/scripting-options.png b/articles/azure-sql/virtual-machines/windows/media/change-sql-server-version/scripting-options.png deleted file mode 100644 index a05237cbe9d59..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/change-sql-server-version/scripting-options.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/change-sql-server-version/upgrade.png b/articles/azure-sql/virtual-machines/windows/media/change-sql-server-version/upgrade.png deleted file mode 100644 index 3687ada13baee..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/change-sql-server-version/upgrade.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/change-sql-server-version/verify-portal.png b/articles/azure-sql/virtual-machines/windows/media/change-sql-server-version/verify-portal.png deleted file mode 100644 index 124edd1db3a8f..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/change-sql-server-version/verify-portal.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/automated-backup.png b/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/automated-backup.png deleted file mode 100644 index 6bdc7303bedd3..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/automated-backup.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/azure-sqlvm-akv.png b/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/azure-sqlvm-akv.png deleted file mode 100644 index cc25eaf7d63fd..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/azure-sqlvm-akv.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/azure-sqlvm-authentication.png b/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/azure-sqlvm-authentication.png deleted file mode 100644 index 911b8ad93b896..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/azure-sqlvm-authentication.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/azure-sqlvm-automated-patching.png b/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/azure-sqlvm-automated-patching.png deleted file mode 100644 index 0c1f13444000f..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/azure-sqlvm-automated-patching.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/azure-sqlvm-license.png b/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/azure-sqlvm-license.png deleted file mode 100644 index 4e22260355fa2..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/azure-sqlvm-license.png and /dev/null 
differ diff --git a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/azure-sqlvm-security.png b/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/azure-sqlvm-security.png deleted file mode 100644 index 569e3b77ce5a8..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/azure-sqlvm-security.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/basics-administrator-account.png b/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/basics-administrator-account.png deleted file mode 100644 index 0561e4b489d84..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/basics-administrator-account.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/basics-inbound-port-rules.png b/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/basics-inbound-port-rules.png deleted file mode 100644 index 91bd3af9f328a..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/basics-inbound-port-rules.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/basics-instance-details.png b/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/basics-instance-details.png deleted file mode 100644 index 9f9402518c183..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/basics-instance-details.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/basics-project-details.png b/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/basics-project-details.png deleted file mode 100644 index 8ff8aaf336c60..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/basics-project-details.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/select-sql-vm-image-portal.png b/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/select-sql-vm-image-portal.png deleted file mode 100644 index fe08c16532163..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/select-sql-vm-image-portal.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/sql-instance-settings.png b/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/sql-instance-settings.png deleted file mode 100644 index e2c4708256881..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/sql-instance-settings.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/sql-vm-storage-configuration-provisioning.png b/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/sql-vm-storage-configuration-provisioning.png deleted file mode 100644 index 69c69b77a3156..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/sql-vm-storage-configuration-provisioning.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/storage-configuration-data-storage.png b/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/storage-configuration-data-storage.png deleted file mode 100644 index 3656ec2e797c8..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/storage-configuration-data-storage.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/storage-configuration-log-storage.png 
b/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/storage-configuration-log-storage.png deleted file mode 100644 index 31c6bfb2897c2..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/storage-configuration-log-storage.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/storage-configuration-tempdb-storage.png b/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/storage-configuration-tempdb-storage.png deleted file mode 100644 index 163efc2a93744..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/create-sql-vm-portal/storage-configuration-tempdb-storage.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/cluster-add-disk.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/cluster-add-disk.png deleted file mode 100644 index a6008312aee23..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/cluster-add-disk.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/cluster-select-shared-disk.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/cluster-select-shared-disk.png deleted file mode 100644 index b317f16d8f889..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/cluster-select-shared-disk.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/cluster-shared-disk.png 
b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/cluster-shared-disk.png deleted file mode 100644 index 8b594c057c2e1..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/cluster-shared-disk.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-cluster-disk-selection.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-cluster-disk-selection.png deleted file mode 100644 index 57d933d04ac2b..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-cluster-disk-selection.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-cluster-network-secondary-ip-vm-1.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-cluster-network-secondary-ip-vm-1.png deleted file mode 100644 index ff249e4e87b97..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-cluster-network-secondary-ip-vm-1.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-cluster-network-secondary-ip-vm-2.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-cluster-network-secondary-ip-vm-2.png deleted file mode 100644 index f0afc75b199f4..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-cluster-network-secondary-ip-vm-2.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-multi-subnet-confirmation.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-multi-subnet-confirmation.png deleted file mode 100644 index 481c10c413ed4..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-azure-shared-disk-manually-configure/sql-install-multi-subnet-confirmation.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-dnn-interoperability/alias-in-configuration-manager.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-dnn-interoperability/alias-in-configuration-manager.png deleted file mode 100644 index 391dadc028fa0..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-dnn-interoperability/alias-in-configuration-manager.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-dnn-interoperability/alias-named-instance-configuration-manager.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-dnn-interoperability/alias-named-instance-configuration-manager.png deleted file mode 100644 index 5bcaa738feaec..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-dnn-interoperability/alias-named-instance-configuration-manager.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/03-remove-features.png 
b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/03-remove-features.png deleted file mode 100644 index 7fb039be9b305..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/03-remove-features.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/30-load-balancer-create.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/30-load-balancer-create.png deleted file mode 100644 index 868647ecb69d2..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/30-load-balancer-create.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/cluster-validation.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/cluster-validation.png deleted file mode 100644 index 20debd474e06a..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/cluster-validation.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/file-share-as-storage.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/file-share-as-storage.png deleted file mode 100644 index 455114a165e30..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/file-share-as-storage.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/premium-file-storage-commands.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/premium-file-storage-commands.png deleted file mode 100644 index f5de4eb3d244c..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/premium-file-storage-commands.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/test-cluster-failover.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/test-cluster-failover.png deleted file mode 100644 index 546e34f642ac9..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/test-cluster-failover.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/use-file-share-as-data-directories.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/use-file-share-as-data-directories.png deleted file mode 100644 index 7a6a291e1419e..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-premium-file-share-manually-configure/use-file-share-as-data-directories.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-prepare-vm/03-remove-features.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-prepare-vm/03-remove-features.png deleted file mode 100644 index 7fb039be9b305..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-prepare-vm/03-remove-features.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-prepare-vm/05-create-vnet-ip-address-add-sql-subnet-1.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-prepare-vm/05-create-vnet-ip-address-add-sql-subnet-1.png deleted file mode 100644 index 42d294ee3051c..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-prepare-vm/05-create-vnet-ip-address-add-sql-subnet-1.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-prepare-vm/07-create-vnet-ip-address.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-prepare-vm/07-create-vnet-ip-address.png deleted file mode 100644 index b6684d3ae4102..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-prepare-vm/07-create-vnet-ip-address.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-prepare-vm/22-add-fci-ip-address.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-prepare-vm/22-add-fci-ip-address.png deleted file mode 100644 index 3b9da14623e14..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-prepare-vm/22-add-fci-ip-address.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-prepare-vm/remove-features-updated.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-prepare-vm/remove-features-updated.png deleted file mode 100644 index e5362269dfc11..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-prepare-vm/remove-features-updated.png and 
/dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-storage-spaces-direct-manually-configure/00-sql-fci-s2d-complete-solution.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-storage-spaces-direct-manually-configure/00-sql-fci-s2d-complete-solution.png deleted file mode 100644 index a057b051a8e8e..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-storage-spaces-direct-manually-configure/00-sql-fci-s2d-complete-solution.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-storage-spaces-direct-manually-configure/10-validate-cluster-test.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-storage-spaces-direct-manually-configure/10-validate-cluster-test.png deleted file mode 100644 index 4026333bab194..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-storage-spaces-direct-manually-configure/10-validate-cluster-test.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-storage-spaces-direct-manually-configure/15-cluster-shared-volume.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-storage-spaces-direct-manually-configure/15-cluster-shared-volume.png deleted file mode 100644 index a29dcebc1d336..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-storage-spaces-direct-manually-configure/15-cluster-shared-volume.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-storage-spaces-direct-manually-configure/20-data-dicrectories.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-storage-spaces-direct-manually-configure/20-data-dicrectories.png deleted file 
mode 100644 index 831a1f530425b..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-storage-spaces-direct-manually-configure/20-data-dicrectories.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-storage-spaces-direct-manually-configure/30-load-balancer-create.png b/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-storage-spaces-direct-manually-configure/30-load-balancer-create.png deleted file mode 100644 index 868647ecb69d2..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/failover-cluster-instance-storage-spaces-direct-manually-configure/30-load-balancer-create.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-10.png b/articles/azure-sql/virtual-machines/windows/media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-10.png deleted file mode 100644 index 53cb075e8a338..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-10.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-11.png b/articles/azure-sql/virtual-machines/windows/media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-11.png deleted file mode 100644 index b2808afc8deb3..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-11.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-7.png b/articles/azure-sql/virtual-machines/windows/media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-7.png deleted file mode 
100644 index d1ea548ea3d69..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-7.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-8.png b/articles/azure-sql/virtual-machines/windows/media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-8.png deleted file mode 100644 index bd98ae2a3adc2..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-8.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-9.png b/articles/azure-sql/virtual-machines/windows/media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-9.png deleted file mode 100644 index 23502a172ee77..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/hadr-create-quorum-windows-failover-cluster-how-to/cloud-witness-9.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/hadr-distributed-network-name-dnn-configure/clear-check-for-nodes-not-in-fci.png b/articles/azure-sql/virtual-machines/windows/media/hadr-distributed-network-name-dnn-configure/clear-check-for-nodes-not-in-fci.png deleted file mode 100644 index ab84593e76190..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/hadr-distributed-network-name-dnn-configure/clear-check-for-nodes-not-in-fci.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/hadr-distributed-network-name-dnn-configure/fci-dnn-properties.png b/articles/azure-sql/virtual-machines/windows/media/hadr-distributed-network-name-dnn-configure/fci-dnn-properties.png deleted file mode 100644 index 948bb84bfaa2e..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/hadr-distributed-network-name-dnn-configure/fci-dnn-properties.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/licensing-model-azure-hybrid-benefit-ahb-change/ahb-in-portal.png b/articles/azure-sql/virtual-machines/windows/media/licensing-model-azure-hybrid-benefit-ahb-change/ahb-in-portal.png deleted file mode 100644 index ef5c0ceb340a8..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/licensing-model-azure-hybrid-benefit-ahb-change/ahb-in-portal.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/all-sql-vms.png b/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/all-sql-vms.png deleted file mode 100644 index 110282e33d63e..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/all-sql-vms.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-assessment-workbook.png b/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-assessment-workbook.png deleted file mode 100644 index 059c95ec66a08..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-assessment-workbook.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-automated-backup.png b/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-automated-backup.png deleted file mode 100644 index 2d08075cd8b62..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-automated-backup.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-automated-patching.png 
b/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-automated-patching.png deleted file mode 100644 index ed98bb5817611..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-automated-patching.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-high-availability.png b/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-high-availability.png deleted file mode 100644 index 146086eaf818e..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-high-availability.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-license-edition.png b/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-license-edition.png deleted file mode 100644 index c7830ec3b5672..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-license-edition.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-manage.png b/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-manage.png deleted file mode 100644 index 1d94e0bc26253..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-manage.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-resource.png b/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-resource.png deleted file mode 100644 index 1018ad8a7289a..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-resource.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-search.png 
b/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-search.png deleted file mode 100644 index 8287b8343ac7e..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-search.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-security-center.png b/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-security-center.png deleted file mode 100644 index 4c7bda309e78d..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-security-center.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-security-configuration.png b/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-security-configuration.png deleted file mode 100644 index c060f09840cb5..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-security-configuration.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-storage-configuration.png b/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-storage-configuration.png deleted file mode 100644 index f8ba629938948..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/sql-vm-storage-configuration.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/vm-search.png b/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/vm-search.png deleted file mode 100644 index f909b1ff116dc..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/manage-sql-vm-portal/vm-search.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/windows/media/move-sql-vm-different-region/check-replication-status.png b/articles/azure-sql/virtual-machines/windows/media/move-sql-vm-different-region/check-replication-status.png deleted file mode 100644 index 62b74640259cf..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/move-sql-vm-different-region/check-replication-status.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/move-sql-vm-different-region/cleanup-test-items.png b/articles/azure-sql/virtual-machines/windows/media/move-sql-vm-different-region/cleanup-test-items.png deleted file mode 100644 index d15d39754cab2..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/move-sql-vm-different-region/cleanup-test-items.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/move-sql-vm-different-region/configure-replication.png b/articles/azure-sql/virtual-machines/windows/media/move-sql-vm-different-region/configure-replication.png deleted file mode 100644 index bf3dedf2e4a5f..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/move-sql-vm-different-region/configure-replication.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/move-sql-vm-different-region/initiate-failover.png b/articles/azure-sql/virtual-machines/windows/media/move-sql-vm-different-region/initiate-failover.png deleted file mode 100644 index 5896dcf59545f..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/move-sql-vm-different-region/initiate-failover.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/move-sql-vm-different-region/monitor-failover-test-job.png b/articles/azure-sql/virtual-machines/windows/media/move-sql-vm-different-region/monitor-failover-test-job.png deleted file mode 100644 index b0c377115feee..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/move-sql-vm-different-region/monitor-failover-test-job.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/move-sql-vm-different-region/test-failover-of-replicated-vm.png b/articles/azure-sql/virtual-machines/windows/media/move-sql-vm-different-region/test-failover-of-replicated-vm.png deleted file mode 100644 index 7097cfa843ec7..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/move-sql-vm-different-region/test-failover-of-replicated-vm.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/azure-portal-data-disk-bandwidth.png b/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/azure-portal-data-disk-bandwidth.png deleted file mode 100644 index 640966665b677..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/azure-portal-data-disk-bandwidth.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/azure-portal-data-vm-bandwidth.png b/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/azure-portal-data-vm-bandwidth.png deleted file mode 100644 index b11680538483f..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/azure-portal-data-vm-bandwidth.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/azure-portal-metrics-cpu.png b/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/azure-portal-metrics-cpu.png deleted file mode 100644 index 494204b520f12..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/azure-portal-metrics-cpu.png and 
/dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/azure-portal-overview-charts.png b/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/azure-portal-overview-charts.png deleted file mode 100644 index 48c828ad17f38..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/azure-portal-overview-charts.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/configure-storage-data-log.png b/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/configure-storage-data-log.png deleted file mode 100644 index cc6bbf28eb673..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/configure-storage-data-log.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/m-series-table-cached-temp.png b/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/m-series-table-cached-temp.png deleted file mode 100644 index 94a26d3891876..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/m-series-table-cached-temp.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/m-series-table-premium-support.png b/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/m-series-table-premium-support.png deleted file mode 100644 index 759f1a5265999..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/m-series-table-premium-support.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/m-series-table.png b/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/m-series-table.png deleted file mode 100644 index f763cbd6f0384..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/m-series-table.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/sql-server-default-data-log-backup-locations.png b/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/sql-server-default-data-log-backup-locations.png deleted file mode 100644 index 5124c169d9388..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/sql-server-default-data-log-backup-locations.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/sql-server-error-log-location.png b/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/sql-server-error-log-location.png deleted file mode 100644 index 94e6b6e12d85b..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/sql-server-error-log-location.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/uncheck-eligible-cluster-storage.png b/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/uncheck-eligible-cluster-storage.png deleted file mode 100644 index 27c34c963ca78..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/performance-guidelines-best-practices/uncheck-eligible-cluster-storage.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/windows/media/pricing-guidance/sql-vm-auto-shutdown.png b/articles/azure-sql/virtual-machines/windows/media/pricing-guidance/sql-vm-auto-shutdown.png deleted file mode 100644 index af706e519807c..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/pricing-guidance/sql-vm-auto-shutdown.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/pricing-guidance/sql-vm-choose-size-pricing-estimate.png b/articles/azure-sql/virtual-machines/windows/media/pricing-guidance/sql-vm-choose-size-pricing-estimate.png deleted file mode 100644 index 4b530ab079e66..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/pricing-guidance/sql-vm-choose-size-pricing-estimate.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/pricing-guidance/virtual-machines-pricing-ui.png b/articles/azure-sql/virtual-machines/windows/media/pricing-guidance/virtual-machines-pricing-ui.png deleted file mode 100644 index f798f0d265523..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/pricing-guidance/virtual-machines-pricing-ui.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/security-considerations-best-practices/sql-vm-change-tcp-port.png b/articles/azure-sql/virtual-machines/windows/media/security-considerations-best-practices/sql-vm-change-tcp-port.png deleted file mode 100644 index a4f3f959d5db4..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/security-considerations-best-practices/sql-vm-change-tcp-port.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/security-considerations-best-practices/sql-vm-connectivity-option.png b/articles/azure-sql/virtual-machines/windows/media/security-considerations-best-practices/sql-vm-connectivity-option.png deleted file mode 100644 index 2b134aade2e71..0000000000000 Binary 
files a/articles/azure-sql/virtual-machines/windows/media/security-considerations-best-practices/sql-vm-connectivity-option.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/security-considerations-best-practices/sql-vm-network-security-group-rules.png b/articles/azure-sql/virtual-machines/windows/media/security-considerations-best-practices/sql-vm-network-security-group-rules.png deleted file mode 100644 index 9ff558d03c238..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/security-considerations-best-practices/sql-vm-network-security-group-rules.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-automatic-registration-all-vms/automatic-registration.png b/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-automatic-registration-all-vms/automatic-registration.png deleted file mode 100644 index 0eafd83d292fd..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-automatic-registration-all-vms/automatic-registration.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/change-sql-iaas-mode-portal.png b/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/change-sql-iaas-mode-portal.png deleted file mode 100644 index eca6fdd163e63..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/change-sql-iaas-mode-portal.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/confirm-delete-of-resource-uncheck-box.png b/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/confirm-delete-of-resource-uncheck-box.png deleted file mode 100644 index 9f1b25e325031..0000000000000 
Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/confirm-delete-of-resource-uncheck-box.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/delete-sql-vm-resource.png b/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/delete-sql-vm-resource.png deleted file mode 100644 index f6d17970b7948..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/delete-sql-vm-resource.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/enable-full-mode-iaas.png b/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/enable-full-mode-iaas.png deleted file mode 100644 index c1f65963e21b0..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/enable-full-mode-iaas.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/force-repair-extension.png b/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/force-repair-extension.png deleted file mode 100644 index a8e5ff6dff876..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/force-repair-extension.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/repair-extension.png b/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/repair-extension.png deleted file mode 100644 index 8d94f563d0f19..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/repair-extension.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/select-resource-provider-sql.png b/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/select-resource-provider-sql.png deleted file mode 100644 index 0dffb4f6b5143..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/select-resource-provider-sql.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/sql-vm-manage.png b/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/sql-vm-manage.png deleted file mode 100644 index 1d94e0bc26253..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/sql-vm-manage.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/verify-registration-status.png b/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/verify-registration-status.png deleted file mode 100644 index 6a4910ce41bb2..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-agent-extension-manually-register-single-vm/verify-registration-status.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-server-iaas-agent-extension-automate-management/azure-rm-sql-server-iaas-agent-portal.png b/articles/azure-sql/virtual-machines/windows/media/sql-server-iaas-agent-extension-automate-management/azure-rm-sql-server-iaas-agent-portal.png deleted file mode 100644 index 11252d890323b..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/sql-server-iaas-agent-extension-automate-management/azure-rm-sql-server-iaas-agent-portal.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-server-iaas-agent-extension-automate-management/azure-rm-sql-server-iaas-agent-uninstall.png b/articles/azure-sql/virtual-machines/windows/media/sql-server-iaas-agent-extension-automate-management/azure-rm-sql-server-iaas-agent-uninstall.png deleted file mode 100644 index 80a481502beec..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-server-iaas-agent-extension-automate-management/azure-rm-sql-server-iaas-agent-uninstall.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-server-on-azure-vm-iaas-what-is-overview/create-azure-sql-resource.png b/articles/azure-sql/virtual-machines/windows/media/sql-server-on-azure-vm-iaas-what-is-overview/create-azure-sql-resource.png deleted file mode 100644 index 83231b0462d59..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-server-on-azure-vm-iaas-what-is-overview/create-azure-sql-resource.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-server-on-azure-vm-iaas-what-is-overview/search-for-azure-sql.png b/articles/azure-sql/virtual-machines/windows/media/sql-server-on-azure-vm-iaas-what-is-overview/search-for-azure-sql.png deleted file mode 100644 index fb912ad630631..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-server-on-azure-vm-iaas-what-is-overview/search-for-azure-sql.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-server-on-azure-vm-iaas-what-is-overview/sql-vm-details.png b/articles/azure-sql/virtual-machines/windows/media/sql-server-on-azure-vm-iaas-what-is-overview/sql-vm-details.png deleted file mode 100644 index 506f47cac9fc5..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/sql-server-on-azure-vm-iaas-what-is-overview/sql-vm-details.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/basics-administrator-account.png b/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/basics-administrator-account.png deleted file mode 100644 index 0561e4b489d84..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/basics-administrator-account.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/basics-inbound-port-rules.png b/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/basics-inbound-port-rules.png deleted file mode 100644 index 91bd3af9f328a..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/basics-inbound-port-rules.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/basics-instance-details.png b/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/basics-instance-details.png deleted file mode 100644 index 22bef9755718b..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/basics-instance-details.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/basics-project-details.png b/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/basics-project-details.png deleted file mode 100644 index a734cd07a2c09..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/basics-project-details.png and /dev/null differ diff --git 
a/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/create-sql-2017-vm-image.png b/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/create-sql-2017-vm-image.png deleted file mode 100644 index 216096b4116a0..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/create-sql-2017-vm-image.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/review-create.png b/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/review-create.png deleted file mode 100644 index 34be560a8a716..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/review-create.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/select-sql-2017-vm-image.png b/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/select-sql-2017-vm-image.png deleted file mode 100644 index 06915fe66477a..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/select-sql-2017-vm-image.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/sql-server-settings.png b/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/sql-server-settings.png deleted file mode 100644 index be43966e57bb0..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/sql-server-settings.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/ssms-connect.png b/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/ssms-connect.png deleted file mode 100644 index 2234cc031869e..0000000000000 Binary 
files a/articles/azure-sql/virtual-machines/windows/media/sql-vm-create-portal-quickstart/ssms-connect.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-vulnerability-assessment-enable/analyze-report-for-details.png b/articles/azure-sql/virtual-machines/windows/media/sql-vulnerability-assessment-enable/analyze-report-for-details.png deleted file mode 100644 index 42545887b6722..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-vulnerability-assessment-enable/analyze-report-for-details.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-vulnerability-assessment-enable/recommendations-in-security-center.png b/articles/azure-sql/virtual-machines/windows/media/sql-vulnerability-assessment-enable/recommendations-in-security-center.png deleted file mode 100644 index 843748f6e97cb..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-vulnerability-assessment-enable/recommendations-in-security-center.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-vulnerability-assessment-enable/report-in-security-center.png b/articles/azure-sql/virtual-machines/windows/media/sql-vulnerability-assessment-enable/report-in-security-center.png deleted file mode 100644 index 9eb4a76a82a43..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-vulnerability-assessment-enable/report-in-security-center.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/sql-vulnerability-assessment-enable/report-in-vm-security-tab.png b/articles/azure-sql/virtual-machines/windows/media/sql-vulnerability-assessment-enable/report-in-vm-security-tab.png deleted file mode 100644 index 61c439cbb86de..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-vulnerability-assessment-enable/report-in-vm-security-tab.png and /dev/null differ diff 
--git a/articles/azure-sql/virtual-machines/windows/media/sql-vulnerability-assessment-enable/sample-report.png b/articles/azure-sql/virtual-machines/windows/media/sql-vulnerability-assessment-enable/sample-report.png deleted file mode 100644 index a299810f9810e..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/sql-vulnerability-assessment-enable/sample-report.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/storage-configuration/azure-disk-config.png b/articles/azure-sql/virtual-machines/windows/media/storage-configuration/azure-disk-config.png deleted file mode 100644 index 2b6ba1e722b02..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/storage-configuration/azure-disk-config.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/storage-configuration/disk-in-portal.png b/articles/azure-sql/virtual-machines/windows/media/storage-configuration/disk-in-portal.png deleted file mode 100644 index 49cb0dd8e619e..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/storage-configuration/disk-in-portal.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/storage-configuration/sql-vm-extend-drive.png b/articles/azure-sql/virtual-machines/windows/media/storage-configuration/sql-vm-extend-drive.png deleted file mode 100644 index 77288eececf40..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/storage-configuration/sql-vm-extend-drive.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/storage-configuration/sql-vm-storage-configuration-existing.png b/articles/azure-sql/virtual-machines/windows/media/storage-configuration/sql-vm-storage-configuration-existing.png deleted file mode 100644 index d61234df83251..0000000000000 Binary files 
a/articles/azure-sql/virtual-machines/windows/media/storage-configuration/sql-vm-storage-configuration-existing.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/storage-configuration/sql-vm-storage-configuration-provisioning.png b/articles/azure-sql/virtual-machines/windows/media/storage-configuration/sql-vm-storage-configuration-provisioning.png deleted file mode 100644 index 69c69b77a3156..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/storage-configuration/sql-vm-storage-configuration-provisioning.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/storage-configuration/sql-vm-storage-configuration.png b/articles/azure-sql/virtual-machines/windows/media/storage-configuration/sql-vm-storage-configuration.png deleted file mode 100644 index d10bea3492a4e..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/storage-configuration/sql-vm-storage-configuration.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/storage-configuration/sql-vm-storage-extend-drive.png b/articles/azure-sql/virtual-machines/windows/media/storage-configuration/sql-vm-storage-extend-drive.png deleted file mode 100644 index 0164bfa709e1f..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/storage-configuration/sql-vm-storage-extend-drive.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/storage-configuration/tempdb-customization.png b/articles/azure-sql/virtual-machines/windows/media/storage-configuration/tempdb-customization.png deleted file mode 100644 index b40a008ec1e90..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/storage-configuration/tempdb-customization.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/storage-configuration/write-accelerator.png 
b/articles/azure-sql/virtual-machines/windows/media/storage-configuration/write-accelerator.png deleted file mode 100644 index 8202f520d2174..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/storage-configuration/write-accelerator.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/storage-migrate-to-ultradisk/additional-disks-settings-azure-portal.png b/articles/azure-sql/virtual-machines/windows/media/storage-migrate-to-ultradisk/additional-disks-settings-azure-portal.png deleted file mode 100644 index e30ef2e2121d9..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/storage-migrate-to-ultradisk/additional-disks-settings-azure-portal.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/tutorial-prerequisites-manually-configure-availability-group/03-remove-features.png b/articles/azure-sql/virtual-machines/windows/media/tutorial-prerequisites-manually-configure-availability-group/03-remove-features.png deleted file mode 100644 index 7fb039be9b305..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/tutorial-prerequisites-manually-configure-availability-group/03-remove-features.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/ways-to-connect-to-sql/sql-vm-portal-connectivity-change.png b/articles/azure-sql/virtual-machines/windows/media/ways-to-connect-to-sql/sql-vm-portal-connectivity-change.png deleted file mode 100644 index 2de7cd2407ec0..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/ways-to-connect-to-sql/sql-vm-portal-connectivity-change.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/ways-to-connect-to-sql/sql-vm-portal-connectivity.png b/articles/azure-sql/virtual-machines/windows/media/ways-to-connect-to-sql/sql-vm-portal-connectivity.png deleted file mode 100644 index 
8b8517f9b939c..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/ways-to-connect-to-sql/sql-vm-portal-connectivity.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/media/ways-to-connect-to-sql/sql-vm-updating-notification.png b/articles/azure-sql/virtual-machines/windows/media/ways-to-connect-to-sql/sql-vm-updating-notification.png deleted file mode 100644 index db9c7d46ff33f..0000000000000 Binary files a/articles/azure-sql/virtual-machines/windows/media/ways-to-connect-to-sql/sql-vm-updating-notification.png and /dev/null differ diff --git a/articles/azure-sql/virtual-machines/windows/migrate-to-vm-from-sql-server.md b/articles/azure-sql/virtual-machines/windows/migrate-to-vm-from-sql-server.md deleted file mode 100644 index c94e388877742..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/migrate-to-vm-from-sql-server.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: Migrate a SQL Server database to SQL Server on a virtual machine | Microsoft Docs -description: Learn about how to migrate an on-premises user database to SQL Server on an Azure virtual machine. -services: virtual-machines-windows -documentationcenter: '' -author: bluefooted -editor: '' -tags: azure-service-management -ms.assetid: 00fd08c6-98fa-4d62-a3b8-ca20aa5246b1 -ms.service: virtual-machines-sql -ms.workload: iaas-sql-server -ms.tgt_pltfrm: vm-windows-sql-server -ms.subservice: migration - -ms.topic: how-to -ms.date: 08/18/2018 -ms.author: pamela -ms.reviewer: mathoma ---- -# Migrate a SQL Server database to SQL Server on an Azure virtual machine - -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -There are a number of ways to migrate an on-premises SQL Server user database to SQL Server in an Azure virtual machine (VM). This article will briefly discuss various methods and recommend the best method for various scenarios. 
- - -[!INCLUDE [learn-about-deployment-models](../../../../includes/learn-about-deployment-models-both-include.md)] - - > [!NOTE] - > SQL Server 2008 and SQL Server 2008 R2 are approaching the [end of their support life cycle](https://www.microsoft.com/sql-server/sql-server-2008) for on-premises instances. To extend support, you can either migrate your SQL Server instance to an Azure VM, or buy Extended Security Updates to keep it on-premises. For more information, see [Extend support for SQL Server 2008 and 2008 R2 with Azure](sql-server-2008-extend-end-of-support.md) - -## What are the primary migration methods? - -The primary migration methods are: - -* Perform an on-premises backup using compression, and then manually copy the backup file into the Azure VM. -* Perform a backup to URL and then restore into the Azure VM from the URL. -* Detach the data and log files, copy them to Azure Blob storage, and then attach them to SQL Server in the Azure VM from the URL. -* Convert the on-premises physical machine to a Hyper-V VHD, upload it to Azure Blob storage, and then deploy it as new VM using uploaded VHD. -* Ship the hard drive using the Windows Import/Export Service. -* If you have an AlwaysOn Availability Group deployment on-premises, use the [Add Azure Replica Wizard](/previous-versions/azure/virtual-machines/windows/sqlclassic/virtual-machines-windows-classic-sql-onprem-availability) to create a replica in Azure, failover, and point users to the Azure database instance. -* Use SQL Server [transactional replication](/sql/relational-databases/replication/transactional/transactional-replication) to configure the Azure SQL Server instance as a subscriber, disable replication, and point users to the Azure database instance. - -> [!TIP] -> You can also use these same techniques to move databases between SQL Server VMs in Azure. For example, there is no supported way to upgrade a SQL Server gallery-image VM from one version/edition to another. 
In this case, you should create a new SQL Server VM with the new version/edition, and then use one of the migration techniques in this article to move your databases. - -## Choose a migration method - -For best data transfer performance, migrate the database files into the Azure VM using a compressed backup file. - -To minimize downtime during the database migration process, use either the AlwaysOn option or the transactional replication option. - -If it is not possible to use the above methods, manually migrate your database. Generally, you start with a database backup, follow it with a copy of the database backup into Azure, and then restore the database. You can also copy the database files themselves into Azure and then attach them. There are several methods by which you can accomplish this manual process of migrating a database into an Azure VM. - -> [!NOTE] -> When you upgrade to SQL Server 2014 or SQL Server 2016 from older versions of SQL Server, you should consider whether changes are needed. We recommend that you address all dependencies on features not supported by the new version of SQL Server as part of your migration project. For more information on the supported editions and scenarios, see [Upgrade to SQL Server](/sql/database-engine/install-windows/upgrade-sql-server). - -The following table lists each of the primary migration methods and discusses when the use of each method is most appropriate. - -| Method | Source database version | Destination database version | Source database backup size constraint | Notes | -| --- | --- | --- | --- | --- | -| [Perform an on-premises backup using compression and manually copy the backup file into the Azure virtual machine](#back-up-and-restore) |SQL Server 2005 or greater |SQL Server 2005 or greater |[Azure VM storage limit](../../../index.yml) | This technique is simple and well-tested for moving databases across machines. 
| -| [Perform a backup to URL and restore into the Azure virtual machine from the URL](#backup-to-url-and-restore-from-url) |SQL Server 2012 SP1 CU2 or greater | SQL Server 2012 SP1 CU2 or greater | < 12.8 TB for SQL Server 2016, otherwise < 1 TB | This method is just another way to move the backup file to the VM using Azure storage. | -| [Detach and then copy the data and log files to Azure Blob storage and then attach to SQL Server in Azure virtual machine from URL](#detach-and-attach-from-a-url) | SQL Server 2005 or greater |SQL Server 2014 or greater | [Azure VM storage limit](../../../index.yml) | Use this method when you plan to [store these files using the Azure Blob storage service](/sql/relational-databases/databases/sql-server-data-files-in-microsoft-azure) and attach them to SQL Server running in an Azure VM, particularly with very large databases | -| [Convert on-premises machine to Hyper-V VHDs, upload to Azure Blob storage, and then deploy a new virtual machine using uploaded VHD](#convert-to-a-vm-upload-to-a-url-and-deploy-as-a-new-vm) |SQL Server 2005 or greater |SQL Server 2005 or greater |[Azure VM storage limit](../../../index.yml) |Use when [bringing your own SQL Server license](/azure/azure-sql/azure-sql-iaas-vs-paas-what-is-overview), when migrating a database that you'll run on an older version of SQL Server, or when migrating system and user databases together as part of the migration of database dependent on other user databases and/or system databases. 
| -| [Ship hard drive using Windows Import/Export Service](#ship-a-hard-drive) |SQL Server 2005 or greater |SQL Server 2005 or greater |[Azure VM storage limit](../../../index.yml) |Use the [Windows Import/Export Service](../../../import-export/storage-import-export-service.md) when manual copy method is too slow, such as with very large databases | -| [Use the Add Azure Replica Wizard](/previous-versions/azure/virtual-machines/windows/sqlclassic/virtual-machines-windows-classic-sql-onprem-availability) |SQL Server 2012 or greater |SQL Server 2012 or greater |[Azure VM storage limit](../../../index.yml) |Minimizes downtime, use when you have an Always On on-premises deployment | -| [Use SQL Server transactional replication](/sql/relational-databases/replication/transactional/transactional-replication) |SQL Server 2005 or greater |SQL Server 2005 or greater |[Azure VM storage limit](../../../index.yml) |Use when you need to minimize downtime and don't have an Always On on-premises deployment | - -## Back up and restore - -Back up your database with compression, copy the backup to the VM, and then restore the database. If your backup file is larger than 1 TB, you must create a striped set because the maximum size of a VM disk is 1 TB. Use the following general steps to migrate a user database using this manual method: - -1. Perform a full database backup to an on-premises location. -2. Create or upload a virtual machine with the desired version of SQL Server. -3. Setup connectivity based on your requirements. See [Connect to a SQL Server Virtual Machine on Azure (Resource Manager)](ways-to-connect-to-sql.md). -4. Copy your backup file(s) to your VM using remote desktop, Windows Explorer, or the copy command from a command prompt. - -## Backup to URL and Restore from URL - -Instead of backing up to a local file, you can use [Backup to URL](/sql/relational-databases/backup-restore/sql-server-backup-to-url) and then Restore from URL to the VM. 
SQL Server 2016 supports striped backup sets. They're recommended for performance and required to exceed the size limits per blob. For very large databases, the use of the [Windows Import/Export Service](../../../import-export/storage-import-export-service.md) is recommended. - -## Detach and attach from a URL - -Detach your database and log files and transfer them to [Azure Blob storage](/sql/relational-databases/databases/sql-server-data-files-in-microsoft-azure). Then attach the database from the URL on your Azure VM. Use this method if you want the physical database files to reside in Blob storage, which might be useful for very large databases. Use the following general steps to migrate a user database using this manual method: - -1. Detach the database files from the on-premises database instance. -2. Copy the detached database files into Azure Blob storage using the [AZCopy command-line utility](../../../storage/common/storage-use-azcopy-v10.md). -3. Attach the database files from the Azure URL to the SQL Server instance in the Azure VM. - -## Convert to a VM, upload to a URL, and deploy as a new VM - -Use this method to migrate all system and user databases in an on-premises SQL Server instance to an Azure virtual machine. Use the following general steps to migrate an entire SQL Server instance using this manual method: - -1. Convert physical or virtual machines to Hyper-V VHDs. -2. Upload VHD files to Azure Storage by using the [Add-AzureVHD cmdlet](/previous-versions/azure/dn495173(v=azure.100)). -3. Deploy a new virtual machine by using the uploaded VHD. - -> [!NOTE] -> To migrate an entire application, consider using [Azure Site Recovery](../../../site-recovery/site-recovery-overview.md)]. 
- -## Ship a hard drive - -Use the [Windows Import/Export Service method](../../../import-export/storage-import-export-service.md) to transfer large amounts of file data to Azure Blob storage in situations where uploading over the network is prohibitively expensive or not feasible. With this service, you send one or more hard drives containing that data to an Azure data center where your data will be uploaded to your storage account. - -## Next steps - -For more information, see [SQL Server on Azure Virtual Machines overview](sql-server-on-azure-vm-iaas-what-is-overview.md). - -> [!TIP] -> If you have questions about SQL Server virtual machines, see the [Frequently Asked Questions](frequently-asked-questions-faq.yml). - -For instructions on creating SQL Server on an Azure Virtual Machine from a captured image, see [Tips & Tricks on ‘cloning’ Azure SQL virtual machines from captured images](/archive/blogs/psssql/tips-tricks-on-cloning-azure-sql-virtual-machines-from-captured-images) on the CSS SQL Server Engineers blog. \ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/windows/move-sql-vm-different-region.md b/articles/azure-sql/virtual-machines/windows/move-sql-vm-different-region.md deleted file mode 100644 index 3fbd174bcebe7..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/move-sql-vm-different-region.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -title: Move a virtual machine to another region (Azure Site Recovery) -description: Learn how you can migrate your SQL Server virtual machine from one region to another within Azure. 
-services: virtual-machines-windows -documentationcenter: na -author: bluefooted -tags: azure-resource-manager -ms.assetid: aa5bf144-37a3-4781-892d-e0e300913d03 -ms.service: virtual-machines-sql -ms.subservice: migration - -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 07/30/2019 -ms.author: pamela -ms.reviewer: mathoma -ms.custom: "seo-lt-2019" - ---- -# Move a SQL Server VM to another region within Azure with Azure Site Recovery -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article teaches you how to use Azure Site Recovery to migrate your SQL Server virtual machine (VM) from one region to another within Azure. - -Moving a SQL Server VM to a different region requires doing the following: -1. [Preparing](#prepare-to-move): Confirm that both your source SQL Server VM and target region are adequately prepared for the move. -1. [Configuring](#configure-azure-site-recovery-vault): Moving your SQL Server VM requires that it is a replicated object within the Azure Site Recovery vault. You need to add your SQL Server VM to the Azure Site Recovery vault. -1. [Testing](#test-move-process): Migrating the SQL Server VM requires failing it over from the source region to the replicated target region. To ensure that the move process will succeed, you need to first test that your SQL Server VM can successfully fail over to the target region. This will help expose any issues and avoid them when performing the actual move. -1. [Moving](#move-the-sql-server-vm): Once your test failover passed, and you know that you are safe to migrate your SQL Server VM, you can perform the move of the VM to the target region. -1. [Cleaning up](#clean-up-source-resources): To avoid billing charges, remove the SQL Server VM from the vault, and any unnecessary resources that are left over in the resource group. 
- -## Verify prerequisites - -- Confirm that moving from your source region to your target region [is supported](../../../site-recovery/azure-to-azure-support-matrix.md#region-support). -- Review the [scenario architecture and components](../../../site-recovery/azure-to-azure-architecture.md) as well as the [support limitations and requirements](../../../site-recovery/azure-to-azure-support-matrix.md). -- Verify account permissions. If you created your free Azure account, you're the administrator of your subscription. If you're not the subscription administrator, work with the administrator to assign the permissions that you need. To enable replication for a VM and copy data using Azure Site Recovery, you must have: - - Permissions to create a VM. The *Virtual Machine Contributor* built-in role has these permissions, which include: - - Permissions to create a VM in the selected resource group. - - Permissions to create a VM in the selected virtual network. - - Permissions to write to the selected storage account. - - Permissions to manage Azure Site Recovery operations. The *Site Recovery Contributor* role has all the permissions that are required to manage Site Recovery operations in a Recovery Services vault. - -## Prepare to move -Prepare both the source SQL Server VM and the target region for the move. - -### Prepare the source SQL Server VM - -- Ensure that all the latest root certificates are on the SQL Server VM that you want to move. If the latest root certificates are not there, security constraints will prevent data copy to the target region. -- For Windows VMs, install all of the latest Windows updates on the VM, so that all the trusted root certificates are on the machine. In a disconnected environment, follow the standard Windows Update and certificate update process for your organization. -- For Linux VMs, follow the guidance provided by your Linux distributor to get the latest trusted root certificates and certificate revocation list on the VM. 
-- Make sure you're not using an authentication proxy to control network connectivity for the VMs that you want to move. -- If the VM that you're trying to move doesn't have access to the internet, or it's using a firewall proxy to control outbound access, check the requirements. -- Identify the source networking layout and all the resources that you're currently using. This includes but isn't limited to load balancers, network security groups (NSGs), and public IPs. - -### Prepare the target region - -- Verify that your Azure subscription allows you to create VMs in the target region that's used for disaster recovery. Contact support to enable the required quota. -- Make sure that your subscription has enough resources to support VMs with size that match your source VMs. If you're using Site Recovery to copy data to the target, Site Recovery chooses the same size, or the closest possible size for the target VM. -- Make sure that you create a target resource for every component that's identified in the source networking layout. This step is important to ensure that your VMs have all the functionality and features in the target region that you had in the source region. - - Azure Site Recovery automatically discovers and creates a virtual network when you enable replication for the source VM. You can also pre-create a network and assign it to the VM in the user flow for enabling replication. You need to manually create any other resources in the target region. 
-- To create the most commonly used network resources that are relevant for you based on the source VM configuration, see the following documentation: - - [Network security groups](../../../virtual-network/tutorial-filter-network-traffic.md) - - [Load balancer](../../../load-balancer/quickstart-load-balancer-standard-internal-portal.md) - - [Public IP address](../../../virtual-network/ip-services/virtual-network-public-ip-address.md) - - For any additional networking components, see the [networking documentation](../../../virtual-network/virtual-networks-overview.md). -- Manually create a non-production network in the target region if you want to test the configuration before you perform the final move to the target region. We recommend this step because it ensures minimal interference with the production network. - -## Configure Azure Site Recovery vault - -The following steps show you how to use Azure Site Recovery to copy data to the target region. Create the Recovery Services vault in any region other than the source region. - -1. Sign in to the [Azure portal](https://portal.azure.com). -1. Choose to **Create a resource** from the upper-left hand corner of the navigation pane. -1. Select **IT & Management tools** and then select **Backup and Site Recovery**. -1. On the **Basics** tab, under **Project details**, either create a new resource group in the target region, or select an existing resource group in the target region. -1. Under **Instance Details**, specify a name for your vault, and then select your target **Region** from the drop-down. -1. Select **Review + Create** to create your Recovery Services vault. -1. Select **All services** from the upper-left hand corner of the navigation pane and in the search box type `recovery services`. -1. (Optionally) Select the star next to **Recovery Services vaults** to add it to your quick navigation bar. -1. Select **Recovery services vaults** and then select the Recovery Services vault you created. -1. 
On the **Overview** pane, select **Replicate**. - - ![Configure replication](./media/move-sql-vm-different-region/configure-replication.png) - -1. Select **Source** and then select **Azure** as the source. Select the appropriate values for the other drop-down fields, such as the location for your source VMs. Only resources groups located in the **Source location** region will be visible in the **Source resource group** field. -1. Select **Virtual machines** and then choose the virtual machines you want to migrate. Select **OK** to save your VM selection. -1. Select **Settings**, and then choose your **Target location** from the drop-down. This should be the resource group you prepared earlier. -1. Once you have customized replication, select **Create target resources** to create the resources in the new location. -1. Once resource creation is complete, select **Enable replication** to start replication of your SQL Server VM from the source to the target region. -1. You can check the status of replication by navigating to your recovery vault, selecting **Replicated items** and viewing the **Status** of your SQL Server VM. A status of **Protected** indicates that replication has completed. - - ![Verify replication status](./media/move-sql-vm-different-region/check-replication-status.png) - -## Test move process -The following steps show you how to use Azure Site Recovery to test the move process. - -1. Navigate to your **Recovery Services vault** in the [Azure portal](https://portal.azure.com) and select **Replicated items**. -1. Select the SQL Server VM you would like to move, verify that the **Replication Health** shows as **Healthy** and then select **Test Failover**. - - ![Test failover for your VM](./media/move-sql-vm-different-region/test-failover-of-replicated-vm.png) - -1. 
On the **Test Failover** page, select the **Latest app-consistent** recovery point to use for the failover, as that is the only type of snapshot that can guarantee SQL Server data consistency. -1. Select the virtual network under **Azure virtual network** and then select **OK** to test failover. - - >[!IMPORTANT] - > We recommend that you use a separate Azure VM network for the failover test. Don't use the production network that was set up when you enabled replication and that you want to move your VMs into eventually. - -1. To monitor progress, navigate to your vault, select **Site Recovery jobs** under **Monitoring**, and then select the **Test failover** job that's in progress. - - ![Monitor failover test progress](./media/move-sql-vm-different-region/monitor-failover-test-job.png) - -1. Once the test completes, navigate to **Virtual machines** in the portal and review the newly created virtual machine. Make sure the SQL Server VM is running, is sized appropriately, and is connected to the appropriate network. -1. Delete the VM that was created as part of the test, as the **Failover** option will be grayed out until the failover test resources are cleaned up. Navigate back to the vault, select **Replicated items**, select the SQL Server VM, and then select **Cleanup test failover**. Record and save any observations associated with the test in the **Notes** section and select the checkbox next to **Testing is complete. Delete test failover virtual machines**. Select **OK** to clean up resources after the test. - - ![clean up items after failover test](./media/move-sql-vm-different-region/cleanup-test-items.png) - -## Move the SQL Server VM -The following steps show you how to move the SQL Server VM from your source region to your target region. - -1. Navigate to the **Recovery Services** vault, select **Replicated items**, select the VM, and then select **Failover**. - - ![Initiate failover](./media/move-sql-vm-different-region/initiate-failover.png) - -1. 
Select the **latest app-consistent** recover point under **Recovery Point**. -1. Select the check box next to **Shut down the machine before beginning failover**. Site Recovery will attempt to shut down the source VM before triggering the failover. Failover will continue even if shut down fails. -1. Select **OK** to start the failover. -1. You can monitor the failover process from the same **Site Recovery jobs** page you viewed when monitoring the failover test in the previous section. -1. After the job completes, check that the SQL Server VM appears in the target region as expected. -1. Navigate back to the vault, select **Replicated Items**, select the SQL Server VM, and select **Commit** to finish the move process to the target region. Wait until the commit job finishes. -1. Register your SQL Server VM with the SQL IaaS Agent extension to enable **SQL virtual machine** manageability in the Azure portal and features associated with the extension. For more information, see [Register SQL Server VM with the SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md). - - > [!WARNING] - > SQL Server data consistency is only guaranteed with app-consistent snapshots. The **latest processed** snapshot can't be used for SQL Server failover as a crash recovery snapshot can't guarantee SQL Server data consistency. - -## Clean up source resources -To avoid billing charges, remove the SQL Server VM from the vault, and delete any unnecessary associated resources. - -1. Navigate back to the **Site Recovery** vault, select **Replicated items**, and select the SQL Server VM. -1. Select **Disable Replication**. Select a reason for disabling protection, and then select **OK** to disable replication. - - >[!IMPORTANT] - > It is important to perform this step to avoid being charged for Azure Site Recovery replication. - -1. 
If you have no plans to reuse any of the resources in the source region, delete all relevant network resources, and corresponding storage accounts. - - -## Next steps - -For more information, see the following articles: - -* [Overview of SQL Server on a Windows VM](sql-server-on-azure-vm-iaas-what-is-overview.md) -* [SQL Server on a Windows VM FAQ](frequently-asked-questions-faq.yml) -* [SQL Server on a Windows VM pricing guidance](pricing-guidance.md) -* [What's new for SQL Server on Azure VMs](doc-changes-updates-release-notes-whats-new.md) \ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/windows/performance-guidelines-best-practices-checklist.md b/articles/azure-sql/virtual-machines/windows/performance-guidelines-best-practices-checklist.md deleted file mode 100644 index f6adf2f481eda..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/performance-guidelines-best-practices-checklist.md +++ /dev/null @@ -1,216 +0,0 @@ ---- -title: "Checklist: Best practices & guidelines" -description: Provides a quick checklist to review your best practices and guidelines to optimize the performance of your SQL Server on Azure Virtual Machine (VM). -services: virtual-machines-windows -documentationcenter: na -author: bluefooted -editor: '' -tags: azure-service-management -ms.service: virtual-machines-sql -ms.subservice: performance -ms.topic: conceptual -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 03/22/2022 -ms.author: pamela -ms.custom: contperf-fy21q3 -ms.reviewer: mathoma ---- -# Checklist: Best practices for SQL Server on Azure VMs -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article provides a quick checklist as a series of best practices and guidelines to optimize performance of your SQL Server on Azure Virtual Machines (VMs). 
- -For comprehensive details, see the other articles in this series: [VM size](performance-guidelines-best-practices-vm-size.md), [Storage](performance-guidelines-best-practices-storage.md), [Security](security-considerations-best-practices.md), [HADR configuration](hadr-cluster-best-practices.md), [Collect baseline](performance-guidelines-best-practices-collect-baseline.md). - -Enable [SQL Assessment for SQL Server on Azure VMs](sql-assessment-for-sql-vm.md) and your SQL Server will be evaluated against known best practices with results on the [SQL VM management page](manage-sql-vm-portal.md) of the Azure portal. - -For videos about the latest features to optimize SQL Server VM performance and automate management, review the following Data Exposed videos: - -- [Caching and Storage Capping (Ep. 1)](/shows/data-exposed/azure-sql-vm-caching-and-storage-capping-ep-1-data-exposed) -- [Automate Management with the SQL Server IaaS Agent extension (Ep. 2)](/shows/data-exposed/azure-sql-vm-automate-management-with-the-sql-server-iaas-agent-extension-ep-2) -- [Use Azure Monitor Metrics to Track VM Cache Health (Ep. 3)](/shows/data-exposed/azure-sql-vm-use-azure-monitor-metrics-to-track-vm-cache-health-ep-3) -- [Get the best price-performance for your SQL Server workloads on Azure VM](/shows/data-exposed/azure-sql-vm-get-the-best-price-performance-for-your-sql-server-workloads-on-azure-vm) -- [Using PerfInsights to Evaluate Resource Health and Troubleshoot (Ep. 5)](/shows/data-exposed/azure-sql-vm-using-perfinsights-to-evaluate-resource-health-and-troubleshoot-ep-5) -- [Best Price-Performance with Ebdsv5 Series (Ep.6)](/shows/data-exposed/azure-sql-vm-best-price-performance-with-ebdsv5-series) -- [Optimally Configure SQL Server on Azure Virtual Machines with SQL Assessment (Ep. 
7)](/shows/data-exposed/optimally-configure-sql-server-on-azure-virtual-machines-with-sql-assessment) -- [New and Improved SQL Server on Azure VM deployment and management experience (Ep.8)](/shows/data-exposed/new-and-improved-sql-on-azure-vm-deployment-and-management-experience) - -## Overview - -While running SQL Server on Azure Virtual Machines, continue using the same database performance tuning options that are applicable to SQL Server in on-premises server environments. However, the performance of a relational database in a public cloud depends on many factors, such as the size of a virtual machine, and the configuration of the data disks. - -There is typically a trade-off between optimizing for costs and optimizing for performance. This performance best practices series is focused on getting the *best* performance for SQL Server on Azure Virtual Machines. If your workload is less demanding, you might not require every recommended optimization. Consider your performance needs, costs, and workload patterns as you evaluate these recommendations. - -## VM Size - -The following is a quick checklist of VM size best practices for running your SQL Server on Azure VM: - -- The new [Ebdsv5-series](../../../virtual-machines/ebdsv5-ebsv5-series.md#ebdsv5-series) provides the highest I/O throughput-to-vCore ratio in Azure along with a memory-to-vCore ratio of 8. This series offers the best price-performance for SQL Server workloads on Azure VMs. Consider this series first for most SQL Server workloads. -- Use VM sizes with 4 or more vCPUs like the [E4ds_v5](../../../virtual-machines/edv5-edsv5-series.md#edsv5-series) or higher. -- Use [memory optimized](../../../virtual-machines/sizes-memory.md) virtual machine sizes for the best performance of SQL Server workloads. 
-- The [Edsv5](../../../virtual-machines/edv5-edsv5-series.md#edsv5-series) series, the [M-](../../../virtual-machines/m-series.md), and the [Mv2-](../../../virtual-machines/mv2-series.md) series offer the optimal memory-to-vCore ratio required for OLTP workloads. -- The M series VMs offer the highest memory-to-vCore ratio in Azure. Consider these VMs for mission critical and data warehouse workloads. -- Leverage Azure Marketplace images to deploy your SQL Server Virtual Machines as the SQL Server settings and storage options are configured for optimal performance. -- Collect the target workload's performance characteristics and use them to determine the appropriate VM size for your business. -- Use the [Data Migration Assistant](https://www.microsoft.com/download/details.aspx?id=53595) [SKU recommendation](/sql/dma/dma-sku-recommend-sql-db) tool to find the right VM size for your existing SQL Server workload. - -To learn more, see the comprehensive [VM size best practices](performance-guidelines-best-practices-vm-size.md). - -## Storage - -The following is a quick checklist of storage configuration best practices for running your SQL Server on Azure VM: - -- Monitor the application and [determine storage bandwidth and latency requirements](../../../virtual-machines/premium-storage-performance.md#counters-to-measure-application-performance-requirements) for SQL Server data, log, and tempdb files before choosing the disk type. -- To optimize storage performance, plan for highest uncached IOPS available and use data caching as a performance feature for data reads while avoiding [virtual machine and disks capping/throttling](../../../virtual-machines/premium-storage-performance.md#throttling). -- Place data, log, and tempdb files on separate drives. 
- - For the data drive, only use [premium P30 and P40 disks](../../../virtual-machines/disks-types.md#premium-ssds) to ensure the availability of cache support - - For the log drive plan for capacity and test performance versus cost while evaluating the [premium P30 - P80 disks](../../../virtual-machines/disks-types.md#premium-ssds). - - If submillisecond storage latency is required, use [Azure ultra disks](../../../virtual-machines/disks-types.md#ultra-disks) for the transaction log. - - For M-series virtual machine deployments consider [Write Accelerator](../../../virtual-machines/how-to-enable-write-accelerator.md) over using Azure ultra disks. - - Place [tempdb](/sql/relational-databases/databases/tempdb-database) on the local ephemeral SSD (default `D:\`) drive for most SQL Server workloads that are not part of Failover Cluster Instance (FCI) after choosing the optimal VM size. - - If the capacity of the local drive is not enough for tempdb, consider sizing up the VM. See [Data file caching policies](performance-guidelines-best-practices-storage.md#data-file-caching-policies) for more information. - - For FCI place tempdb on the shared storage. - - If the FCI workload is heavily dependent on tempdb disk performance, then as an advanced configuration place tempdb on the local ephemeral SSD (default `D:\`) drive which is not part of FCI storage. This configuration will need custom monitoring and action to ensure the local ephemeral SSD (default `D:\`) drive is available all the time as any failures of this drive will not trigger action from FCI. -- Stripe multiple Azure data disks using [Storage Spaces](/windows-server/storage/storage-spaces/overview) to increase I/O bandwidth up to the target virtual machine's IOPS and throughput limits. -- Set [host caching](../../../virtual-machines/disks-performance.md#virtual-machine-uncached-vs-cached-limits) to read-only for data file disks. 
-- Set [host caching](../../../virtual-machines/disks-performance.md#virtual-machine-uncached-vs-cached-limits) to none for log file disks. - - Do not enable read/write caching on disks that contain SQL Server files. - - Always stop the SQL Server service before changing the cache settings of your disk. -- For development and test workloads consider using standard storage. It is not recommended to use Standard HDD/SSD for production workloads. -- [Credit-based Disk Bursting](../../../virtual-machines/disk-bursting.md#credit-based-bursting) (P1-P20) should only be considered for smaller dev/test workloads and departmental systems. -- Provision the storage account in the same region as the SQL Server VM. -- Disable Azure geo-redundant storage (geo-replication) and use LRS (local redundant storage) on the storage account. -- Format your data disk to use 64-KB allocation unit size for all data files placed on a drive other than the temporary `D:\` drive (which has a default of 4 KB). SQL Server VMs deployed through Azure Marketplace come with data disks formatted with allocation unit size and interleave for the storage pool set to 64 KB. - - -To learn more, see the comprehensive [Storage best practices](performance-guidelines-best-practices-storage.md). - -## SQL Server features - -The following is a quick checklist of best practices for SQL Server configuration settings when running your SQL Server instances in an Azure virtual machine in production: - -- Enable [database page compression](/sql/relational-databases/data-compression/data-compression) where appropriate. -- Enable [backup compression](/sql/relational-databases/backup-restore/backup-compression-sql-server). -- Enable [instant file initialization](/sql/relational-databases/databases/database-instant-file-initialization) for data files. -- Limit [autogrowth](/troubleshoot/sql/admin/considerations-autogrow-autoshrink#considerations-for-autogrow) of the database. 
-- Disable [autoshrink](/troubleshoot/sql/admin/considerations-autogrow-autoshrink#considerations-for-auto_shrink) of the database. -- Disable autoclose of the database. -- Move all databases to data disks, including [system databases](/sql/relational-databases/databases/move-system-databases). -- Move SQL Server error log and trace file directories to data disks. -- Configure default backup and database file locations. -- Set max [SQL Server memory limit](/sql/database-engine/configure-windows/server-memory-server-configuration-options#use-) to leave enough memory for the Operating System. ([Leverage Memory\Available Bytes](/sql/relational-databases/performance-monitor/monitor-memory-usage) to monitor the operating system memory health). -- Enable [lock pages in memory](/sql/database-engine/configure-windows/enable-the-lock-pages-in-memory-option-windows). -- Enable [optimize for adhoc workloads](/sql/database-engine/configure-windows/optimize-for-ad-hoc-workloads-server-configuration-option) for OLTP heavy environments. -- Evaluate and apply the [latest cumulative updates](/sql/database-engine/install-windows/latest-updates-for-microsoft-sql-server) for the installed versions of SQL Server. -- Enable [Query Store](/sql/relational-databases/performance/monitoring-performance-by-using-the-query-store) on all production SQL Server databases [following best practices](/sql/relational-databases/performance/best-practice-with-the-query-store). -- Enable [automatic tuning](/sql/relational-databases/automatic-tuning/automatic-tuning) on mission critical application databases. -- Ensure that all [tempdb best practices](/sql/relational-databases/databases/tempdb-database#optimizing-tempdb-performance-in-sql-server) are followed. -- [Use the recommended number of files](/troubleshoot/sql/performance/recommendations-reduce-allocation-contention#resolution), using multiple tempdb data files starting with one file per core, up to eight files. 
-- Schedule SQL Server Agent jobs to run [DBCC CHECKDB](/sql/t-sql/database-console-commands/dbcc-checkdb-transact-sql#a-checking-both-the-current-and-another-database), [index reorganize](/sql/relational-databases/indexes/reorganize-and-rebuild-indexes#reorganize-an-index), [index rebuild](/sql/relational-databases/indexes/reorganize-and-rebuild-indexes#rebuild-an-index), and [update statistics](/sql/t-sql/statements/update-statistics-transact-sql#examples) jobs. -- Monitor and manage the health and size of the SQL Server [transaction log file](/sql/relational-databases/logs/manage-the-size-of-the-transaction-log-file#Recommendations). -- Take advantage of any new [SQL Server features](/sql/sql-server/what-s-new-in-sql-server-ver15) available for the version being used. -- Be aware of the differences in [supported features](/sql/sql-server/editions-and-components-of-sql-server-version-15) between the editions you are considering deploying. - -## Azure features - -The following is a quick checklist of best practices for Azure-specific guidance when running your SQL Server on Azure VM: - -- Register with [the SQL IaaS Agent Extension](sql-agent-extension-manually-register-single-vm.md) to unlock a number of [feature benefits](sql-server-iaas-agent-extension-automate-management.md#feature-benefits). -- Leverage the best [backup and restore strategy](backup-restore.md#decision-matrix) for your SQL Server workload. -- Ensure [Accelerated Networking is enabled](../../../virtual-network/create-vm-accelerated-networking-cli.md#portal-creation) on the virtual machine. -- Leverage [Microsoft Defender for Cloud](../../../security-center/index.yml) to improve the overall security posture of your virtual machine deployment. 
-- Leverage [Microsoft Defender for Cloud](../../../security-center/azure-defender.md), integrated with [Microsoft Defender for Cloud](https://azure.microsoft.com/services/security-center/), for specific [SQL Server VM coverage](../../../security-center/defender-for-sql-introduction.md) including vulnerability assessments, and just-in-time access, which reduces the attack surface while allowing legitimate users to access virtual machines when necessary. To learn more, see [vulnerability assessments](../../../security-center/defender-for-sql-on-machines-vulnerability-assessment.md), [enable vulnerability assessments for SQL Server VMs](../../../security-center/defender-for-sql-on-machines-vulnerability-assessment.md) and [just-in-time access](../../../security-center/just-in-time-explained.md). -- Leverage [Azure Advisor](../../../advisor/advisor-overview.md) to address [performance](../../../advisor/advisor-performance-recommendations.md), [cost](../../../advisor/advisor-cost-recommendations.md), [reliability](../../../advisor/advisor-high-availability-recommendations.md), [operational excellence](../../../advisor/advisor-operational-excellence-recommendations.md), and [security recommendations](../../../advisor/advisor-security-recommendations.md). -- Leverage [Azure Monitor](../../../azure-monitor/vm/monitor-virtual-machine.md) to collect, analyze, and act on telemetry data from your SQL Server environment. This includes identifying infrastructure issues with [VM insights](../../../azure-monitor/vm/vminsights-overview.md) and monitoring data with [Log Analytics](../../../azure-monitor/logs/log-query-overview.md) for deeper diagnostics. -- Enable [Autoshutdown](../../../automation/automation-solution-vm-management.md) for development and test environments. 
-- Implement a high availability and disaster recovery (HADR) solution that meets your business continuity SLAs, see the [HADR options](business-continuity-high-availability-disaster-recovery-hadr-overview.md#deployment-architectures) options available for SQL Server on Azure VMs. -- Use the Azure portal (support + troubleshooting) to evaluate [resource health](../../../service-health/resource-health-overview.md) and history; submit new support requests when needed. - -## HADR configuration - -High availability and disaster recovery (HADR) features, such as the [Always On availability group](availability-group-overview.md) and the [failover cluster instance](failover-cluster-instance-overview.md) rely on underlying [Windows Server Failover Cluster](hadr-windows-server-failover-cluster-overview.md) technology. Review the best practices for modifying your HADR settings to better support the cloud environment. - -For your Windows cluster, consider these best practices: - -* Deploy your SQL Server VMs to multiple subnets whenever possible to avoid the dependency on an Azure Load Balancer or a distributed network name (DNN) to route traffic to your HADR solution. -* Change the cluster to less aggressive parameters to avoid unexpected outages from transient network failures or Azure platform maintenance. To learn more, see [heartbeat and threshold settings](hadr-cluster-best-practices.md#heartbeat-and-threshold). For Windows Server 2012 and later, use the following recommended values: - - **SameSubnetDelay**: 1 second - - **SameSubnetThreshold**: 40 heartbeats - - **CrossSubnetDelay**: 1 second - - **CrossSubnetThreshold**: 40 heartbeats -* Place your VMs in an availability set or different availability zones. To learn more, see [VM availability settings](hadr-cluster-best-practices.md#vm-availability-settings). -* Use a single NIC per cluster node and a single subnet. 
-* Configure cluster [quorum voting](hadr-cluster-best-practices.md#quorum-voting) to use 3 or more odd number of votes. Do not assign votes to DR regions. -* Carefully monitor [resource limits](hadr-cluster-best-practices.md#resource-limits) to avoid unexpected restarts or failovers due to resource constraints. - - Ensure your OS, drivers, and SQL Server are at the latest builds. - - Optimize performance for SQL Server on Azure VMs. Review the other sections in this article to learn more. - - Reduce or spread out workload to avoid resource limits. - - Move to a VM or disk that has higher limits to avoid constraints. - -For your SQL Server availability group or failover cluster instance, consider these best practices: - -* If you're experiencing frequent unexpected failures, follow the performance best practices outlined in the rest of this article. -* If optimizing SQL Server VM performance does not resolve your unexpected failovers, consider [relaxing the monitoring](hadr-cluster-best-practices.md#relaxed-monitoring) for the availability group or failover cluster instance. However, doing so may not address the underlying source of the issue and could mask symptoms by reducing the likelihood of failure. You may still need to investigate and address the underlying root cause. For Windows Server 2012 or higher, use the following recommended values: - - **Lease timeout**: Use this equation to calculate the maximum lease time out value: - `Lease timeout < (2 * SameSubnetThreshold * SameSubnetDelay)`. - Start with 40 seconds. If you're using the relaxed `SameSubnetThreshold` and `SameSubnetDelay` values recommended previously, do not exceed 80 seconds for the lease timeout value. - - **Max failures in a specified period**: You can set this value to 6. - - **Healthcheck timeout**: You can set this value to 60000 initially, adjust as necessary. 
-* When using the virtual network name (VNN) and Azure Load Balancer to connect to your HADR solution, specify `MultiSubnetFailover = true` in the connection string, even if your cluster only spans one subnet. - - If the client does not support `MultiSubnetFailover = True` you may need to set `RegisterAllProvidersIP = 0` and `HostRecordTTL = 300` to cache client credentials for shorter durations. However, doing so may cause additional queries to the DNS server. -- To connect to your HADR solution using the distributed network name (DNN), consider the following: - - You must use a client driver that supports `MultiSubnetFailover = True`, and this parameter must be in the connection string. - - Use a unique DNN port in the connection string when connecting to the DNN listener for an availability group. -- Use a database mirroring connection string for a basic availability group to bypass the need for a load balancer or DNN. -- Validate the sector size of your VHDs before deploying your high availability solution to avoid having misaligned I/Os. See [KB3009974](https://support.microsoft.com/topic/kb3009974-fix-slow-synchronization-when-disks-have-different-sector-sizes-for-primary-and-secondary-replica-log-files-in-sql-server-ag-and-logshipping-environments-ed181bf3-ce80-b6d0-f268-34135711043c) to learn more. -- If the SQL Server database engine, Always On availability group listener, or failover cluster instance health probe are configured to use a port between 49,152 and 65,536 (the [default dynamic port range for TCP/IP](/windows/client-management/troubleshoot-tcpip-port-exhaust#default-dynamic-port-range-for-tcpip)), add an exclusion for each port. Doing so will prevent other systems from being dynamically assigned the same port. 
The following example creates an exclusion for port 59999: -`netsh int ipv4 add excludedportrange tcp startport=59999 numberofports=1 store=persistent` - -To learn more, see the comprehensive [HADR best practices](hadr-cluster-best-practices.md). - -## Security - -The checklist in this section covers the [security best practices](security-considerations-best-practices.md) for SQL Server on Azure VMs. - -SQL Server features and capabilities provide a method of security at the data level and is how you achieve [defense-in-depth](https://azure.microsoft.com/resources/videos/defense-in-depth-security-in-azure/) at the infrastructure level for cloud-based and hybrid solutions. In addition, with Azure security measures, it is possible to encrypt your sensitive data, protect virtual machines from viruses and malware, secure network traffic, identify and detect threats, meet compliance requirements, and provides a single method for administration and reporting for any security need in the hybrid cloud. - -- Use [Azure Security Center](../../../defender-for-cloud/defender-for-cloud-introduction.md) to evaluate and take action to improve the security posture of your data environment. Capabilities such as [Azure Advanced Threat Protection (ATP)](../../database/threat-detection-overview.md) can be leveraged across your hybrid workloads to improve security evaluation and give the ability to react to risks. Registering your SQL Server VM with the [SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md) surfaces Azure Security Center assessments within the [SQL virtual machine resource](manage-sql-vm-portal.md) of the Azure portal. -- Leverage [Microsoft Defender for SQL](../../../defender-for-cloud/defender-for-sql-introduction.md) to discover and mitigate potential database vulnerabilities, as well as detect anomalous activities that could indicate a threat to your SQL Server instance and database layer. 
-- [Vulnerability Assessment](../../database/sql-vulnerability-assessment.md) is a part of [Microsoft Defender for SQL](../../../defender-for-cloud/defender-for-sql-introduction.md) that can discover and help remediate potential risks to your SQL Server environment. It provides visibility into your security state, and includes actionable steps to resolve security issues. -- [Azure Advisor](../../../advisor/advisor-security-recommendations.md) analyzes your resource configuration and usage telemetry and then recommends solutions that can help you improve the cost effectiveness, performance, high availability, and security of your Azure resources.. Leverage Azure Advisor at the virtual machine, resource group, or subscription level to help identify and apply best practices to optimize your Azure deployments. -- Use [Azure Disk Encryption](../../../virtual-machines/windows/disk-encryption-windows.md) when your compliance and security needs require you to encrypt the data end-to-end using your encryption keys, including encryption of the ephemeral (locally attached temporary) disk. -- [Managed Disks are encrypted](../../../virtual-machines/disk-encryption.md) at rest by default using Azure Storage Service Encryption, where the encryption keys are Microsoft-managed keys stored in Azure. -- For a comparison of the managed disk encryption options review the [managed disk encryption comparison chart](../../../virtual-machines/disk-encryption-overview.md#comparison) -- Management ports should be closed on your virtual machines - Open remote management ports expose your VM to a high level of risk from internet-based attacks. These attacks attempt to brute force credentials to gain admin access to the machine. -- Turn on [Just-in-time (JIT) access](../../../defender-for-cloud/just-in-time-access-usage.md) for Azure virtual machines -- Use [Azure Bastion](../../../bastion/bastion-overview.md) over Remote Desktop Protocol (RDP). 
-- Lock down ports and only allow the necessary application traffic using [Azure Firewall](../../../firewall/features.md) which is a managed Firewall as a Service (FaaS) that grants/denies server access based on the originating IP address. -- Use [Network Security Groups (NSGs)](../../../virtual-network/network-security-groups-overview.md) to filter network traffic to, and from, Azure resources on Azure Virtual Networks -- Leverage [Application Security Groups](../../../virtual-network/application-security-groups.md) to group servers together with similar port filtering requirements, with similar functions, such as web servers and database servers. -- For web and application servers leverage [Azure Distributed Denial of Service (DDoS) protection](../../../ddos-protection/ddos-protection-overview.md). DDoS attacks are designed to overwhelm and exhaust network resources, making apps slow or unresponsive. It is common for DDoS attacks to target user interfaces. Azure DDoS protection sanitizes unwanted network traffic, before it impacts service availability -- Leverage VM extensions to help address anti-malware, desired state, threat detection, prevention, and remediation to address threats at the operating system, machine, and network levels: - - [Guest Configuration extension](../../../virtual-machines/extensions/guest-configuration.md) performs audit and configuration operations inside virtual machines. - - [Network Watcher Agent virtual machine extension for Windows and Linux](../../../virtual-machines/extensions/network-watcher-windows.md) monitors network performance, diagnostic, and analytics service that allows monitoring of Azure networks. - - [Microsoft Antimalware Extension for Windows](../../../virtual-machines/extensions/iaas-antimalware-windows.md) to help identify and remove viruses, spyware, and other malicious software, with configurable alerts. 
- - [Evaluate 3rd party extensions](../../../virtual-machines/extensions/overview.md) such as [Symantec Endpoint Protection for Windows VM](../../../virtual-machines/extensions/symantec) -- Leverage [Azure Policy](../../../governance/policy/overview.md) to create business rules that can be applied to your environment. Azure Policies evaluate Azure resources by comparing the properties of those resources against rules defined in JSON format. -- Azure Blueprints enables cloud architects and central information technology groups to define a repeatable set of Azure resources that implements and adheres to an organization's standards, patterns, and requirements. Azure Blueprints are [different than Azure Policies](../../../governance/blueprints/overview.md#how-its-different-from-azure-policy). - - - - - -## Next steps - -To learn more, see the other articles in this best practices series: - -- [VM size](performance-guidelines-best-practices-vm-size.md) -- [Storage](performance-guidelines-best-practices-storage.md) -- [Security](security-considerations-best-practices.md) -- [HADR settings](hadr-cluster-best-practices.md) -- [Collect baseline](performance-guidelines-best-practices-collect-baseline.md) - -Consider enabling [SQL Assessment for SQL Server on Azure VMs](sql-assessment-for-sql-vm.md). - -Review other SQL Server Virtual Machine articles at [SQL Server on Azure Virtual Machines Overview](sql-server-on-azure-vm-iaas-what-is-overview.md). If you have questions about SQL Server virtual machines, see the [Frequently Asked Questions](frequently-asked-questions-faq.yml). 
diff --git a/articles/azure-sql/virtual-machines/windows/performance-guidelines-best-practices-collect-baseline.md b/articles/azure-sql/virtual-machines/windows/performance-guidelines-best-practices-collect-baseline.md deleted file mode 100644 index 5de889cef2689..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/performance-guidelines-best-practices-collect-baseline.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: "Collect baseline: Performance best practices & guidelines" -description: Provides steps to collect a performance baseline as guidelines to optimize the performance of your SQL Server on Azure Virtual Machine (VM). -services: virtual-machines-windows -documentationcenter: na -author: bluefooted -editor: '' -tags: azure-service-management -ms.assetid: a0c85092-2113-4982-b73a-4e80160bac36 -ms.service: virtual-machines-sql -ms.subservice: performance -ms.topic: conceptual -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 03/25/2021 -ms.author: pamela -ms.reviewer: mathoma ---- -# Collect baseline: Performance best practices for SQL Server on Azure VM -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article provides information to collect a performance baseline as a series of best practices and guidelines to optimize performance for your SQL Server on Azure Virtual Machines (VMs). - -There is typically a trade-off between optimizing for costs and optimizing for performance. This performance best practices series is focused on getting the *best* performance for SQL Server on Azure Virtual Machines. If your workload is less demanding, you might not require every recommended optimization. Consider your performance needs, costs, and workload patterns as you evaluate these recommendations. 
- -## Overview - -For a prescriptive approach, gather performance counters using PerfMon/LogMan and capture SQL Server wait statistics to better understand general pressures and potential bottlenecks of the source environment. - -Start by collecting the CPU, memory, [IOPS](../../../virtual-machines/premium-storage-performance.md#iops), [throughput](../../../virtual-machines/premium-storage-performance.md#throughput), and [latency](../../../virtual-machines/premium-storage-performance.md#latency) of the source workload at peak times following the [application performance checklist](../../../virtual-machines/premium-storage-performance.md#application-performance-requirements-checklist). - -Gather data during peak hours such as workloads during your typical business day, but also other high load processes such as end-of-day processing, and weekend ETL workloads. Consider scaling up your resources for atypically heavy workloads, such as end-of-quarter processing, and then scale down once the workload completes. - -Use the performance analysis to select the [VM Size](../../../virtual-machines/sizes-memory.md) that can scale to your workload's performance requirements. - - -## Storage - -SQL Server performance depends heavily on the I/O subsystem and storage performance is measured by IOPS and throughput. Unless your database fits into physical memory, SQL Server constantly brings database pages in and out of the buffer pool. The data files for SQL Server should be treated differently. Access to log files is sequential except when a transaction needs to be rolled back where data files, including tempdb, are randomly accessed. If you have a slow I/O subsystem, your users may experience performance issues such as slow response times and tasks that do not complete due to time-outs. - -The Azure Marketplace virtual machines have log files on a physical disk that is separate from the data files by default. 
The tempdb data files count and size meet best practices and are targeted to the ephemeral `D:\` drive. - -The following PerfMon counters can help validate the IO throughput required by your SQL Server: -* **\LogicalDisk\Disk Reads/Sec** (read IOPS) -* **\LogicalDisk\Disk Writes/Sec** (write IOPS) -* **\LogicalDisk\Disk Read Bytes/Sec** (read throughput requirements for the data, log, and tempdb files) -* **\LogicalDisk\Disk Write Bytes/Sec** (write throughput requirements for the data, log, and tempdb files) - -Using IOPS and throughput requirements at peak levels, evaluate VM sizes that match the capacity from your measurements. - -If your workload requires 20K read IOPS and 10K write IOPS, you can either choose E16s_v3 (with up to 32K cached and 25600 uncached IOPS) or M16_s (with up to 20K cached and 10K uncached IOPS) with 2 P30 disks striped using Storage Spaces. - -Make sure to understand both throughput and IOPS requirements of the workload as VMs have different scale limits for IOPS and throughput. - -## Memory - -Track both external memory used by the OS as well as the memory used internally by SQL Server. Identifying pressure for either component will help size virtual machines and identify opportunities for tuning. - -The following PerfMon counters can help validate the memory health of a SQL Server virtual machine: -* \Memory\Available MBytes -* [\SQLServer:Memory Manager\Target Server Memory (KB)](/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object) -* [\SQLServer:Memory Manager\Total Server Memory (KB)](/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object) -* [\SQLServer:Buffer Manager\Lazy writes/sec](/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object) -* [\SQLServer:Buffer Manager\Page life expectancy](/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object) - -## Compute - -Compute in Azure is managed differently than on-premises. 
On-premises servers are built to last several years without an upgrade due to the management overhead and cost of acquiring new hardware. Virtualization mitigates some of these issues but applications are optimized to take the most advantage of the underlying hardware, meaning any significant change to resource consumption requires rebalancing the entire physical environment. - -This is not a challenge in Azure where a new virtual machine on a different series of hardware, and even in a different region, is easy to achieve. - -In Azure, you want to take advantage of as much of the virtual machines resources as possible, therefore, Azure virtual machines should be configured to keep the average CPU as high as possible without impacting the workload. - -The following PerfMon counters can help validate the compute health of a SQL Server virtual machine: -* **\Processor Information(_Total)\% Processor Time** -* **\Process(sqlservr)\% Processor Time** - -> [!NOTE] -> Ideally, try to aim for using 80% of your compute, with peaks above 90% but not reaching 100% for any sustained period of time. Fundamentally, you only want to provision the compute the application needs and then plan to scale up or down as the business requires. - - -## Next steps - -To learn more, see the other articles in this best practices series: - -- [Quick checklist](performance-guidelines-best-practices-checklist.md) -- [VM size](performance-guidelines-best-practices-vm-size.md) -- [Storage](performance-guidelines-best-practices-storage.md) -- [Security](security-considerations-best-practices.md) -- [HADR settings](hadr-cluster-best-practices.md) - - -For security best practices, see [Security considerations for SQL Server on Azure Virtual Machines](security-considerations-best-practices.md). - -Review other SQL Server Virtual Machine articles at [SQL Server on Azure Virtual Machines Overview](sql-server-on-azure-vm-iaas-what-is-overview.md). 
If you have questions about SQL Server virtual machines, see the [Frequently Asked Questions](frequently-asked-questions-faq.yml). diff --git a/articles/azure-sql/virtual-machines/windows/performance-guidelines-best-practices-storage.md b/articles/azure-sql/virtual-machines/windows/performance-guidelines-best-practices-storage.md deleted file mode 100644 index 5bc62648f6c6e..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/performance-guidelines-best-practices-storage.md +++ /dev/null @@ -1,290 +0,0 @@ ---- -title: "Storage: Performance best practices & guidelines" -description: Provides storage best practices and guidelines to optimize the performance of your SQL Server on Azure Virtual Machine (VM). -services: virtual-machines-windows -documentationcenter: na -author: bluefooted -editor: '' -tags: azure-service-management -ms.assetid: a0c85092-2113-4982-b73a-4e80160bac36 -ms.service: virtual-machines-sql -ms.subservice: performance -ms.topic: conceptual -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 03/25/2021 -ms.author: pamela -ms.reviewer: mathoma ---- -# Storage: Performance best practices for SQL Server on Azure VMs -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article provides storage best practices and guidelines to optimize performance for your SQL Server on Azure Virtual Machines (VMs). - -There is typically a trade-off between optimizing for costs and optimizing for performance. This performance best practices series is focused on getting the *best* performance for SQL Server on Azure Virtual Machines. If your workload is less demanding, you might not require every recommended optimization. Consider your performance needs, costs, and workload patterns as you evaluate these recommendations. 
- -To learn more, see the other articles in this series: [Checklist](performance-guidelines-best-practices-checklist.md), [VM size](performance-guidelines-best-practices-vm-size.md), [Security](security-considerations-best-practices.md), [HADR configuration](hadr-cluster-best-practices.md), and [Collect baseline](performance-guidelines-best-practices-collect-baseline.md). - -## Checklist - -Review the following checklist for a brief overview of the storage best practices that the rest of the article covers in greater detail: - -- Monitor the application and [determine storage bandwidth and latency requirements](../../../virtual-machines/premium-storage-performance.md#counters-to-measure-application-performance-requirements) for SQL Server data, log, and tempdb files before choosing the disk type. -- To optimize storage performance, plan for highest uncached IOPS available and use data caching as a performance feature for data reads while avoiding [virtual machine and disks capping](../../../virtual-machines/premium-storage-performance.md#throttling). -- Place data, log, and tempdb files on separate drives. - - For the data drive, only use [premium P30 and P40 disks](../../../virtual-machines/disks-types.md#premium-ssds) to ensure the availability of cache support - - For the log drive plan for capacity and test performance versus cost while evaluating the [premium P30 - P80 disks](../../../virtual-machines/disks-types.md#premium-ssds) - - If submillisecond storage latency is required, use [Azure ultra disks](../../../virtual-machines/disks-types.md#ultra-disks) for the transaction log. - - For M-series virtual machine deployments consider [write accelerator](../../../virtual-machines/how-to-enable-write-accelerator.md) over using Azure ultra disks. 
- - Place [tempdb](/sql/relational-databases/databases/tempdb-database) on the local ephemeral SSD (default `D:\`) drive for most SQL Server workloads that are not part of Failover Cluster Instance (FCI) after choosing the optimal VM size. - - If the capacity of the local drive is not enough for tempdb, consider sizing up the VM. See [Data file caching policies](#data-file-caching-policies) for more information. - - For FCI place tempdb on the shared storage. - - If the FCI workload is heavily dependent on tempdb disk performance, then as an advanced configuration place tempdb on the local ephemeral SSD (default `D:\`) drive which is not part of FCI storage. This configuration will need custom monitoring and action to ensure the local ephemeral SSD (default `D:\`) drive is available all the time as any failures of this drive will not trigger action from FCI. -- Stripe multiple Azure data disks using [Storage Spaces](/windows-server/storage/storage-spaces/overview) to increase I/O bandwidth up to the target virtual machine's IOPS and throughput limits. -- Set [host caching](../../../virtual-machines/disks-performance.md#virtual-machine-uncached-vs-cached-limits) to read-only for data file disks. -- Set [host caching](../../../virtual-machines/disks-performance.md#virtual-machine-uncached-vs-cached-limits) to none for log file disks. - - Do not enable read/write caching on disks that contain SQL Server data or log files. - - Always stop the SQL Server service before changing the cache settings of your disk. -- For development and test workloads, and long-term backup archival consider using standard storage. It is not recommended to use Standard HDD/SDD for production workloads. -- [Credit-based Disk Bursting](../../../virtual-machines/disk-bursting.md#credit-based-bursting) (P1-P20) should only be considered for smaller dev/test workloads and departmental systems. 
-- Format your data disk to use 64 KB block size (allocation unit size) for all data files placed on a drive other than the temporary `D:\` drive (which has a default of 4 KB). SQL Server VMs deployed through Azure Marketplace come with data disks formatted with a block size and interleave for the storage pool set to 64 KB. - -To compare the storage checklist with the others, see the comprehensive [Performance best practices checklist](performance-guidelines-best-practices-checklist.md). - -## Overview - -To find the most effective configuration for SQL Server workloads on an Azure VM, start by [measuring the storage performance of your business application](performance-guidelines-best-practices-collect-baseline.md#storage). Once storage requirements are known, select a virtual machine that supports the necessary IOPS and throughput with the appropriate memory-to-vCore ratio. - -Choose a VM size with enough storage scalability for your workload and a mixture of disks (usually in a storage pool) that meet the capacity and performance requirements of your business. - -The type of disk depends on both the file type that's hosted on the disk and your peak performance requirements. - -> [!TIP] -> Provisioning a SQL Server VM through the Azure portal helps guide you through the storage configuration process and implements most storage best practices such as creating separate storage pools for your data and log files, targeting tempdb to the `D:\` drive, and enabling the optimal caching policy. For more information about provisioning and configuring storage, see [SQL VM storage configuration](storage-configuration.md). - -## VM disk types - -You have a choice in the performance level for your disks. The types of managed disks available as underlying storage (listed by increasing performance capabilities) are standard hard disk drives (HDD), standard SSDs, premium solid-state drives (SSD), and ultra disks. 
- -The performance of the disk increases with the capacity, grouped by [premium disk labels](../../../virtual-machines/disks-types.md#premium-ssds) such as the P1 with 4 GiB of space and 120 IOPS to the P80 with 32 TiB of storage and 20,000 IOPS. Premium storage supports a storage cache that helps improve read and write performance for some workloads. For more information, see [Managed disks overview](../../../virtual-machines/managed-disks-overview.md). - -There are also three main [disk types](../../../virtual-machines/managed-disks-overview.md#disk-roles) to consider for your SQL Server on Azure VM - an OS disk, a temporary disk, and your data disks. Carefully choose what is stored on the operating system drive `(C:\)` and the ephemeral temporary drive `(D:\)`. - -### Operating system disk - -An operating system disk is a VHD that can be booted and mounted as a running version of an operating system and is labeled as the `C:\` drive. When you create an Azure virtual machine, the platform will attach at least one disk to the VM for the operating system disk. The `C:\` drive is the default location for application installs and file configuration. - -For production SQL Server environments, do not use the operating system disk for data files, log files, error logs. - -### Temporary disk - -Many Azure virtual machines contain another disk type called the temporary disk (labeled as the `D:\` drive). Depending on the virtual machine series and size the capacity of this disk will vary. The temporary disk is ephemeral, which means the disk storage is recreated (as in, it is deallocated and allocated again), when the virtual machine is restarted, or moved to a different host (for [service healing](/troubleshoot/azure/virtual-machines/understand-vm-reboot), for example). - -The temporary storage drive is not persisted to remote storage and therefore should not store user database files, transaction log files, or anything that must be preserved. 
- -Place tempdb on the local temporary SSD `D:\` drive for SQL Server workloads unless consumption of local cache is a concern. If you are using a virtual machine that [does not have a temporary disk](../../../virtual-machines/azure-vms-no-temp-disk.yml) then it is recommended to place tempdb on its own isolated disk or storage pool with caching set to read-only. To learn more, see [tempdb data caching policies](performance-guidelines-best-practices-storage.md#data-file-caching-policies). - -### Data disks - -Data disks are remote storage disks that are often created in [storage pools](/windows-server/storage/storage-spaces/overview) in order to exceed the capacity and performance that any single disk could offer to the virtual machine. - -Attach the minimum number of disks that satisfies the IOPS, throughput, and capacity requirements of your workload. Do not exceed the maximum number of data disks of the smallest virtual machine you plan to resize to. - -Place data and log files on data disks provisioned to best suit performance requirements. - -Format your data disk to use 64 KB allocation unit size for all data files placed on a drive other than the temporary `D:\` drive (which has a default of 4 KB). SQL Server VMs deployed through Azure Marketplace come with data disks formatted with allocation unit size and interleave for the storage pool set to 64 KB. - -> [!NOTE] -> It is also possible to host your SQL Server database files directly on [Azure Blob storage](/sql/relational-databases/databases/sql-server-data-files-in-microsoft-azure) or on [SMB storage](/sql/database-engine/install-windows/install-sql-server-with-smb-fileshare-as-a-storage-option) such as [Azure premium file share](../../../storage/files/storage-how-to-create-file-share.md), but we recommend using [Azure managed disks](../../../virtual-machines/managed-disks-overview.md) for the best performance, reliability, and feature availability. 
- -## Premium disks - -Use premium SSD disks for data and log files for production SQL Server workloads. Premium SSD IOPS and bandwidth vary based on the [disk size and type](../../../virtual-machines/disks-types.md). - -For production workloads, use the P30 and/or P40 disks for SQL Server data files to ensure caching support and use the P30 up to P80 for SQL Server transaction log files. For the best total cost of ownership, start with P30s (5000 IOPS/200 MBps) for data and log files and only choose higher capacities when you need to control the virtual machine disk count. - -For OLTP workloads, match the target IOPS per disk (or storage pool) with your performance requirements using workloads at peak times and the `Disk Reads/sec` + `Disk Writes/sec` performance counters. For data warehouse and reporting workloads, match the target throughput using workloads at peak times and the `Disk Read Bytes/sec` + `Disk Write Bytes/sec`. - -Use Storage Spaces to achieve optimal performance: configure two pools, one for the log file(s) and the other for the data files. If you are not using disk striping, use two premium SSD disks mapped to separate drives, where one drive contains the log file and the other contains the data. - -Consider the [provisioned IOPS and throughput](../../../virtual-machines/disks-types.md#premium-ssds) per disk that is used as part of your storage pool. The combined IOPS and throughput capabilities of the disks are the maximum capability up to the throughput limits of the virtual machine. - -The best practice is to use the least number of disks possible while meeting the minimal requirements for IOPS (and throughput) and capacity. However, the balance of price and performance tends to be better with a large number of small disks rather than a small number of large disks. - -### Scaling premium disks - -When an Azure Managed Disk is first deployed, the performance tier for that disk is based on the provisioned disk size. 
Designate the performance tier at deployment or change it afterwards, without changing the size of the disk. If demand increases, you can increase the performance level to meet your business needs. - -Changing the performance tier allows administrators to prepare for and meet higher demand without relying on [disk bursting](../../../virtual-machines/disk-bursting.md#credit-based-bursting). - -Use the higher performance for as long as needed where billing is designed to meet the storage performance tier. Upgrade the tier to match the performance requirements without increasing the capacity. Return to the original tier when the extra performance is no longer required. - -This cost-effective and temporary expansion of performance is a strong use case for targeted events such as shopping, performance testing, training events and other brief windows where greater performance is needed only for a short term. - -For more information, see [Performance tiers for managed disks](../../../virtual-machines/disks-change-performance.md). - -## Azure ultra disk - -If there is a need for submillisecond response times with reduced latency consider using [Azure ultra disk](../../../virtual-machines/disks-types.md#ultra-disks) for the SQL Server log drive, or even the data drive for applications that are extremely sensitive to I/O latency. - -Ultra disk can be configured where capacity and IOPS can scale independently. With ultra disk administrators can provision a disk with the capacity, IOPS, and throughput requirements based on application needs. - -Ultra disk is not supported on all VM series and has other limitations such as region availability, redundancy, and support for Azure Backup. To learn more, see [Using Azure ultra disks](../../../virtual-machines/disks-enable-ultra-ssd.md) for a full list of limitations. 
- -## Standard HDDs and SSDs - -[Standard HDDs](../../../virtual-machines/disks-types.md#standard-hdds) and SSDs have varying latencies and bandwidth and are only recommended for dev/test workloads. Production workloads should use premium SSDs. If you are using Standard SSD (dev/test scenarios), the recommendation is to add the maximum number of data disks supported by your [VM size](../../../virtual-machines/sizes.md?toc=/azure/virtual-machines/windows/toc.json) and use disk striping with Storage Spaces for the best performance. - -## Caching - -Virtual machines that support premium storage caching can take advantage of an additional feature called the Azure BlobCache or host caching to extend the IOPS and throughput capabilities of a virtual machine. Virtual machines enabled for both premium storage and premium storage caching have these two different storage bandwidth limits that can be used together to improve storage performance. - -The IOPS and MBps throughput without caching counts against a virtual machine's uncached disk throughput limits. The maximum cached limits provide an additional buffer for reads that helps address growth and unexpected peaks. - -Enable premium caching whenever the option is supported to significantly improve performance for reads against the data drive without additional cost. - -Reads and writes to the Azure BlobCache (cached IOPS and throughput) do not count against the uncached IOPS and throughput limits of the virtual machine. - -> [!NOTE] -> Disk Caching is not supported for disks 4 TiB and larger (P50 and larger). If multiple disks are attached to your VM, each disk that is smaller than 4 TiB will support caching. For more information, see [Disk caching](../../../virtual-machines/premium-storage-performance.md#disk-caching). - -### Uncached throughput - -The max uncached disk IOPS and throughput is the maximum remote storage limit that the virtual machine can handle. 
This limit is defined at the virtual machine and is not a limit of the underlying disk storage. This limit applies only to I/O against data drives remotely attached to the VM, not the local I/O against the temp drive (`D:\` drive) or the OS drive. - -The amount of uncached IOPS and throughput that is available for a VM can be verified in the documentation for your virtual machine. - -For example, the [M-series](../../../virtual-machines/m-series.md) documentation shows that the max uncached throughput for the Standard_M8ms VM is 5000 IOPS and 125 MBps of uncached disk throughput. - -![Screenshot showing M-series uncached disk throughput documentation.](./media/performance-guidelines-best-practices/m-series-table.png) - -Likewise, you can see that the Standard_M32ts supports 20,000 uncached disk IOPS and 500 MBps uncached disk throughput. This limit is governed at the virtual machine level regardless of the underlying premium disk storage. - -For more information, see [uncached and cached limits](../../../virtual-machines/disks-performance.md#virtual-machine-uncached-vs-cached-limits). - - -### Cached and temp storage throughput - -The max cached and temp storage throughput limit is a separate limit from the uncached throughput limit on the virtual machine. The Azure BlobCache consists of a combination of the virtual machine host's random-access memory and locally attached SSD. The temp drive (`D:\` drive) within the virtual machine is also hosted on this local SSD. - -The max cached and temp storage throughput limit governs the I/O against the local temp drive (`D:\` drive) and the Azure BlobCache **only if** host caching is enabled. - -When caching is enabled on premium storage, virtual machines can scale beyond the limitations of the remote storage uncached VM IOPS and throughput limits. - -Only certain virtual machines support both premium storage and premium storage caching (which needs to be verified in the virtual machine documentation). 
For example, the [M-series](../../../virtual-machines/m-series.md) documentation indicates that both premium storage, and premium storage caching is supported: - -![Screenshot showing M-Series Premium Storage support.](./media/performance-guidelines-best-practices/m-series-table-premium-support.png) - -The limits of the cache will vary based on the virtual machine size. For example, the Standard_M8ms VM supports 10000 cached disk IOPS and 1000 MBps cached disk throughput with a total cache size of 793 GiB. Similarly, the Standard_M32ts VM supports 40000 cached disk IOPS and 400 MBps cached disk throughput with a total cache size of 3174 GiB. - -![Screenshot showing M-series cached disk throughput documentation.](./media/performance-guidelines-best-practices/m-series-table-cached-temp.png) - -You can manually enable host caching on an existing VM. Stop all application workloads and the SQL Server services before any changes are made to your virtual machine's caching policy. Changing any of the virtual machine cache settings results in the target disk being detached and reattached after the settings are applied. - -### Data file caching policies - -Your storage caching policy varies depending on the type of SQL Server data files that are hosted on the drive. - -The following table provides a summary of the recommended caching policies based on the type of SQL Server data: - -|SQL Server disk |Recommendation | -|---------|---------| -| **Data disk** | Enable `Read-only` caching for the disks hosting SQL Server data files.
                Reads from cache will be faster than the uncached reads from the data disk.
                Uncached IOPS and throughput plus Cached IOPS and throughput will yield the total possible performance available from the virtual machine within the VM's limits, but actual performance will vary based on the workload's ability to use the cache (cache hit ratio).
                | -|**Transaction log disk**|Set the caching policy to `None` for disks hosting the transaction log. There is no performance benefit to enabling caching for the Transaction log disk, and in fact having either `Read-only` or `Read/Write` caching enabled on the log drive can degrade performance of the writes against the drive and decrease the amount of cache available for reads on the data drive. | -|**Operating system disk** | The default caching policy is `Read/write` for the OS drive.
                It is not recommended to change the caching level of the OS drive. | -| **tempdb**| If tempdb cannot be placed on the ephemeral drive `D:\` due to capacity reasons, either resize the virtual machine to get a larger ephemeral drive or place tempdb on a separate data drive with `Read-only` caching configured.
                The virtual machine cache and ephemeral drive both use the local SSD, so keep this in mind when sizing as tempdb I/O will count against the cached IOPS and throughput virtual machine limits when hosted on the ephemeral drive.| - - -> [!IMPORTANT] -> Changing the cache setting of an Azure disk detaches and reattaches the target disk. When changing the cache setting for a disk that hosts SQL Server data, log, or application files, be sure to stop the SQL Server service along with any other related services to avoid data corruption. - -To learn more, see [Disk caching](../../../virtual-machines/premium-storage-performance.md#disk-caching). - - -## Disk striping - -Analyze the throughput and bandwidth required for your SQL data files to determine the number of data disks, including the log file and tempdb. Throughput and bandwidth limits vary by VM size. To learn more, see [VM Size](../../../virtual-machines/sizes.md) - -Add additional data disks and use disk striping for more throughput. For example, an application that needs 12,000 IOPS and 180 MB/s throughput can use three striped P30 disks to deliver 15,000 IOPS and 600 MB/s throughput. - -To configure disk striping, see [disk striping](storage-configuration.md#disk-striping). - -## Disk capping - -There are throughput limits at both the disk and virtual machine level. The maximum IOPS limits per VM and per disk differ and are independent of each other. - -Applications that consume resources beyond these limits will be throttled (also known as capped). Select a virtual machine and disk size in a disk stripe that meets application requirements and will not face capping limitations. To address capping, use caching, or tune the application so that less throughput is required. - -For example, an application that needs 12,000 IOPS and 180 MB/s can: -- Use the [Standard_M32ms](../../../virtual-machines/m-series.md) which has a max uncached disk throughput of 20,000 IOPS and 500 MBps. 
-- Stripe three P30 disks to deliver 15,000 IOPS and 600-MB/s throughput. -- Use a [Standard_M16ms](../../../virtual-machines/m-series.md) virtual machine and use host caching to utilize local cache over consuming throughput. - -Virtual machines configured to scale up during times of high utilization should provision storage with enough IOPS and throughput to support the maximum VM size while keeping the overall number of disks less than or equal to the maximum number supported by the smallest VM SKU targeted to be used. - -For more information on disk capping limitations and using caching to avoid capping, see [Disk IO capping](../../../virtual-machines/disks-performance.md). - -> [!NOTE] -> Some disk capping may still result in satisfactory performance to users; tune and maintain workloads rather than resize to a larger VM to balance managing cost and performance for the business. - - -## Write Acceleration - -Write Acceleration is a disk feature that is only available for the [M-Series](../../../virtual-machines/m-series.md) Virtual Machines (VMs). The purpose of Write Acceleration is to improve the I/O latency of writes against Azure Premium Storage when you need single digit I/O latency due to high volume mission critical OLTP workloads or data warehouse environments. - -Use Write Acceleration to improve write latency to the drive hosting the log files. Do not use Write Acceleration for SQL Server data files. - -Write Accelerator disks share the same IOPS limit as the virtual machine. Attached disks cannot exceed the Write Accelerator IOPS limit for a VM. 
- -The following table outlines the number of data disks and IOPS supported per virtual machine: - -| VM SKU | # Write Accelerator disks | Write Accelerator disk IOPS per VM | -|---|---|---| -| M416ms_v2, M416s_v2 | 16 | 20000 | -| M128ms, M128s | 16 | 20000 | -| M208ms_v2, M208s_v2 | 8 | 10000 | -| M64ms, M64ls, M64s | 8 | 10000 | -| M32ms, M32ls, M32ts, M32s | 4 | 5000 | -| M16ms, M16s | 2 | 2500 | -| M8ms, M8s | 1 | 1250 | - -There are a number of restrictions to using Write Acceleration. To learn more, see [Restrictions when using Write Accelerator](../../../virtual-machines/how-to-enable-write-accelerator.md#restrictions-when-using-write-accelerator). - - -### Comparing to Azure ultra disk - -The biggest difference between Write Acceleration and Azure ultra disks is that Write Acceleration is a virtual machine feature only available for the M-Series and Azure ultra disks are a storage option. Write Acceleration is a write-optimized cache with its own limitations based on the virtual machine size. Azure ultra disks are a low latency disk storage option for Azure Virtual Machines. - -If possible, use Write Acceleration over ultra disks for the transaction log disk. For virtual machines that do not support Write Acceleration but require low latency to the transaction log, use Azure ultra disks. - -## Monitor storage performance - -To assess storage needs, and determine how well storage is performing, you need to understand what to measure, and what those indicators mean. - -[IOPS (Input/Output per second)](../../../virtual-machines/premium-storage-performance.md#iops) is the number of requests the application is making to storage per second. Measure IOPS using Performance Monitor counters `Disk Reads/sec` and `Disk Writes/sec`. [OLTP (Online transaction processing)](/azure/architecture/data-guide/relational-data/online-transaction-processing) applications need to drive higher IOPS in order to achieve optimal performance. 
Applications such as payment processing systems, online shopping, and retail point-of-sale systems are all examples of OLTP applications. - -[Throughput](../../../virtual-machines/premium-storage-performance.md#throughput) is the volume of data that is being sent to the underlying storage, often measured by megabytes per second. Measure throughput with the Performance Monitor counters `Disk Read Bytes/sec` and `Disk Write Bytes/sec`. [Data warehousing](/azure/architecture/data-guide/relational-data/data-warehousing) is optimized around maximizing throughput over IOPS. Applications such as data stores for analysis, reporting, ETL workstreams, and other business intelligence targets are all examples of data warehousing applications. - -I/O unit sizes influence IOPS and throughput capabilities as smaller I/O sizes yield higher IOPS and larger I/O sizes yield higher throughput. SQL Server chooses the optimal I/O size automatically. For more information about, see [Optimize IOPS, throughput, and latency for your applications](../../../virtual-machines/premium-storage-performance.md#optimize-iops-throughput-and-latency-at-a-glance). - -There are specific Azure Monitor metrics that are invaluable for discovering capping at the virtual machine and disk level as well as the consumption and the health of the AzureBlob cache. To identify key counters to add to your monitoring solution and Azure portal dashboard, see [Storage utilization metrics](../../../virtual-machines/disks-metrics.md#storage-io-utilization-metrics). - -> [!NOTE] -> Azure Monitor does not currently offer disk-level metrics for the ephemeral temp drive `(D:\)`. VM Cached IOPS Consumed Percentage and VM Cached Bandwidth Consumed Percentage will reflect IOPS and throughput from both the ephemeral temp drive `(D:\)` and host caching together. 
- - -## Next steps - -To learn more, see the other articles in this best practices series: - -- [Quick checklist](performance-guidelines-best-practices-checklist.md) -- [VM size](performance-guidelines-best-practices-vm-size.md) -- [Security](security-considerations-best-practices.md) -- [HADR settings](hadr-cluster-best-practices.md) -- [Collect baseline](performance-guidelines-best-practices-collect-baseline.md) - -For security best practices, see [Security considerations for SQL Server on Azure Virtual Machines](security-considerations-best-practices.md). - -For detailed testing of SQL Server performance on Azure VMs with TPC-E and TPC_C benchmarks, refer to the blog [Optimize OLTP performance](https://techcommunity.microsoft.com/t5/sql-server/optimize-oltp-performance-with-sql-server-on-azure-vm/ba-p/916794). - -Review other SQL Server Virtual Machine articles at [SQL Server on Azure Virtual Machines Overview](sql-server-on-azure-vm-iaas-what-is-overview.md). If you have questions about SQL Server virtual machines, see the [Frequently Asked Questions](frequently-asked-questions-faq.yml). diff --git a/articles/azure-sql/virtual-machines/windows/performance-guidelines-best-practices-vm-size.md b/articles/azure-sql/virtual-machines/windows/performance-guidelines-best-practices-vm-size.md deleted file mode 100644 index 8a98c95129ea0..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/performance-guidelines-best-practices-vm-size.md +++ /dev/null @@ -1,195 +0,0 @@ ---- -title: "VM size: Performance best practices & guidelines" -description: Provides VM size guidelines and best practices to optimize the performance of your SQL Server on Azure Virtual Machine (VM). 
-services: virtual-machines-windows -documentationcenter: na -author: bluefooted -editor: '' -tags: azure-service-management -ms.service: virtual-machines-sql -ms.subservice: performance -ms.topic: conceptual -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 12/10/2021 -ms.author: pamela -ms.reviewer: pamela ---- - -# VM size: Performance best practices for SQL Server on Azure VMs - -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article provides VM size guidance and a series of best practices and guidelines to optimize performance for your SQL Server on Azure Virtual Machines (VMs). - -There is typically a trade-off between optimizing for costs and optimizing for performance. This performance best practices series is focused on getting the *best* performance for SQL Server on Azure Virtual Machines. If your workload is less demanding, you might not require every recommended optimization. Consider your performance needs, costs, and workload patterns as you evaluate these recommendations. - -For comprehensive details, see the other articles in this series: [Checklist](performance-guidelines-best-practices-checklist.md), [Storage](performance-guidelines-best-practices-storage.md), [Security](security-considerations-best-practices.md), [HADR configuration](hadr-cluster-best-practices.md), [Collect baseline](performance-guidelines-best-practices-collect-baseline.md). - -## Checklist - -Review the following checklist for a brief overview of the VM size best practices that the rest of the article covers in greater detail: - -- The new [Ebdsv5-series](../../../virtual-machines/ebdsv5-ebsv5-series.md#ebdsv5-series) provides the highest I/O throughput-to-vCore ratio in Azure along with a memory-to-vCore ratio of 8. This series offers the best price-performance for SQL Server workloads on Azure VMs. Consider this series first for most SQL Server workloads. 
-- Use VM sizes with 4 or more vCPUs like the [E4ds_v5](../../../virtual-machines/edv5-edsv5-series.md#edsv5-series) or higher. -- Use [memory optimized](../../../virtual-machines/sizes-memory.md) virtual machine sizes for the best performance of SQL Server workloads. -- The [Edsv5](../../../virtual-machines/edv5-edsv5-series.md#edsv5-series) series, the [M-](../../../virtual-machines/m-series.md), and the [Mv2-](../../../virtual-machines/mv2-series.md) series offer the optimal memory-to-vCore ratio required for OLTP workloads. -- The M series VMs offer the highest memory-to-vCore ratio in Azure. Consider these VMs for mission critical and data warehouse workloads. -- Leverage Azure Marketplace images to deploy your SQL Server Virtual Machines as the SQL Server settings and storage options are configured for optimal performance. -- Collect the target workload's performance characteristics and use them to determine the appropriate VM size for your business. -- Use the [Data Migration Assistant](https://www.microsoft.com/download/details.aspx?id=53595) [SKU recommendation](/sql/dma/dma-sku-recommend-sql-db) tool to find the right VM size for your existing SQL Server workload. - -To compare the VM size checklist with the others, see the comprehensive [Performance best practices checklist](performance-guidelines-best-practices-checklist.md). - -## Overview - -When you are creating a SQL Server on Azure VM, carefully consider the type of workload necessary. If you are migrating an existing environment, [collect a performance baseline](performance-guidelines-best-practices-collect-baseline.md) to determine your SQL Server on Azure VM requirements. If this is a new VM, then create your new SQL Server VM based on your vendor requirements. - -If you are creating a new SQL Server VM with a new application built for the cloud, you can easily size your SQL Server VM as your data and usage requirements evolve. 
-Start the development environments with the lower-tier D-Series, B-Series, or Av2-series and grow your environment over time. - -Use the SQL Server VM marketplace images with the storage configuration in the portal. This will make it easier to properly create the storage pools necessary to get the size, IOPS, and throughput necessary for your workloads. It is important to choose SQL Server VMs that support premium storage and premium storage caching. See the [storage](performance-guidelines-best-practices-storage.md) article to learn more. - -Use the SQL Server VM Azure Marketplace images with the storage configuration in the portal. This will make it easier to properly create the storage pools necessary to get the size, IOPS, and throughput required for your workloads. It is important to choose SQL Server VMs that support premium storage and premium storage caching. Currently, the [Ebdsv5-series](../../../virtual-machines/ebdsv5-ebsv5-series.md#ebdsv5-series) provides the highest I/O throughput-to-vCore ratio available in Azure. If you do not know the I/O requirements for your SQL Server workload, this series is the one most likely to meet your needs. See the [storage](performance-guidelines-best-practices-storage.md) article to learn more. - -> [!NOTE] -> If you are interested in participating in the [Ebdsv5-series](../../../virtual-machines/ebdsv5-ebsv5-series.md#ebdsv5-series) public preview, please sign up at [https://aka.ms/signupEbsv5Preview](https://aka.ms/signupEbsv5Preview). - -SQL Server data warehouse and mission critical environments will often need to scale beyond the 8 memory-to-vCore ratio. For medium environments, you may want to choose a 16 memory-to-vCore ratio, and a 32 memory-to-vCore ratio for larger data warehouse environments. - -SQL Server data warehouse environments often benefit from the parallel processing of larger machines. For this reason, the M-series and the Mv2-series are good options for larger data warehouse environments. 
- -Use the vCPU and memory configuration from your source machine as a baseline for migrating a current on-premises SQL Server database to SQL Server on Azure VMs. If you have Software Assurance, take advantage of [Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/) to bring your licenses to Azure and save on SQL Server licensing costs. - -## Memory optimized - -The [memory optimized virtual machine sizes](../../../virtual-machines/sizes-memory.md) are a primary target for SQL Server VMs and the recommended choice by Microsoft. The memory optimized virtual machines offer stronger memory-to-CPU ratios and medium-to-large cache options. - -### Ebdsv5-series - -The [Ebdsv5-series](../../../virtual-machines/ebdsv5-ebsv5-series.md#ebdsv5-series) is a new memory-optimized series of VMs that offer the highest remote storage throughput available in Azure. These VMs have a memory-to-vCore ratio of 8 which, together with the high I/O throughput, makes them ideal for SQL Server workloads. The Ebdsv5-series VMs offer the best price-performance for SQL Server workloads running on Azure virtual machines and we strongly recommend them for most of your production SQL Server workloads. - -### Edsv5-series - -The [Edsv5-series](../../../virtual-machines/edv5-edsv5-series.md#edsv5-series) is designed for memory-intensive applications and is ideal for SQL Server workloads that do not require as high I/O throughput as the Ebdsv5 series offers. These VMs have a large local storage SSD capacity, up to 672 GiB of RAM, and very high local and remote storage throughput. There is a nearly consistent 8 GiB of memory per vCore across most of these virtual machines, which is ideal for most SQL Server workloads. - -The largest virtual machine in this group is the [Standard_E104ids_v5](../../../virtual-machines/edv5-edsv5-series.md#edsv5-series) that offers 104 vCores and 672 GiBs of memory. 
This virtual machine is notable because it is [isolated](../../../virtual-machines/isolation.md) which means it is guaranteed to be the only virtual machine running on the host, and therefore is isolated from other customer workloads. This has a memory-to-vCore ratio that is lower than what is recommended for SQL Server, so it should only be used if isolation is required. - -The Edsv5-series virtual machines support [premium storage](../../../virtual-machines/premium-storage-performance.md), and [premium storage caching](../../../virtual-machines/premium-storage-performance.md#disk-caching). - -### M and Mv2 series - -The [M-series](../../../virtual-machines/m-series.md) offers vCore counts and memory for some of the largest SQL Server workloads. - -The [Mv2-series](../../../virtual-machines/mv2-series.md) has the highest vCore counts and memory and is recommended for mission critical and data warehouse workloads. Mv2-series instances are memory optimized VM sizes providing unparalleled computational performance to support large in-memory databases and workloads with a high memory-to-CPU ratio that is perfect for relational database servers, large caches, and in-memory analytics. - -Some of the features of the M and Mv2-series attractive for SQL Server performance include [premium storage](../../../virtual-machines/premium-storage-performance.md) and [premium storage caching](../../../virtual-machines/premium-storage-performance.md#disk-caching) support, [ultra-disk](../../../virtual-machines/disks-enable-ultra-ssd.md) support, and [write acceleration](../../../virtual-machines/how-to-enable-write-accelerator.md). - -## General purpose - -The [general purpose virtual machine sizes](../../../virtual-machines/sizes-general.md) are designed to provide balanced memory-to-vCore ratios for smaller entry level workloads such as development and test, web servers, and smaller database servers. 
- -Because of the smaller memory-to-vCore ratios with the general purpose virtual machines, it is important to carefully monitor memory-based performance counters to ensure SQL Server is able to get the buffer cache memory it needs. See [memory performance baseline](performance-guidelines-best-practices-collect-baseline.md#memory) for more information. - -Since the starting recommendation for production workloads is a memory-to-vCore ratio of 8, the minimum recommended configuration for a general purpose VM running SQL Server is 4 vCPU and 32 GiB of memory. - -### Ddsv5 series - -The [Ddsv5-series](../../../virtual-machines/ddv5-ddsv5-series.md#ddsv5-series) offers a fair combination of vCPU, memory, and temporary disk but with smaller memory-to-vCore support. - -The Ddsv5 VMs include lower latency and higher-speed local storage. - -These machines are ideal for side-by-side SQL and app deployments that require fast access to temp storage and departmental relational databases. There is a standard memory-to-vCore ratio of 4 across all of the virtual machines in this series. - -For this reason, it is recommended to leverage the D8ds_v5 as the starter virtual machine in this series, which has 8 vCores and 32 GiBs of memory. The largest machine is the D96ds_v5, which has 96 vCores and 256 GiBs of memory. - -The [Ddsv5-series](../../../virtual-machines/ddv5-ddsv5-series.md#ddsv5-series) virtual machines support [premium storage](../../../virtual-machines/premium-storage-performance.md) and [premium storage caching](../../../virtual-machines/premium-storage-performance.md#disk-caching). - -> [!NOTE] -> The [Ddsv5-series](../../../virtual-machines/ddv5-ddsv5-series.md#ddsv5-series) does not have the memory-to-vCore ratio of 8 that is recommended for SQL Server workloads. As such, consider using these virtual machines for small applications and development workloads only. 
- -### B-series - -The [burstable B-series](../../../virtual-machines/sizes-b-series-burstable.md) virtual machine sizes are ideal for workloads that do not need consistent performance such as proof of concept and very small application and development servers. - -Most of the [burstable B-series](../../../virtual-machines/sizes-b-series-burstable.md) virtual machine sizes have a memory-to-vCore ratio of 4. The largest of these machines is the [Standard_B20ms](../../../virtual-machines/sizes-b-series-burstable.md) with 20 vCores and 80 GiB of memory. - -This series is unique as the apps have the ability to **burst** during business hours with burstable credits varying based on machine size. - -When the credits are exhausted, the VM returns to the baseline machine performance. - -The benefit of the B-series is the compute savings you could achieve compared to the other VM sizes in other series especially if you need the processing power sparingly throughout the day. - -This series supports [premium storage](../../../virtual-machines/premium-storage-performance.md), but **does not support** [premium storage caching](../../../virtual-machines/premium-storage-performance.md#disk-caching). - -> [!NOTE] -> The [burstable B-series](../../../virtual-machines/sizes-b-series-burstable.md) does not have the memory-to-vCore ratio of 8 that is recommended for SQL Server workloads. As such, consider using these virtual machines for smaller applications, web servers, and development workloads only. - -### Av2-series - -The [Av2-series](../../../virtual-machines/av2-series.md) VMs are best suited for entry-level workloads like development and test, low traffic web servers, small to medium app databases, and proof-of-concepts. 
- -Only the [Standard_A2m_v2](../../../virtual-machines/av2-series.md) (2 vCores and 16GiBs of memory), [Standard_A4m_v2](../../../virtual-machines/av2-series.md) (4 vCores and 32GiBs of memory), and the [Standard_A8m_v2](../../../virtual-machines/av2-series.md) (8 vCores and 64GiBs of memory) have a good memory-to-vCore ratio of 8 for these top three virtual machines. - -These virtual machines are all good options for smaller development and test SQL Server machines. - -The 8 vCore [Standard_A8m_v2](../../../virtual-machines/av2-series.md) may also be a good option for small application and web servers. - -> [!NOTE] -> The Av2 series does not support premium storage and as such, is not recommended for production SQL Server workloads even with the virtual machines that have a memory-to-vCore ratio of 8. - -## Storage optimized - -The [storage optimized VM sizes](../../../virtual-machines/sizes-storage.md) are for specific use cases. These virtual machines are specifically designed with optimized disk throughput and IO. - -### Lsv2-series - -The [Lsv2-series](../../../virtual-machines/lsv2-series.md) features high throughput, low latency, and local NVMe storage. The Lsv2-series VMs are optimized to use the local disk on the node attached directly to the VM rather than using durable data disks. - -These virtual machines are strong options for big data, data warehouse, reporting, and ETL workloads. The high throughput and IOPS of the local NVMe storage is a good use case for processing files that will be loaded into your database and other scenarios where the data can be recreated from the source system or other repositories such as Azure Blob storage or Azure Data Lake. [Lsv2-series](../../../virtual-machines/lsv2-series.md) VMs can also burst their disk performance for up to 30 minutes at a time. - -These virtual machines range in size from 8 to 80 vCPUs with 8 GiB of memory per vCPU and for every 8 vCPUs there is 1.92 TB of NVMe SSD. 
This means for the largest VM of this series, the [L80s_v2](../../../virtual-machines/lsv2-series.md), there is 80 vCPU and 640 GiB of memory with 10x1.92TB of NVMe storage. There is a consistent memory-to-vCore ratio of 8 across all of these virtual machines. - -The NVMe storage is ephemeral meaning that data will be lost on these disks if you deallocate your virtual machine, or if it's moved to a different host for service healing. - -The Lsv2 and Ls series support [premium storage](../../../virtual-machines/premium-storage-performance.md), but not premium storage caching. The creation of a local cache to increase IOPS is not supported. - -> [!WARNING] -> Storing your data files on the ephemeral NVMe storage could result in data loss when the VM is deallocated. - -## Constrained vCores - -High performing SQL Server workloads often need larger amounts of memory, IOPS, and throughput without the higher vCore counts. - -Most OLTP workloads are application databases driven by large numbers of smaller transactions. With OLTP workloads, only a small amount of the data is read or modified, but the volumes of transactions driven by user counts are much higher. It is important to have the SQL Server memory available to cache plans, store recently accessed data for performance, and ensure physical reads can be read into memory quickly. - -These OLTP environments need higher amounts of memory, fast storage, and the I/O bandwidth necessary to perform optimally. - -In order to maintain this level of performance without the higher SQL Server licensing costs, Azure offers VM sizes with [constrained vCPU counts](../../../virtual-machines/constrained-vcpu.md). - -This helps control licensing costs by reducing the available vCores while maintaining the same memory, storage, and I/O bandwidth of the parent virtual machine. - -The vCPU count can be constrained to one-half to one-quarter of the original VM size. 
Reducing the vCores available to the virtual machine will achieve higher memory-to-vCore ratios, but the compute cost will remain the same. - -These new VM sizes have a suffix that specifies the number of active vCPUs to make them easier to identify. - -For example, the [M64-32ms](../../../virtual-machines/constrained-vcpu.md) requires licensing only 32 SQL Server vCores with the memory, I/O, and throughput of the [M64ms](../../../virtual-machines/m-series.md) and the [M64-16ms](../../../virtual-machines/constrained-vcpu.md) requires licensing only 16 vCores. Though while the [M64-16ms](../../../virtual-machines/constrained-vcpu.md) has a quarter of the SQL Server licensing cost of the M64ms, the compute cost of the virtual machine will be the same. - -> [!NOTE] -> -> - Medium to large data warehouse workloads may still benefit from [constrained vCore VMs](../../../virtual-machines/constrained-vcpu.md), but data warehouse workloads are commonly characterized by fewer users and processes addressing larger amounts of data through query plans that run in parallel. -> - The compute cost, which includes operating system licensing, will remain the same as the parent virtual machine. - -## Next steps - -To learn more, see the other articles in this best practices series: - -- [Quick checklist](performance-guidelines-best-practices-checklist.md) -- [Storage](performance-guidelines-best-practices-storage.md) -- [Security](security-considerations-best-practices.md) -- [HADR settings](hadr-cluster-best-practices.md) -- [Collect baseline](performance-guidelines-best-practices-collect-baseline.md) - -For security best practices, see [Security considerations for SQL Server on Azure Virtual Machines](security-considerations-best-practices.md). - -Review other SQL Server Virtual Machine articles at [SQL Server on Azure Virtual Machines Overview](sql-server-on-azure-vm-iaas-what-is-overview.md). 
If you have questions about SQL Server virtual machines, see the [Frequently Asked Questions](frequently-asked-questions-faq.yml). diff --git a/articles/azure-sql/virtual-machines/windows/pricing-guidance.md b/articles/azure-sql/virtual-machines/windows/pricing-guidance.md deleted file mode 100644 index 983a95d1e628f..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/pricing-guidance.md +++ /dev/null @@ -1,168 +0,0 @@ ---- -title: Price guidance & managing costs -description: Provides best practices for choosing the right SQL Server virtual machine pricing model. -services: virtual-machines-windows -documentationcenter: na -author: bluefooted -editor: '' -tags: azure-service-management -ms.assetid: -ms.service: virtual-machines-sql -ms.subservice: management - -ms.topic: conceptual -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 08/09/2018 -ms.author: pamela -ms.reviewer: mathoma -ms.custom: "seo-lt-2019" ---- -# Pricing guidance for SQL Server on Azure VMs -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article provides pricing guidance for [SQL Server on Azure Virtual Machines](sql-server-on-azure-vm-iaas-what-is-overview.md). There are several options that affect cost, and it is important to pick the right image that balances costs with business requirements. - -> [!TIP] -> If you only need to find out a cost estimate for a specific combination of SQL Server edition and virtual machine (VM) size, see the pricing page for [Windows](https://azure.microsoft.com/pricing/details/virtual-machines/windows) or [Linux](https://azure.microsoft.com/pricing/details/virtual-machines/linux). Select your platform and SQL Server edition from the **OS/Software** list. -> -> ![UI on VM Pricing page](./media/pricing-guidance/virtual-machines-pricing-ui.png) -> -> Or use the [pricing calculator](https://azure.microsoft.com/pricing/#explore-cost) to add and configure a virtual machine. 
- -## Free-licensed SQL Server editions - -If you want to develop, test, or build a proof of concept, then use the freely licensed **SQL Server Developer edition**. This edition has all the features of SQL Server Enterprise edition, allowing you to build and test any type of application. However, you cannot run the Developer edition in production. A SQL Server Developer edition VM only incurs charges for the cost of the VM, because there are no associated SQL Server licensing costs. - -If you want to run a lightweight workload in production (<4 cores, <1-GB memory, <10 GB/database), use the freely licensed **SQL Server Express edition**. A SQL Server Express edition VM also only incurs charges for the cost of the VM. - -For these development/test and lightweight production workloads, you can also save money by choosing a smaller VM size that matches these workloads. The DS1v2 might be a good choice in some scenarios. - -To create an Azure VM running SQL Server 2017 with one of these images, see the following links: - -| Platform | Freely licensed images | -|---|---| -| Windows Server 2016 | [SQL Server 2017 Developer Azure VM](https://portal.azure.com/#create/Microsoft.FreeSQLServerLicenseSQLServer2017DeveloperonWindowsServer2016)
                [SQL Server 2017 Express Azure VM](https://portal.azure.com/#create/Microsoft.FreeSQLServerLicenseSQLServer2017ExpressonWindowsServer2016) | -| Red Hat Enterprise Linux | [SQL Server 2017 Developer Azure VM](https://portal.azure.com/#create/Microsoft.FreeSQLServerLicenseSQLServer2017DeveloperonRedHatEnterpriseLinux74)
                [SQL Server 2017 Express Azure VM](https://portal.azure.com/#create/Microsoft.FreeSQLServerLicenseSQLServer2017ExpressonRedHatEnterpriseLinux74) | -| SUSE Linux Enterprise Server | [SQL Server 2017 Developer Azure VM](https://portal.azure.com/#create/Microsoft.FreeSQLServerLicenseSQLServer2017DeveloperonSLES12SP2)
                [SQL Server 2017 Express Azure VM](https://portal.azure.com/#create/Microsoft.FreeSQLServerLicenseSQLServer2017ExpressonSLES12SP2) | -| Ubuntu | [SQL Server 2017 Developer Azure VM](https://portal.azure.com/#create/Microsoft.FreeSQLServerLicenseSQLServer2017DeveloperonUbuntuServer1604LTS)
                [SQL Server 2017 Express Azure VM](https://portal.azure.com/#create/Microsoft.FreeSQLServerLicenseSQLServer2017ExpressonUbuntuServer1604LTS) | - -## Paid SQL Server editions - -If you have a non-lightweight production workload, use one of the following SQL Server editions: - -| SQL Server edition | Workload | -|-----|-----| -| Web | Small web sites | -| Standard | Small to medium workloads | -| Enterprise | Large or mission-critical workloads| - -You have two options to pay for SQL Server licensing for these editions: *pay per usage* or *bring your own license (BYOL)*. - -## Pay per usage - -**Paying the SQL Server license per usage** (also known as **pay as you go**) means that the per-second cost of running the Azure VM includes the cost of the SQL Server license. You can see the pricing for the different SQL Server editions (Web, Standard, Enterprise) in the Azure Virtual Machines pricing page for [Windows](https://azure.microsoft.com/pricing/details/virtual-machines/windows) or [Linux](https://azure.microsoft.com/pricing/details/virtual-machines/linux). - -The cost is the same for all versions of SQL Server (2012 SP3 to 2019). The per-second licensing cost depends on the number of VM vCPUs. - -Paying the SQL Server licensing per usage is recommended for: - -- **Temporary or periodic workloads**. For example, an app that needs to support an event for a couple of months every year, or business analysis on Mondays. - -- **Workloads with unknown lifetime or scale**. For example, an app that may not be required in a few months, or which may require more, or less compute power, depending on demand. - -To create an Azure VM running SQL Server 2017 with one of these pay-as-you-go images, see the following links: - -| Platform | Licensed images | -|---|---| -| Windows Server 2016 | [SQL Server 2017 Web Azure VM](https://portal.azure.com/#create/Microsoft.SQLServer2017WebonWindowsServer2016)
                [SQL Server 2017 Standard Azure VM](https://portal.azure.com/#create/Microsoft.SQLServer2017StandardonWindowsServer2016)
                [SQL Server 2017 Enterprise Azure VM](https://portal.azure.com/#create/Microsoft.SQLServer2017EnterpriseWindowsServer2016) | -| Red Hat Enterprise Linux | [SQL Server 2017 Web Azure VM](https://portal.azure.com/#create/Microsoft.SQLServer2017WebonRedHatEnterpriseLinux74)
                [SQL Server 2017 Standard Azure VM](https://portal.azure.com/#create/Microsoft.SQLServer2017StandardonRedHatEnterpriseLinux74)
                [SQL Server 2017 Enterprise Azure VM](https://portal.azure.com/#create/Microsoft.SQLServer2017EnterpriseonRedHatEnterpriseLinux74) | -| SUSE Linux Enterprise Server | [SQL Server 2017 Web Azure VM](https://portal.azure.com/#create/Microsoft.SQLServer2017WebonSLES12SP2)
                [SQL Server 2017 Standard Azure VM](https://portal.azure.com/#create/Microsoft.SQLServer2017StandardonSLES12SP2)
                [SQL Server 2017 Enterprise Azure VM](https://portal.azure.com/#create/Microsoft.SQLServer2017EnterpriseonSLES12SP2) | -| Ubuntu | [SQL Server 2017 Web Azure VM](https://portal.azure.com/#create/Microsoft.SQLServer2017WebonUbuntuServer1604LTS)
                [SQL Server 2017 Standard Azure VM](https://portal.azure.com/#create/Microsoft.SQLServer2017StandardonUbuntuServer1604LTS)
                [SQL Server 2017 Enterprise Azure VM](https://portal.azure.com/#create/Microsoft.SQLServer2017EnterpriseonUbuntuServer1604LTS) | - -> [!IMPORTANT] -> When you create a SQL Server virtual machine in the Azure portal, the **Choose a size** window shows an estimated cost. It is important to note that this estimate is only the compute costs for running the VM along with any OS licensing costs (Windows or third-party Linux operating systems). -> -> ![Choose VM size blade](./media/pricing-guidance/sql-vm-choose-size-pricing-estimate.png) -> ->It does not include additional SQL Server licensing costs for Web, Standard, and Enterprise editions. To get the most accurate pricing estimate, select your operating system and SQL Server edition on the pricing page for [Windows](https://azure.microsoft.com/pricing/details/virtual-machines/windows/) or [Linux](https://azure.microsoft.com/pricing/details/virtual-machines/linux/). - -> [!NOTE] -> It is now possible to change the licensing model from pay-per-usage to bring your own license (BYOL) and back. For more information, see [How to change the licensing model for a SQL Server VM](licensing-model-azure-hybrid-benefit-ahb-change.md). - -## Bring your own license (BYOL) - -**Bringing your own SQL Server license through License Mobility**, also referred to as **BYOL**, means using an existing SQL Server Volume License with Software Assurance in an Azure VM. A SQL Server VM using BYOL only charges for the cost of running the VM, not for SQL Server licensing, given that you have already acquired licenses and Software Assurance through a Volume Licensing program or through a Cloud Solution Partner (CSP). - -> [!NOTE] -> The BYOL images are currently only available for Windows virtual machines. However, you can manually install SQL Server on a Linux-only VM. See the guidelines in the [SQL Server on a Linux VM FAQ](../linux/frequently-asked-questions-faq.yml). 
- -Bringing your own SQL Server licensing through License Mobility is recommended for: - -- **Continuous workloads**. For example, an app that needs to support business operations 24x7. - -- **Workloads with known lifetime and scale**. For example, an app that is required for the whole year and which demand has been forecasted. - -To use BYOL with a SQL Server VM, you must have a license for SQL Server Standard or Enterprise and [Software Assurance](https://www.microsoft.com/licensing/licensing-programs/software-assurance-default.aspx#tab=1), which is a required option through some volume licensing programs and an optional purchase with others. The pricing level provided through Volume Licensing programs varies, based on the type of agreement and the quantity and or commitment to SQL Server. But as a rule of thumb, bringing your own license for continuous production workloads has the following benefits: - -| BYOL benefit | Description | -|-----|-----| -| **Cost savings** | The [Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/) offers up to 55% savings. For more information, see [Switch licensing model](licensing-model-azure-hybrid-benefit-ahb-change.md) | -| **Free passive secondary replica** | Another benefit of bringing your own license is the [free licensing for one passive secondary replica](https://azure.microsoft.com/pricing/licensing-faq/) per SQL Server for high availability purposes. This cuts in half the licensing cost of a highly available SQL Server deployment (for example, using Always On Availability Groups). The rights to run the passive secondary are provided through the Fail-Over Servers Software Assurance benefit. 
| - -To create an Azure VM running SQL Server 2017 with one of these bring-your-own-license images, see the VMs prefixed with "{BYOL}": - -- [SQL Server 2017 Enterprise Azure VM](https://portal.azure.com/#create/Microsoft.BYOLSQLServer2017EnterpriseWindowsServer2016) -- [SQL Server 2017 Standard Azure VM](https://portal.azure.com/#create/Microsoft.BYOLSQLServer2017StandardonWindowsServer2016) - -> [!IMPORTANT] -> Let us know within 10 days how many SQL Server licenses you are using in Azure. The links to the previous images have instructions on how to do this. - -> [!NOTE] -> It is now possible to change the licensing model from pay-per-usage to bring your own license (BYOL) and back. For more information, see [How to change the licensing model for a SQL Server VM](licensing-model-azure-hybrid-benefit-ahb-change.md). - - - -## Reduce costs - -To avoid unnecessary costs, choose an optimal virtual machine size and consider intermittent shutdowns for non-continuous workloads. - -### Correctly size your VM - -The licensing cost of SQL Server is directly related to the number of vCPUs. Choose a VM size that matches your expected needs for CPU, memory, storage, and I/O bandwidth. For a complete list of machine size options, see [Windows VM sizes](../../../virtual-machines/sizes.md) and [Linux VM sizes](../../../virtual-machines/sizes.md?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). - -There are new machine sizes that work well with certain types of SQL Server workloads. These machines sizes maintain high levels of memory, storage, and I/O bandwidth, but they have a lower virtualized core count. 
For example, consider the following example: - -| VM size | vCPUs | Memory | Max disks | Max I/O throughput | SQL Server licensing costs | Total costs (compute + licensing) | -|---|---|---|---|---|---|---| -| **Standard_DS14v2** | 16 | 112 GB | 32 | 51,200 IOPS or 768 MB/s | | | -| **Standard_DS14-4v2** | 4 | 112 GB | 32 | 51,200 IOPS or 768 MB/s | 75% lower | 57% lower | - -> [!IMPORTANT] -> This is a point-in-time example. For the most recent specifications, refer to the machine sizes articles and the Azure pricing page for [Windows](https://azure.microsoft.com/pricing/details/virtual-machines/windows/) and [Linux](https://azure.microsoft.com/pricing/details/virtual-machines/linux/). - -In the previous example, you can see that the specifications for **Standard_DS14v2** and **Standard_DS14-4v2** are identical except for vCPUs. The suffix **-4v2** at the end of the **Standard_DS14-4v2** machine size indicates the number of active vCPUs. Because SQL Server licensing costs are tied to the number of vCPUs, this significantly reduces the cost of the VM in scenarios where the extra vCPUs are not needed. This is one example, and there are many machine sizes with constrained vCPUs that are identified with this suffix pattern. For more information, see the blog post [Announcing new Azure VM sizes for more cost-effective database work](https://azure.microsoft.com/blog/announcing-new-azure-vm-sizes-for-more-cost-effective-database-workloads/). - -### Shut down your VM when possible - -If you are using any workloads that do not run continuously, consider shutting down the virtual machine during the inactive periods. You only pay for what you use. - -For example, if you are simply trying out SQL Server on an Azure VM, you would not want to incur charges by accidentally leaving it running for weeks. One solution is to use the [automatic shutdown feature](https://azure.microsoft.com/blog/announcing-auto-shutdown-for-vms-using-azure-resource-manager/). 
- -![SQL Server VM autoshutdown](./media/pricing-guidance/sql-vm-auto-shutdown.png) - -Automatic shutdown is part of a larger set of similar features provided by [Azure DevTest Labs](https://azure.microsoft.com/services/devtest-lab). - -For other workflows, consider automatically shutting down and restarting Azure VMs with a scripting solution, such as [Azure Automation](https://azure.microsoft.com/services/automation/). - -> [!IMPORTANT] -> Shutting down and deallocating your VM is the only way to avoid charges. Simply stopping or using power options to shut down the VM still incurs usage charges. - -## Next steps - -For general Azure pricing guidance, see [Prevent unexpected costs with Azure billing and cost management](../../../cost-management-billing/cost-management-billing-overview.md). For the latest Azure Virtual Machines pricing, including SQL Server, see the Azure Virtual Machines pricing page for [Windows VMs](https://azure.microsoft.com/pricing/details/virtual-machines/windows/) and [Linux VMs](https://azure.microsoft.com/pricing/details/virtual-machines/linux/). - -For an overview of SQL Server on Azure Virtual Machines, see the following articles: - -- [Overview of SQL Server on Windows VMs](sql-server-on-azure-vm-iaas-what-is-overview.md) -- [Overview of SQL Server on Linux VMs](../linux/sql-server-on-linux-vm-what-is-iaas-overview.md) diff --git a/articles/azure-sql/virtual-machines/windows/security-considerations-best-practices.md b/articles/azure-sql/virtual-machines/windows/security-considerations-best-practices.md deleted file mode 100644 index ef17ca0f74c1b..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/security-considerations-best-practices.md +++ /dev/null @@ -1,197 +0,0 @@ ---- -title: "Security: Best practices" -description: This topic provides general guidance for securing SQL Server running in an Azure virtual machine. 
-services: virtual-machines-windows -documentationcenter: na -author: bluefooted -editor: '' -tags: azure-service-management - -ms.assetid: d710c296-e490-43e7-8ca9-8932586b71da -ms.service: virtual-machines-sql -ms.subservice: security - -ms.topic: conceptual -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 03/02/2022 -ms.author: pamela -ms.reviewer: mathoma ---- -# Security considerations for SQL Server on Azure Virtual Machines -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This topic includes overall security guidelines that help establish secure access to SQL Server instances in an Azure virtual machine (VM). - -Azure complies with several industry regulations and standards that can enable you to build a compliant solution with SQL Server running in a virtual machine. For information about regulatory compliance with Azure, see [Azure Trust Center](https://azure.microsoft.com/support/trust-center/). - -First review the security best practices for [SQL Server](/sql/relational-databases/security/sql-server-security-best-practices) and [Azure VMs](../../../virtual-machines/security-recommendations.md) and then review this article for the best practices that apply to SQL Server on Azure VMs specifically. - -To learn more about SQL Server VM best practices, see the other articles in this series: [Checklist](performance-guidelines-best-practices-checklist.md), [VM size](performance-guidelines-best-practices-vm-size.md), [HADR configuration](hadr-cluster-best-practices.md), and [Collect baseline](performance-guidelines-best-practices-collect-baseline.md). - -## Checklist - - -Review the following checklist in this section for a brief overview of the security best practices that the rest of the article covers in greater detail. 
- -SQL Server features and capabilities provide a method of security at the data level and is how you achieve [defense-in-depth](https://azure.microsoft.com/resources/videos/defense-in-depth-security-in-azure/) at the infrastructure level for cloud-based and hybrid solutions. In addition, with Azure security measures, it is possible to encrypt your sensitive data, protect virtual machines from viruses and malware, secure network traffic, identify and detect threats, meet compliance requirements, and provides a single method for administration and reporting for any security need in the hybrid cloud. - -- Use [Azure Security Center](../../../defender-for-cloud/defender-for-cloud-introduction.md) to evaluate and take action to improve the security posture of your data environment. Capabilities such as [Azure Advanced Threat Protection (ATP)](../../database/threat-detection-overview.md) can be leveraged across your hybrid workloads to improve security evaluation and give the ability to react to risks. Registering your SQL Server VM with the [SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md) surfaces Azure Security Center assessments within the [SQL virtual machine resource](manage-sql-vm-portal.md) of the Azure portal. -- Leverage [Microsoft Defender for SQL](../../../defender-for-cloud/defender-for-sql-introduction.md) to discover and mitigate potential database vulnerabilities, as well as detect anomalous activities that could indicate a threat to your SQL Server instance and database layer. -- [Vulnerability Assessment](../../database/sql-vulnerability-assessment.md) is a part of [Microsoft Defender for SQL](../../../defender-for-cloud/defender-for-sql-introduction.md) that can discover and help remediate potential risks to your SQL Server environment. It provides visibility into your security state, and includes actionable steps to resolve security issues. 
-- [Azure Advisor](../../../advisor/advisor-security-recommendations.md) analyzes your resource configuration and usage telemetry and then recommends solutions that can help you improve the cost effectiveness, performance, high availability, and security of your Azure resources. Leverage Azure Advisor at the virtual machine, resource group, or subscription level to help identify and apply best practices to optimize your Azure deployments. -- Use [Azure Disk Encryption](../../../virtual-machines/windows/disk-encryption-windows.md) when your compliance and security needs require you to encrypt the data end-to-end using your encryption keys, including encryption of the ephemeral (locally attached temporary) disk. -- [Managed Disks are encrypted](../../../virtual-machines/disk-encryption.md) at rest by default using Azure Storage Service Encryption, where the encryption keys are Microsoft-managed keys stored in Azure. -- For a comparison of the managed disk encryption options review the [managed disk encryption comparison chart](../../../virtual-machines/disk-encryption-overview.md#comparison) -- Management ports should be closed on your virtual machines - Open remote management ports expose your VM to a high level of risk from internet-based attacks. These attacks attempt to brute force credentials to gain admin access to the machine. -- Turn on [Just-in-time (JIT) access](../../../defender-for-cloud/just-in-time-access-usage.md) for Azure virtual machines -- Use [Azure Bastion](../../../bastion/bastion-overview.md) over Remote Desktop Protocol (RDP). -- Lock down ports and only allow the necessary application traffic using [Azure Firewall](../../../firewall/features.md) which is a managed Firewall as a Service (FaaS) that grants/ denies server access based on the originating IP address. 
-- Use [Network Security Groups (NSGs)](../../../virtual-network/network-security-groups-overview.md) to filter network traffic to, and from, Azure resources on Azure Virtual Networks -- Leverage [Application Security Groups](../../../virtual-network/application-security-groups.md) to group servers together with similar port filtering requirements, with similar functions, such as web servers and database servers. -- For web and application servers leverage [Azure Distributed Denial of Service (DDoS) protection](../../../ddos-protection/ddos-protection-overview.md). DDoS attacks are designed to overwhelm and exhaust network resources, making apps slow or unresponsive. It is common for DDoS attacks to target user interfaces. Azure DDoS protection sanitizes unwanted network traffic, before it impacts service availability -- Leverage VM extensions to help address anti-malware, desired state, threat detection, prevention, and remediation to address threats at the operating system, machine, and network levels: - - [Guest Configuration extension](../../../virtual-machines/extensions/guest-configuration.md) performs audit and configuration operations inside virtual machines. - - [Network Watcher Agent virtual machine extension for Windows and Linux](../../../virtual-machines/extensions/network-watcher-windows.md) monitors network performance, diagnostic, and analytics service that allows monitoring of Azure networks. - - [Microsoft Antimalware Extension for Windows](../../../virtual-machines/extensions/iaas-antimalware-windows.md) to help identify and remove viruses, spyware, and other malicious software, with configurable alerts. - - [Evaluate 3rd party extensions](../../../virtual-machines/extensions/overview.md) such as [Symantec Endpoint Protection for Windows VM](../../../virtual-machines/extensions/symantec) -- Leverage [Azure Policy](../../../governance/policy/overview.md) to create business rules that can be applied to your environment. 
Azure Policies evaluate Azure resources by comparing the properties of those resources against rules defined in JSON format. -- Azure Blueprints enables cloud architects and central information technology groups to define a repeatable set of Azure resources that implements and adheres to an organization's standards, patterns, and requirements. Azure Blueprints are [different than Azure Policies](../../../governance/blueprints/overview.md#how-its-different-from-azure-policy). - -## Microsoft Defender for SQL - -[Microsoft Defender for SQL](../../../defender-for-cloud/defender-for-sql-introduction.md) enables Azure Security Center security features such as [vulnerability assessments](../../database/sql-vulnerability-assessment.md) and security alerts. See [enable Microsoft Defender for SQL](../../../defender-for-cloud/defender-for-sql-usage.md) to learn more. - -Use Azure Defender for SQL to discover and mitigate potential database vulnerabilities, and detect anomalous activities that may indicate a threat to your SQL Server instance and database layer. [Vulnerability Assessments](../../database/sql-vulnerability-assessment.md) are a feature of Microsoft Defender for SQL that can discover and help remediate potential risks to your SQL Server environment. It provides visibility into your security state, and it includes actionable steps to resolve security issues. Registering your SQL Server VM with the [SQL Server IaaS Agent Extension](sql-agent-extension-manually-register-single-vm.md) surfaces Microsoft Defender for SQL recommendations to the [SQL virtual machines resource](manage-sql-vm-portal.md) in the Azure portal. 
- - -## Portal management - -After you've [registered your SQL Server VM with the SQL IaaS extension](sql-agent-extension-manually-register-single-vm.md), you can configure a number of security settings using the [SQL virtual machines resource](manage-sql-vm-portal.md) in the Azure portal, such as enabling Azure Key Vault integration, or SQL authentication. - -Additionally, after you've enabled [Microsoft Defender for SQL](../../../defender-for-cloud/defender-for-sql-usage.md) you can view Defender for Cloud features directly within the [SQL virtual machines resource](manage-sql-vm-portal.md) in the Azure portal, such as vulnerability assessments and security alerts. - -See [manage SQL Server VM in the portal](manage-sql-vm-portal.md) to learn more. - -## Azure Security Center - -[Azure Security Center](../../../defender-for-cloud/defender-for-cloud-introduction.md) is a unified security management system that is designed to evaluate and provide opportunities to improve the security posture of your data environment. The Azure Security Center grants a consolidated view of the security health for all assets in the hybrid cloud. - -- Use [security score](../../../defender-for-cloud/secure-score-security-controls.md) in Azure Security Center. -- Review the list of the [compute](../../../defender-for-cloud/recommendations-reference.md#compute-recommendations) and [data recommendations](../../../security-center/recommendations-reference.md#data-recommendations) currently available, for further details. -- Registering your SQL Server VM with the [SQL Server IaaS Agent Extension](sql-agent-extension-manually-register-single-vm.md) surfaces Azure Security Center recommendations to the [SQL virtual machines resource](manage-sql-vm-portal.md) in the Azure portal. - -## Azure Advisor - -[Azure Advisor](../../../advisor/advisor-security-recommendations.md) is a personalized cloud consultant that helps you follow best practices to optimize your Azure deployments. 
Azure Advisor analyzes your resource configuration and usage telemetry and then recommends solutions that can help you improve the cost effectiveness, performance, high availability, and security of your Azure resources. Azure Advisor can evaluate at the virtual machine, resource group, or subscription level. - -## Azure Key Vault integration - -There are multiple SQL Server encryption features, such as transparent data encryption (TDE), column level encryption (CLE), and backup encryption. These forms of encryption require you to manage and store the cryptographic keys you use for encryption. The [Azure Key Vault](azure-key-vault-integration-configure.md) service is designed to improve the security and management of these keys in a secure and highly available location. The SQL Server Connector allows SQL Server to use these keys from Azure Key Vault. - -Consider the following: - - - Azure Key Vault stores application secrets in a centralized cloud location to securely control access permissions, and separate access logging. - - When bringing your own keys to Azure it is recommended to store secrets and certificates in the [Azure Key Vault](/sql/relational-databases/security/encryption/extensible-key-management-using-azure-key-vault-sql-server). - - Azure Disk Encryption uses [Azure Key Vault](../../../virtual-machines/windows/disk-encryption-key-vault.md) to control and manage disk encryption keys and secrets. - - -## Access control - -When you create a SQL Server virtual machine with an Azure gallery image, the **SQL Server Connectivity** option gives you the choice of **Local (inside VM)**, **Private (within Virtual Network)**, or **Public (Internet)**. - -![SQL Server connectivity](./media/security-considerations-best-practices/sql-vm-connectivity-option.png) - -For the best security, choose the most restrictive option for your scenario. 
For example, if you are running an application that accesses SQL Server on the same VM, then **Local** is the most secure choice. If you are running an Azure application that requires access to the SQL Server, then **Private** secures communication to SQL Server only within the specified [Azure virtual network](../../../virtual-network/virtual-networks-overview.md). If you require **Public** (internet) access to the SQL Server VM, then make sure to follow other best practices in this topic to reduce your attack surface area. - -The selected options in the portal use inbound security rules on the VM's [network security group](../../../virtual-network/network-security-groups-overview.md) (NSG) to allow or deny network traffic to your virtual machine. You can modify or create new inbound NSG rules to allow traffic to the SQL Server port (default 1433). You can also specify specific IP addresses that are allowed to communicate over this port. - -![Network security group rules](./media/security-considerations-best-practices/sql-vm-network-security-group-rules.png) - -In addition to NSG rules to restrict network traffic, you can also use the Windows Firewall on the virtual machine. - -If you are using endpoints with the classic deployment model, remove any endpoints on the virtual machine if you do not use them. For instructions on using ACLs with endpoints, see [Manage the ACL on an endpoint](/previous-versions/azure/virtual-machines/windows/classic/setup-endpoints#manage-the-acl-on-an-endpoint). This is not necessary for VMs that use the Azure Resource Manager. - -Consider enabling [encrypted connections](/sql/database-engine/configure-windows/enable-encrypted-connections-to-the-database-engine) for the instance of the SQL Server Database Engine in your Azure virtual machine. Configure the SQL Server instance with a signed certificate. 
For more information, see [Enable Encrypted Connections to the Database Engine](/sql/database-engine/configure-windows/enable-encrypted-connections-to-the-database-engine) and [Connection String Syntax](/dotnet/framework/data/adonet/connection-string-syntax). - -Consider the following when **securing the network connectivity or perimeter**: - -- [Azure Firewall](../../../firewall/features.md) - A stateful, managed, Firewall as a Service (FaaS) that grants/ denies server access based on originating IP address, to protect network resources. -- [Azure Distributed Denial of Service (DDoS) protection](../../../ddos-protection/ddos-protection-overview.md) - DDoS attacks overwhelm and exhaust network resources, making apps slow or unresponsive. Azure DDoS protection sanitizes unwanted network traffic before it impacts service availability. -- [Network Security Groups (NSGs)](../../../virtual-network/network-security-groups-overview.md) - Filters network traffic to, and from, Azure resources on Azure Virtual Networks -- [Application Security Groups](../../../virtual-network/application-security-groups.md) - Provides for the grouping of servers with similar port filtering requirements, and group together servers with similar functions, such as web servers. - -## Encryption - -Managed disks offer server-side encryption, and Azure Disk Encryption. [Server-side encryption](../../../virtual-machines/disk-encryption.md) provides encryption-at-rest and safeguards your data to meet your organizational security and compliance commitments. [Azure Disk Encryption](../../../security/fundamentals/azure-disk-encryption-vms-vmss.md) uses either BitLocker or DM-Crypt technology, and integrates with Azure Key Vault to encrypt both the OS and data disks. - -Consider the following: - -- [Azure Disk Encryption](../../../virtual-machines/windows/disk-encryption-overview.md) - Encrypts virtual machine disks using Azure Disk Encryption both for Windows and Linux virtual machines. 
- - When your compliance and security requirements require you to encrypt the data end-to-end using your encryption keys, including encryption of the ephemeral (locally attached temporary) disk, use -[Azure disk encryption](../../../virtual-machines/windows/disk-encryption-windows.md). - - Azure Disk Encryption (ADE) leverages the industry-standard BitLocker feature of Windows and the DM-Crypt feature of Linux to -provide OS and data disk encryption. -- Managed Disk Encryption - - [Managed Disks are encrypted](../../../virtual-machines/disk-encryption.md) at rest by default using Azure Storage Service Encryption where the encryption keys are Microsoft managed keys stored in Azure. - - Data in Azure managed disks is encrypted transparently using 256-bit AES encryption, one of the strongest block ciphers available, and is FIPS 140-2 compliant. -- For a comparison of the managed disk encryption options review the [managed disk encryption comparison chart](../../../virtual-machines/disk-encryption-overview.md#comparison). - -## Manage accounts - -You don't want attackers to easily guess account names or passwords. Use the following tips to help: - -- Create a unique local administrator account that is not named **Administrator**. - -- Use complex strong passwords for all your accounts. For more information about how to create a strong password, see [Create a strong password](https://support.microsoft.com/account-billing/how-to-create-a-strong-password-for-your-microsoft-account-f67e4ddd-0dbe-cd75-cebe-0cfda3cf7386) article. - -- By default, Azure selects Windows Authentication during SQL Server virtual machine setup. Therefore, the **SA** login is disabled and a password is assigned by setup. We recommend that the **SA** login should not be used or enabled. If you must have a SQL login, use one of the following strategies: - - - Create a SQL account with a unique name that has **sysadmin** membership. 
You can do this from the portal by enabling **SQL Authentication** during provisioning. - - > [!TIP] - > If you do not enable SQL Authentication during provisioning, you must manually change the authentication mode to **SQL Server and Windows Authentication Mode**. For more information, see [Change Server Authentication Mode](/sql/database-engine/configure-windows/change-server-authentication-mode). - - - If you must use the **SA** login, enable the login after provisioning and assign a new strong password. - -> [!NOTE] -> Connecting to a SQL Server instance that's running on an Azure virtual machine (VM) is not supported using Azure Active Directory or Azure Active Directory Domain Services. Use an Active Directory domain account instead. - -## Auditing and reporting - -[Auditing with Log Analytics](../../../azure-monitor/agents/data-sources-windows-events.md#configuring-windows-event-logs) documents events and writes to an audit log in a secure Azure BLOB storage account. Log Analytics can be used to decipher the details of the audit logs. Auditing gives you the ability to save data to a separate storage account and create an audit trail of all events you select. You can also leverage Power BI against the audit log for quick analytics of and insights about your data, as well as to provide a view for regulatory compliance. To learn more about auditing at the VM and Azure levels, see [Azure security logging and auditing](../../../security/fundamentals/log-audit.md). - -## Virtual Machine level access - -Close management ports on your machine - Open remote management ports are exposing your VM to a high level of risk from internet-based attacks. These attacks attempt to brute force credentials to gain admin access to the machine. -- Turn on [Just-in-time (JIT) access](../../../security-center/security-center-just-in-time.md?tabs=jit-config-asc%2Cjit-request-asc) for Azure virtual machines. 
-- Leverage [Azure Bastion](../../../bastion/bastion-overview.md) over Remote Desktop Protocol (RDP). - - -## Virtual Machine extensions - -Azure Virtual Machine extensions are trusted Microsoft or 3rd party extensions that can help address specific needs and risks such as antivirus, malware, threat protection, and more. - -- [Guest Configuration extension](../../../virtual-machines/extensions/guest-configuration.md) - - To ensure secure configurations of in-guest settings of your machine, install the Guest Configuration extension. - - In-guest settings include the configuration of the operating system, application configuration or presence, and environment settings. - - Once installed, in-guest policies will be available such as 'Windows Exploit guard should be enabled'. -- [Network traffic data collection agent](../../../virtual-machines/extensions/network-watcher-windows.md) - - Security Center uses the Microsoft Dependency agent to collect network traffic data from your Azure virtual machines. - - This agent enables advanced network protection features such as traffic visualization on the network map, network hardening recommendations, and specific network threats. -- [Evaluate extensions](../../../virtual-machines/extensions/overview.md) from Microsoft and 3rd parties to address anti-malware, desired state, threat detection, prevention, and remediation to address threats at the operating system, machine, and network levels. - -## Next steps - -Review the security best practices for [SQL Server](/sql/relational-databases/security/) and [Azure VMs](../../../virtual-machines/security-recommendations.md) and then review this article for the best practices that apply to SQL Server on Azure VMs specifically. - -For other topics related to running SQL Server in Azure VMs, see [SQL Server on Azure Virtual Machines overview](sql-server-on-azure-vm-iaas-what-is-overview.md). 
If you have questions about SQL Server virtual machines, see the [Frequently Asked Questions](frequently-asked-questions-faq.yml). - - -To learn more, see the other articles in this best practices series: - -- [Quick checklist](performance-guidelines-best-practices-checklist.md) -- [VM size](performance-guidelines-best-practices-vm-size.md) -- [Storage](performance-guidelines-best-practices-storage.md) -- [HADR settings](hadr-cluster-best-practices.md) -- [Collect baseline](performance-guidelines-best-practices-collect-baseline.md) diff --git a/articles/azure-sql/virtual-machines/windows/sql-agent-extension-automatic-registration-all-vms.md b/articles/azure-sql/virtual-machines/windows/sql-agent-extension-automatic-registration-all-vms.md deleted file mode 100644 index ed35777fa775e..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/sql-agent-extension-automatic-registration-all-vms.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: Automatic registration with SQL IaaS Agent extension -description: Learn how to enable the automatic registration feature to automatically register all past and future SQL Server VMs with the SQL IaaS Agent extension using the Azure portal. -author: adbadram -ms.author: adbadram -tags: azure-service-management -ms.service: virtual-machines-sql -ms.subservice: management -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 10/26/2021 -ms.custom: devx-track-azurepowershell -ms.reviewer: mathoma ---- -# Automatic registration with SQL IaaS Agent extension -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -Enable the automatic registration feature in the Azure portal to automatically register all current and future SQL Server on Azure Virtual Machines (VMs) with the [SQL IaaS Agent extension](sql-server-iaas-agent-extension-automate-management.md) in lightweight mode. 
By default, Azure VMs that have SQL Server 2016 or later installed will be automatically registered with the SQL IaaS Agent extension when detected by the [CEIP service](/sql/sql-server/usage-and-diagnostic-data-configuration-for-sql-server). See the [SQL Server privacy supplement](/sql/sql-server/sql-server-privacy#non-personal-data) for more information. - -This article teaches you to enable the automatic registration feature. Alternatively, you can [register a single VM](sql-agent-extension-manually-register-single-vm.md), or [register your VMs in bulk](sql-agent-extension-manually-register-vms-bulk.md) with the SQL IaaS Agent extension. - -> [!NOTE] -> Starting in September 2021, registering with the SQL IaaS extension in full mode no longer requires restarting the SQL Server service. - -## Overview - -Registering your SQL Server VM with the [SQL IaaS Agent extension](sql-server-iaas-agent-extension-automate-management.md) to unlock a full feature set of benefits. - -When automatic registration is enabled, a job runs daily to detect whether or not SQL Server is installed on all the unregistered VMs in the subscription. This is done by copying the SQL IaaS agent extension binaries to the VM, then running a one-time utility that checks for the SQL Server registry hive. If the SQL Server hive is detected, the virtual machine is registered with the extension in lightweight mode. If no SQL Server hive exists in the registry, the binaries are removed. Automatic registration can take up to 4 days to detect newly created SQL Server VMs. - -> [!CAUTION] -> If the SQL Server hive is not present in the registry, removing the binaries might be impacted if there are [resource locks](../../../governance/blueprints/concepts/resource-locking.md#locking-modes-and-states) in place. 
- - -Once automatic registration is enabled for a subscription, all current and future VMs that have SQL Server installed will be registered with the SQL IaaS Agent extension **in lightweight mode without downtime, and without restarting the SQL Server service**. You still need to [manually upgrade to full manageability mode](sql-agent-extension-manually-register-single-vm.md#upgrade-to-full) to take advantage of the full feature set. The license type automatically defaults to that of the VM image. If you use a pay-as-you-go image for your VM, then your license type will be `PAYG`, otherwise your license type will be `AHUB` by default. - -By default, Azure VMs with SQL Server 2016 or later installed will be automatically registered with the SQL IaaS Agent extension when detected by the [CEIP service](/sql/sql-server/usage-and-diagnostic-data-configuration-for-sql-server). See the [SQL Server privacy supplement](/sql/sql-server/sql-server-privacy#non-personal-data) for more information. - -> [!IMPORTANT] -> The SQL IaaS Agent extension collects data for the express purpose of giving customers optional benefits when using SQL Server within Azure Virtual Machines. Microsoft will not use this data for licensing audits without the customer's advance consent. See the [SQL Server privacy supplement](/sql/sql-server/sql-server-privacy#non-personal-data) for more information. - - - - -## Prerequisites - -To register your SQL Server VM with the extension, you'll need: - -- An [Azure subscription](https://azure.microsoft.com/free/) and, at minimum, [contributor role](../../../role-based-access-control/built-in-roles.md#all) permissions. -- An Azure Resource Model [Windows Server 2008 R2 (or later) virtual machine](../../../virtual-machines/windows/quick-create-portal.md) with [SQL Server](https://www.microsoft.com/sql-server/sql-server-downloads) deployed to the public or Azure Government cloud. Windows Server 2008 is not supported. 
- - -## Enable - -To enable automatic registration of your SQL Server VMs in the Azure portal, follow these steps: - -1. Sign into the [Azure portal](https://portal.azure.com). -1. Navigate to the [**SQL virtual machines**](https://portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/Microsoft.SqlVirtualMachine%2FSqlVirtualMachines) resource page. -1. Select **Automatic SQL Server VM registration** to open the **Automatic registration** page. - - :::image type="content" source="media/sql-agent-extension-automatic-registration-all-vms/automatic-registration.png" alt-text="Select Automatic SQL Server VM registration to open the automatic registration page"::: - -1. Choose your subscription from the drop-down. -1. Read through the terms and if you agree, select **I accept**. -1. Select **Register** to enable the feature and automatically register all current and future SQL Server VMs with the SQL IaaS Agent extension. This will not restart the SQL Server service on any of the VMs. - -## Disable - -Use the [Azure CLI](/cli/azure/install-azure-cli) or [Azure PowerShell](/powershell/azure/install-az-ps) to disable the automatic registration feature. When the automatic registration feature is disabled, SQL Server VMs added to the subscription need to be manually registered with the SQL IaaS Agent extension. This will not unregister existing SQL Server VMs that have already been registered. 
- - - -# [Azure CLI](#tab/azure-cli) - -To disable automatic registration using Azure CLI, run the following command: - -```azurecli-interactive -az feature unregister --namespace Microsoft.SqlVirtualMachine --name BulkRegistration -``` - -# [Azure PowerShell](#tab/azure-powershell) - -To disable automatic registration using Azure PowerShell, run the following command: - -```powershell-interactive -Unregister-AzProviderFeature -FeatureName BulkRegistration -ProviderNamespace Microsoft.SqlVirtualMachine -``` - ---- - -## Enable for multiple subscriptions - -You can enable the automatic registration feature for multiple Azure subscriptions by using PowerShell. - -To do so, follow these steps: - -1. Save [this script](https://github.com/microsoft/tigertoolbox/blob/master/AzureSQLVM/EnableBySubscription.ps1). -1. Navigate to where you saved the script by using an administrative Command Prompt or PowerShell window. -1. Connect to Azure (`az login`). -1. Execute the script, passing in SubscriptionIds as parameters. If no subscriptions are specified, the script will enable auto-registration for all the subscriptions in the user account. - - The following command will enable auto-registration for two subscriptions: - - ```console - .\EnableBySubscription.ps1 -SubscriptionList a1a1a-aa11-11aa-a1a1-a11a111a1,b2b2b2-bb22-22bb-b2b2-b2b2b2bb - ``` - The following command will enable auto-registration for all subscriptions: - - ```console - .\EnableBySubscription.ps1 - ``` - -Failed registration errors are stored in `RegistrationErrors.csv` located in the same directory where you saved and executed the `.ps1` script from. - -## Next steps - -Upgrade your manageability mode to [full](sql-agent-extension-manually-register-single-vm.md#upgrade-to-full) to take advantage of the full feature set provided to you by the SQL IaaS Agent extension. 
\ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/windows/sql-agent-extension-manually-register-single-vm.md b/articles/azure-sql/virtual-machines/windows/sql-agent-extension-manually-register-single-vm.md deleted file mode 100644 index d4bef0576ff67..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/sql-agent-extension-manually-register-single-vm.md +++ /dev/null @@ -1,380 +0,0 @@ ---- -title: Register with SQL IaaS Extension (Windows) -description: Learn how to register your SQL Server on Windows Azure VM with the SQL IaaS Agent extension to enable Azure features, as well as for compliance, and improved manageability. -services: virtual-machines-windows -documentationcenter: na -author: adbadram -tags: azure-resource-manager -ms.service: virtual-machines-sql -ms.subservice: management -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 10/26/2021 -ms.author: adbadram -ms.reviewer: mathoma -ms.custom: devx-track-azurecli, devx-track-azurepowershell, contperf-fy21q2 - - ---- -# Register Windows SQL Server VM with SQL IaaS Extension -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!div class="op_single_selector"] -> * [Windows](sql-agent-extension-manually-register-single-vm.md) -> * [Linux](../linux/sql-iaas-agent-extension-register-vm-linux.md) - -Register your SQL Server VM with the [SQL IaaS Agent extension](sql-server-iaas-agent-extension-automate-management.md) to unlock a wealth of feature benefits for your SQL Server on Windows Azure VM. - -This article teaches you to register a single SQL Server VM with the SQL IaaS Agent extension. Alternatively, you can register all SQL Server VMs in a subscription [automatically](sql-agent-extension-automatic-registration-all-vms.md) or [multiple VMs in bulk using a script](sql-agent-extension-manually-register-vms-bulk.md). 
- -> [!NOTE] -> Starting in September 2021, registering with the SQL IaaS extension in full mode no longer requires restarting the SQL Server service. - -## Overview - -Registering with the [SQL Server IaaS Agent extension](sql-server-iaas-agent-extension-automate-management.md) creates the [**SQL virtual machine** _resource_](manage-sql-vm-portal.md) within your subscription, which is a _separate_ resource from the virtual machine resource. Unregistering your SQL Server VM from the extension will remove the **SQL virtual machine** _resource_ but will not drop the actual virtual machine. - -Deploying a SQL Server VM from an Azure Marketplace image through the Azure portal automatically registers the SQL Server VM with the extension. However, if you choose to self-install SQL Server on an Azure virtual machine, or provision an Azure virtual machine from a custom VHD, then you must register your SQL Server VM with the SQL IaaS Agent extension to unlock full feature benefits and manageability. - -To utilize the SQL IaaS Agent extension, you must first [register your subscription with the **Microsoft.SqlVirtualMachine** provider](#register-subscription-with-rp), which gives the SQL IaaS extension the ability to create resources within that specific subscription. Then you can register your SQL Server VM with the extension. - -By default, Azure VMs that have SQL Server 2016 or later installed will be automatically registered with the SQL IaaS Agent extension when detected by the [CEIP service](/sql/sql-server/usage-and-diagnostic-data-configuration-for-sql-server). See the [SQL Server privacy supplement](/sql/sql-server/sql-server-privacy#non-personal-data) for more information. - -> [!IMPORTANT] -> The SQL IaaS Agent extension collects data for the express purpose of giving customers optional benefits when using SQL Server within Azure Virtual Machines. Microsoft will not use this data for licensing audits without the customer's advance consent. 
See the [SQL Server privacy supplement](/sql/sql-server/sql-server-privacy#non-personal-data) for more information. - -## Prerequisites - -To register your SQL Server VM with the extension, you'll need: - -- An [Azure subscription](https://azure.microsoft.com/free/). -- An Azure Resource Model [Windows Server 2008 (or greater) virtual machine](../../../virtual-machines/windows/quick-create-portal.md) with [SQL Server 2008 (or greater)](https://www.microsoft.com/sql-server/sql-server-downloads) deployed to the public or Azure Government cloud. -- The latest version of [Azure CLI](/cli/azure/install-azure-cli) or [Azure PowerShell (5.0 minimum)](/powershell/azure/install-az-ps). -- A minimum of .NET Framework 4.5.1 or later. - -## Register subscription with RP - -To register your SQL Server VM with the SQL IaaS Agent extension, you must first register your subscription with the **Microsoft.SqlVirtualMachine** resource provider (RP). This gives the SQL IaaS Agent extension the ability to create resources within your subscription. You can do so by using the Azure portal, the Azure CLI, or Azure PowerShell. - -### Azure portal - -Register your subscription with the resource provider by using the Azure portal: - -1. Open the Azure portal and go to **All Services**. -1. Go to **Subscriptions** and select the subscription of interest. -1. On the **Subscriptions** page, select **Resource providers** under **Settings**. -1. Enter **sql** in the filter to bring up the SQL-related resource providers. -1. Select **Register**, **Re-register**, or **Unregister** for the **Microsoft.SqlVirtualMachine** provider, depending on your desired action. - - ![Modify the provider](./media/sql-agent-extension-manually-register-single-vm/select-resource-provider-sql.png) - -### Command line - -Register your Azure subscription with the **Microsoft.SqlVirtualMachine** provider using either Azure CLI or Azure PowerShell. 
- -# [Azure CLI](#tab/bash) - -Register your subscription with the resource provider by using the Azure CLI: - -```azurecli-interactive -# Register the SQL IaaS Agent extension to your subscription -az provider register --namespace Microsoft.SqlVirtualMachine -``` - -# [Azure PowerShell](#tab/powershell) - -Register your subscription with the resource provider by using Azure PowerShell: - -```powershell-interactive -# Register the SQL IaaS Agent extension to your subscription -Register-AzResourceProvider -ProviderNamespace Microsoft.SqlVirtualMachine -``` - ---- - -## Full mode - -It's possible to either register your SQL Server VM directly in full mode by using the Azure CLI and Azure PowerShell or upgrade to full mode from lightweight mode by using the Azure portal, the Azure CLI, or Azure PowerShell. Upgrading VMs in _NoAgent_ mode is not supported until the OS is upgraded to Windows 2008 R2 and above. - -Starting with September 2021, registering your SQL Server VM in full mode no longer requires restarting the SQL Server service. - -To learn more about full mode, see [management modes](sql-server-iaas-agent-extension-automate-management.md#management-modes). - -### Register in full mode - -Provide the SQL Server license type as either pay-as-you-go (`PAYG`) to pay per usage, Azure Hybrid Benefit (`AHUB`) to use your own license, or disaster recovery (`DR`) to activate the [free DR replica license](business-continuity-high-availability-disaster-recovery-hadr-overview.md#free-dr-replica-in-azure). 
- - -# [Azure CLI](#tab/bash) - -Register a SQL Server VM in full mode with the Azure CLI: - -```azurecli-interactive -# Register Enterprise or Standard self-installed VM in full mode -az sql vm create --name --resource-group --location --license-type --sql-mgmt-type Full -``` - -# [Azure PowerShell](#tab/powershell) - -Register a SQL Server VM in FULL mode with Azure PowerShell: - -```powershell-interactive -# Get the existing Compute VM -$vm = Get-AzVM -Name -ResourceGroupName - -New-AzSqlVM -Name $vm.Name -ResourceGroupName $vm.ResourceGroupName -Location $vm.Location ` --LicenseType -SqlManagementType Full -``` - ---- - - -### Upgrade to full - -SQL Server VMs that have registered the extension in *lightweight* mode can upgrade to _full_ using the Azure portal, the Azure CLI, or Azure PowerShell. SQL Server VMs in _NoAgent_ mode can upgrade to _full_ after the OS is upgraded to Windows 2008 R2 and above. It is not possible to downgrade - to do so, you will need to [unregister](#unregister-from-extension) the SQL Server VM from the SQL IaaS Agent extension. Doing so will remove the **SQL virtual machine** _resource_, but will not delete the actual virtual machine. - -#### Azure portal - -Upgrade the extension to full mode with the Azure portal: - -1. Sign in to the [Azure portal](https://portal.azure.com). -1. Go to your [SQL virtual machines](manage-sql-vm-portal.md#access-the-resource) resource. -1. Select your SQL Server VM, and navigate to the **Overview** page. -1. For SQL Server VMs with the NoAgent or lightweight IaaS extension mode, select the **Only license type and edition updates are available with the current SQL IaaS extension mode...** message. - - ![Selections for changing the mode from the portal](./media/sql-agent-extension-manually-register-single-vm/change-sql-iaas-mode-portal.png) - -1. Select **Confirm** to upgrade your SQL Server IaaS extension mode to full. 
- - ![Select **Confirm** to upgrade your SQL Server IaaS extension mode to full.](./media/sql-agent-extension-manually-register-single-vm/enable-full-mode-iaas.png) - -#### Command line - -# [Azure CLI](#tab/bash) - -Upgrade the extension to full mode with the Azure CLI: - -```azurecli-interactive -# Update to full mode -az sql vm update --name --resource-group --sql-mgmt-type full -``` - -# [Azure PowerShell](#tab/powershell) - -Upgrade the extension to full mode with Azure PowerShell: - -```powershell-interactive -# Get the existing Compute VM -$vm = Get-AzVM -Name -ResourceGroupName -# Register with SQL IaaS Agent extension in full mode -Update-AzSqlVM -Name $vm.Name -ResourceGroupName $vm.ResourceGroupName -SqlManagementType Full -``` - ---- - -## Lightweight mode - -Use the Azure CLI or Azure PowerShell to register your SQL Server VM with the extension in lightweight mode for limited functionality. - -Provide the SQL Server license type as either pay-as-you-go (`PAYG`) to pay per usage, Azure Hybrid Benefit (`AHUB`) to use your own license, or disaster recovery (`DR`) to activate the [free DR replica license](business-continuity-high-availability-disaster-recovery-hadr-overview.md#free-dr-replica-in-azure). - -Failover cluster instances and SQL Server VMs with multiple instances can only be registered with the SQL IaaS Agent extension in lightweight mode. - -To learn more about lightweight mode, see [management modes](sql-server-iaas-agent-extension-automate-management.md#management-modes). 
- -# [Azure CLI](#tab/bash) - -Register a SQL Server VM in lightweight mode with the Azure CLI: - -```azurecli-interactive -# Register Enterprise or Standard self-installed VM in Lightweight mode -az sql vm create --name --resource-group --location --license-type -``` - -# [Azure PowerShell](#tab/powershell) - -Register a SQL Server VM in lightweight mode with Azure PowerShell: - -```powershell-interactive -# Get the existing compute VM -$vm = Get-AzVM -Name -ResourceGroupName - -# Register SQL VM with 'Lightweight' SQL IaaS agent -New-AzSqlVM -Name $vm.Name -ResourceGroupName $vm.ResourceGroupName -Location $vm.Location ` - -LicenseType -SqlManagementType LightWeight -``` - ---- - -## NoAgent mode - -SQL Server 2008 and 2008 R2 installed on Windows Server 2008 (_not R2_) can only be registered with the SQL IaaS Agent extension in the [NoAgent mode](sql-server-iaas-agent-extension-automate-management.md#management-modes). This option assures compliance and allows the SQL Server VM to be monitored in the Azure portal with limited functionality. - -For the **license type**, specify either: `AHUB`, `PAYG`, or `DR`. -For the **image offer**, specify either `SQL2008-WS2008` or `SQL2008R2-WS2008` - -Use the Azure CLI or Azure PowerShell to register your SQL Server 2008 (`SQL2008-WS2008`) or 2008 R2 (`SQL2008R2-WS2008`) instance on your Windows Server 2008 VM. 
- -# [Azure CLI](#tab/bash) - -Register your SQL Server virtual machine in NoAgent mode with the Azure CLI: - -```azurecli-interactive -az sql vm create -n sqlvm -g myresourcegroup -l eastus | ---license-type --sql-mgmt-type NoAgent ---image-sku Enterprise --image-offer -``` - -# [Azure PowerShell](#tab/powershell) - -Register your SQL Server virtual machine in NoAgent mode with Azure PowerShell: - -```powershell-interactive -# Get the existing compute VM -$vm = Get-AzVM -Name -ResourceGroupName - -New-AzSqlVM -Name $vm.Name -ResourceGroupName $vm.ResourceGroupName -Location $vm.Location ` - -LicenseType -SqlManagementType NoAgent -Sku Standard -Offer -``` - ---- - - -## Check management mode - -Use Azure PowerShell to check what management mode your SQL Server IaaS agent extension is in. - -Check the mode of the extension with Azure PowerShell: - -```powershell-interactive -# Get the SqlVirtualMachine -$sqlvm = Get-AzSqlVM -Name $vm.Name -ResourceGroupName $vm.ResourceGroupName -$sqlvm.SqlManagementType -``` - - -## Verify registration status - -You can verify if your SQL Server VM has already been registered with the SQL IaaS Agent extension by using the Azure portal, the Azure CLI, or Azure PowerShell. - -### Azure portal - -Verify the registration status with the Azure portal: - -1. Sign in to the [Azure portal](https://portal.azure.com). -1. Go to your [SQL Server VMs](manage-sql-vm-portal.md). -1. Select your SQL Server VM from the list. If your SQL Server VM is not listed here, it likely hasn't been registered with the SQL IaaS Agent extension. -1. View the value under **Status**. If **Status** is **Succeeded**, then the SQL Server VM has been registered with the SQL IaaS Agent extension successfully. 
- - ![Verify status with SQL RP registration](./media/sql-agent-extension-manually-register-single-vm/verify-registration-status.png) - -Alternatively, you can check the status by choosing **Repair** under the **Support + troubleshooting** pane in the **SQL virtual machine** resource. The provisioning state for the SQL IaaS agent extension can be **Succeeded** or **Failed**. - -### Command line - -Verify current SQL Server VM registration status using either Azure CLI or Azure PowerShell. `ProvisioningState` shows as `Succeeded` if registration was successful. - -# [Azure CLI](#tab/bash) - -Verify the registration status with the Azure CLI: - - ```azurecli-interactive - az sql vm show -n -g - ``` - -# [Azure PowerShell](#tab/powershell) - -Verify the registration status with Azure PowerShell: - - ```powershell-interactive - Get-AzSqlVM -Name -ResourceGroupName - ``` - ---- - -An error indicates that the SQL Server VM has not been registered with the extension. - -## Repair extension - -It's possible for your SQL IaaS agent extension to be in a failed state. Use the Azure portal to repair the SQL IaaS agent extension. - -To repair the extension with the Azure portal: - -1. Sign in to the [Azure portal](https://portal.azure.com). -1. Go to your [SQL Server VMs](manage-sql-vm-portal.md). -1. Select your SQL Server VM from the list. If your SQL Server VM is not listed here, it likely hasn't been registered with the SQL IaaS Agent extension. -1. Select **Repair** under **Support + Troubleshooting** in the **SQL virtual machine** resource page. - - :::image type="content" source="media/sql-agent-extension-manually-register-single-vm/repair-extension.png" alt-text="Select **Repair** under **Support + Troubleshooting** in the **SQL virtual machine** resource page"::: - -1. If your provisioning state shows as **Failed**, choose **Repair** to repair the extension. 
If your state is **Succeeded** you can check the box next to **Force repair** to repair the extension regardless of state. - - ![If your provisioning state shows as **Failed**, choose **Repair** to repair the extension. If your state is **Succeeded** you can check the box next to **Force repair** to repair the extension regardless of state.](./media/sql-agent-extension-manually-register-single-vm/force-repair-extension.png) - - -## Unregister from extension - -To unregister your SQL Server VM with the SQL IaaS Agent extension, delete the SQL virtual machine *resource* using the Azure portal or Azure CLI. Deleting the SQL virtual machine *resource* does not delete the SQL Server VM. Unregistering the SQL virtual machine with the SQL IaaS Agent extension is necessary to downgrade the management mode from full. - ->[!CAUTION] -> **Use extreme caution** when unregistering your SQL Server VM from the extension. Follow the steps carefully because **it is possible to inadvertently delete the virtual machine** when attempting to remove the *resource*. - - -### Azure portal - -Unregister your SQL Server VM from the extension using the Azure portal: - -1. Sign into the [Azure portal](https://portal.azure.com). -1. Navigate to the SQL VM resource. - - ![SQL virtual machines resource](./media/sql-agent-extension-manually-register-single-vm/sql-vm-manage.png) - -1. Select **Delete**. - - ![Select delete in the top navigation](./media/sql-agent-extension-manually-register-single-vm/delete-sql-vm-resource.png) - -1. Type the name of the SQL virtual machine and **clear the check box next to the virtual machine**. - - ![Uncheck the VM to prevent deleting the actual virtual machine and then select Delete to proceed with deleting the SQL VM resource](./media/sql-agent-extension-manually-register-single-vm/confirm-delete-of-resource-uncheck-box.png) - - > [!WARNING] - > Failure to clear the checkbox next to the virtual machine name will *delete* the virtual machine entirely. 
Clear the checkbox to unregister the SQL Server VM from the extension but *not delete the actual virtual machine*. - -1. Select **Delete** to confirm the deletion of the SQL virtual machine *resource*, and not the SQL Server VM. - -### Command line - -# [Azure CLI](#tab/azure-cli) - -To unregister your SQL Server VM from the extension with the Azure CLI, use the [az sql vm delete](/cli/azure/sql/vm#az-sql-vm-delete) command. This removes the SQL Server VM *resource* but does not delete the virtual machine. - -To unregister your SQL Server VM with the Azure CLI: - -```azurecli-interactive -az sql vm delete - --name | - --resource-group | - --yes -``` - -# [PowerShell](#tab/azure-powershell) - -To unregister your SQL Server VM from the extension with Azure PowerShell, use the [Remove-AzSqlVM](/powershell/module/az.sqlvirtualmachine/remove-azsqlvm) command. This removes the SQL Server VM *resource* but will not delete the virtual machine. - -To unregister your SQL Server VM with Azure PowerShell: - -```powershell-interactive -Remove-AzSqlVM -ResourceGroupName -Name -``` - ---- - -## Next steps - -For more information, see the following articles: - -* [Overview of SQL Server on a Windows VM](sql-server-on-azure-vm-iaas-what-is-overview.md) -* [FAQ for SQL Server on a Windows VM](frequently-asked-questions-faq.yml) -* [Pricing guidance for SQL Server on Azure VMs](../windows/pricing-guidance.md) -* [What's new for SQL Server on Azure VMs](../windows/doc-changes-updates-release-notes-whats-new.md) diff --git a/articles/azure-sql/virtual-machines/windows/sql-agent-extension-manually-register-vms-bulk.md b/articles/azure-sql/virtual-machines/windows/sql-agent-extension-manually-register-vms-bulk.md deleted file mode 100644 index 60467484bdd3d..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/sql-agent-extension-manually-register-vms-bulk.md +++ /dev/null @@ -1,248 +0,0 @@ ---- -title: Register multiple SQL VMs in Azure with the SQL IaaS Agent extension 
-description: Bulk register SQL Server VMs with the SQL IaaS Agent extension to improve manageability. -services: virtual-machines-windows -documentationcenter: na -author: adbadram -tags: azure-resource-manager -ms.service: virtual-machines-sql -ms.subservice: management -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 10/26/2021 -ms.author: adbadram -ms.reviewer: mathoma -ms.custom: devx-track-azurepowershell - ---- -# Register multiple SQL VMs in Azure with the SQL IaaS Agent extension -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article describes how to register your SQL Server virtual machines (VMs) in bulk in Azure with the [SQL IaaS Agent extension](sql-server-iaas-agent-extension-automate-management.md) by using the `Register-SqlVMs` Azure PowerShell cmdlet. - - -This article teaches you to register SQL Server VMs manually in bulk. Alternatively, you can register [all SQL Server VMs automatically](sql-agent-extension-automatic-registration-all-vms.md) or [individual SQL Server VMs manually](sql-agent-extension-manually-register-single-vm.md). - -> [!NOTE] -> Starting in September 2021, registering with the SQL IaaS extension in full mode no longer requires restarting the SQL Server service. - -## Overview - -The `Register-SqlVMs` cmdlet can be used to register all virtual machines in a given list of subscriptions, resource groups, or a list of specific virtual machines. The cmdlet will register the virtual machines in [lightweight management mode](sql-server-iaas-agent-extension-automate-management.md#management-modes), and then generate both a [report and a log file](#output-description). - -The registration process carries no risk, has no downtime, and will not restart the SQL Server service or the virtual machine. 
- -By default, Azure VMs with SQL Server 2016 or later installed will be automatically registered with the SQL IaaS Agent extension when detected by the [CEIP service](/sql/sql-server/usage-and-diagnostic-data-configuration-for-sql-server). See the [SQL Server privacy supplement](/sql/sql-server/sql-server-privacy#non-personal-data) for more information. - -## Prerequisites - -To register your SQL Server VM with the extension, you'll need the following: - -- An [Azure subscription](https://azure.microsoft.com/free/) that has been [registered with the **Microsoft.SqlVirtualMachine** resource provider](sql-agent-extension-manually-register-single-vm.md#register-subscription-with-rp) and contains unregistered SQL Server virtual machines. -- The client credentials used to register the virtual machines exist in any of the following Azure roles: **Virtual Machine contributor**, **Contributor**, or **Owner**. -- The latest version of [Az PowerShell (5.0 minimum)](/powershell/azure/new-azureps-module-az). - - -## Get started - -Before proceeding, you must first create a local copy of the script, import it as a PowerShell module, and connect to Azure. - -### Create the script - -To create the script, copy the [full script](#full-script) from the end of this article and save it locally as `RegisterSqlVMs.psm1`. - -### Import the script - -After the script is created, you can import it as a module in the PowerShell terminal. - -Open an administrative PowerShell terminal and navigate to where you saved the `RegisterSqlVMs.psm1` file. 
Then, run the following PowerShell cmdlet to import the script as a module: - -```powershell-interactive -Import-Module .\RegisterSqlVMs.psm1 -``` - -### Connect to Azure - -Use the following PowerShell cmdlet to connect to Azure: - -```powershell-interactive -Connect-AzAccount -``` - - -## All VMs in a list of subscriptions - -Use the following cmdlet to register all SQL Server virtual machines in a list of subscriptions: - -```powershell-interactive -Register-SqlVMs -SubscriptionList SubscriptionId1,SubscriptionId2 -``` - -Example output: - -``` -Number of subscriptions registration failed for -because you do not have access or credentials are wrong: 1 -Total VMs Found: 10 -VMs Already registered: 1 -Number of VMs registered successfully: 4 -Number of VMs failed to register due to error: 1 -Number of VMs skipped as VM or the guest agent on VM is not running: 3 -Number of VMs skipped as they are not running SQL Server On Windows: 1 - -Please find the detailed report in file RegisterSqlVMScriptReport1571314821.txt -Please find the error details in file VMsNotRegisteredDueToError1571314821.log -``` - -## All VMs in a single subscription - -Use the following cmdlet to register all SQL Server virtual machines in a single subscription: - -```powershell-interactive -Register-SqlVMs -Subscription SubscriptionId1 -``` - -Example output: - -``` -Total VMs Found: 10 -VMs Already registered: 1 -Number of VMs registered successfully: 5 -Number of VMs failed to register due to error: 1 -Number of VMs skipped as VM or the guest agent on VM is not running: 2 -Number of VMs skipped as they are not running SQL Server On Windows: 1 - -Please find the detailed report in file RegisterSqlVMScriptReport1571314821.txt -Please find the error details in file VMsNotRegisteredDueToError1571314821.log -``` - -## All VMs in multiple resource groups - -Use the following cmdlet to register all SQL Server virtual machines in multiple resource groups within a single subscription: - 
-```powershell-interactive -Register-SqlVMs -Subscription SubscriptionId1 -ResourceGroupList ResourceGroup1,ResourceGroup2 -``` - -Example output: - -``` -Total VMs Found: 4 -VMs Already registered: 1 -Number of VMs registered successfully: 1 -Number of VMs failed to register due to error: 1 -Number of VMs skipped as they are not running SQL Server On Windows: 1 - -Please find the detailed report in file RegisterSqlVMScriptReport1571314821.txt -Please find the error details in file VMsNotRegisteredDueToError1571314821.log -``` - -## All VMs in a resource group - -Use the following cmdlet to register all SQL Server virtual machines in a single resource group: - -```powershell-interactive -Register-SqlVMs -Subscription SubscriptionId1 -ResourceGroupName ResourceGroup1 -``` - -Example output: - -``` -Total VMs Found: 4 -VMs Already registered: 1 -Number of VMs registered successfully: 1 -Number of VMs failed to register due to error: 1 -Number of VMs skipped as VM or the guest agent on VM is not running: 1 - -Please find the detailed report in file RegisterSqlVMScriptReport1571314821.txt -Please find the error details in file VMsNotRegisteredDueToError1571314821.log -``` - -## Specific VMs in a single resource group - -Use the following cmdlet to register specific SQL Server virtual machines within a single resource group: - -```powershell-interactive -Register-SqlVMs -Subscription SubscriptionId1 -ResourceGroupName ResourceGroup1 -VmList VM1,VM2,VM3 -``` - -Example output: - -``` -Total VMs Found: 3 -VMs Already registered: 0 -Number of VMs registered successfully: 1 -Number of VMs skipped as VM or the guest agent on VM is not running: 1 -Number of VMs skipped as they are not running SQL Server On Windows: 1 - -Please find the detailed report in file RegisterSqlVMScriptReport1571314821.txt -Please find the error details in file VMsNotRegisteredDueToError1571314821.log -``` - -## A specific VM - -Use the following cmdlet to register a specific SQL Server virtual 
machine: - -```powershell-interactive -Register-SqlVMs -Subscription SubscriptionId1 -ResourceGroupName ResourceGroup1 -Name VM1 -``` - -Example output: - -``` -Total VMs Found: 1 -VMs Already registered: 0 -Number of VMs registered successfully: 1 - -Please find the detailed report in file RegisterSqlVMScriptReport1571314821.txt -``` - - -## Output description - -Both a report and a log file are generated every time the `Register-SqlVMs` cmdlet is used. - -### Report - -The report is generated as a `.txt` file named `RegisterSqlVMScriptReport.txt` where the timestamp is the time when the cmdlet was started. The report lists the following details: - -| **Output value** | **Description** | -| :-------------- | :-------------- | -| Number of subscriptions registration failed for because you do not have access or credentials are incorrect | This provides the number and list of subscriptions that had issues with the provided authentication. The detailed error can be found in the log by searching for the subscription ID. | -| Number of subscriptions that could not be tried because they are not registered to the resource provider | This section contains the count and list of subscriptions that have not been registered to the SQL IaaS Agent extension. | -| Total VMs found | The count of virtual machines that were found in the scope of the parameters passed to the cmdlet. | -| VMs already registered | The count of virtual machines that were skipped as they were already registered with the extension. | -| Number of VMs registered successfully | The count of virtual machines that were successfully registered after running the cmdlet. Lists the registered virtual machines in the format `SubscriptionID, Resource Group, Virtual Machine`. | -| Number of VMs failed to register due to error | Count of virtual machines that failed to register due to some error. The details of the error can be found in the log file. 
 | -| Number of VMs skipped as the VM or the guest agent on VM is not running | Count and list of virtual machines that could not be registered as either the virtual machine or the guest agent on the virtual machine were not running. These can be retried once the virtual machine or guest agent has been started. Details can be found in the log file. | -| Number of VMs skipped as they are not running SQL Server on Windows | Count of virtual machines that were skipped as they are not running SQL Server or are not a Windows virtual machine. The virtual machines are listed in the format `SubscriptionID, Resource Group, Virtual Machine`. | - - -### Log - -Errors are logged in the log file named `VMsNotRegisteredDueToError.log`, where timestamp is the time when the script started. If the error is at the subscription level, the log contains the comma-separated Subscription ID and the error message. If the error is with the virtual machine registration, the log contains the Subscription ID, Resource group name, virtual machine name, error code, and message separated by commas. - -## Remarks - -When you register SQL Server VMs with the extension by using the provided script, consider the following: - -- Registration with the extension requires a guest agent running on the SQL Server VM. Windows Server 2008 images do not have a guest agent, so these virtual machines will fail and must be registered manually using the [NoAgent management mode](sql-server-iaas-agent-extension-automate-management.md#management-modes). -- There is retry logic built-in to overcome transient errors. If the virtual machine is successfully registered, then it is a rapid operation. However, if the registration fails, then each virtual machine will be retried. As such, you should allow significant time to complete the registration process - though actual time requirement is dependent on the type and number of errors. 
- -## Full script - -For the full script on GitHub, see [Bulk register SQL Server VMs with Az PowerShell](https://github.com/Azure/azure-docs-powershell-samples/blob/master/sql-virtual-machine/register-sql-vms/RegisterSqlVMs.psm1). - -Copy the full script and save it as `RegisterSqlVMs.psm1`. - -[!code-powershell-interactive[main](../../../../powershell_scripts/sql-virtual-machine/register-sql-vms/RegisterSqlVMs.psm1 "Bulk register SQL Server virtual machines")] - -## Next steps - -For more information, see the following articles: - -* [Overview of SQL Server on a Windows VM](sql-server-on-azure-vm-iaas-what-is-overview.md) -* [FAQ for SQL Server on a Windows VM](frequently-asked-questions-faq.yml) -* [Pricing guidance for SQL Server on a Windows VM](pricing-guidance.md) -* [What's new for SQL Server on Azure VMs](doc-changes-updates-release-notes-whats-new.md) diff --git a/articles/azure-sql/virtual-machines/windows/sql-assessment-for-sql-vm.md b/articles/azure-sql/virtual-machines/windows/sql-assessment-for-sql-vm.md deleted file mode 100644 index a1baf1cb83b04..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/sql-assessment-for-sql-vm.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: SQL best practices assessment -description: "Identify performance issues and assess that your SQL Server VM is configured to follow best practices by using the SQL best practices assessment feature in the Azure portal." 
-author: ebruersan -ms.author: ebrue -ms.service: virtual-machines -ms.topic: how-to -ms.date: 11/02/2021 -ms.reviewer: mathoma -ms.custom: ignite-fall-2021 ---- - - -# SQL best practices assessment for SQL Server on Azure VMs -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -The SQL best practices assessment feature of the Azure portal identifies possible performance issues and evaluates that your SQL Server on Azure Virtual Machines (VMs) is configured to follow best practices using the [rich ruleset](https://github.com/microsoft/sql-server-samples/blob/master/samples/manage/sql-assessment-api/DefaultRuleset.csv) provided by the [SQL Assessment API](/sql/sql-assessment-api/sql-assessment-api-overview). - - -To learn more, watch this video on [SQL best practices assessment](/shows/Data-Exposed/optimally-configure-sql-server-on-azure-virtual-machines-with-sql-assessment?WT.mc_id=dataexposed-c9-niner): - - - - -## Overview - -Once the SQL best practices assessment feature is enabled, your SQL Server instance and databases are scanned to provide recommendations for things like indexes, deprecated features, enabled or missing trace flags, statistics, etc. Recommendations are surfaced to the [SQL VM management page](manage-sql-vm-portal.md) of the [Azure portal](https://portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/Microsoft.SqlVirtualMachine%2FSqlVirtualMachines). - - -Assessment results are uploaded to your [Log Analytics workspace](../../../azure-monitor/logs/quick-create-workspace.md) using [Microsoft Monitoring Agent (MMA)](../../../azure-monitor/agents/log-analytics-agent.md). If your VM is already configured to use Log Analytics, the SQL best practices assessment feature uses the existing connection. Otherwise, the MMA extension is installed to the SQL Server VM and connected to the specified Log Analytics workspace. 
- -Assessment run time depends on your environment (number of databases, objects, and so on), with a duration from a few minutes, up to an hour. Similarly, the size of the assessment result also depends on your environment. Assessment runs against your instance and all databases on that instance. In our testing, we observed that an assessment run can have up to 5-10% CPU impact on the machine. In these tests, the assessment was done while a TPC-C like application was running against the SQL Server. - -## Prerequisites - -To use the SQL best practices assessment feature, you must have the following prerequisites: - -- Your SQL Server VM must be registered with the [SQL Server IaaS extension in full mode](sql-agent-extension-manually-register-single-vm.md#full-mode). -- A [Log Analytics workspace](../../../azure-monitor/logs/quick-create-workspace.md) in the same subscription as your SQL Server VM to upload assessment results to. -- SQL Server needs to be 2012 or higher version. - - -## Enable - -To enable SQL best practices assessments, follow these steps: - -1. Sign into the [Azure portal](https://portal.azure.com) and go to your [SQL Server VM resource](https://portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/Microsoft.SqlVirtualMachine%2FSqlVirtualMachines). -1. Select **SQL best practices assessments** under **Settings**. -1. Select **Enable SQL best practices assessments** or **Configuration** to navigate to the **Configuration** page. -1. Check the **Enable SQL best practices assessments** box and provide the following: - 1. The [Log Analytics workspace](../../../azure-monitor/logs/quick-create-workspace.md) that assessments will be uploaded to. If the SQL Server VM has not been associated with a workspace previously, then choose an existing workspace in the subscription from the drop-down. Otherwise, the previously-associated workspace is already populated. - 1. The **Run schedule**. 
You can choose to run assessments on demand, or automatically on a schedule. If you choose a schedule, then provide the frequency (weekly or monthly), day of week, recurrence (every 1-6 weeks), and the time of day your assessments should start (local to VM time). -1. Select **Apply** to save your changes and deploy the Microsoft Monitoring Agent to your SQL Server VM if it's not deployed already. An Azure portal notification will tell you once the SQL best practices assessment feature is ready for your SQL Server VM. - - -## Assess SQL Server VM - -Assessments run: -- On a schedule -- On demand - -### Run scheduled assessment - -If you set a schedule in the configuration blade, an assessment runs automatically at the specified date and time. Choose **Configuration** to modify your assessment schedule. Once you provide a new schedule, the previous schedule is overwritten. - -### Run on demand assessment - -After the SQL best practices assessment feature is enabled for your SQL Server VM, it's possible to run an assessment on demand. To do so, select **Run assessment** from the SQL best practices assessment blade of the [Azure portal SQL Server VM resource](https://portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/Microsoft.SqlVirtualMachine%2FSqlVirtualMachines) page. - - -## View results - -The **Assessments results** section of the **SQL best practices assessments** page shows a list of the most recent assessment runs. Each row displays the start time of a run and the status - scheduled, running, uploading results, completed, or failed. Each assessment run has two parts: evaluates your instance, and uploads the results to your Log Analytics workspace. The status field covers both parts. Assessment results are shown in Azure workbooks. - -Access the assessment results Azure workbook in three ways: -- Select the **View latest successful assessment button** on the **SQL best practices assessments** page. 
-- Choose a completed run from the **Assessment results** section of the **SQL best practices assessments** page. -- Select **View assessment results** from the **Top 10 recommendations** surfaced on the **Overview** page of your SQL VM resource page. - -Once you have the workbook open, you can use the drop-down to select previous runs. You can view the results of a single run using the **Results** page or review historical trends using the **Trends** page. - -### Results page - -The **Results** page organizes the recommendations using tabs for *All, new, resolved*. Use these tabs to view all recommendations from the current run, all the new recommendations (the delta from previous runs), or resolved recommendations from previous runs. Tabs help you track progress between runs. The *Insights* tab identifies the most recurring issues and the databases with the most issues. Use these to decide where to concentrate your efforts. - -The graph groups assessment results in different categories of severity - high, medium, low, and information. Select each category to see the list of recommendations, or search for key phrases in the search box. It's best to start with the most severe recommendations and go down the list. - -The first grid shows you each recommendation and the number of instances your environment hit that issue. When you select a row in the first grid, the second grid lists all the instances for that particular recommendation. If there is no selection in the first grid, the second grid shows all recommendations. Potentially this could be a big list. You can use the drop downs above the grid (**Name, Severity, Tags, Check Id**) to filter the results. You can also use **Export to Excel** and **Open the last run query in the Logs view** options by selecting the small icons on the top right corner of each grid. - -The **passed** section of the graph identifies recommendations your system already follows. 
- -View detailed information for each recommendation by selecting the **Message** field, such as a long description, and relevant online resources. - -### Trends page - -There are three charts on the **Trends** page to show changes over time: all issues, new issues, and resolved issues. The charts help you see your progress. Ideally, the number of recommendations should go down while the number of resolved issues goes up. The legend shows the average number of issues for each severity level. Hover over the bars to see the individual values for each run. - -If there are multiple runs in a single day, only the latest run is included in the graphs on the **Trends** page. - -## Known Issues - -You may encounter some of the following known issues when using SQL best practices assessments. - -### Configuration error for Enable SQL best practices assessment - -If your virtual machine is already associated with a Log Analytics workspace that you don't have access to or that is in another subscription, you will see an error in the configuration blade. For the former, you can either obtain permissions for that workspace or switch your VM to a different Log Analytics workspace by following [these instructions](../../../azure-monitor/agents/agent-manage.md) to remove the Microsoft Monitoring Agent. - -### Deployment failure for Enable or Run Assessment - -Refer to the [deployment history](../../../azure-resource-manager/templates/deployment-history.md) of the resource group containing the SQL VM to view the error message associated with the failed action. - -### Failed assessments - -If the assessment or uploading the results failed for some reason, the status of that run will indicate the failure. Clicking on the status will open a context pane where you can see the details about the failure and possible ways to remediate the issue. 
- ->[!TIP] ->If you have enforced TLS 1.0 or higher in Windows and disabled older SSL protocols as described [here](/troubleshoot/windows-server/windows-security/restrict-cryptographic-algorithms-protocols-schannel#schannel-specific-registry-keys), then you must also ensure that .NET Framework is [configured](../../../azure-monitor/agents/agent-windows.md#configure-agent-to-use-tls-12) to use strong cryptography. - -## Next steps - -- To register your SQL Server VM with the SQL Server IaaS extension to SQL Server on Azure VMs, see the articles for [Automatic installation](sql-agent-extension-automatic-registration-all-vms.md), [Single VMs](sql-agent-extension-manually-register-single-vm.md), or [VMs in bulk](sql-agent-extension-manually-register-vms-bulk.md). -- To learn about more capabilities available by the SQL Server IaaS extension to SQL Server on Azure VMs, see [Manage SQL Server VMs by using the Azure portal](manage-sql-vm-portal.md) diff --git a/articles/azure-sql/virtual-machines/windows/sql-server-2008-extend-end-of-support.md b/articles/azure-sql/virtual-machines/windows/sql-server-2008-extend-end-of-support.md deleted file mode 100644 index 78615ec10cb12..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/sql-server-2008-extend-end-of-support.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: Extend support for SQL Server 2008 & 2008 R2 -description: Extend support for SQL Server 2008 and SQL Server 2008 R2 by migrating your SQL Server instance to Azure, or purchasing extended support to keep instances on-premises. 
-services: virtual-machines-windows -documentationcenter: '' -author: bluefooted -tags: azure-service-management -ms.service: virtual-machines-sql -ms.subservice: management - -ms.topic: conceptual -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 04/08/2019 -ms.author: pamela -ms.reviewer: mathoma -ms.custom: "seo-lt-2019" ---- -# Extend support for SQL Server 2008 and SQL Server 2008 R2 with Azure -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -SQL Server 2008 and SQL Server 2008 R2 have both reached the [end of their support (EOS) life cycle](https://www.microsoft.com/sql-server/sql-server-2008). Because many customers are still using both versions, we're providing several options to continue getting support. You can migrate your on-premises SQL Server instances to Azure virtual machines (VMs), migrate to Azure SQL Database, or stay on-premises and purchase extended security updates. - -Unlike with a managed instance, migrating to an Azure VM does not require recertifying your applications. And unlike with staying on-premises, you'll receive free extended security patches by migrating to an Azure VM. - -The rest of this article provides considerations for migrating your SQL Server instance to an Azure VM. - -For more information about end of support options, see [End of support](/sql/sql-server/end-of-support/sql-server-end-of-life-overview). - -## Provisioning - -There is a pay-as-you-go **SQL Server 2008 R2 on Windows Server 2008 R2** image available on Azure Marketplace. - -Customers who are on SQL Server 2008 will need to either self-install or upgrade to SQL Server 2008 R2. Likewise, customers on Windows Server 2008 will need to either deploy their VM from a custom VHD or upgrade to Windows Server 2008 R2. - -Images deployed through Azure Marketplace come with the SQL IaaS extension pre-installed. The SQL IaaS extension is a requirement for flexible licensing and automated patching. 
Customers who deploy self-installed VMs will need to manually install the SQL IaaS extension. The SQL IaaS extension is not supported on Windows Server 2008. - -> [!NOTE] -> Although the SQL Server **Create** and **Manage** blades will work with the SQL Server 2008 R2 image in the Azure portal, the following features are _not supported_: Automatic backups, Azure Key Vault integration, R Services, and storage configuration. - -## Licensing -Pay-as-you-go SQL Server 2008 R2 deployments can convert to [Azure Hybrid Benefit](https://azure.microsoft.com/pricing/hybrid-benefit/). - -To convert a Software Assurance (SA)-based license to pay-as-you-go, customers should register with the [SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md). After that registration, the SQL license type will be interchangeable between Azure Hybrid Benefit and pay-as-you-go. - -Self-installed SQL Server 2008 or SQL Server 2008 R2 instances on an Azure VM can register with the SQL IaaS Agent extension and convert their license type to pay-as-you-go. - -## Migration -You can migrate EOS SQL Server instances to an Azure VM with manual backup/restore methods. This is the most common migration method from on-premises to an Azure VM. - -### Azure Site Recovery - -For bulk migrations, we recommend the [Azure Site Recovery](../../../site-recovery/site-recovery-overview.md) service. With Azure Site Recovery, customers can replicate the whole VM, including SQL Server from on-premises to Azure VM. - -SQL Server requires app-consistent Azure Site Recovery snapshots to guarantee recovery. Azure Site Recovery supports app-consistent snapshots with a minimum 1-hour interval. The minimum recovery point objective (RPO) possible for SQL Server with Azure Site Recovery migrations is 1 hour. The recovery time objective (RTO) is 2 hours plus SQL Server recovery time. 
- -### Database Migration Service - -The [Azure Database Migration Service](../../../dms/dms-overview.md) is an option for customers if they're migrating from on-premises to an Azure VM by upgrading SQL Server to the 2012 version or later. - -## Disaster recovery - -Disaster recovery solutions for EOS SQL Server on an Azure VM are as follows: - -- **SQL Server backups**: Use Azure Backup to help protect your EOS SQL Server 2008 and 2008 R2 against ransomware, accidental deletion, and corruption with 15-min RPO and point-in-time recovery. For more details, see [this article](../../../backup/sql-support-matrix.md#scenario-support). -- **Log shipping**: You can create a log shipping replica in another zone or Azure region with continuous restores to reduce the RTO. You need to manually configure log shipping. -- **Azure Site Recovery**: You can replicate your VM between zones and regions through Azure Site Recovery replication. SQL Server requires app-consistent snapshots to guarantee recovery in case of a disaster. Azure Site Recovery offers a minimum 1-hour RPO and a 2-hour (plus SQL Server recovery time) RTO for EOS SQL Server disaster recovery. - -## Security patching - -Extended security updates for SQL Server VMs are delivered through the Microsoft Update channels after the SQL Server VM has been registered with the [SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md). Patches can be downloaded manually or automatically. - -*Automated patching* is enabled by default. Automated patching allows Azure to automatically patch SQL Server and the operating system. You can specify a day of the week, time, and duration for a maintenance window if the SQL Server IaaS extension is installed. Azure performs patching in this maintenance window. The maintenance window schedule uses the VM locale for time. For more information, see [Automated patching for SQL Server on Azure Virtual Machines](automated-patching.md). 
- - -## Next steps - -Migrate your SQL Server VM to Azure: - -* [Migrate a SQL Server database to SQL Server in an Azure VM](migrate-to-vm-from-sql-server.md) - -Get started with SQL Server on Azure Virtual Machines: - -* [Create a SQL Server VM in the Azure portal](sql-vm-create-portal-quickstart.md) - -Get answers to commonly asked questions about SQL Server VMs: - -* [FAQ for SQL Server on Azure Virtual Machines](frequently-asked-questions-faq.yml) - -Find out more about end of support options, and extended security updates: - -* [End of support](/sql/sql-server/end-of-support/sql-server-end-of-life-overview) & [Extended Security Updates](/sql/sql-server/end-of-support/sql-server-extended-security-updates) \ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/windows/sql-server-iaas-agent-extension-automate-management.md b/articles/azure-sql/virtual-machines/windows/sql-server-iaas-agent-extension-automate-management.md deleted file mode 100644 index 283e2823a8d2b..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/sql-server-iaas-agent-extension-automate-management.md +++ /dev/null @@ -1,181 +0,0 @@ ---- -title: What is the SQL Server IaaS Agent extension? (Windows) -description: This article describes how the SQL Server IaaS Agent extension helps automate management specific administration tasks of SQL Server on Windows Azure VMs. These include features such as automated backup, automated patching, Azure Key Vault integration, licensing management, storage configuration, and central management of all SQL Server VM instances. 
-services: virtual-machines-windows -documentationcenter: '' -author: adbadram -editor: '' -tags: azure-resource-manager -ms.assetid: effe4e2f-35b5-490a-b5ef-b06746083da4 -ms.service: virtual-machines-sql -ms.subservice: management -ms.topic: conceptual -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 10/26/2021 -ms.author: adbadram -ms.reviewer: mathoma -ms.custom: seo-lt-2019, ignite-fall-2021 ---- -# Automate management with the Windows SQL Server IaaS Agent extension -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!div class="op_single_selector"] -> * [Windows](sql-server-iaas-agent-extension-automate-management.md) -> * [Linux](../linux/sql-server-iaas-agent-extension-linux.md) - - - -The SQL Server IaaS Agent extension (SqlIaasExtension) runs on SQL Server on Windows Azure Virtual Machines (VMs) to automate management and administration tasks. - -This article provides an overview of the extension. To install the SQL Server IaaS extension to SQL Server on Azure VMs, see the articles for [Automatic installation](sql-agent-extension-automatic-registration-all-vms.md), [Single VMs](sql-agent-extension-manually-register-single-vm.md), or [VMs in bulk](sql-agent-extension-manually-register-vms-bulk.md). - -> [!NOTE] -> Starting in September 2021, registering with the SQL IaaS extension in full mode no longer requires restarting the SQL Server service. - -To learn more about the Azure VM deployment and management experience, including recent improvements, see: -- [Azure SQL VM: Automate Management with the SQL Server IaaS Agent extension (Ep. 2)](/shows/data-exposed/azure-sql-vm-automate-management-with-the-sql-server-iaas-agent-extension-ep-2?WT.mc_id=dataexposed-c9-niner-mighub) -- [Azure SQL VM: New and Improved SQL on Azure VM deployment and management experience (Ep.8) | Data Exposed](/shows/data-exposed/new-and-improved-sql-on-azure-vm-deployment-and-management-experience?WT.mc_id=dataexposed-c9-niner-mighub). 
- -## Overview - -The SQL Server IaaS Agent extension allows for integration with the Azure portal, and depending on the management mode, unlocks a number of feature benefits for SQL Server on Azure VMs: - -- **Feature benefits**: The extension unlocks a number of automation feature benefits, such as portal management, license flexibility, automated backup, automated patching and more. See [Feature benefits](#feature-benefits) later in this article for details. - -- **Compliance**: The extension offers a simplified method to fulfill the requirement of notifying Microsoft that the Azure Hybrid Benefit has been enabled as is specified in the product terms. This process negates needing to manage licensing registration forms for each resource. - -- **Free**: The extension in all three manageability modes is completely free. There is no additional cost associated with the extension, or with changing management modes. - -- **Simplified license management**: The extension simplifies SQL Server license management, and allows you to quickly identify SQL Server VMs with the Azure Hybrid Benefit enabled using the [Azure portal](manage-sql-vm-portal.md), PowerShell or the Azure CLI: - - # [PowerShell](#tab/azure-powershell) - - ```powershell-interactive - Get-AzSqlVM | Where-Object {$_.LicenseType -eq 'AHUB'} - ``` - - # [Azure CLI](#tab/azure-cli) - - ```azurecli-interactive - $ az sql vm list --query "[?sqlServerLicenseType=='AHUB']" - ``` - --- - - - -## Feature benefits - -The SQL Server IaaS Agent extension unlocks a number of feature benefits for managing your SQL Server VM. You can register your SQL Server VM in lightweight management mode, which unlocks a few of the benefits, or in full management mode, which unlocks all available benefits. 
- -The following table details these benefits: - -[!INCLUDE [SQL VM feature benefits](../../includes/sql-vm-feature-benefits.md)] - - - -## Management modes - -You can choose to register your SQL IaaS extension in three management modes: - -- **Lightweight** mode copies extension binaries to the VM, but does not install the agent. Lightweight mode _only_ supports changing the license type and edition of SQL Server and provides limited portal management. Use this option for SQL Server VMs with multiple instances, or those participating in a failover cluster instance (FCI). Lightweight mode is the default management mode when using the [automatic registration](sql-agent-extension-automatic-registration-all-vms.md) feature, or when a management type is not specified during manual registration. There is no impact to memory or CPU when using the lightweight mode, and there is no associated cost. - -- **Full** mode installs the SQL IaaS Agent to the VM to deliver full functionality. Use it for managing a SQL Server VM with a single instance. Full mode installs two Windows services that have a minimal impact to memory and CPU - these can be monitored through task manager. There is no cost associated with using the full manageability mode. System administrator permissions are required. As of September 2021, restarting the SQL Server service is no longer necessary when registering your SQL Server VM in full management mode. - -- **NoAgent** mode is dedicated to SQL Server 2008 and SQL Server 2008 R2 installed on Windows Server 2008. There is no impact to memory or CPU when using the NoAgent mode. There is no cost associated with using the NoAgent manageability mode, the SQL Server is not restarted, and an agent is not installed to the VM. 
- -You can view the current mode of your SQL Server IaaS agent by using Azure PowerShell: - - ```powershell-interactive - # Get the SqlVirtualMachine - $sqlvm = Get-AzSqlVM -Name $vm.Name -ResourceGroupName $vm.ResourceGroupName - $sqlvm.SqlManagementType - ``` - - -## Installation - -Register your SQL Server VM with the SQL Server IaaS Agent extension to create the [**SQL virtual machine** _resource_](manage-sql-vm-portal.md) within your subscription, which is a _separate_ resource from the virtual machine resource. Unregistering your SQL Server VM from the extension will remove the **SQL virtual machine** _resource_ from your subscription but will not drop the actual virtual machine. - -Deploying a SQL Server VM Azure Marketplace image through the Azure portal automatically registers the SQL Server VM with the extension in full. However, if you choose to self-install SQL Server on an Azure virtual machine, or provision an Azure virtual machine from a custom VHD, then you must register your SQL Server VM with the SQL IaaS extension to unlock feature benefits. - -Registering the extension in lightweight mode copies binaries but does not install the agent to the VM. The agent is installed to the VM when the extension is installed in full management mode. - -There are three ways to register with the extension: -- [Automatically for all current and future VMs in a subscription](sql-agent-extension-automatic-registration-all-vms.md) -- [Manually for a single VM](sql-agent-extension-manually-register-single-vm.md) -- [Manually for multiple VMs in bulk](sql-agent-extension-manually-register-vms-bulk.md) - -By default, Azure VMs with SQL Server 2016 or later installed will be automatically registered with the SQL IaaS Agent extension when detected by the [CEIP service](/sql/sql-server/usage-and-diagnostic-data-configuration-for-sql-server). See the [SQL Server privacy supplement](/sql/sql-server/sql-server-privacy#non-personal-data) for more information. 
- - -### Named instance support - -The SQL Server IaaS Agent extension works with a named instance of SQL Server if it is the only SQL Server instance available on the virtual machine. If a VM has multiple named SQL Server instances and no default instance, then the SQL IaaS extension will register in lightweight mode and pick either the instance with the highest edition, or the first instance, if all the instances have the same edition. - -To use a named instance of SQL Server, deploy an Azure virtual machine, install a single named SQL Server instance to it, and then register it with the [SQL IaaS Extension](sql-agent-extension-manually-register-single-vm.md). - -Alternatively, to use a named instance with an Azure Marketplace SQL Server image, follow these steps: - - 1. Deploy a SQL Server VM from Azure Marketplace. - 1. [Unregister](sql-agent-extension-manually-register-single-vm.md#unregister-from-extension) the SQL Server VM from the SQL IaaS Agent extension. - 1. Uninstall SQL Server completely within the SQL Server VM. - 1. Install SQL Server with a named instance within the SQL Server VM. - 1. [Register the VM with the SQL IaaS Agent Extension](sql-agent-extension-manually-register-single-vm.md#full-mode). - -## Verify status of extension - -Use the Azure portal or Azure PowerShell to check the status of the extension. - -### Azure portal - -Verify the extension is installed in the Azure portal. - -Go to your **Virtual machine** resource in the Azure portal (not the *SQL virtual machines* resource, but the resource for your VM). Select **Extensions** under **Settings**. 
You should see the **SqlIaasExtension** extension listed, as in the following example: - -![Status of the SQL Server IaaS Agent extension in the Azure portal](./media/sql-server-iaas-agent-extension-automate-management/azure-rm-sql-server-iaas-agent-portal.png) - - -### Azure PowerShell - -You can also use the **Get-AzVMSqlServerExtension** Azure PowerShell cmdlet: - - ```powershell-interactive - Get-AzVMSqlServerExtension -VMName "vmname" -ResourceGroupName "resourcegroupname" - ``` - -The previous command confirms that the agent is installed and provides general status information. You can get specific status information about automated backup and patching by using the following commands: - - ```powershell-interactive - $sqlext = Get-AzVMSqlServerExtension -VMName "vmname" -ResourceGroupName "resourcegroupname" - $sqlext.AutoPatchingSettings - $sqlext.AutoBackupSettings - ``` - - -## Limitations - -The SQL IaaS Agent extension only supports: - -- SQL Server VMs deployed through the Azure Resource Manager. SQL Server VMs deployed through the classic model are not supported. -- SQL Server VMs deployed to the public or Azure Government cloud. Deployments to other private or government clouds are not supported. -- Failover cluster instances (FCIs) in lightweight mode. -- Named instances with multiple instances on a single VM in lightweight mode. - - -## Privacy statement - -When using SQL Server on Azure VMs and the SQL IaaS extension, consider the following privacy statements: - -- **Data collection**: The SQL IaaS Agent extension collects data for the express purpose of giving customers optional benefits when using SQL Server on Azure Virtual Machines. Microsoft **will not use this data for licensing audits** without the customer's advance consent. See the [SQL Server privacy supplement](/sql/sql-server/sql-server-privacy#non-personal-data) for more information. 
- -- **In-region data residency**: SQL Server on Azure VMs and SQL IaaS Agent Extension do not move or store customer data out of the region in which the VMs are deployed. - - -## Next steps - -To install the SQL Server IaaS extension to SQL Server on Azure VMs, see the articles for [Automatic installation](sql-agent-extension-automatic-registration-all-vms.md), [Single VMs](sql-agent-extension-manually-register-single-vm.md), or [VMs in bulk](sql-agent-extension-manually-register-vms-bulk.md). - -For more information about running SQL Server on Azure Virtual Machines, see the [What is SQL Server on Azure Virtual Machines?](sql-server-on-azure-vm-iaas-what-is-overview.md). - -To learn more, see [frequently asked questions](frequently-asked-questions-faq.yml). diff --git a/articles/azure-sql/virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md b/articles/azure-sql/virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md deleted file mode 100644 index 9c37220707eea..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/sql-server-on-azure-vm-iaas-what-is-overview.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -title: Overview of SQL Server on Azure Windows Virtual Machines -description: Learn how to run full editions of SQL Server on Azure Virtual Machines in the cloud without having to manage any on-premises hardware. -services: virtual-machines-windows -documentationcenter: '' -author: MashaMSFT -tags: azure-service-management -ms.assetid: c505089e-6bbf-4d14-af0e-dd39a1872767 -ms.service: virtual-machines-sql -ms.subservice: service-overview -ms.topic: overview -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 03/10/2022 -ms.author: mathoma ---- -# What is SQL Server on Windows Azure Virtual Machines? 
-[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -> [!div class="op_single_selector"] -> * [Windows](sql-server-on-azure-vm-iaas-what-is-overview.md) -> * [Linux](../linux/sql-server-on-linux-vm-what-is-iaas-overview.md) - -This article provides an overview of SQL Server on Azure Virtual Machines (VMs) on the Windows platform. - -If you're new to SQL Server on Azure VMs, check out the *SQL Server on Azure VM Overview* video from our in-depth [Azure SQL video series](/shows/Azure-SQL-for-Beginners?WT.mc_id=azuresql4beg_azuresql-ch9-niner): -> [!VIDEO https://docs.microsoft.com/shows/Azure-SQL-for-Beginners/SQL-Server-on-Azure-VM-Overview-4-of-61/player] - - -## Overview - -[SQL Server on Azure Virtual Machines](https://azure.microsoft.com/services/virtual-machines/sql-server/) enables you to use full versions of SQL Server in the cloud without having to manage any on-premises hardware. SQL Server virtual machines (VMs) also simplify licensing costs when you pay as you go. - -Azure virtual machines run in many different [geographic regions](https://azure.microsoft.com/regions/) around the world. They also offer a variety of [machine sizes](../../../virtual-machines/sizes.md). The virtual machine image gallery allows you to create a SQL Server VM with the right version, edition, and operating system. This makes virtual machines a good option for many different SQL Server workloads. - - -## Feature benefits - -When you register your SQL Server on Azure VM with the [SQL IaaS agent extension](sql-server-iaas-agent-extension-automate-management.md) you unlock a number of feature benefits. You can register your SQL Server VM in lightweight management mode, which unlocks a few of the benefits, or in full management mode, which unlocks all available benefits. Registering with the extension is completely free. 
- -The following table details the benefits unlocked by the extension: - -[!INCLUDE [SQL VM feature benefits](../../includes/sql-vm-feature-benefits.md)] - - -## Getting started - -To get started with SQL Server on Azure VMs, review the following resources: - -- **Create SQL VM**: To create your SQL Server on Azure VM, review the Quickstarts using the [Azure portal](sql-vm-create-portal-quickstart.md), [Azure PowerShell](sql-vm-create-powershell-quickstart.md) or an [ARM template](create-sql-vm-resource-manager-template.md). For more thorough guidance, review the [Provisioning guide](create-sql-vm-portal.md). -- **Connect to SQL VM**: To connect to your SQL Server on Azure VMs, review the [ways to connect](ways-to-connect-to-sql.md). -- **Migrate data**: Migrate your data to SQL Server on Azure VMs from [SQL Server](../../migration-guides/virtual-machines/sql-server-to-sql-on-azure-vm-migration-overview.md), [Oracle](../../migration-guides/virtual-machines/oracle-to-sql-on-azure-vm-guide.md), or [Db2](../../migration-guides/virtual-machines/db2-to-sql-on-azure-vm-guide.md). -- **Storage configuration**: For information about configuring storage for your SQL Server on Azure VMs, review [Storage configuration](storage-configuration.md). -- **Performance**: Fine-tune the performance of your SQL Server on Azure VM by reviewing the [Performance best practices checklist](performance-guidelines-best-practices-checklist.md). -- **Pricing**: For information about the pricing structure of your SQL Server on Azure VM, review the [Pricing guidance](pricing-guidance.md). -- **Frequently asked questions**: For commonly asked questions, and scenarios, review the [FAQ](frequently-asked-questions-faq.yml). - -## Videos - -For videos about the latest features to optimize SQL Server VM performance and automate management, review the following Data Exposed videos: - -- [Caching and Storage Capping (Ep. 
1)](/shows/data-exposed/azure-sql-vm-caching-and-storage-capping-ep-1-data-exposed) -- [Automate Management with the SQL Server IaaS Agent extension (Ep. 2)](/shows/data-exposed/azure-sql-vm-automate-management-with-the-sql-server-iaas-agent-extension-ep-2) -- [Use Azure Monitor Metrics to Track VM Cache Health (Ep. 3)](/shows/data-exposed/azure-sql-vm-use-azure-monitor-metrics-to-track-vm-cache-health-ep-3) -- [Get the best price-performance for your SQL Server workloads on Azure VM](/shows/data-exposed/azure-sql-vm-get-the-best-price-performance-for-your-sql-server-workloads-on-azure-vm) -- [Using PerfInsights to Evaluate Resource Health and Troubleshoot (Ep. 5)](/shows/data-exposed/azure-sql-vm-using-perfinsights-to-evaluate-resource-health-and-troubleshoot-ep-5) -- [Best Price-Performance with Ebdsv5 Series (Ep.6)](/shows/data-exposed/azure-sql-vm-best-price-performance-with-ebdsv5-series) -- [Optimally Configure SQL Server on Azure Virtual Machines with SQL Assessment (Ep. 7)](/shows/data-exposed/optimally-configure-sql-server-on-azure-virtual-machines-with-sql-assessment) -- [New and Improved SQL Server on Azure VM deployment and management experience (Ep.8)](/shows/data-exposed/new-and-improved-sql-on-azure-vm-deployment-and-management-experience) - - -## High availability & disaster recovery - -On top of the built-in [high availability provided by Azure virtual machines](../../../virtual-machines/availability.md), you can also leverage the high availability and disaster recovery features provided by SQL Server. - -To learn more, see the overview of [Always On availability groups](availability-group-overview.md), and [Always On failover cluster instances](failover-cluster-instance-overview.md). For more details, see the [business continuity overview](business-continuity-high-availability-disaster-recovery-hadr-overview.md). 
- -To get started, see the tutorials for [availability groups](availability-group-manually-configure-prerequisites-tutorial-multi-subnet.md) or [preparing your VM for a failover cluster instance](failover-cluster-instance-prepare-vm.md). - -## Licensing - -To get started, choose a SQL Server virtual machine image with your required version, edition, and operating system. The following sections provide direct links to the Azure portal for the SQL Server virtual machine gallery images. - -Azure only maintains one virtual machine image for each supported operating system, version, and edition combination. This means that over time images are refreshed, and older images are removed. For more information, see the **Images** section of the [SQL Server VMs FAQ](./frequently-asked-questions-faq.yml). - -> [!TIP] -> For more information about how to understand pricing for SQL Server images, see [Pricing guidance for SQL Server on Azure Virtual Machines](pricing-guidance.md). - -### Pay as you go - -The following table provides a matrix of pay-as-you-go SQL Server images. 
- -| Version | Operating system | Edition | -| --- | --- | --- | -| **SQL Server 2019** | Windows Server 2019 | [Enterprise](https://portal.azure.com/#create/microsoftsqlserver.sql2019-ws2019enterprise), [Standard](https://portal.azure.com/#create/microsoftsqlserver.sql2019-ws2019standard), [Web](https://portal.azure.com/#create/microsoftsqlserver.sql2019-ws2019web), [Developer](https://portal.azure.com/#create/microsoftsqlserver.sql2019-ws2019sqldev) | -| **SQL Server 2017** |Windows Server 2016 |[Enterprise](https://portal.azure.com/#create/Microsoft.SQLServer2017EnterpriseWindowsServer2016), [Standard](https://portal.azure.com/#create/Microsoft.SQLServer2017StandardonWindowsServer2016), [Web](https://portal.azure.com/#create/Microsoft.SQLServer2017WebonWindowsServer2016), [Express](https://portal.azure.com/#create/Microsoft.FreeSQLServerLicenseSQLServer2017ExpressonWindowsServer2016), [Developer](https://portal.azure.com/#create/Microsoft.FreeSQLServerLicenseSQLServer2017DeveloperonWindowsServer2016) | -| **SQL Server 2016 SP2** |Windows Server 2016 |[Enterprise](https://portal.azure.com/#create/Microsoft.SQLServer2016SP2EnterpriseWindowsServer2016), [Standard](https://portal.azure.com/#create/Microsoft.SQLServer2016SP2StandardWindowsServer2016), [Web](https://portal.azure.com/#create/Microsoft.SQLServer2016SP2WebWindowsServer2016), [Express](https://portal.azure.com/#create/Microsoft.FreeLicenseSQLServer2016SP2ExpressWindowsServer2016), [Developer](https://portal.azure.com/#create/Microsoft.FreeLicenseSQLServer2016SP2DeveloperWindowsServer2016) | -| **SQL Server 2014 SP2** |Windows Server 2012 R2 |[Enterprise](https://portal.azure.com/#create/Microsoft.SQLServer2014SP2EnterpriseWindowsServer2012R2), [Standard](https://portal.azure.com/#create/Microsoft.SQLServer2014SP2StandardWindowsServer2012R2), [Web](https://portal.azure.com/#create/Microsoft.SQLServer2014SP2WebWindowsServer2012R2), 
[Express](https://portal.azure.com/#create/Microsoft.SQLServer2014SP2ExpressWindowsServer2012R2) | -| **SQL Server 2012 SP4** |Windows Server 2012 R2 |[Enterprise](https://portal.azure.com/#create/Microsoft.SQLServer2012SP4EnterpriseWindowsServer2012R2), [Standard](https://portal.azure.com/#create/Microsoft.SQLServer2012SP4StandardWindowsServer2012R2), [Web](https://portal.azure.com/#create/Microsoft.SQLServer2012SP4WebWindowsServer2012R2), [Express](https://portal.azure.com/#create/Microsoft.SQLServer2012SP4ExpressWindowsServer2012R2) | -| **SQL Server 2008 R2 SP3** |Windows Server 2008 R2|[Enterprise](https://portal.azure.com/#create/Microsoft.SQLServer2008R2SP3EnterpriseWindowsServer2008R2), [Standard](https://portal.azure.com/#create/Microsoft.SQLServer2008R2SP3StandardWindowsServer2008R2), [Web](https://portal.azure.com/#create/Microsoft.SQLServer2008R2SP3WebWindowsServer2008R2), [Express](https://portal.azure.com/#create/Microsoft.SQLServer2008R2SP3ExpressWindowsServer2008R2) | - -To see the available SQL Server on Linux virtual machine images, see [Overview of SQL Server on Azure Virtual Machines (Linux)](../linux/sql-server-on-linux-vm-what-is-iaas-overview.md). - -> [!NOTE] -> Change the licensing model of a pay-per-usage SQL Server VM to use your own license. For more information, see [How to change the licensing model for a SQL Server VM](licensing-model-azure-hybrid-benefit-ahb-change.md). - -### Bring your own license - -You can also bring your own license (BYOL). In this scenario, you only pay for the VM without any additional charges for SQL Server licensing. Bringing your own license can save you money over time for continuous production workloads. For requirements to use this option, see [Pricing guidance for SQL Server Azure VMs](pricing-guidance.md#byol). - -To bring your own license, you can either convert an existing pay-per-usage SQL Server VM, or you can deploy an image with the prefixed **{BYOL}**. 
For more information about switching your licensing model between pay-per-usage and BYOL, see [How to change the licensing model for a SQL Server VM](licensing-model-azure-hybrid-benefit-ahb-change.md). - -| Version | Operating system | Edition | -| --- | --- | --- | -| **SQL Server 2019** | Windows Server 2019 | [Enterprise BYOL](https://portal.azure.com/#create/microsoftsqlserver.sql2019-ws2019-byolenterprise), [Standard BYOL](https://portal.azure.com/#create/microsoftsqlserver.sql2019-ws2019-byolstandard)| -| **SQL Server 2017** |Windows Server 2016 |[Enterprise BYOL](https://portal.azure.com/#create/Microsoft.BYOLSQLServer2017EnterpriseWindowsServer2016), [Standard BYOL](https://portal.azure.com/#create/Microsoft.BYOLSQLServer2017StandardonWindowsServer2016) | -| **SQL Server 2016 SP2** |Windows Server 2016 |[Enterprise BYOL](https://portal.azure.com/#create/Microsoft.BYOLSQLServer2016SP2EnterpriseWindowsServer2016), [Standard BYOL](https://portal.azure.com/#create/Microsoft.BYOLSQLServer2016SP2StandardWindowsServer2016) | -| **SQL Server 2014 SP2** |Windows Server 2012 R2 |[Enterprise BYOL](https://portal.azure.com/#create/Microsoft.BYOLSQLServer2014SP2EnterpriseWindowsServer2012R2), [Standard BYOL](https://portal.azure.com/#create/Microsoft.BYOLSQLServer2014SP2StandardWindowsServer2012R2) | -| **SQL Server 2012 SP4** |Windows Server 2012 R2 |[Enterprise BYOL](https://portal.azure.com/#create/Microsoft.BYOLSQLServer2012SP4EnterpriseWindowsServer2012R2), [Standard BYOL](https://portal.azure.com/#create/Microsoft.BYOLSQLServer2012SP4StandardWindowsServer2012R2) | - -It is possible to deploy an older image of SQL Server that is not available in the Azure portal using PowerShell. 
To view all available images using PowerShell, use the following command: - - ```powershell - Get-AzVMImageOffer -Location $Location -Publisher 'MicrosoftSQLServer' - ``` - -For more information about deploying SQL Server VMs using PowerShell, view [How to provision SQL Server virtual machines with Azure PowerShell](create-sql-vm-powershell.md). - - - -## Customer experience improvement program (CEIP) - -The Customer Experience Improvement Program (CEIP) is enabled by default. This periodically sends reports to Microsoft to help improve SQL Server. There is no management task required with CEIP unless you want to disable it after provisioning. You can customize or disable the CEIP by connecting to the VM with remote desktop. Then run the **SQL Server Error and Usage Reporting** utility. Follow the instructions to disable reporting. For more information about data collection, see the [SQL Server Privacy Statement](/sql/sql-server/sql-server-privacy). - -## Related products and services - -Since SQL Server on Azure VMs is integrated into the Azure platform, review resources from related products and services that interact with the SQL Server on Azure VM ecosystem: - -- **Windows virtual machines**: [Azure Virtual Machines overview](../../../virtual-machines/windows/overview.md) -- **Storage**: [Introduction to Microsoft Azure Storage](../../../storage/common/storage-introduction.md) -- **Networking**: [Virtual Network overview](../../../virtual-network/virtual-networks-overview.md), [IP addresses in Azure](../../../virtual-network/ip-services/public-ip-addresses.md), [Create a Fully Qualified Domain Name in the Azure portal](../../../virtual-machines/create-fqdn.md) -- **SQL**: [SQL Server documentation](/sql/index), [Azure SQL Database comparison](../../azure-sql-iaas-vs-paas-what-is-overview.md) - - -## Next steps - -Get started with SQL Server on Azure Virtual Machines: - -* [Create a SQL Server VM in the Azure portal](sql-vm-create-portal-quickstart.md) - -Get 
answers to commonly asked questions about SQL Server VMs: - -* [SQL Server on Azure Virtual Machines FAQ](frequently-asked-questions-faq.yml) - -View Reference Architectures for running N-tier applications on SQL Server in IaaS - -* [Windows N-tier application on Azure with SQL Server](/azure/architecture/reference-architectures/n-tier/n-tier-sql-server) -* [Run an N-tier application in multiple Azure regions for high availability](/azure/architecture/reference-architectures/n-tier/multi-region-sql-server) diff --git a/articles/azure-sql/virtual-machines/windows/sql-vm-create-portal-quickstart.md b/articles/azure-sql/virtual-machines/windows/sql-vm-create-portal-quickstart.md deleted file mode 100644 index 29e2500365c64..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/sql-vm-create-portal-quickstart.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -title: Create SQL Server on a Windows virtual machine in the Azure portal | Microsoft Docs -description: This tutorial shows how to create a Windows virtual machine with SQL Server 2017 in the Azure portal. -services: virtual-machines-windows -documentationcenter: na -author: bluefooted -tags: azure-resource-manager -ms.service: virtual-machines-sql -ms.subservice: deployment -ms.topic: quickstart -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: infrastructure-services -ms.date: 07/11/2019 -ms.author: pamela -ms.reviewer: mathoma -ms.custom: mode-ui ---- - -# Quickstart: Create SQL Server 2017 on a Windows virtual machine in the Azure portal - -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - - -> [!div class="op_single_selector"] -> * [Windows](sql-vm-create-portal-quickstart.md) -> * [Linux](../linux/sql-vm-create-portal-quickstart.md) - -This quickstart steps through creating a SQL Server virtual machine (VM) in the Azure portal. - - - > [!TIP] - > - This quickstart provides a path for quickly provisioning and connecting to a SQL VM. 
For more information about other SQL VM provisioning choices, see the [Provisioning guide for SQL Server on Windows VM in the Azure portal](create-sql-vm-portal.md). - > - If you have questions about SQL Server virtual machines, see the [Frequently Asked Questions](frequently-asked-questions-faq.yml). - -## Get an Azure subscription - -If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. - -## Select a SQL Server VM image - -1. Sign in to the [Azure portal](https://portal.azure.com) using your account. - -1. Select **Azure SQL** in the left-hand menu of the Azure portal. If **Azure SQL** is not in the list, select **All services**, then type *Azure SQL* in the search box. -1. Select **+Add** to open the **Select SQL deployment option** page. You can view additional information by selecting **Show details** on the **SQL virtual machines** tile. -1. Select the **Free SQL Server License: SQL Server 2017 Developer on Windows Server 2016** image from the dropdown. - - ![Screenshot that shows where you select the Free SQL Server License: SQL Server 2017 Developer on Windows Server 2016 image.](./media/sql-vm-create-portal-quickstart/select-sql-2017-vm-image.png) - -1. Select **Create**. - - ![New search window](./media/sql-vm-create-portal-quickstart/create-sql-2017-vm-image.png) - -## Provide basic details - -On the **Basics** tab, provide the following information: - -1. In the **Project Details** section, select your Azure subscription and then select **Create new** to create a new resource group. Type _SQLVM-RG_ for the name. - - ![Subscription](./media/sql-vm-create-portal-quickstart/basics-project-details.png) - -1. Under **Instance details**: - 1. Type _SQLVM_ for the **Virtual machine name**. - 1. Choose a location for your **Region**. - 1. For the purpose of this quickstart, leave **Availability options** set to _No infrastructure redundancy required_. 
To find out more information about availability options, see [Availability](../../../virtual-machines/availability.md). - 1. In the **Image** list, select _Free SQL Server License: SQL Server 2017 Developer on Windows Server 2016_. - 1. Choose to **Change size** for the **Size** of the virtual machine and select the **A2 Basic** offering. Be sure to clean up your resources once you're done with them to prevent any unexpected charges. - - ![Instance details](./media/sql-vm-create-portal-quickstart/basics-instance-details.png) - -1. Under **Administrator account**, provide a username, such as _azureuser_ and a password. The password must be at least 12 characters long and meet the [defined complexity requirements](../../../virtual-machines/windows/faq.yml#what-are-the-password-requirements-when-creating-a-vm-). - - ![Administrator account](./media/sql-vm-create-portal-quickstart/basics-administrator-account.png) - -1. Under **Inbound port rules**, choose **Allow selected ports** and then select **RDP (3389)** from the drop-down. - - ![Inbound port rules](./media/sql-vm-create-portal-quickstart/basics-inbound-port-rules.png) - -## SQL Server settings - -On the **SQL Server settings** tab, configure the following options: - -1. Under **Security & Networking**, select _Public (Internet_) for **SQL Connectivity** and change the port to `1401` to avoid using a well-known port number in the public scenario. -1. Under **SQL Authentication**, select **Enable**. The SQL login credentials are set to the same user name and password that you configured for the VM. Use the default setting for [**Azure Key Vault integration**](azure-key-vault-integration-configure.md). **Storage configuration** is not available for the basic SQL Server VM image, but you can find more information about available options for other images at [storage configuration](storage-configuration.md#new-vms). 
- - ![SQL server security settings](./media/sql-vm-create-portal-quickstart/sql-server-settings.png) - - -1. Change any other settings if needed, and then select **Review + create**. - - ![Review + create](./media/sql-vm-create-portal-quickstart/review-create.png) - - -## Create the SQL Server VM - -On the **Review + create** tab, review the summary, and select **Create** to create SQL Server, resource group, and resources specified for this VM. - -You can monitor the deployment from the Azure portal. The **Notifications** button at the top of the screen shows basic status of the deployment. Deployment can take several minutes. - -## Connect to SQL Server - -1. In the portal, find the **Public IP address** of your SQL Server VM in the **Overview** section of your virtual machine's properties. - -1. On a different computer connected to the Internet, open [SQL Server Management Studio (SSMS)](/sql/ssms/download-sql-server-management-studio-ssms). - - -1. In the **Connect to Server** or **Connect to Database Engine** dialog box, edit the **Server name** value. Enter your VM's public IP address. Then add a comma and add the custom port (**1401**) that you specified when you configured the new VM. For example, `11.22.33.444,1401`. - -1. In the **Authentication** box, select **SQL Server Authentication**. - -1. In the **Login** box, type the name of a valid SQL login. - -1. In the **Password** box, type the password of the login. - -1. Select **Connect**. - - ![ssms connect](./media/sql-vm-create-portal-quickstart/ssms-connect.png) - -## Log in to the VM remotely - -Use the following steps to connect to the SQL Server virtual machine with Remote Desktop: - -[!INCLUDE [Connect to SQL Server VM with remote desktop](../../../../includes/virtual-machines-sql-server-remote-desktop-connect.md)] - -After you connect to the SQL Server virtual machine, you can launch SQL Server Management Studio and connect with Windows Authentication using your local administrator credentials. 
If you enabled SQL Server Authentication, you can also connect with SQL Authentication using the SQL login and password you configured during provisioning. - -Access to the machine enables you to directly change machine and SQL Server settings based on your requirements. For example, you could configure the firewall settings or change SQL Server configuration settings. - -## Clean up resources - -If you do not need your SQL VM to run continually, you can avoid unnecessary charges by stopping it when not in use. You can also permanently delete all resources associated with the virtual machine by deleting its associated resource group in the portal. This permanently deletes the virtual machine as well, so use this command with care. For more information, see [Manage Azure resources through portal](../../../azure-resource-manager/management/manage-resource-groups-portal.md). - - -## Next steps - -In this quickstart, you created a SQL Server 2017 virtual machine in the Azure portal. To learn more about how to migrate your data to the new SQL Server, see the following article. - -> [!div class="nextstepaction"] -> [Migrate a database to a SQL VM](migrate-to-vm-from-sql-server.md) diff --git a/articles/azure-sql/virtual-machines/windows/sql-vm-create-powershell-quickstart.md b/articles/azure-sql/virtual-machines/windows/sql-vm-create-powershell-quickstart.md deleted file mode 100644 index 79810bd5e52b6..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/sql-vm-create-powershell-quickstart.md +++ /dev/null @@ -1,193 +0,0 @@ ---- -title: Create SQL Server on a Windows virtual machine with Azure PowerShell | Microsoft Docs -description: This tutorial shows how to use Azure PowerShell to create a Windows virtual machine running SQL Server 2017. 
-services: virtual-machines-windows -documentationcenter: na -author: bluefooted -tags: azure-resource-manager -ms.service: virtual-machines-sql -ms.subservice: deployment -ms.topic: quickstart -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: infrastructure-services -ms.date: 12/21/2018 -ms.author: pamela -ms.reviewer: mathoma -ms.custom: devx-track-azurepowershell, mode-api ---- - -# Quickstart: Create SQL Server on a Windows virtual machine with Azure PowerShell - -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This quickstart steps through creating a SQL Server virtual machine (VM) with Azure PowerShell. - -> [!TIP] -> - This quickstart provides a path for quickly provisioning and connecting to a SQL VM. For more information about other Azure PowerShell options for creating SQL VMs, see the [Provisioning guide for SQL Server VMs with Azure PowerShell](create-sql-vm-powershell.md). -> - If you have questions about SQL Server virtual machines, see the [Frequently Asked Questions](frequently-asked-questions-faq.yml). - -## Get an Azure subscription - -If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. - - -## Get Azure PowerShell - -[!INCLUDE [updated-for-az.md](../../../../includes/updated-for-az.md)] - -## Configure PowerShell - -1. Open PowerShell and establish access to your Azure account by running the **Connect-AzAccount** command. - - ```powershell - Connect-AzAccount - ``` - -1. When you see the sign-in window, enter your credentials. Use the same email and password that you use to sign in to the Azure portal. - -## Create a resource group - -1. Define a variable with a unique resource group name. To simplify the rest of the quickstart, the remaining commands use this name as a basis for other resource names. - - ```powershell - $ResourceGroupName = "sqlvm1" - ``` - -1. Define a location of a target Azure region for all VM resources. 
- - ```powershell - $Location = "East US" - ``` - -1. Create the resource group. - - ```powershell - New-AzResourceGroup -Name $ResourceGroupName -Location $Location - ``` - -## Configure network settings - -1. Create a virtual network, subnet, and a public IP address. These resources are used to provide network connectivity to the virtual machine and connect it to the internet. - - ``` PowerShell - $SubnetName = $ResourceGroupName + "subnet" - $VnetName = $ResourceGroupName + "vnet" - $PipName = $ResourceGroupName + $(Get-Random) - - # Create a subnet configuration - $SubnetConfig = New-AzVirtualNetworkSubnetConfig -Name $SubnetName -AddressPrefix 192.168.1.0/24 - - # Create a virtual network - $Vnet = New-AzVirtualNetwork -ResourceGroupName $ResourceGroupName -Location $Location ` - -Name $VnetName -AddressPrefix 192.168.0.0/16 -Subnet $SubnetConfig - - # Create a public IP address and specify a DNS name - $Pip = New-AzPublicIpAddress -ResourceGroupName $ResourceGroupName -Location $Location ` - -AllocationMethod Static -IdleTimeoutInMinutes 4 -Name $PipName - ``` - -1. Create a network security group. Configure rules to allow remote desktop (RDP) and SQL Server connections. 
- - ```powershell - # Rule to allow remote desktop (RDP) - $NsgRuleRDP = New-AzNetworkSecurityRuleConfig -Name "RDPRule" -Protocol Tcp ` - -Direction Inbound -Priority 1000 -SourceAddressPrefix * -SourcePortRange * ` - -DestinationAddressPrefix * -DestinationPortRange 3389 -Access Allow - - #Rule to allow SQL Server connections on port 1433 - $NsgRuleSQL = New-AzNetworkSecurityRuleConfig -Name "MSSQLRule" -Protocol Tcp ` - -Direction Inbound -Priority 1001 -SourceAddressPrefix * -SourcePortRange * ` - -DestinationAddressPrefix * -DestinationPortRange 1433 -Access Allow - - # Create the network security group - $NsgName = $ResourceGroupName + "nsg" - $Nsg = New-AzNetworkSecurityGroup -ResourceGroupName $ResourceGroupName ` - -Location $Location -Name $NsgName ` - -SecurityRules $NsgRuleRDP,$NsgRuleSQL - ``` - -1. Create the network interface. - - ```powershell - $InterfaceName = $ResourceGroupName + "int" - $Interface = New-AzNetworkInterface -Name $InterfaceName ` - -ResourceGroupName $ResourceGroupName -Location $Location ` - -SubnetId $VNet.Subnets[0].Id -PublicIpAddressId $Pip.Id ` - -NetworkSecurityGroupId $Nsg.Id - ``` - -## Create the SQL VM - -1. Define your credentials to sign in to the VM. The username is "azureadmin". Make sure you change \<password\> before running the command. - - ``` PowerShell - # Define a credential object - $SecurePassword = ConvertTo-SecureString '<password>' ` - -AsPlainText -Force - $Cred = New-Object System.Management.Automation.PSCredential ("azureadmin", $securePassword) - ``` - -1. Create a virtual machine configuration object and then create the VM. The following command creates a SQL Server 2017 Developer Edition VM on Windows Server 2016. 
- - ```powershell - # Create a virtual machine configuration - $VMName = $ResourceGroupName + "VM" - $VMConfig = New-AzVMConfig -VMName $VMName -VMSize Standard_DS13_V2 | - Set-AzVMOperatingSystem -Windows -ComputerName $VMName -Credential $Cred -ProvisionVMAgent -EnableAutoUpdate | - Set-AzVMSourceImage -PublisherName "MicrosoftSQLServer" -Offer "SQL2017-WS2016" -Skus "SQLDEV" -Version "latest" | - Add-AzVMNetworkInterface -Id $Interface.Id - - # Create the VM - New-AzVM -ResourceGroupName $ResourceGroupName -Location $Location -VM $VMConfig - ``` - - > [!TIP] - > It takes several minutes to create the VM. - -## Register with SQL VM RP - -To get portal integration and SQL VM features, you must register with the [SQL IaaS Agent extension](sql-agent-extension-manually-register-single-vm.md). - -To get full functionality, you need to register with the extension in [full mode](sql-agent-extension-manually-register-single-vm.md#full-mode). Otherwise, register in lightweight mode. - - -## Remote desktop into the VM - -1. Use the following command to retrieve the public IP address for the new VM. - - ```powershell - Get-AzPublicIpAddress -ResourceGroupName $ResourceGroupName | Select IpAddress - ``` - -1. Pass the returned IP address as a command-line parameter to **mstsc** to start a Remote Desktop session into the new VM. - - ``` - mstsc /v:<publicIpAddress> - ``` - -1. When prompted for credentials, choose to enter credentials for a different account. Enter the username with a preceding backslash (for example, `\azureadmin`), and the password that you set previously in this quickstart. - -## Connect to SQL Server - -1. After signing in to the Remote Desktop session, launch **SQL Server Management Studio 2017** from the start menu. - -1. In the **Connect to Server** dialog box, keep the defaults. The server name is the name of the VM. Authentication is set to **Windows Authentication**. Select **Connect**. - -You're now connected to SQL Server locally. 
If you want to connect remotely, you must [configure connectivity](ways-to-connect-to-sql.md) from the Azure portal or manually. - -## Clean up resources - -If you don't need the VM to run continuously, you can avoid unnecessary charges by stopping it when not in use. The following command stops the VM but leaves it available for future use. - -```powershell -Stop-AzVM -Name $VMName -ResourceGroupName $ResourceGroupName -``` - -You can also permanently delete all resources associated with the virtual machine with the **Remove-AzResourceGroup** command. Doing so permanently deletes the virtual machine as well, so use this command with care. - -## Next steps - -In this quickstart, you created a SQL Server 2017 virtual machine using Azure PowerShell. To learn more about how to migrate your data to the new SQL Server, see the following article. - -> [!div class="nextstepaction"] -> [Migrate a database to a SQL VM](migrate-to-vm-from-sql-server.md) diff --git a/articles/azure-sql/virtual-machines/windows/storage-configuration.md b/articles/azure-sql/virtual-machines/windows/storage-configuration.md deleted file mode 100644 index cb8a990741505..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/storage-configuration.md +++ /dev/null @@ -1,259 +0,0 @@ ---- -title: Configure storage for SQL Server VMs | Microsoft Docs -description: This topic describes how Azure configures storage for SQL Server VMs during provisioning (Azure Resource Manager deployment model). It also explains how you can configure storage for your existing SQL Server VMs. 
-services: virtual-machines-windows -documentationcenter: na -author: bluefooted -tags: azure-resource-manager - -ms.assetid: 169fc765-3269-48fa-83f1-9fe3e4e40947 -ms.service: virtual-machines-sql -ms.subservice: management - -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 12/21/2021 -ms.author: pamela -ms.reviewer: mathoma ---- -# Configure storage for SQL Server VMs -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -This article teaches you how to configure your storage for your SQL Server on Azure Virtual Machines (VMs). - -SQL Server VMs deployed through marketplace images automatically follow default [storage best practices](performance-guidelines-best-practices-storage.md) which can be modified during deployment. Some of these configuration settings can be changed after deployment. - - -## Prerequisites - -To use the automated storage configuration settings, your virtual machine requires the following characteristics: - -* Provisioned with a [SQL Server gallery image](sql-server-on-azure-vm-iaas-what-is-overview.md#payasyougo) or registered with the [SQL IaaS extension](). -* Uses the [Resource Manager deployment model](../../../azure-resource-manager/management/deployment-models.md). -* Uses [premium SSDs](../../../virtual-machines/disks-types.md). - -## New VMs - -The following sections describe how to configure storage for new SQL Server virtual machines. - -### Azure portal - -When provisioning an Azure VM using a SQL Server gallery image, select **Change configuration** under **Storage** on the **SQL Server Settings** tab to open the **Configure storage** page. You can either leave the values at default, or modify the type of disk configuration that best suits your needs based on your workload. 
- -![Screenshot that highlights the SQL Server settings tab and the Change configuration option.](./media/storage-configuration/sql-vm-storage-configuration-provisioning.png) - -Choose the drive location for your data files and log files, specifying the disk type, and number of disks. Use the IOPS values to determine the best storage configuration to meet your business needs. Choosing premium storage sets the caching to *ReadOnly* for the data drive, and *None* for the log drive as per [SQL Server VM performance best practices](./performance-guidelines-best-practices-checklist.md). - -![SQL Server VM Storage Configuration During Provisioning](./media/storage-configuration/sql-vm-storage-configuration.png) - -The disk configuration is completely customizable so that you can configure the storage topology, disk type and IOPs you need for your SQL Server VM workload. You also have the ability to use UltraSSD (preview) as an option for the **Disk type** if your SQL Server VM is in one of the supported regions (East US 2, SouthEast Asia and North Europe) and you've enabled [ultra disks for your subscription](../../../virtual-machines/disks-enable-ultra-ssd.md). - -Configure your tempdb database settings under **Tempdb storage**, such as the location of the database files, as well as the number of files, initial size, and autogrowth size in MB. Currently, during deployment, the max number of tempdb files is 8, but more files can be added after the SQL Server VM is deployed. - -![Screenshot that shows where you can configure the tempdb storage for your SQL VM](./media/create-sql-vm-portal/storage-configuration-tempdb-storage.png) - -Additionally, you have the ability to set the caching for the disks. Azure VMs have a multi-tier caching technology called [Blob Cache](../../../virtual-machines/premium-storage-performance.md#disk-caching) when used with [Premium Disks](../../../virtual-machines/disks-types.md#premium-ssds). 
Blob Cache uses a combination of the Virtual Machine RAM and local SSD for caching. - -Disk caching for Premium SSD can be *ReadOnly*, *ReadWrite* or *None*. - -- *ReadOnly* caching is highly beneficial for SQL Server data files that are stored on Premium Storage. *ReadOnly* caching brings low read latency, high read IOPS, and throughput, as reads are performed from cache, which is within the VM memory and local SSD. These reads are much faster than reads from data disk, which is from Azure Blob storage. Premium storage does not count the reads served from cache towards the disk IOPS and throughput. Therefore, your application is able to achieve higher total IOPS and throughput. -- *None* cache configuration should be used for the disks hosting SQL Server Log file as the log file is written sequentially and does not benefit from *ReadOnly* caching. -- *ReadWrite* caching should not be used to host SQL Server files as SQL Server does not support data consistency with the *ReadWrite* cache. Writes waste capacity of the *ReadOnly* blob cache and latencies slightly increase if writes go through *ReadOnly* blob cache layers. - - - > [!TIP] - > Be sure that your storage configuration matches the limitations imposed by the selected VM size. Choosing storage parameters that exceed the performance cap of the VM size will result in the warning: `The desired performance might not be reached due to the maximum virtual machine disk performance cap`. Either decrease the IOPs by changing the disk type, or increase the performance cap limitation by increasing the VM size. This will not stop provisioning. - - -Based on your choices, Azure performs the following storage configuration tasks after creating the VM: - -* Creates and attaches Premium SSDs to the virtual machine. -* Configures the data disks to be accessible to SQL Server. -* Configures the data disks into a storage pool based on the specified size and performance (IOPS and throughput) requirements. 
-* Associates the storage pool with a new drive on the virtual machine. -* Optimizes this new drive based on your specified workload type (Data warehousing, Transactional processing, or General). - -For a full walkthrough of how to create a SQL Server VM in the Azure portal, see [the provisioning tutorial](/azure/azure-sql/virtual-machines/windows/create-sql-vm-portal). - - - -### Resource Manager templates - -If you use the following Resource Manager templates, two premium data disks are attached by default, with no storage pool configuration. However, you can customize these templates to change the number of premium data disks that are attached to the virtual machine. - -* [Create VM with Automated Backup](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.compute/vm-sql-full-autobackup) -* [Create VM with Automated Patching](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.compute/vm-sql-full-autopatching) -* [Create VM with AKV Integration](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.compute/vm-sql-full-keyvault) - -### Quickstart template - -You can use the following quickstart template to deploy a SQL Server VM using storage optimization. - -* [Create VM with storage optimization](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sqlvirtualmachine/sql-vm-new-storage/) -* [Create VM using UltraSSD](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.sqlvirtualmachine/sql-vm-new-storage-ultrassd) - -## Existing VMs - -For existing SQL Server VMs, you can modify some storage settings in the Azure portal. Open your [SQL virtual machines resource](manage-sql-vm-portal.md#access-the-resource), and select **Overview**. The SQL Server **Overview** page shows the current storage usage of your VM. All drives that exist on your VM are displayed in this chart. 
For each drive, the storage space displays in four sections: - -* SQL data -* SQL log -* Other (non-SQL storage) -* Available - -To modify the storage settings, select **Storage configuration** under **Settings**. - -![Screenshot that highlights the Configure option and the Storage Usage section.](./media/storage-configuration/sql-vm-storage-configuration-existing.png) - -You can modify the disk settings for the drives that were configured during the SQL Server VM creation process. Selecting **Configure** opens the drive modification page, allowing you to change the disk type, as well as add additional disks. - -![Configure Storage for Existing SQL Server VM](./media/storage-configuration/sql-vm-storage-extend-drive.png) - -You can also configure the settings for tempdb directly from the portal. Select **Configure** to open the tempdb settings page, where you can add more data files: - -![Configure tempdb settings for Existing SQL Server VM](./media/storage-configuration/tempdb-customization.png) - - - -## Automated changes - -This section provides a reference for the storage configuration changes that Azure automatically performs during SQL Server VM provisioning or configuration in the Azure portal. - -* Azure configures a storage pool from storage selected from your VM. The next section of this topic provides details about storage pool configuration. -* Automatic storage configuration always uses [premium SSDs](../../../virtual-machines/disks-types.md) P30 data disks. Consequently, there is a 1:1 mapping between your selected number of Terabytes and the number of data disks attached to your VM. - -For pricing information, see the [Storage pricing](https://azure.microsoft.com/pricing/details/storage) page on the **Disk Storage** tab. - -### Creation of the storage pool - -Azure uses the following settings to create the storage pool on SQL Server VMs. 
- -| Setting | Value | -| --- | --- | -| Stripe size |256 KB (Data warehousing); 64 KB (Transactional) | -| Disk sizes |1 TB each | -| Cache |Read | -| Allocation size |64 KB NTFS allocation unit size | -| Recovery | Simple recovery (no resiliency) | -| Number of columns |Number of data disks up to 8<sup>1</sup> | - - -<sup>1</sup> After the storage pool is created, you cannot alter the number of columns in the storage pool. - - -### Workload optimization settings - -The following table describes the three workload type options available and their corresponding optimizations: - -| Workload type | Description | Optimizations | -| --- | --- | --- | -| **General** |Default setting that supports most workloads |None | -| **Transactional processing** |Optimizes the storage for traditional database OLTP workloads |Trace Flag 1117
                Trace Flag 1118 | -| **Data warehousing** |Optimizes the storage for analytic and reporting workloads |Trace Flag 610
                Trace Flag 1117 | - -> [!NOTE] -> You can only specify the workload type when you provision a SQL Server virtual machine by selecting it in the storage configuration step. - -## Enable caching - -Change the caching policy at the disk level. You can do so using the Azure portal, [PowerShell](/powershell/module/az.compute/set-azvmdatadisk), or the [Azure CLI](/cli/azure/vm/disk). - -To change your caching policy in the Azure portal, follow these steps: - -1. Stop your SQL Server service. -1. Sign into the [Azure portal](https://portal.azure.com). -1. Navigate to your virtual machine, select **Disks** under **Settings**. - - ![Screenshot showing the VM disk configuration blade in the Azure portal.](./media/storage-configuration/disk-in-portal.png) - -1. Choose the appropriate caching policy for your disk from the drop-down. - - ![Screenshot showing the disk caching policy configuration in the Azure portal.](./media/storage-configuration/azure-disk-config.png) - -1. After the change takes effect, reboot the SQL Server VM and start the SQL Server service. - - -## Enable Write Accelerator - -Write Acceleration is a disk feature that is only available for the M-Series Virtual Machines (VMs). The purpose of write acceleration is to improve the I/O latency of writes against Azure Premium Storage when you need single digit I/O latency due to high volume mission critical OLTP workloads or data warehouse environments. - -Stop all SQL Server activity and shut down the SQL Server service before making changes to your write acceleration policy. - -If your disks are striped, enable Write Acceleration for each disk individually, and your Azure VM should be shut down before making any changes. - -To enable Write Acceleration using the Azure portal, follow these steps: - -1. Stop your SQL Server service. If your disks are striped, shut down the virtual machine. -1. Sign into the [Azure portal](https://portal.azure.com). -1. 
Navigate to your virtual machine, select **Disks** under **Settings**. - - ![Screenshot showing the VM disk configuration blade in the Azure portal.](./media/storage-configuration/disk-in-portal.png) - -1. Choose the cache option with **Write Accelerator** for your disk from the drop-down. - - ![Screenshot showing the write accelerator cache policy.](./media/storage-configuration/write-accelerator.png) - -1. After the change takes effect, start the virtual machine and SQL Server service. - -## Disk striping - -For more throughput, you can add additional data disks and use disk striping. To determine the number of data disks, analyze the throughput and bandwidth required for your SQL Server data files, including the log and tempdb. Throughput and bandwidth limits vary by VM size. To learn more, see [VM Size](../../../virtual-machines/sizes.md) - - -* For Windows 8/Windows Server 2012 or later, use [Storage Spaces](/previous-versions/windows/it-pro/windows-server-2012-R2-and-2012/hh831739(v=ws.11)) with the following guidelines: - - 1. Set the interleave (stripe size) to 64 KB (65,536 bytes) to avoid performance impact due to partition misalignment. This must be set with PowerShell. - - 2. Set column count = number of physical disks. Use PowerShell when configuring more than 8 disks (not Server Manager UI). 
- -For example, the following PowerShell creates a new storage pool with the interleave size to 64 KB and the number of columns equal to the amount of physical disk in the storage pool: - -# [Windows Server 2016 +](#tab/windows2016) - - ```powershell - $PhysicalDisks = Get-PhysicalDisk | Where-Object {$_.FriendlyName -like "*2" -or $_.FriendlyName -like "*3"} - - New-StoragePool -FriendlyName "DataFiles" -StorageSubsystemFriendlyName "Windows Storage on " ` - -PhysicalDisks $PhysicalDisks | New-VirtualDisk -FriendlyName "DataFiles" ` - -Interleave 65536 -NumberOfColumns $PhysicalDisks.Count -ResiliencySettingName simple ` - -UseMaximumSize |Initialize-Disk -PartitionStyle GPT -PassThru |New-Partition -AssignDriveLetter ` - -UseMaximumSize |Format-Volume -FileSystem NTFS -NewFileSystemLabel "DataDisks" ` - -AllocationUnitSize 65536 -Confirm:$false - ``` - -In Windows Server 2016 and later, the default value for `-StorageSubsystemFriendlyName` is `Windows Storage on ` - - - -# [Windows Server 2008 - 2012 R2](#tab/windows2012) - - - - ```powershell - $PhysicalDisks = Get-PhysicalDisk | Where-Object {$_.FriendlyName -like "*2" -or $_.FriendlyName -like "*3"} - - New-StoragePool -FriendlyName "DataFiles" -StorageSubsystemFriendlyName "Storage Spaces on " ` - -PhysicalDisks $PhysicalDisks | New-VirtualDisk -FriendlyName "DataFiles" ` - -Interleave 65536 -NumberOfColumns $PhysicalDisks.Count -ResiliencySettingName simple ` - -UseMaximumSize |Initialize-Disk -PartitionStyle GPT -PassThru |New-Partition -AssignDriveLetter ` - -UseMaximumSize |Format-Volume -FileSystem NTFS -NewFileSystemLabel "DataDisks" ` - -AllocationUnitSize 65536 -Confirm:$false - ``` - -In Windows Server 2008 to 2012 R2, the default value for `-StorageSubsystemFriendlyName` is `Storage Spaces on `. - ---- - - - * For Windows 2008 R2 or earlier, you can use dynamic disks (OS striped volumes) and the stripe size is always 64 KB. This option is deprecated as of Windows 8/Windows Server 2012. 
For information, see the support statement at [Virtual Disk Service is transitioning to Windows Storage Management API](/windows/win32/w8cookbook/vds-is-transitioning-to-wmiv2-based-windows-storage-management-api). - - * If you are using [Storage Spaces Direct (S2D)](/windows-server/storage/storage-spaces/storage-spaces-direct-in-vm) with [SQL Server Failover Cluster Instances](./failover-cluster-instance-storage-spaces-direct-manually-configure.md), you must configure a single pool. Although different volumes can be created on that single pool, they will all share the same characteristics, such as the same caching policy. - - * Determine the number of disks associated with your storage pool based on your load expectations. Keep in mind that different VM sizes allow different numbers of attached data disks. For more information, see [Sizes for virtual machines](../../../virtual-machines/sizes.md?toc=/azure/virtual-machines/windows/toc.json). - - -## Next steps - -For other topics related to running SQL Server in Azure VMs, see [SQL Server on Azure Virtual Machines](sql-server-on-azure-vm-iaas-what-is-overview.md). - diff --git a/articles/azure-sql/virtual-machines/windows/storage-migrate-to-ultradisk.md b/articles/azure-sql/virtual-machines/windows/storage-migrate-to-ultradisk.md deleted file mode 100644 index ec6fa688f0c27..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/storage-migrate-to-ultradisk.md +++ /dev/null @@ -1,166 +0,0 @@ ---- -title: Migrate log disk to Ultra disk -description: Learn how to migrate your SQL Server on Azure Virtual Machine (VM) log disk to an Azure Ultradisk to take advantage of high performance and low latency. 
-services: virtual-machines-windows -documentationcenter: na -author: bluefooted -editor: '' -tags: azure-service-management -ms.assetid: -ms.service: virtual-machines-sql -ms.subservice: management - -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 07/09/2020 -ms.author: pamela -ms.reviewer: mathoma - ---- -# Migrate log disk to Ultra disk -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -Azure ultra disks deliver high throughput, high IOPS, and consistently low latency disk storage for SQL Server on Azure Virtual Machine (VM). - -This article teaches you to migrate your log disk to an ultra SSD to take advantage of the performance benefits offered by ultra disks. - -## Back up database - -Complete a [full backup](backup-restore.md) up of your database. - -## Attach disk - -Attach the Ultra SSD to your virtual machine once you have enabled ultradisk compatibility on the VM. - -Ultra disk is supported on a subset of VM sizes and regions. Before proceeding, validate that your VM is in a region, zone, and size that supports ultra disk. You can [determine and validate VM size and region](../../../virtual-machines/disks-enable-ultra-ssd.md#determine-vm-size-and-region-availability) using the Azure CLI or PowerShell. - -### Enable compatibility - -To enable compatibility, follow these steps: - -1. Go to your virtual machine in the [Azure portal](https://portal.azure.com/). -1. Stop/deallocate the virtual machine. -1. Select **Disks** under **Settings** and then select **Additional settings**. - - :::image type="content" source="media/storage-migrate-to-ultradisk/additional-disks-settings-azure-portal.png" alt-text="Select additional settings for Disks under Settings in the Azure portal"::: - -1. Select **Yes** to **Enable Ultra disk compatibility**. 
- - :::image type="content" source="../../../virtual-machines/media/virtual-machines-disks-getting-started-ultra-ssd/enable-ultra-disks-existing-vm.png" alt-text="Screenshot that shows the Yes option."::: - -1. Select **Save**. - - - -### Attach disk - -Use the Azure portal to attach an ultra disk to your virtual machine. For details, see [Attach an ultra disk](../../../virtual-machines/disks-enable-ultra-ssd.md#attach-an-ultra-disk). - -Once the disk is attached, start your VM once more using the Azure portal. - - - -## Format disk - -Connect to your virtual machine and format your ultra disk. - -To format your ultra disk, follow these steps: - -1. Connect to your VM by using Remote Desktop Protocol (RDP). -1. Use [Disk Management](/windows-server/storage/disk-management/overview-of-disk-management) to format and partition your newly attached ultra disk. - - -## Use disk for log - -Configure SQL Server to use the new log drive. You can do so using Transact-SQL (T-SQL) or SQL Server Management Studio (SSMS). The account used for the SQL Server service account must have full control of the new log file location. - -### Configure permissions - -1. Verify the service account used by SQL Server. You can do so by using SQL Server Configuration Manager or Services.msc. -1. Navigate to your new disk. -1. Create a folder (or multiple folders) to be used for your log file. -1. Right-click the folder and select **Properties**. -1. On the **Security** tab, grant full control access to the SQL Server service account. -1. Select **OK** to save your settings. -1. Repeat this for every root-level folder where you plan to have SQL data. - -### Use new log drive - -After permission has been granted, use either Transact-SQL (T-SQL) or SQL Server Management Studio (SSMS) to detach the database and move existing log files to the new location. - - > [!CAUTION] - > Detaching the database will take it offline, closing connections and rolling back any transactions that are in-flight. 
Proceed with caution and during a down-time maintenance window. - - - -# [Transact-SQL (T-SQL)](#tab/tsql) - -Use T-SQL to move the existing files to a new location: - -1. Connect to your database in SQL Server Management Studio and open a **New Query** window. -1. Get the existing files and locations: - - ```sql - USE AdventureWorks - GO - - sp_helpfile - GO - ``` - -1. Detach the database: - - ```sql - USE master - GO - - sp_detach_db 'AdventureWorks' - GO - ``` - -1. Use file explorer to move the log file to the new location on the ultra disk. - -1. Attach the database, specifying the new file locations: - - ```sql - sp_attach_db 'AdventureWorks' - 'E:\Fixed_FG\AdventureWorks.mdf', - 'E:\Fixed_FG\AdventureWorks_2.ndf', - 'F:\New_Log\AdventureWorks_log.ldf' - GO - ``` - -At this point, the database comes online with the log in the new location. - - - -# [SQL Server Management Studio (SSMS)](#tab/ssms) - -Use SSMS to move the existing files to a new location: - -1. Connect to your database in SQL Server Management Studio (SSMS). -1. Right-click the database, select **Properties** and then select **Files**. -1. Note down the path of the existing files. -1. Select **OK** to close the dialog box. -1. Right-click the database, select **Tasks** > **Detach**. -1. Follow the wizard to detach the database. -1. Use File Explorer to manually move the log file to the new location. -1. Attach the database in SQL Server Management Studio - 1. Right-click **Databases** in **Object Explorer** and select **Attach database**. - 1. Using the dialog box, add each file, including the log file in its new location. - 1. Select **OK** to attach the database. - -At this point, the database comes online with the log in the new location. - ---- - - -## Next steps - -Review the [performance best practices](./performance-guidelines-best-practices-checklist.md) for additional settings to improve performance. 
- -For an overview of SQL Server on Azure Virtual Machines, see the following articles: - -- [Overview of SQL Server on Windows VMs](sql-server-on-azure-vm-iaas-what-is-overview.md) -- [Overview of SQL Server on Linux VMs](../linux/sql-server-on-linux-vm-what-is-iaas-overview.md) \ No newline at end of file diff --git a/articles/azure-sql/virtual-machines/windows/ways-to-connect-to-sql.md b/articles/azure-sql/virtual-machines/windows/ways-to-connect-to-sql.md deleted file mode 100644 index 9313c783a45dd..0000000000000 --- a/articles/azure-sql/virtual-machines/windows/ways-to-connect-to-sql.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: Connect to a SQL Server virtual machine (Resource Manager) | Microsoft Docs -description: Learn how to connect to your SQL Server virtual machine on Azure. This topic uses the classic deployment model. The scenarios differ depending on the networking configuration and the location of the client. -services: virtual-machines-windows -documentationcenter: na -author: bluefooted -tags: azure-resource-manager - -ms.assetid: aa5bf144-37a3-4781-892d-e0e300913d03 -ms.service: virtual-machines-sql -ms.subservice: management - -ms.topic: how-to -ms.tgt_pltfrm: vm-windows-sql-server -ms.workload: iaas-sql-server -ms.date: 12/12/2017 -ms.author: pamela -ms.reviewer: mathoma ---- -# Connect to a SQL Server virtual machine on Azure -[!INCLUDE[appliesto-sqlvm](../../includes/appliesto-sqlvm.md)] - -## Overview - -This topic describes how to connect to your SQL on Azure virtual machine (VM). It covers some [general connectivity scenarios](#connection-scenarios) and then provides [steps in the portal for changing connectivity settings](#change). If you need to troubleshoot or configure connectivity outside of the portal, see the [manual configuration](#manual) at the end of this topic. - -If you would rather have a full walkthrough of both provisioning and connectivity, see [Provision a SQL Server virtual machine on Azure](create-sql-vm-portal.md). 
- -## Connection scenarios - -The way a client connects to a SQL Server VM differs depending on the location of the client and the networking configuration. - -If you provision a SQL Server VM in the Azure portal, you have the option of specifying the type of **SQL connectivity**. - -![Public SQL connectivity option during provisioning](./media/ways-to-connect-to-sql/sql-vm-portal-connectivity.png) - -Your options for connectivity include: - -| Option | Description | -|---|---| -| **Public** | Connect to SQL Server over the internet. | -| **Private** | Connect to SQL Server in the same virtual network. | -| **Local** | Connect to SQL Server locally on the same virtual machine. | - -The following sections explain the **Public** and **Private** options in more detail. - -## Connect to SQL Server over the internet - -If you want to connect to your SQL Server database engine from the internet, select **Public** for the **SQL connectivity** type in the portal during provisioning. The portal automatically does the following steps: - -* Enables the TCP/IP protocol for SQL Server. -* Configures a firewall rule to open the SQL Server TCP port (default 1433). -* Enables SQL Server authentication, required for public access. -* Configures the network security group on the VM to allow TCP traffic on the SQL Server port. - -> [!IMPORTANT] -> The virtual machine images for the SQL Server Developer and Express editions do not automatically enable the TCP/IP protocol. For Developer and Express editions, you must use SQL Server Configuration Manager to [manually enable the TCP/IP protocol](#manualtcp) after creating the VM. - -Any client with internet access can connect to the SQL Server instance by specifying either the public IP address of the virtual machine or any DNS label assigned to that IP address. If the SQL Server port is 1433, you do not need to specify it in the connection string. 
The following connection string connects to a SQL VM with a DNS label of `sqlvmlabel.eastus.cloudapp.azure.com` using SQL authentication (you could also use the public IP address). - -``` -Server=sqlvmlabel.eastus.cloudapp.azure.com;Integrated Security=false;User ID=;Password= -``` - -Although this string enables connectivity for clients over the internet, this does not imply that anyone can connect to your SQL Server instance. Outside clients have to use the correct username and password. However, for additional security, you can avoid the well-known port 1433. For example, if you were to configure SQL Server to listen on port 1500 and establish proper firewall and network security group rules, you could connect by appending the port number to the server name. The following example alters the previous one by adding a custom port number, **1500**, to the server name: - -``` -Server=sqlvmlabel.eastus.cloudapp.azure.com,1500;Integrated Security=false;User ID=;Password=" -``` - -> [!NOTE] -> When you query SQL Server on VM over the internet, all outgoing data from the Azure datacenter is subject to normal [pricing on outbound data transfers](https://azure.microsoft.com/pricing/details/data-transfers/). - -## Connect to SQL Server within a virtual network - -When you choose **Private** for the **SQL connectivity** type in the portal, Azure configures most of the settings identical to **Public**. The one difference is that there is no network security group rule to allow outside traffic on the SQL Server port (default 1433). - -> [!IMPORTANT] -> The virtual machine images for the SQL Server Developer and Express editions do not automatically enable the TCP/IP protocol. For Developer and Express editions, you must use SQL Server Configuration Manager to [manually enable the TCP/IP protocol](#manualtcp) after creating the VM. 
- -Private connectivity is often used in conjunction with a [virtual network](../../../virtual-network/virtual-networks-overview.md), which enables several scenarios. You can connect VMs in the same virtual network, even if those VMs exist in different resource groups. And with a [site-to-site VPN](../../../vpn-gateway/tutorial-site-to-site-portal.md), you can create a hybrid architecture that connects VMs with on-premises networks and machines. - -Virtual networks also enable you to join your Azure VMs to a domain. This is the only way to use Windows authentication to SQL Server. The other connection scenarios require SQL authentication with user names and passwords. - -Assuming that you have configured DNS in your virtual network, you can connect to your SQL Server instance by specifying the SQL Server VM computer name in the connection string. The following example also assumes that Windows authentication has been configured and that the user has been granted access to the SQL Server instance. - -``` -Server=mysqlvm;Integrated Security=true -``` - -## Change SQL connectivity settings - -You can change the connectivity settings for your SQL Server virtual machine in the Azure portal. - -1. In the Azure portal, select **SQL virtual machines**. - -2. Select your SQL Server VM. - -3. Under **Settings**, select **Security**. - -4. Change the **SQL connectivity level** to your required setting. You can optionally use this area to change the SQL Server port or the SQL authentication settings. - - ![Change SQL connectivity](./media/ways-to-connect-to-sql/sql-vm-portal-connectivity-change.png) - -5. Wait several minutes for the update to complete. - - ![SQL VM update notification](./media/ways-to-connect-to-sql/sql-vm-updating-notification.png) - -## Enable TCP/IP for Developer and Express editions - -When changing SQL Server connectivity settings, Azure does not automatically enable the TCP/IP protocol for SQL Server Developer and Express editions. 
The steps below explain how to manually enable TCP/IP so that you can connect remotely by IP address. - -First, connect to the SQL Server virtual machine with remote desktop. - -[!INCLUDE [Connect to SQL Server VM with remote desktop](../../../../includes/virtual-machines-sql-server-remote-desktop-connect.md)] - -Next, enable the TCP/IP protocol with **SQL Server Configuration Manager**. - -[!INCLUDE [Connect to SQL Server VM with remote desktop](../../../../includes/virtual-machines-sql-server-connection-tcp-protocol.md)] - -## Connect with SSMS - -The following steps show how to create an optional DNS label for your Azure VM and then connect with SQL Server Management Studio (SSMS). - -[!INCLUDE [Connect to SQL Server in a VM Resource Manager](../../../../includes/virtual-machines-sql-server-connection-steps-resource-manager.md)] - -## Manual configuration and troubleshooting - -Although the portal provides options to automatically configure connectivity, it is useful to know how to manually configure connectivity. Understanding the requirements can also aid troubleshooting. - -The following table lists the requirements to connect to SQL Server on Azure VM. - -| Requirement | Description | -|---|---| -| [Enable SQL Server authentication mode](/sql/database-engine/configure-windows/change-server-authentication-mode#use-ssms) | SQL Server authentication is needed to connect to the VM remotely unless you have configured Active Directory on a virtual network. | -| [Create a SQL login](/sql/relational-databases/security/authentication-access/create-a-login) | If you are using SQL authentication, you need a SQL login with a user name and password that also has permissions to your target database. | -| [Enable TCP/IP protocol](#manualtcp) | SQL Server must allow connections over TCP. 
| -| [Enable firewall rule for the SQL Server port](/sql/database-engine/configure-windows/configure-a-windows-firewall-for-database-engine-access) | The firewall on the VM must allow inbound traffic on the SQL Server port (default 1433). | -| [Create a network security group rule for TCP 1433](../../../virtual-network/manage-network-security-group.md#create-a-security-rule) | You must allow the VM to receive traffic on the SQL Server port (default 1433) if you want to connect over the internet. Local and virtual-network-only connections do not require this. This is the only step required in the Azure portal. | - -> [!TIP] -> The steps in the preceding table are done for you when you configure connectivity in the portal. Use these steps only to confirm your configuration or to set up connectivity manually for SQL Server. - -## Next steps - -To see provisioning instructions along with these connectivity steps, see [Provisioning a SQL Server virtual machine on Azure](create-sql-vm-portal.md). - -For other topics related to running SQL Server on Azure VMs, see [SQL Server on Azure virtual machines](sql-server-on-azure-vm-iaas-what-is-overview.md). 
\ No newline at end of file diff --git a/articles/azure-video-analyzer/index.yml b/articles/azure-video-analyzer/index.yml index 1f799f7d6eb49..7a91bc0d6f31b 100644 --- a/articles/azure-video-analyzer/index.yml +++ b/articles/azure-video-analyzer/index.yml @@ -25,10 +25,6 @@ highlightedContent: - title: Azure Video Analyzer documentation itemType: overview url: ./video-analyzer-docs/overview.md - # Card - - title: Azure Video Analyzer for Media documentation - itemType: overview - url: ./video-analyzer-for-media-docs/video-indexer-overview.md additionalContent: sections: diff --git a/articles/azure-video-analyzer/video-analyzer-docs/includes/deprecation-notice.md b/articles/azure-video-analyzer/video-analyzer-docs/includes/deprecation-notice.md new file mode 100644 index 0000000000000..25b946a725c6e --- /dev/null +++ b/articles/azure-video-analyzer/video-analyzer-docs/includes/deprecation-notice.md @@ -0,0 +1,12 @@ +--- +author: Juliako +ms.service: azure-video-analyzer +ms.topic: include +ms.date: 03/25/2022 +ms.author: anilmur +--- + +> [!NOTE] +> We’re retiring the Azure Video Analyzer preview service, you're advised to **transition your applications off of Video Analyzer by 01 December 2022.** +> +> **Action Required**: To minimize disruption to your workloads, transition your application from Video Analyzer per suggestions described in this [guide](../transition-from-video-analyzer.md) before December 01, 2022. After December 1, 2022 your Azure Video Analyzer account will no longer function. 
diff --git a/articles/azure-video-analyzer/video-analyzer-docs/toc.yml b/articles/azure-video-analyzer/video-analyzer-docs/toc.yml index 3e591c06f554d..c74cafec339ad 100644 --- a/articles/azure-video-analyzer/video-analyzer-docs/toc.yml +++ b/articles/azure-video-analyzer/video-analyzer-docs/toc.yml @@ -5,7 +5,7 @@ - name: About Azure Video Analyzer href: overview.md - name: About Video Analyzer for Media - href: ../video-analyzer-for-media-docs/video-indexer-overview.md + href: /azure-video-indexer/video-indexer-overview.md - name: Terminology href: terminology.md - name: What's new @@ -111,6 +111,8 @@ href: continuous-video-recording.md - name: How-to guides items: + - name: Transition from Video Analyzer + href: transition-from-video-analyzer.md - name: Setup items: - name: Create a new account diff --git a/articles/azure-video-analyzer/video-analyzer-docs/transition-from-video-analyzer.md b/articles/azure-video-analyzer/video-analyzer-docs/transition-from-video-analyzer.md new file mode 100644 index 0000000000000..5a0f43d6b766e --- /dev/null +++ b/articles/azure-video-analyzer/video-analyzer-docs/transition-from-video-analyzer.md @@ -0,0 +1,40 @@ +--- +title: Transition from Azure Video Analyzer +description: This article describes some options to transition off of Video Analyzer +manager: femila +ms.topic: conceptual +ms.date: 03/25/2022 +ms.author: anilmur +--- + +# Transition from Video Analyzer + +This article describes some options to transition your video analysis application off of Video Analyzer. 
+ +[!INCLUDE [deprecation notice](./includes/deprecation-notice.md)] + +## When using Spatial Analysis + +If you're using Video Analyzer on an edge server together with the [Spatial Analysis](../../cognitive-services/computer-vision/intro-to-spatial-analysis-public-preview.md) container from Cognitive Services, you have the following options: + +* You can switch to [Dynamics 365 Connected Spaces](/dynamics365/connected-spaces/), which is a SaaS (software as a service) solution currently targeting the retail industry. +* You can connect your RTSP cameras directly to the Spatial Analysis container, and build [web applications](../../cognitive-services/computer-vision/spatial-analysis-web-app.md). + +## When using other AI models + +If you're using Video Analyzer on an edge server to analyze live video with other AI models, your options are as follows. + +* If you're using an AI model from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo) provided by Intel(R), you may be able to make use of their Deep Learning (DL) Streamer video analytics framework. See [OpenVINO™ Toolkit - DL Streamer repository](https://github.com/openvinotoolkit/dlstreamer_gst) for more information. +* If you're using an AI model optimized for running on NVIDIA(R) GPU, then you should consider the different [reference implementations](https://developer.nvidia.com/deepstream-getting-started) provided for their [DeepStream SDK](https://developer.nvidia.com/deepstream-sdk). Also check out the [Intelligent Video Analytics with NVIDIA Jetson and Microsoft Azure](https://github.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure) example that demonstrates an end-to-end architecture for video analytics. + +## Video Management Systems + +There are several commercial solutions for [Video Management Systems](./terminology.md#vms), see the [available reviews and ratings](https://www.gartner.com/reviews/market/video-surveillance-management-systems). 
+ +## Next steps + +Transition your applications from Video Analyzer using suggestions described above by 01 December 2022. + + + + diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/animated-characters-recognition-how-to.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/animated-characters-recognition-how-to.md deleted file mode 100644 index 84842f99a8982..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/animated-characters-recognition-how-to.md +++ /dev/null @@ -1,205 +0,0 @@ ---- -title: Animated character detection with Azure Video Analyzer for Media (formerly Video Indexer) how to -titleSuffix: Azure Video Analyzer -description: This how to demonstrates how to use animated character detection with Azure Video Analyzer for Media (formerly Video Indexer). -services: azure-video-analyzer -author: Juliako -manager: femila - -ms.custom: references_regions -ms.topic: how-to -ms.subservice: azure-video-analyzer-media -ms.date: 12/07/2020 -ms.author: juliako ---- - -# Use the animated character detection (preview) with portal and API - -Azure Video Analyzer for Media (formerly Video Indexer) supports detection, grouping, and recognition of characters in animated content, this functionality is available through the Azure portal and through API. Review [this overview](animated-characters-recognition.md) topic. - -This article demonstrates to how to use the animated character detection with the Azure portal and the Video Analyzer for Media API. - -## Use the animated character detection with portal - -In the trial accounts the Custom Vision integration is managed by Video Analyzer for Media, you can start creating and using the animated characters model. If using the trial account, you can skip the following ("Connect your Custom Vision account") section. 
- -### Connect your Custom Vision account (paid accounts only) - -If you own a Video Analyzer for Media paid account, you need to connect a Custom Vision account first. If you don't have a Custom Vision account already, please create one. For more information, see [Custom Vision](../../cognitive-services/custom-vision-service/overview.md). - -> [!NOTE] -> Both accounts need to be in the same region. The Custom Vision integration is currently not supported in the Japan region. - -Paid accounts that have access to their Custom Vision account can see the models and tagged images there. Learn more about [improving your classifier in Custom Vision](../../cognitive-services/custom-vision-service/getting-started-improving-your-classifier.md). - -Note that the training of the model should be done only via Video Analyzer for Media, and not via the Custom Vision website. - -#### Connect a Custom Vision account with API - -Follow these steps to connect your Custom Vision account to Video Analyzer for Media, or to change the Custom Vision account that is currently connected to Video Analyzer for Media: - -1. Browse to [www.customvision.ai](https://www.customvision.ai) and login. -1. Copy the keys for the Training and Prediction resources: - - > [!NOTE] - > To provide all the keys you need to have two separate resources in Custom Vision, one for training and one for prediction. -1. Provide other information: - - * Endpoint - * Prediction resource ID -1. Browse and sign in to the [Video Analyzer for Media](https://vi.microsoft.com/). -1. Click on the question mark on the top-right corner of the page and choose **API Reference**. -1. Make sure you are subscribed to API Management by clicking the **Products** tab. If you have an API connected you can continue to the next step, otherwise, subscribe. -1. On the developer portal, click the **Complete API Reference** and browse to **Operations**. -1. Select **Connect Custom Vision Account (PREVIEW)** and click **Try it**. -1. 
Fill in the required fields as well as the access token and click **Send**. - - For more information about how to get the Video Indexer access token, go to the [developer portal](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Account-Access-Token), and see the [relevant documentation](video-indexer-use-apis.md#obtain-access-token-using-the-authorization-api). -1. Once the call returns a 200 OK response, your account is connected. -1. To verify your connection, browse to the [Video Analyzer for Media](https://vi.microsoft.com/) portal: -1. Click on the **Content model customization** button in the top-right corner. -1. Go to the **Animated characters** tab. -1. Once you click on Manage models in Custom Vision, you will be transferred to the Custom Vision account you just connected. - -> [!NOTE] -> Currently, only models that were created via Video Analyzer for Media are supported. Models that are created through Custom Vision will not be available. In addition, the best practice is to edit models that were created through Video Analyzer for Media only through the Video Analyzer for Media platform, since changes made through Custom Vision may cause unintended results. - -### Create an animated characters model - -1. Browse to the [Video Analyzer for Media](https://vi.microsoft.com/) website and sign in. -1. To customize a model in your account, select the **Content model customization** button on the left of the page. - - > [!div class="mx-imgBorder"] - > :::image type="content" source="./media/content-model-customization/content-model-customization.png" alt-text="Customize content model in Video Analyzer for Media"::: -1. Go to the **Animated characters** tab in the model customization section. -1. Click on **Add model**. -1. Name your model and click enter to save the name. - -> [!NOTE] -> The best practice is to have one custom vision model for each animated series. 
- -### Index a video with an animated model - -For the initial training, upload at least two videos. Each should be preferably longer than 15 minutes, before expecting good recognition model. If you have shorter episodes, we recommend uploading at least 30 minutes of video content before training. This will allow you to merge groups that belong to the same character from different scenes and backgrounds, and therefore increase the chance it will detect the character in the following episodes you index. To train a model on multiple videos (episodes) you need to index them all with the same animation model. - -1. Click on the **Upload** button. -1. Choose a video to upload (from a file or a URL). -1. Click on **Advanced options**. -1. Under **People / Animated characters** choose **Animation models**. -1. If you have one model it will be chosen automatically, and if you have multiple models you can choose the relevant one out of the dropdown menu. -1. Click on upload. -1. Once the video is indexed, you will see the detected characters in the **Animated characters** section in the **Insights** pane. - -Before tagging and training the model, all animated characters will be named “Unknown #X”. After you train the model they will also be recognized. - -### Customize the animated characters models - -1. Name the characters in Video Analyzer for Media. - - 1. After the model created character group, it is recommended to review these groups in Custom Vision. - 1. To tag an animated character in your video, go to the **Insights** tab and click on the **Edit** button on the top-right corner of the window. - 1. In the **Insights** pane, click on any of the detected animated characters and change their names from "Unknown #X" to a temporary name (or the name that was previously assigned to the character). - 1. After typing in the new name, click on the check icon next to the new name. This saves the new name in the model in Video Analyzer for Media. -1. 
Paid accounts only: Review the groups in Custom Vision - - > [!NOTE] - > Paid accounts that have access to their Custom Vision account can see the models and tagged images there. Learn more about [improving your classifier in Custom Vision](../../cognitive-services/custom-vision-service/getting-started-improving-your-classifier.md). It’s important to note that training of the model should be done only via Video Analyzer for Media (as described in this topic), and not via the Custom Vision website. - - 1. Go to the **Custom Models** page in Video Analyzer for Media and choose the **Animated characters** tab. - 1. Click on the Edit button for the model you are working on to manage it in Custom Vision. - 1. Review each character group: - - * If the group contains unrelated images it is recommended to delete these in the Custom Vision website. - * If there are images that belong to a different character, change the tag on these specific images by clicking on the image, adding the right tag and deleting the wrong tag. - * If the group is not correct, meaning it contains mainly non-character images or images from multiple characters, you can delete it in the Custom Vision website or in Video Analyzer for Media insights. - * The grouping algorithm will sometimes split your characters into different groups. It is therefore recommended to give all the groups that belong to the same character the same name (in Video Analyzer for Media Insights), which will immediately cause all these groups to appear as one in the Custom Vision website. - 1. Once the group is refined, make sure the initial name you tagged it with reflects the character in the group. -1. Train the model - - 1. After you finished editing all names you want, you need to train the model. - 1. Once a character is trained into the model, it will be recognized in the next video indexed with that model. - 1. 
Open the customization page and click on the **Animated characters** tab and then click on the **Train** button to train your model. In order to keep the connection between Video - -Indexer and the model, don't train the model in the Custom Vision website (paid accounts have access to Custom Vision website), only in Video Analyzer for Media. -Once trained, any video that will be indexed or reindexed with that model will recognize the trained characters. - -## Delete an animated character and the model - -1. Delete an animated character. - - 1. To delete an animated character in your video insights, go to the **Insights** tab and click on the **Edit** button on the top-right corner of the window. - 1. Choose the animated character and then click on the **Delete** button under their name. - - > [!NOTE] - > This will delete the insight from this video but will not affect the model. -1. Delete a model. - - 1. Click on the **Content model customization** button on the top menu and go to the **Animated characters** tab. - 1. Click on the ellipsis icon to the right of the model you wish to delete and then on the delete button. - - * Paid account: the model will be disconnected from Video Analyzer for Media and you will not be able to reconnect it. - * Trial account: the model will be deleted from Customs vision as well. - - > [!NOTE] - > In a trial account, you only have one model you can use. After you delete it, you can’t train other models. - -## Use the animated character detection with API - -1. Connect a Custom Vision account. - - If you own a Video Analyzer for Media paid account, you need to connect a Custom Vision account first.
                - If you don’t have a Custom Vision account already, please create one. For more information, see [Custom Vision](../../cognitive-services/custom-vision-service/overview.md). - - [Connect your Custom Vision account using API](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Connect-Custom-Vision-Account). -1. Create an animated characters model. - - Use the [create animation model](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Create-Animation-Model) API. -1. Index or re-index a video. - - Use the [re-indexing](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Re-Index-Video) API. -1. Customize the animated characters models. - - Use the [train animation model](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Train-Animation-Model) API. - -### View the output - -See the animated characters in the generated JSON file. - -```json -"animatedCharacters": [ - { - "videoId": "e867214582", - "confidence": 0, - "thumbnailId": "00000000-0000-0000-0000-000000000000", - "seenDuration": 201.5, - "seenDurationRatio": 0.3175, - "isKnownCharacter": true, - "id": 4, - "name": "Bunny", - "appearances": [ - { - "startTime": "0:00:52.333", - "endTime": "0:02:02.6", - "startSeconds": 52.3, - "endSeconds": 122.6 - }, - { - "startTime": "0:02:40.633", - "endTime": "0:03:16.6", - "startSeconds": 160.6, - "endSeconds": 196.6 - }, - ] - }, -] -``` - -## Limitations - -* Currently, the "animation identification" capability is not supported in East-Asia region. -* Characters that appear to be small or far in the video may not be identified properly if the video's quality is poor. -* The recommendation is to use a model per set of animated characters (for example per an animated series). 
- -## Next steps - -[Video Analyzer for Media overview](video-indexer-overview.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/animated-characters-recognition.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/animated-characters-recognition.md deleted file mode 100644 index 1b7a7ef527457..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/animated-characters-recognition.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Animated character detection with Azure Video Analyzer for Media (formerly Video Indexer) -description: This topic demonstrates how to use animated character detection with Azure Video Analyzer for Media (formerly Video Indexer). -ms.topic: conceptual -ms.date: 11/19/2019 -ms.author: juliako ---- - -# Animated character detection (preview) - -Azure Video Analyzer for Media (formerly Video Indexer) supports detection, grouping, and recognition of characters in animated content via integration with [Cognitive Services custom vision](https://azure.microsoft.com/services/cognitive-services/custom-vision-service/). This functionality is available both through the portal and through the API. - -After uploading an animated video with a specific animation model, Video Analyzer for Media extracts keyframes, detects animated characters in these frames, groups similar characters, and chooses the best sample. Then, it sends the grouped characters to Custom Vision that identifies characters based on the models it was trained on. - -Before you start training your model, the characters are detected namelessly. As you add names and train the model, Video Analyzer for Media will recognize the characters and name them accordingly. - -## Flow diagram - -The following diagram demonstrates the flow of the animated character detection process. 
- -![Flow diagram](./media/animated-characters-recognition/flow.png) - -## Accounts - -Depending on the type of your Video Analyzer for Media account, different feature sets are available. For information on how to connect your account to Azure, see [Create a Video Analyzer for Media account connected to Azure](connect-to-azure.md). - -* Trial account: Video Analyzer for Media uses an internal Custom Vision account to create a model and connect it to your Video Analyzer for Media account. -* Paid account: you connect your Custom Vision account to your Video Analyzer for Media account (if you don’t already have one, you need to create an account first). - -### Trial vs. paid - -|Functionality|Trial|Paid| -|---|---|---| -|Custom Vision account|Managed behind the scenes by Video Analyzer for Media. |Your Custom Vision account is connected to Video Analyzer for Media.| -|Number of animation models|One|Up to 100 models per account (Custom Vision limitation).| -|Training the model|Video Analyzer for Media trains the model for new characters and additional examples of existing characters.|The account owner trains the model when they are ready to make changes.| -|Advanced options in Custom Vision|No access to the Custom Vision portal.|You can adjust the models yourself in the Custom Vision portal.| - -## Use the animated character detection with portal and API - -For details, see [Use the animated character detection with portal and API](animated-characters-recognition-how-to.md). - -## Limitations - -* Currently, the "animation identification" capability is not supported in East-Asia region. -* Characters that appear to be small or far in the video may not be identified properly if the video's quality is poor. -* The recommendation is to use a model per set of animated characters (for example per an animated series). 
- -## Next steps - -[Video Analyzer for Media overview](video-indexer-overview.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/compare-video-indexer-with-media-services-presets.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/compare-video-indexer-with-media-services-presets.md deleted file mode 100644 index 7632bb4449297..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/compare-video-indexer-with-media-services-presets.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Comparison of Azure Video Analyzer for Media (formerly Video Indexer) and Azure Media Services v3 presets -description: This article compares Azure Video Analyzer for Media (formerly Video Indexer) capabilities and Azure Media Services v3 presets. -ms.topic: conceptual -ms.date: 02/24/2020 -ms.author: juliako - ---- - -# Compare Azure Media Services v3 presets and Video Analyzer for Media - -This article compares the capabilities of **Video Analyzer for Media (formerly Video Indexer) APIs** and **Media Services v3 APIs**. - -Currently, there is an overlap between features offered by the [Video Analyzer for Media APIs](https://api-portal.videoindexer.ai/) and the [Media Services v3 APIs](https://github.com/Azure/azure-rest-api-specs/blob/master/specification/mediaservices/resource-manager/Microsoft.Media/stable/2018-07-01/Encoding.json). The following table offers the current guideline for understanding the differences and similarities. - -## Compare - -|Feature|Video Analyzer for Media APIs |Video Analyzer and Audio Analyzer Presets
                in Media Services v3 APIs| -|---|---|---| -|Media Insights|[Enhanced](video-indexer-output-json-v2.md) |[Fundamentals](/azure/media-services/latest/analyze-video-audio-files-concept)| -|Experiences|See the full list of supported features:
                [Overview](video-indexer-overview.md)|Returns video insights only| -|Billing|[Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/#analytics)|[Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/#analytics)| -|Compliance|For the most current compliance updates, visit [Azure Compliance Offerings.pdf](https://gallery.technet.microsoft.com/Overview-of-Azure-c1be3942/file/178110/23/Microsoft%20Azure%20Compliance%20Offerings.pdf) and search for "Video Analyzer for Media" to see if it complies with a certificate of interest.|For the most current compliance updates, visit [Azure Compliance Offerings.pdf](https://gallery.technet.microsoft.com/Overview-of-Azure-c1be3942/file/178110/23/Microsoft%20Azure%20Compliance%20Offerings.pdf) and search for "Media Services" to see if it complies with a certificate of interest.| -|Free Trial|East US|Not available| -|Region availability|See [Cognitive Services availability by region](https://azure.microsoft.com/global-infrastructure/services/?products=cognitive-services)|See [Media Services availability by region](https://azure.microsoft.com/global-infrastructure/services/?products=media-services).| - -## Next steps - -[Video Analyzer for Media overview](video-indexer-overview.md) - -[Media Services v3 overview](/azure/media-services/latest/media-services-overview) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/concepts-overview.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/concepts-overview.md deleted file mode 100644 index 0bafd26cb35b0..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/concepts-overview.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: Azure Video Analyzer for Media (formerly Video Indexer) concepts - Azure -titleSuffix: Azure Video Analyzer for Media (formerly Video Indexer) -description: This article gives a brief overview of Azure Video Analyzer for Media (formerly 
Video Indexer) terminology and concepts. -ms.topic: conceptual -ms.date: 01/19/2021 -ms.author: juliako ---- - - -# Video Analyzer for Media concepts - -This article gives a brief overview of Azure Video Analyzer for Media (formerly Video Indexer) terminology and concepts. - -## Audio/video/combined insights - -When you upload your videos to Video Analyzer for Media, it analyses both visuals and audio by running different AI models. As Video Analyzer for Media analyzes your video, the insights that are extracted by the AI models. For more information, see [overview](video-indexer-overview.md). - -## Confidence scores - -The confidence score indicates the confidence in an insight. It is a number between 0.0 and 1.0. The higher the score- the greater the confidence in the answer. For example, - -```json -"transcript":[ -{ - "id":1, - "text":"Well, good morning everyone and welcome to", - "confidence":0.8839, - "speakerId":1, - "language":"en-US", - "instances":[ - { - "adjustedStart":"0:00:10.21", - "adjustedEnd":"0:00:12.81", - "start":"0:00:10.21", - "end":"0:00:12.81" - } - ] -}, -``` - -## Content moderation - -Use textual and visual content moderation models to keep your users safe from inappropriate content and validate that the content you publish matches your organization's values. You can automatically block certain videos or alert your users about the content. For more information, see [Insights: visual and textual content moderation](video-indexer-output-json-v2.md#visualcontentmoderation). - -## Blocks - -Blocks are meant to make it easier to go through the data. For example, block might be broken down based on when speakers change or there is a long pause. - -## Project and editor - -The [Video Analyzer for Media](https://www.videoindexer.ai/) website enables you to use your video's deep insights to: find the right media content, locate the parts that you’re interested in, and use the results to create an entirely new project. 
Once created, the project can be rendered and downloaded from Video Analyzer for Media and be used in your own editing applications or downstream workflows. - -Some scenarios where you may find this feature useful are: - -* Creating movie highlights for trailers. -* Using old clips of videos in news casts. -* Creating shorter content for social media. - -For more information, see [Use editor to create projects](use-editor-create-project.md). - -## Keyframes - -Video Analyzer for Media selects the frame(s) that best represent each shot. Keyframes are the representative frames selected from the entire video based on aesthetic properties (for example, contrast and stableness). For more information, see [Scenes, shots, and keyframes](scenes-shots-keyframes.md). - -## time range vs. adjusted time range - -TimeRange is the time range in the original video. AdjustedTimeRange is the time range relative to the current playlist. Since you can create a playlist from different lines of different videos, you can take a 1-hour video and use just 1 line from it, for example, 10:00-10:15. In that case, you will have a playlist with 1 line, where the time range is 10:00-10:15 but the adjustedTimeRange is 00:00-00:15. - -## Widgets - -Video Analyzer for Media supports embedding widgets in your apps. For more information, see [Embed Video Analyzer for Media widgets in your apps](video-indexer-embed-widgets.md). - -## Summarized insights - -Summarized insights contain an aggregated view of the data: faces, topics, emotions. For example, instead of going over each of the thousands of time ranges and checking which faces are in it, the summarized insights contains all the faces and for each one, the time ranges it appears in and the % of the time it is shown. 
- -## Next steps - -- [overview](video-indexer-overview.md) -- [Insights](video-indexer-output-json-v2.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/connect-classic-account-to-arm.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/connect-classic-account-to-arm.md deleted file mode 100644 index ef5feacca3ff5..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/connect-classic-account-to-arm.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: Connect a classic Video Analyzer for Media account to ARM -description: This topic explains how to connect an existing classic paid Azure Video Analyzer for Media account to an ARM-based account -ms.topic: how-to -ms.author: itnorman -ms.date: 10/19/2021 -ms.custom: ignite-fall-2021 ---- - -# Connect an existing classic paid Video Analyzer for Media account to ARM-based account - -This article details how to connect an existing classic paid Azure Video Analyzer for Media account to an Azure Resource Manager (ARM) based account. -Today, Azure Video Analyzer for Media (formerly Video Indexer), is a GA(general availability) product that is not an ARM resource on Azure. -In this article, we will go through options on connecting your **existing** Video Analyzer for Media account to [ARM][docs-arm-overview]. - -## Prerequisites - -* Unlimited paid Video Analyzer for Media account (classic account). - - * To perform the connect to the ARM (Azure Resource Manager) action, you should have owner's permissions on the Video Analyzer for Media account. -* Azure Subscription. -* User assigned managed identity (can be created along the flow). - -## Transition state - -Connecting a classic account to be ARM-based triggers a 30 days of a transition state. 
In the transition state, an existing account can be accessed by generating an access token using both: - -* Access token [generated through API Management](https://aka.ms/avam-dev-portal)(classic way) -* Access token [generated through ARM](/rest/api/videoindexer/generate/access-token) - -The transition state moves all account management functionality to be managed by ARM and will be handled by [Azure RBAC][docs-rbac-overview]. - -The [invite users](invite-users.md) feature in the Video Analyzer for Media portal gets disabled. The invited users on this account lose their access to the Video Analyzer for Media account in the portal. -However, this can be resolved by assigning the right role-assignment to these users through Azure RBAC, see [How to assign RBAC][docs-rbac-assignment]. - -Only the account owner, who performed the connect action, is automatically assigned as the owner on the connected account. When [Azure policies][docs-governance-policy] are enforced, they override the settings on the account. - -If users are not added through Azure RBAC to the account after 30 days, they will lose access through API as well as Video Analyzer for Media portal. -After the transition state ends, users will only be able to generate a valid access token through ARM, making Azure RBAC the exclusive way to manage role-based access control on the account. - -> [!NOTE] -> If there are invited users you wish to remove access from, do it before connecting the account to ARM. - -Before the end of the 30 days of transition state, you can remove access from users through the Azure Video Analyzer for Media portal on the account settings page. - -## Get started - -### Browse to [Video Analyzer for Media portal](https://aka.ms/vi-portal-link) - -1. Sign in using your Azure AD account. -1. On the top right bar press *User account* to open the side pane account list. -1. 
Select the Video Analyzer for Media classic account you wish to connect to ARM (classic accounts will be tagged with a *classic tag*). -1. Click **Settings**. - - ![account-settings](media/connect-classic-account-to-arm/user-account-settings.png) -1. Click **Connect to an ARM-based account**. - - ![connect-button-portal](media/connect-classic-account-to-arm/connect-button.png) -1. Sign to Azure portal. -1. The Video Analyzer for Media create blade will open. -1. In the **Create Video Analyzer for Media account** section enter required values. - - * If you followed the steps the fields should be auto-populated, make sure to validate the eligible values. - - ![connect-to-arm](media/connect-classic-account-to-arm/connect-blade-new.png) - - | Name | Description | - | ---|---| - |**Subscription**| The subscription currently contains the classic account and other related resources such as the Media Services.| - |**Resource Group**|Select an existing resource or create a new one. The resource group must be the same location as the classic account being connected| - |**Video Analyzer for Media account** (radio button)| Select the *"Connecting an existing classic account"*.| - |**Existing account ID**| Enter the ID of existing Video Analyzer for Media classic account.| - |**Resource name**|Enter the name of the new Video Analyzer for Media account. Default value would be the same name the account had as classic.| - |**Location**|The geographic region can't be changed in the connect process, the connected account must stay in the same region. | - |**Media Services account name**|The original Media Services account name that was associated with classic account.| - |**User-assigned managed identity**|Select a user-assigned managed identity, or create a new one. Video Analyzer for Media account will use it to access the Media services. The user-assignment managed identity will be assigned the roles of Contributor for the Media Service account.| -1. 
Click **Review + create** at the bottom of the form. - -## After connecting to ARM is complete - -After successfully connecting your account to ARM, it is recommended to make sure your account management APIs are replaced with [Video Analyzer for Media REST API](/rest/api/videoindexer/accounts?branch=videoindex). -As mentioned in the beginning of this article, during the 30 days of the transition state, “[Get-access-token](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Account-Access-Token)” will be supported side by side the ARM-based “[Generate-Access token](/rest/api/videoindexer/generate/access-token)”. -Make sure to change to the new "Generate-Access token" by updating all your solutions that use the API. - -APIs to be changed: - -- Get Access token for each scope: Account, Project & Video. -- Get account – the account’s details. -- Get accounts – List of all account in a region. -- Create paid account – would create a classic account. - -For a full description of [Video Analyzer for Media REST API](/rest/api/videoindexer/accounts?branch=videoindex) calls and documentation, follow the link. - -For code sample generating an access token through ARM see [C# code sample](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/ApiUsage/ArmBased/Program.cs). - -### Next steps - -Learn how to [Upload a video using C#](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/ApiUsage/ArmBased). 
- - -[docs-arm-overview]: ../../azure-resource-manager/management/overview.md -[docs-rbac-overview]: ../../role-based-access-control/overview.md -[docs-rbac-assignment]: ../../role-based-access-control/role-assignments-portal.md -[docs-governance-policy]: ../../governance/policy/overview.md diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/connect-to-azure.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/connect-to-azure.md deleted file mode 100644 index 6ac068821bc43..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/connect-to-azure.md +++ /dev/null @@ -1,231 +0,0 @@ ---- -title: Create an Azure Video Analyzer for Media (formerly Video Indexer) account connected to Azure -description: Learn how to create an Azure Video Analyzer for Media (formerly Video Indexer) account connected to Azure. -ms.topic: tutorial -ms.date: 10/19/2021 -ms.author: itnorman -ms.custom: ignite-fall-2021 ---- - -# Create a Video Analyzer for Media account - -When creating an Azure Video Analyzer for Media (formerly Video Indexer) account, you can choose a free trial account (where you get a certain number of free indexing minutes) or a paid option (where you're not limited by the quota). With a free trial, Video Analyzer for Media provides up to 600 minutes of free indexing to users and up to 2400 minutes of free indexing to users that subscribe to the Video Analyzer API on the [developer portal](https://aka.ms/avam-dev-portal). With the paid options, Azure Video Analyzer for Media offers two types of accounts: classic accounts(General Availability), and ARM-based accounts(Public Preview). The main difference between the two is the account management platform. While classic accounts are built on API Management, ARM-based account management is built on Azure, which enables applying access control to all services with role-based access control (Azure RBAC) natively. 
- -* You can create a Video Analyzer for Media **classic** account through our [API](https://aka.ms/avam-dev-portal). -* You can create a Video Analyzer for Media **ARM-based** account through one of the following: - - 1. [Video Analyzer for Media portal](https://aka.ms/vi-portal-link) - 2. [Azure portal](https://portal.azure.com/#home) - 3. [QuickStart ARM template](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/ARM-Samples/Create-Account) - -To read more on how to create a **new ARM-Based** Video Analyzer for Media account, read this [article](create-video-analyzer-for-media-account.md) - -## How to create classic accounts -This article shows how to create a Video Analyzer for Media classic account. The topic provides steps for connecting to Azure using the automatic (default) flow. It also shows how to connect to Azure manually (advanced). - -If you are moving from a *trial* to *paid ARM-Based* Video Analyzer for Media account, you can choose to copy all of the videos and model customization to the new account, as discussed in the [Import your content from the trial account](#import-your-content-from-the-trial-account) section. - -The article also covers [Linking a Video Analyzer for Media account to Azure Government](#video-analyzer-for-media-in-azure-government). - -## Prerequisites for connecting to Azure - -* An Azure subscription. - - If you don't have an Azure subscription yet, sign up for [Azure Free Trial](https://azure.microsoft.com/free/). -* An Azure Active Directory (Azure AD) domain. - - If you don't have an Azure AD domain, create this domain with your Azure subscription. For more information, see [Managing custom domain names in your Azure AD](../../active-directory/enterprise-users/domains-manage.md) -* A user in your Azure AD domain with an **Application administrator** role. You'll use this member when connecting your Video Analyzer for Media account to Azure. 
- - This user should be an Azure AD user with a work or school account. Don't use a personal account, such as outlook.com, live.com, or hotmail.com. - - ![all Azure AD users](./media/create-account/all-aad-users.png) - -### Additional prerequisites for automatic flow - -* A user and member in your Azure AD domain. - - You'll use this member when connecting your Video Analyzer for Media account to Azure. - - This user should be a member in your Azure subscription with either an **Owner** role, or both **Contributor** and **User Access Administrator** roles. A user can be added twice, with two roles. Once with Contributor and once with user Access Administrator. For more information, see [View the access a user has to Azure resources](../../role-based-access-control/check-access.md). - - ![access control](./media/create-account/access-control-iam.png) - -### Additional prerequisites for manual flow - -* Register the EventGrid resource provider using the Azure portal. - - In the [Azure portal](https://portal.azure.com/), go to **Subscriptions**->[subscription]->**ResourceProviders**. - - Search for **Microsoft.Media** and **Microsoft.EventGrid**. If not in the "Registered" state, click **Register**. It takes a couple of minutes to register. - - ![EventGrid](./media/create-account/event-grid.png) - -## Connect to Azure manually (advanced option) - -If the connection to Azure failed, you can attempt to troubleshoot the problem by connecting manually. - -> [!NOTE] -> It's mandatory to have the following three accounts in the same region: the Video Analyzer for Media account that you're connecting with the Media Services account, as well as the Azure storage account connected to the same Media Services account. - -### Create and configure a Media Services account - -1. Use the [Azure](https://portal.azure.com/) portal to create an Azure Media Services account, as described in [Create an account](/azure/media-services/previous/media-services-portal-create-account). 
- - Make sure the Media Services account was created with the classic APIs. - - ![Media Services classic API](./media/create-account/enable-classic-api.png) - - - When creating a storage account for your Media Services account, select **StorageV2** for account kind and **Geo-redundant** (GRS) for replication fields. - - ![New AMS account](./media/create-account/create-new-ams-account.png) - - > [!NOTE] - > Make sure to write down the Media Services resource and account names. You'll need them for the steps in the next section. - -1. Before you can play your videos in the Video Analyzer for Media web app, you must start the default **Streaming Endpoint** of the new Media Services account. - - In the new Media Services account, select **Streaming endpoints**. Then select the streaming endpoint and press start. - - ![Streaming endpoints](./media/create-account/create-ams-account-se.png) -4. For Video Analyzer for Media to authenticate with Media Services API, an AD app needs to be created. The following steps guide you through the Azure AD authentication process described in [Get started with Azure AD authentication by using the Azure portal](/azure/media-services/previous/media-services-portal-get-started-with-aad): - - 1. In the new Media Services account, select **API access**. - 2. Select [Service principal authentication method](/azure/media-services/previous/media-services-portal-get-started-with-aad). - 3. Get the client ID and client secret - - After you select **Settings**->**Keys**, add **Description**, press **Save**, and the key value gets populated. - - If the key expires, the account owner will have to contact Video Analyzer for Media support to renew the key. - - > [!NOTE] - > Make sure to write down the key value and the Application ID. You'll need it for the steps in the next section. 
- -### Connect manually - -In the **Create a new account on an Azure subscription** dialog of your [Video Analyzer for Media](https://www.videoindexer.ai/) page, select the **Switch to manual configuration** link. - -In the dialog, provide the following information: - -|Setting|Description| -|---|---| -|Video Analyzer for Media account region|The name of the Video Analyzer for Media account region. For better performance and lower costs, it's highly recommended to specify the name of the region where the Azure Media Services resource and Azure Storage account are located. | -|Azure AD tenant|The name of the Azure AD tenant, for example "contoso.onmicrosoft.com". The tenant information can be retrieved from the Azure portal. Place your cursor over the name of the signed-in user in the top-right corner. Find the name to the right of **Domain**.| -|Subscription ID|The Azure subscription under which this connection should be created. The subscription ID can be retrieved from the Azure portal. Select **All services** in the left panel, and search for "subscriptions". Select **Subscriptions** and choose the desired ID from the list of your subscriptions.| -|Azure Media Services resource group name|The name for the resource group in which you created the Media Services account.| -|Media service resource name|The name of the Azure Media Services account that you created in the previous section.| -|Application ID|The Azure AD application ID (with permissions for the specified Media Services account) that you created in the previous section.| -|Application key|The Azure AD application key that you created in the previous section. | - -### Import your content from the *trial* account - -When creating a new **ARM-Based** account, you have an option to import your content from the *trial* account into the new **ARM-Based** account free of charge. -> [!NOTE] -> * Import from trial can be performed only once per trial account. 
> * The target ARM-Based account needs to be created and available before import is assigned. -> * Target ARM-Based account has to be an empty account (never indexed any media files). - -To import your data, follow the steps: - 1. Go to [Azure Video Analyzer for Media portal](https://aka.ms/vi-portal-link) - 2. Select your trial account and go to the *account settings* page - 3. Click the *Import content to an ARM-based account* - 4. From the dropdown menu choose the ARM-based account you wish to import the data to. - * If the account ID isn't showing, you can copy and paste the account ID from Azure portal or the account list, on the side blade in the Azure Video Analyzer for Media Portal. - 5. Click **Import content** - -![import](./media/create-account/import-steps.png) - - -All media and content model customizations will be copied from the *trial* account into the new ARM-Based account. - - -> [!NOTE] -> -> The *trial* account is not available on the Azure Government cloud. - -## Azure Media Services considerations - -The following Azure Media Services related considerations apply: - -* If you plan to connect to an existing Media Services account, make sure the Media Services account was created with the classic APIs. - - ![Media Services classic API](./media/create-account/enable-classic-api.png) -* If you connect to an existing Media Services account, Video Analyzer for Media doesn't change the existing media **Reserved Units** configuration. - - You might need to adjust the type and number of Media Reserved Units according to your planned load. Keep in mind that if your load is high and you don't have enough units or speed, video processing can result in timeout failures. -* If you connect to a new Media Services account, Video Analyzer for Media automatically starts the default **Streaming Endpoint** in it: - - ![Media Services streaming endpoint](./media/create-account/ams-streaming-endpoint.png) - - Streaming endpoints have a considerable startup time. 
Therefore, it may take several minutes from the time you connected your account to Azure until your videos can be streamed and watched in the Video Analyzer for Media web app. -* If you connect to an existing Media Services account, Video Analyzer for Media doesn't change the default Streaming Endpoint configuration. If there's no running **Streaming Endpoint**, you can't watch videos from this Media Services account or in Video Analyzer for Media. -* If you connect automatically, Video Analyzer for Media sets the media **Reserved Units** to 10 S3 units: - - ![Media Services reserved units](./media/create-account/ams-reserved-units.png) - -## Automate creation of the Video Analyzer for Media account - -To automate the creation of the account is a two steps process: - -1. Use Azure Resource Manager to create an Azure Media Services account + Azure AD application. - - See an example of the [Media Services account creation template](https://github.com/Azure-Samples/media-services-v3-arm-templates). -1. Call [Create-Account with the Media Services and Azure AD application](https://videoindexer.ai.azure.us/account/login?source=apim). - -## Video Analyzer for Media in Azure Government - -### Prerequisites for connecting to Azure Government - -- An Azure subscription in [Azure Government](../../azure-government/index.yml). -- An Azure AD account in Azure Government. -- All pre-requirements of permissions and resources as described above in [Prerequisites for connecting to Azure](#prerequisites-for-connecting-to-azure). Make sure to check [Additional prerequisites for automatic flow](#additional-prerequisites-for-automatic-flow) and [Additional prerequisites for manual flow](#additional-prerequisites-for-manual-flow). - -### Create new account via the Azure Government portal - -> [!NOTE] -> The Azure Government cloud does not include a *trial* experience of Video Analyzer for Media. - -To create a paid account via the Video Analyzer for Media portal: - -1. 
Go to https://videoindexer.ai.azure.us -1. Log in with your Azure Government Azure AD account. -1. If you do not have any Video Analyzer for Media accounts in Azure Government that you are an owner or a contributor to, you will get an empty experience from which you can start creating your account. - - The rest of the flow is as described in above , only the regions to select from will be Government regions in which Video Analyzer for Media is available - - If you already are a contributor or an admin of an existing one or more Video Analyzer for Media account in Azure Government, you will be taken to that account and from there you can start a follow steps for creating an additional account if needed, as described above. - -### Create new account via the API on Azure Government - -To create a paid account in Azure Government, follow the instructions in [Create-Paid-Account](). This API end point only includes Government cloud regions. - -### Limitations of Video Analyzer for Media on Azure Government - -* No manual content moderation available in Government cloud. - - In the public cloud when content is deemed offensive based on a content moderation, the customer can ask for a human to look at that content and potentially revert that decision. -* No trial accounts. -* Bing description - in Gov cloud we will not present a description of celebrities and named entities identified. This is a UI capability only. - -## Clean up resources - -After you are done with this tutorial, delete resources that you are not planning to use. - -### Delete a Video Analyzer for Media account - -If you want to delete a Video Analyzer for Media account, you can delete the account from the Video Analyzer for Media website. To delete the account, you must be the owner. - -Select the account -> **Settings** -> **Delete this account**. - -The account will be permanently deleted in 90 days. 
- -## Firewall - -See [Storage account that is behind a firewall](faq.yml#can-a-storage-account-connected-to-the-media-services-account-be-behind-a-firewall). - -## Next steps - -You can programmatically interact with your trial account and/or with your Video Analyzer for Media accounts that are connected to Azure by following the instructions in: [Use APIs](video-indexer-use-apis.md). - -You should use the same Azure AD user you used when connecting to Azure. diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/considerations-when-use-at-scale.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/considerations-when-use-at-scale.md deleted file mode 100644 index d8753625fd0f3..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/considerations-when-use-at-scale.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: Things to consider when using Azure Video Analyzer for Media (formerly Video Indexer) at scale - Azure -description: This topic explains what things to consider when using Azure Video Analyzer for Media (formerly Video Indexer) at scale. -ms.topic: how-to -ms.date: 11/13/2020 -ms.author: juliako ---- - -# Things to consider when using Video Analyzer for Media at scale - -When using Azure Video Analyzer for Media (formerly Video Indexer) to index videos and your archive of videos is growing, consider scaling. - -This article answers questions like: - -* Are there any technological constraints I need to take into account? -* Is there a smart and efficient way of doing it? -* Can I prevent spending excess money in the process? - -The article provides six best practices of how to use Video Analyzer for Media at scale. - -## When uploading videos consider using a URL over byte array - -Video Analyzer for Media does give you the choice to upload videos from URL or directly by sending the file as a byte array, the latter comes with some constraints. 
For more information, see [uploading considerations and limitations](upload-index-videos.md#uploading-considerations-and-limitations) - -First, it has file size limitations. The size of the byte array file is limited to 2 GB compared to the 30-GB upload size limitation while using URL. - -Second, consider just some of the issues that can affect your performance and hence your ability to scale: - -* Sending files using multi-part means high dependency on your network, -* service reliability, -* connectivity, -* upload speed, -* lost packets somewhere in the world wide web. - -:::image type="content" source="./media/considerations-when-use-at-scale/first-consideration.png" alt-text="First consideration for using Video Analyzer for Media at scale"::: - -When you upload videos using URL, you just need to provide a path to the location of a media file and Video Indexer takes care of the rest (see the `videoUrl` field in the [upload video](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Upload-Video) API). - -> [!TIP] -> Use the `videoUrl` optional parameter of the upload video API. - -To see an example of how to upload videos using URL, check out [this example](upload-index-videos.md#code-sample). Or, you can use [AzCopy](../../storage/common/storage-use-azcopy-v10.md) for a fast and reliable way to get your content to a storage account from which you can submit it to Video Analyzer for Media using [SAS URL](../../storage/common/storage-sas-overview.md). Video Analyzer for Media recommends using *readonly* SAS URLs. - -## Automatic Scaling of Media Reserved Units - -Starting August 1st 2021, Azure Video Analyzer for Media (formerly Video Indexer) enabled [Reserved Units](/azure/media-services/latest/concept-media-reserved-units) (MRUs) auto scaling by [Azure Media Services](/azure/media-services/latest/media-services-overview) (AMS), as a result you do not need to manage them through Azure Video Analyzer for Media. 
That will allow price optimization, e.g. price reduction in many cases, based on your business needs as it is being auto scaled. - -## Respect throttling - -Video Analyzer for Media is built to deal with indexing at scale, and when you want to get the most out of it you should also be aware of the system's capabilities and design your integration accordingly. You don't want to send an upload request for a batch of videos just to discover that some of the movies didn't upload and you are receiving an HTTP 429 response code (too many requests). It can happen due to the fact that you sent more requests than the [limit of movies per minute we support](upload-index-videos.md#uploading-considerations-and-limitations). Video Analyzer for Media adds a `retry-after` header in the HTTP response, the header specifies when you should attempt your next retry. Make sure you respect it before trying your next request. - -:::image type="content" source="./media/considerations-when-use-at-scale/respect-throttling.jpg" alt-text="Design your integration well, respect throttling"::: - -## Use callback URL - -We recommend that instead of polling the status of your request constantly from the second you sent the upload request, you can add a [callback URL](upload-index-videos.md#callbackurl), and wait for Video Analyzer for Media to update you. As soon as there is any status change in your upload request, you get a POST notification to the URL you specified. - -You can add a callback URL as one of the parameters of the [upload video API](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Upload-Video). Check out the code samples in [GitHub repo](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/). - -For callback URL you can also use Azure Functions, a serverless event-driven platform that can be triggered by HTTP and implement a following flow. 
- -### callBack URL definition - -[!INCLUDE [callback url](./includes/callback-url.md)] - -## Use the right indexing parameters for you - -When making decisions related to using Video Analyzer for Media at scale, look at how to get the most out of it with the right parameters for your needs. Think about your use case, by defining different parameters you can save money and make the indexing process for your videos faster. - -Before uploading and indexing your video read this short [documentation](upload-index-videos.md), check the [indexingPreset](upload-index-videos.md#indexingpreset) and [streamingPreset](upload-index-videos.md#streamingpreset) to get a better idea of what your options are. - -For example, don’t set the preset to streaming if you don't plan to watch the video, don't index video insights if you only need audio insights. - -## Index in optimal resolution, not highest resolution - -You might be asking, what video quality do you need for indexing your videos? - -In many cases, indexing performance has almost no difference between HD (720P) videos and 4K videos. Eventually, you’ll get almost the same insights with the same confidence. The higher the quality of the movie you upload means the higher the file size, and this leads to higher computing power and time needed to upload the video. - -For example, for the face detection feature, a higher resolution can help with the scenario where there are many small but contextually important faces. However, this will come with a quadratic increase in runtime and an increased risk of false positives. - -Therefore, we recommend you to verify that you get the right results for your use case and to first test it locally. Upload the same video in 720P and in 4K and compare the insights you get. 
- -## Next steps - -[Examine the Azure Video Analyzer for Media output produced by API](video-indexer-output-json-v2.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/create-video-analyzer-for-media-account.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/create-video-analyzer-for-media-account.md deleted file mode 100644 index e932def8f554e..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/create-video-analyzer-for-media-account.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Create an Azure Video Analyzer for Media account -description: This topic explains how to create an account for Azure Video Analyzer for Media. -ms.topic: tutorial -ms.author: itnorman -ms.date: 10/13/2021 -ms.custom: ignite-fall-2021 ---- - -# Get started with Azure Video Analyzer for Media in Azure portal - -This Quickstart walks you through the steps to get started with Azure Video Analyzer for Media. You will create an Azure Video Analyzer for Media account and its accompanying resources by using the Azure portal. - -To start using Azure Video Analyzer for Media, you will need to create a Video Analyzer for Media account. The account needs to be associated with a [Media Services][docs-ms] resource and a [User-assigned managed identity][docs-uami]. The managed identity will need to have Contributor permissions role on the Media Services. - -## Prerequisites -> [!NOTE] -> You'll need an Azure subscription where you have access to both the Contributor role and the User Access Administrator role to the resource group under which you will create new resources, and Contributor role on both Azure Media Services and the User-assigned managed identity. If you don't have the right permissions, ask your account administrator to grant you those permissions. The associated Azure Media Services must be in the same region as the Video Analyzer for Media account. 
- - -## Azure portal - -### Create a Video Analyzer for Media account in the Azure portal - -1. Sign into the [Azure portal](https://portal.azure.com/). -1. Using the search bar at the top, enter **"Video Analyzer for Media"**. -1. Click on *Video Analyzer for Media* under *Services*. - - ![Image of search bar](media/create-video-analyzer-for-media-account/search-bar1.png) - -1. Click **Create**. -1. In the **Create a Video Analyzer for Media resource** section enter required values. - - ![Image of create account](media/create-video-analyzer-for-media-account/create-account-blade.png) - - -| Name | Description | -| ---|---| -|**Subscription**|Choose the subscription that you are using to create the Video Analyzer for Media account.| -|**Resource Group**|Choose a resource group where you are creating the Video Analyzer for Media account, or select **Create new** to create a resource group.| -|**Video Analyzer for Media account**|Select *Create a new account* option.| -|**Resource name**|Enter the name of the new Video Analyzer for Media account, the name can contain letters, numbers and dashes with no spaces.| -|**Location**|Select the geographic region that will be used to deploy the Video Analyzer for Media account. The location matches the **resource group location** you chose, if you'd like to change the selected location change the selected resource group or create a new one in the preferred location. [Azure region in which Video Analyzer for Media is available](https://azure.microsoft.com/global-infrastructure/services/?products=cognitive-services®ions=all)| -|**Media Services account name**|Select a Media Services that the new Video Analyzer for Media account will use to process the videos. You can select an existing Media Services or you can create a new one. 
The Media Services must be in the same location you selected.| -|**User-assigned managed identity**|Select a user-assigned managed identity that the new Video Analyzer for Media account will use to access the Media Services. You can select an existing user-assigned managed identity or you can create a new one. The user-assigned managed identity will be assigned the Contributor role on the Media Services.| - -1. Click **Review + create** at the bottom of the form. - -### Review deployed resource - -You can use the Azure portal to validate the Azure Video Analyzer for Media account and other resources that were created. After the deployment is finished, select **Go to resource** to see your new Video Analyzer for Media account. - -### Overview - -![Image of overview](media/create-video-analyzer-for-media-account/overview-screenshot.png) - -Click on *Explore Video Analyzer for Media's portal* to view your new account on the [Azure Video Analyzer for Media portal](https://aka.ms/vi-portal-link) - -### Management API - -![Image of Generate-access-token](media/create-video-analyzer-for-media-account/generate-access-token.png) - -Use the *Management API* tab to manually generate access tokens for the account. -This token can be used to authenticate API calls for this account. Each token is valid for one hour. - -Choose the following: -* Permission type: **Contributor** or **Reader** -* Scope: **Account**, **Project** or **Video** - * For **Project** or **Video** you should also insert the matching ID -* Click **Generate** - ---- - -### Next steps - -Learn how to [Upload a video using C#](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/ApiUsage/ArmBased). 
- - -[docs-uami]: ../../active-directory/managed-identities-azure-resources/overview.md -[docs-ms]: /azure/media-services/latest/media-services-overview -[docs-role-contributor]: ../../role-based-access-control/built-in-roles.md#contributor -[docs-contributor-on-ms]: ./add-contributor-role-on-the-media-service.md diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-brands-model-overview.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-brands-model-overview.md deleted file mode 100644 index b81f314dea8ab..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-brands-model-overview.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Customize a Brands model in Azure Video Analyzer for Media (formerly Video Indexer) - Azure -description: This article gives an overview of what is a Brands model in Azure Video Analyzer for Media (formerly Video Indexer) and how to customize it. - -ms.topic: conceptual -ms.date: 12/15/2019 -ms.author: juliako ---- - -# Customize a Brands model in Video Analyzer for Media - -Azure Video Analyzer for Media (formerly Video Indexer) supports brand detection from speech and visual text during indexing and reindexing of video and audio content. The brand detection feature identifies mentions of products, services, and companies suggested by Bing's brands database. For example, if Microsoft is mentioned in a video or audio content or if it shows up in visual text in a video, Video Analyzer for Media detects it as a brand in the content. Brands are disambiguated from other terms using context. - -Brand detection is useful in a wide variety of business scenarios such as contents archive and discovery, contextual advertising, social media analysis, retail compete analysis, and many more. 
Video Analyzer for Media brand detection enables you to index brand mentions in speech and visual text, using Bing's brands database as well as with customization by building a custom Brands model for each Video Analyzer for Media account. The custom Brands model feature allows you to select whether or not Video Analyzer for Media will detect brands from the Bing brands database, exclude certain brands from being detected (essentially creating a list of unapproved brands), and include brands that should be part of your model that might not be in Bing's brands database (essentially creating a list of approved brands). The custom Brands model that you create will only be available in the account in which you created the model. - -## Out of the box detection example - -In the "Microsoft Build 2017 Day 2" presentation, the brand "Microsoft Windows" appears multiple times. Sometimes in the transcript, sometimes as visual text and never as verbatim. Video Analyzer for Media detects with high precision that a term is indeed brand based on the context, covering over 90k brands out of the box, and constantly updating. At 02:25, Video Analyzer for Media detects the brand from speech and then again at 02:40 from visual text, which is part of the Windows logo. - -![Brands overview](./media/content-model-customization/brands-overview.png) - -Talking about Windows in the context of construction will not detect the word "Windows" as a brand, and same for Box, Apple, Fox, etc., based on advanced Machine Learning algorithms that know how to disambiguate from context. Brand Detection works for all our supported languages. 
- -## Next steps - -To bring your own brands, check out these topics: - -[Customize Brands model using APIs](customize-brands-model-with-api.md) - -[Customize Brands model using the website](customize-brands-model-with-website.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-brands-model-with-api.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-brands-model-with-api.md deleted file mode 100644 index 7df7b530459da..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-brands-model-with-api.md +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Customize a Brands model with Azure Video Analyzer for Media (formerly Video Indexer) API -titleSuffix: Azure Video Analyzer for Media -description: Learn how to customize a Brands model with the Azure Video Analyzer for Media (formerly Video Indexer) API. -services: azure-video-analyzer -author: anikaz -manager: johndeu -ms.topic: article -ms.subservice: azure-video-analyzer-media -ms.date: 01/14/2020 -ms.author: kumud ---- - -# Customize a Brands model with the Video Analyzer for Media API - -Azure Video Analyzer for Media (formerly Video Indexer) supports brand detection from speech and visual text during indexing and reindexing of video and audio content. The brand detection feature identifies mentions of products, services, and companies suggested by Bing's brands database. For example, if Microsoft is mentioned in video or audio content or if it shows up in visual text in a video, Video Analyzer for Media detects it as a brand in the content. A custom Brands model allows you to exclude certain brands from being detected and include brands that should be part of your model that might not be in Bing's brands database. For more information, see [Overview](customize-brands-model-overview.md). - -> [!NOTE] -> If your video was indexed prior to adding a brand, you need to reindex it. 
- -You can use the Video Analyzer for Media APIs to create, use, and edit custom Brands models detected in a video, as described in this topic. You can also use the Video Analyzer for Media website, as described in [Customize Brands model using the Video Analyzer for Media website](customize-brands-model-with-website.md). - -## Create a Brand - -The [create a brand](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Create-Brand) API creates a new custom brand and adds it to the custom Brands model for the specified account. - -> [!NOTE] -> Setting `enabled` (in the body) to true puts the brand in the *Include* list for Video Analyzer for Media to detect. Setting `enabled` to false puts the brand in the *Exclude* list, so Video Analyzer for Media won't detect it. - -Some other parameters that you can set in the body: - -* The `referenceUrl` value can be any reference websites for the brand, such as a link to its Wikipedia page. -* The `tags` value is a list of tags for the brand. This tag shows up in the brand's *Category* field in the Video Analyzer for Media website. For example, the brand "Azure" can be tagged or categorized as "Cloud". - -### Response - -The response provides information on the brand that you just created following the format of the example below. - -```json -{ - "referenceUrl": "https://en.wikipedia.org/wiki/Example", - "id": 97974, - "name": "Example", - "accountId": "SampleAccountId", - "lastModifierUserName": "SampleUserName", - "created": "2018-04-25T14:59:52.7433333", - "lastModified": "2018-04-25T14:59:52.7433333", - "enabled": true, - "description": "This is an example", - "tags": [ - "Tag1", - "Tag2" - ] -} -``` - -## Delete a Brand - -The [delete a brand](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Delete-Brand) API removes a brand from the custom Brands model for the specified account. The account is specified in the `accountId` parameter. 
Once called successfully, the brand will no longer be in the *Include* or *Exclude* brands lists. - -### Response - -There's no returned content when the brand is deleted successfully. - -## Get a specific Brand - -The [get a brand](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Brand) API lets you search for the details of a brand in the custom Brands model for the specified account using the brand ID. - -### Response - -The response provides information on the brand that you searched (using brand ID) following the format of the example below. - -```json -{ - "referenceUrl": "https://en.wikipedia.org/wiki/Example", - "id": 128846, - "name": "Example", - "accountId": "SampleAccountId", - "lastModifierUserName": "SampleUserName", - "created": "2018-01-06T13:51:38.3666667", - "lastModified": "2018-01-11T13:51:38.3666667", - "enabled": true, - "description": "This is an example", - "tags": [ - "Tag1", - "Tag2" - ] -} -``` - -> [!NOTE] -> `enabled` being set to `true` signifies that the brand is in the *Include* list for Video Analyzer for Media to detect, and `enabled` being false signifies that the brand is in the *Exclude* list, so Video Analyzer for Media won't detect it. - -## Update a specific brand - -The [update a brand](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Update-Brand) API lets you search for the details of a brand in the custom Brands model for the specified account using the brand ID. - -### Response - -The response provides the updated information on the brand that you updated following the format of the example below. 
- -```json -{ - "referenceUrl": null, - "id": 97974, - "name": "Example", - "accountId": "SampleAccountId", - "lastModifierUserName": "SampleUserName", - "Created": "2018-04-25T14:59:52.7433333", - "lastModified": "2018-04-25T15:37:50.67", - "enabled": false, - "description": "This is an update example", - "tags": [ - "Tag1", - "NewTag2" - ] -} -``` - -## Get all of the Brands - -The [get all brands](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Brands) API returns all of the brands in the custom Brands model for the specified account regardless of whether the brand is meant to be in the *Include* or *Exclude* brands list. - -### Response - -The response provides a list of all of the brands in your account and each of their details following the format of the example below. - -```json -[ - { - "ReferenceUrl": null, - "id": 97974, - "name": "Example", - "accountId": "AccountId", - "lastModifierUserName": "UserName", - "Created": "2018-04-25T14:59:52.7433333", - "LastModified": "2018-04-25T14:59:52.7433333", - "enabled": true, - "description": "This is an example", - "tags": ["Tag1", "Tag2"] - }, - { - "ReferenceUrl": null, - "id": 97975, - "name": "Example2", - "accountId": "AccountId", - "lastModifierUserName": "UserName", - "Created": "2018-04-26T14:59:52.7433333", - "LastModified": "2018-04-26T14:59:52.7433333", - "enabled": false, - "description": "This is another example", - "tags": ["Tag1", "Tag2"] - }, -] -``` - -> [!NOTE] -> The brand named *Example* is in the *Include* list for Video Analyzer for Media to detect, and the brand named *Example2* is in the *Exclude* list, so Video Analyzer for Media won't detect it. - -## Get Brands model settings - -The [get brands settings](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Brands) API returns the Brands model settings in the specified account. The Brands model settings represent whether detection from the Bing brands database is enabled or not. 
If Bing brands aren't enabled, Video Analyzer for Media will only detect brands from the custom Brands model of the specified account. - -### Response - -The response shows whether Bing brands are enabled following the format of the example below. - -```json -{ - "state": true, - "useBuiltIn": true -} -``` - -> [!NOTE] -> `useBuiltIn` being set to true represents that Bing brands are enabled. If `useBuiltin` is false, Bing brands are disabled. The `state` value can be ignored because it has been deprecated. - -## Update Brands model settings - -The [update brands](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Update-Brands-Model-Settings) API updates the Brands model settings in the specified account. The Brands model settings represent whether detection from the Bing brands database is enabled or not. If Bing brands aren't enabled, Video Analyzer for Media will only detect brands from the custom Brands model of the specified account. - -The `useBuiltIn` flag set to true means that Bing brands are enabled. If `useBuiltin` is false, Bing brands are disabled. - -### Response - -There's no returned content when the Brands model setting is updated successfully. - -## Next steps - -[Customize Brands model using website](customize-brands-model-with-website.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-brands-model-with-website.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-brands-model-with-website.md deleted file mode 100644 index 201537879d038..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-brands-model-with-website.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: Customize a Brands model with the Azure Video Analyzer for Media (formerly Video Indexer) website -titleSuffix: Azure Video Analyzer for Media -description: Learn how to customize a Brands model with the Azure Video Analyzer for Media (formerly Video Indexer) website. 
-services: azure-video-analyzer -author: anikaz -manager: johndeu -ms.topic: article -ms.subservice: azure-video-analyzer-media -ms.date: 12/15/2019 -ms.author: kumud ---- - -# Customize a Brands model with the Video Analyzer for Media website - -Azure Video Analyzer for Media (formerly Video Indexer) supports brand detection from speech and visual text during indexing and reindexing of video and audio content. The brand detection feature identifies mentions of products, services, and companies suggested by Bing's brands database. For example, if Microsoft is mentioned in video or audio content or if it shows up in visual text in a video, Video Analyzer for Media detects it as a brand in the content. - -A custom Brands model allows you to: - -- select if you want Video Analyzer for Media to detect brands from the Bing brands database. -- select if you want Video Analyzer for Media to exclude certain brands from being detected (essentially creating a deny list of brands). -- select if you want Video Analyzer for Media to include brands that should be part of your model that might not be in Bing's brands database (essentially creating an accept list of brands). - -For a detailed overview, see this [Overview](customize-brands-model-overview.md). - -You can use the Video Analyzer for Media website to create, use, and edit custom Brands models detected in a video, as described in this topic. You can also use the API, as described in [Customize Brands model using APIs](customize-brands-model-with-api.md). - -> [!NOTE] -> If your video was indexed prior to adding a brand, you need to reindex it. You will find **Re-index** item in the drop-down menu associated with the video. Select **Advanced options** -> **Brand categories** and check **All brands**. - -## Edit Brands model settings - -You have the option to set whether or not you want brands from the Bing brands database to be detected. To set this option, you need to edit the settings of your Brands model. 
Follow these steps: - -1. Go to the [Video Analyzer for Media](https://www.videoindexer.ai/) website and sign in. -1. To customize a model in your account, select the **Content model customization** button on the left of the page. - - > [!div class="mx-imgBorder"] - > :::image type="content" source="./media/content-model-customization/content-model-customization.png" alt-text="Customize content model in Video Analyzer for Media"::: -1. To edit brands, select the **Brands** tab. - - > [!div class="mx-imgBorder"] - > :::image type="content" source="./media/customize-brand-model/customize-brand-model.png" alt-text="Screenshot shows the Brands tab of the Content model customization dialog box"::: -1. Check the **Show brands suggested by Bing** option if you want Video Analyzer for Media to detect brands suggested by Bing—leave the option unchecked if you don't. - -## Include brands in the model - -The **Include brands** section represents custom brands that you want Video Analyzer for Media to detect, even if they aren't suggested by Bing. - -### Add a brand to include list - -1. Select **+ Create new brand**. - - Provide a name (required), category (optional), description (optional), and reference URL (optional). - The category field is meant to help you tag your brands. This field shows up as the brand's *tags* when using the Video Analyzer for Media APIs. For example, the brand "Azure" can be tagged or categorized as "Cloud". - - The reference URL field can be any reference website for the brand (like a link to its Wikipedia page). - -2. Select **Save** and you'll see that the brand has been added to the **Include brands** list. - -### Edit a brand on the include list - -1. Select the pencil icon next to the brand that you want to edit. - - You can update the category, description, or reference URL of a brand. You can't change the name of a brand because names of brands are unique. 
If you need to change the brand name, delete the entire brand (see next section) and create a new brand with the new name. - -2. Select the **Update** button to update the brand with the new information. - -### Delete a brand on the include list - -1. Select the trash icon next to the brand that you want to delete. -2. Select **Delete** and the brand will no longer appear in your *Include brands* list. - -## Exclude brands from the model - -The **Exclude brands** section represents the brands that you don't want Video Analyzer for Media to detect. - -### Add a brand to exclude list - -1. Select **+ Create new brand.** - - Provide a name (required), category (optional). - -2. Select **Save** and you'll see that the brand has been added to the *Exclude brands* list. - -### Edit a brand on the exclude list - -1. Select the pencil icon next to the brand that you want to edit. - - You can only update the category of a brand. You can't change the name of a brand because names of brands are unique. If you need to change the brand name, delete the entire brand (see next section) and create a new brand with the new name. - -2. Select the **Update** button to update the brand with the new information. - -### Delete a brand on the exclude list - -1. Select the trash icon next to the brand that you want to delete. -2. Select **Delete** and the brand will no longer appear in your *Exclude brands* list. 
- -## Next steps - -[Customize Brands model using APIs](customize-brands-model-with-api.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-content-models-overview.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-content-models-overview.md deleted file mode 100644 index 060f97c1a022c..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-content-models-overview.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Customizing content models in Azure Video Analyzer for Media (formerly Video Indexer) -description: This article gives links to the conceptual articles that explain the benefits of each type of customization. This article also links to how-to guides that show how you can implement the customization of each model. -ms.topic: conceptual -ms.date: 06/26/2019 -ms.author: kumud ---- - -# Customizing content models in Video Analyzer for Media - -Azure Video Analyzer for Media (formerly Video Indexer) allows you to customize some of its models to be adapted to your specific use case. These models include [brands](customize-brands-model-overview.md), [language](customize-language-model-overview.md), and [person](customize-person-model-overview.md). You can easily customize these models using the Video Analyzer for Media website or API. - -This article gives links to articles that explain the benefits of each type of customization. The article also links to how-to guides that show how you can implement the customization of each model. 
- -## Animated characters - -* [Animated character detection](animated-characters-recognition.md) - -## Brands model - -* [Customizing the brands model overview](customize-brands-model-overview.md) -* [Customizing the brands model using the Video Analyzer for Media website](customize-brands-model-with-website.md) -* [Customizing the brands model using the Video Analyzer for Media API](customize-brands-model-with-api.md) - -## Language model - -* [Customizing language models overview](customize-language-model-overview.md) -* [Customizing language models using the Video Analyzer for Media website](customize-language-model-with-website.md) -* [Customizing language models using the Video Analyzer for Media API](customize-language-model-with-api.md) - -## Person model - -* [Customizing person models overview](customize-person-model-overview.md) -* [Customizing person models using the Video Analyzer for Media website](customize-person-model-with-website.md) -* [Customizing person models using the Video Analyzer for Media API](customize-person-model-with-api.md) - -## Next steps - -[Video Analyzer for Media overview](video-indexer-overview.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-language-model-overview.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-language-model-overview.md deleted file mode 100644 index c5bb024e6cf29..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-language-model-overview.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Customize a Language model in Azure Video Analyzer for Media (formerly Video Indexer) - Azure -titleSuffix: Azure Video Analyzer for Media -description: This article gives an overview of what is a Language model in Azure Video Analyzer for Media (formerly Video Indexer) and how to customize it. 
-author: Juliako -manager: femila -ms.topic: conceptual -ms.author: juliako -ms.date: 02/02/2022 ---- - -# Customize a Language model with Video Analyzer for Media - -Azure Video Analyzer for Media (formerly Video Indexer) supports automatic speech recognition through integration with the Microsoft [Custom Speech Service](https://azure.microsoft.com/services/cognitive-services/custom-speech-service/). You can customize the Language model by uploading adaptation text, namely text from the domain whose vocabulary you'd like the engine to adapt to. Once you train your model, new words appearing in the adaptation text will be recognized, assuming default pronunciation, and the Language model will learn new probable sequences of words. See the list of supported by Video Analyzer for Media languages in [supported langues](language-support.md). - -Let's take a word that is highly specific, like "Kubernetes" (in the context of Azure Kubernetes service), as an example. Since the word is new to Video Analyzer for Media, it is recognized as "communities". You need to train the model to recognize it as "Kubernetes". In other cases, the words exist, but the Language model is not expecting them to appear in a certain context. For example, "container service" is not a 2-word sequence that a non-specialized Language model would recognize as a specific set of words. - -You have the option to upload words without context in a list in a text file. This is considered partial adaptation. Alternatively, you can upload text file(s) of documentation or sentences related to your content for better adaptation. - -You can use the Video Analyzer for Media APIs or the website to create and edit custom Language models, as described in topics in the [Next steps](#next-steps) section of this topic. 
- -## Best practices for custom Language models - -Video Analyzer for Media learns based on probabilities of word combinations, so to learn best: - -* Give enough real examples of sentences as they would be spoken. -* Put only one sentence per line, not more. Otherwise the system will learn probabilities across sentences. -* It is okay to put one word as a sentence to boost the word against others, but the system learns best from full sentences. -* When introducing new words or acronyms, if possible, give as many examples of usage in a full sentence to give as much context as possible to the system. -* Try to put several adaptation options, and see how they work for you. -* Avoid repetition of the exact same sentence multiple times. It may create bias against the rest of the input. -* Avoid including uncommon symbols (~, # @ % &) as they will get discarded. The sentences in which they appear will also get discarded. -* Avoid putting too large inputs, such as hundreds of thousands of sentences, because doing so will dilute the effect of boosting. - -## Next steps - -[Customize Language model using APIs](customize-language-model-with-api.md) - -[Customize Language model using the website](customize-language-model-with-website.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-language-model-with-website.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-language-model-with-website.md deleted file mode 100644 index 0c795b1e3be95..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-language-model-with-website.md +++ /dev/null @@ -1,136 +0,0 @@ ---- -title: Customize Language model with Azure Video Analyzer for Media (formerly Video Indexer) website -titleSuffix: Azure Video Analyzer for Media -description: Learn how to customize a Language model with the Azure Video Analyzer for Media (formerly Video Indexer) website. 
-services: azure-video-analyzer -author: anikaz -manager: johndeu -ms.topic: article -ms.subservice: azure-video-analyzer-media -ms.date: 08/10/2020 -ms.author: kumud ---- - -# Customize a Language model with the Video Analyzer for Media website - -Azure Video Analyzer for Media (formerly Video Indexer) lets you create custom Language models to customize speech recognition by uploading adaptation text, namely text from the domain whose vocabulary you'd like the engine to adapt to. Once you train your model, new words appearing in the adaptation text will be recognized. - -For a detailed overview and best practices for custom language models, see [Customize a Language model with Video Analyzer for Media](customize-language-model-overview.md). - -You can use the Video Analyzer for Media website to create and edit custom Language models in your account, as described in this topic. You can also use the API, as described in [Customize Language model using APIs](customize-language-model-with-api.md). - -## Create a Language model - -1. Go to the [Video Analyzer for Media](https://www.videoindexer.ai/) website and sign in. -1. To customize a model in your account, select the **Content model customization** button on the left of the page. - - > [!div class="mx-imgBorder"] - > :::image type="content" source="./media/customize-language-model/model-customization.png" alt-text="Customize content model in Video Analyzer for Media"::: -1. Select the **Language** tab. - - You see a list of supported languages. -1. Under the language that you want, select **Add model**. -1. Type in the name for the Language model and hit enter. - - This step creates the model and gives the option to upload text files to the model. -1. To add a text file, select **Add file**. Your file explorer will open. -1. Navigate to and select the text file. You can add multiple text files to a Language model. 
- - You can also add a text file by selecting the **...** button on the right side of the Language model and selecting **Add file**. -1. Once you're done uploading the text files, select the green **Train** option. - -The training process can take a few minutes. Once the training is done, you see **Trained** next to the model. You can preview, download, and delete the file from the model. - -> [!div class="mx-imgBorder"] -> :::image type="content" source="./media/customize-language-model/customize-language-model.png" alt-text="Train the model"::: - -### Using a Language model on a new video - -To use your Language model on a new video, do one of the following actions: - -* Select the **Upload** button on the top of the page. - - ![Upload button Video Analyzer for Media](./media/customize-language-model/upload.png) -* Drop your audio or video file or browse for your file. - -You're given the option to select the **Video source language**. Select the drop-down and select a Language model that you created from the list. It should say the language of your Language model and the name that you gave it in parentheses. For example: - -![Choose video source language—Reindex a video with Video Analyzer for Media](./media/customize-language-model/reindex.png) - -Select the **Upload** option in the bottom of the page, and your new video will be indexed using your Language model. - -### Using a Language model to reindex - -To use your Language model to reindex a video in your collection, follow these steps: - -1. Sign in to the [Video Analyzer for Media](https://www.videoindexer.ai/) home page. -1. Click on **...** button on the video and select **Re-index**. -1. You're given the option to select the **Video source language** to reindex your video with. Select the drop-down and select a Language model that you created from the list. It should say the language of your language model and the name that you gave it in parentheses. -1. 
Select the **Re-index** button and your video will be reindexed using your Language model. - -## Edit a Language model - -You can edit a Language model by changing its name, adding files to it, and deleting files from it. - -If you add or delete files from the Language model, you'll have to train the model again by selecting the green **Train** option. - -### Rename the Language model - -You can change the name of the Language model by selecting the ellipsis (**...**) button on the right side of the Language model and selecting **Rename**. - -Type in the new name and hit enter. - -### Add files - -To add a text file, select **Add file**. Your file explorer will open. - -Navigate to and select the text file. You can add multiple text files to a Language model. - -You can also add a text file by selecting the ellipsis (**...**) button on the right side of the Language model and selecting **Add file**. - -### Delete files - -To delete a file from the Language model, select the ellipsis (**...**) button on the right side of the text file and select **Delete**. A new window pops up telling you that the deletion can't be undone. Select the **Delete** option in the new window. - -This action removes the file completely from the Language model. - -## Delete a Language model - -To delete a Language model from your account, select the ellipsis (**...**) button on the right side of the Language model and select **Delete**. - -A new window pops up telling you that the deletion can't be undone. Select the **Delete** option in the new window. - -This action removes the Language model completely from your account. Any video that was using the deleted Language model will keep the same index until you reindex the video. If you reindex the video, you can assign a new Language model to the video. Otherwise, Video Analyzer for Media will use its default model to reindex the video. 
- -## Customize Language models by correcting transcripts - -Video Analyzer for Media supports automatic customization of Language models based on the actual corrections users make to the transcriptions of their videos. - -1. To make corrections to a transcript, open up the video that you want to edit from your Account Videos. Select the **Timeline** tab. - - ![Customize language model timeline tab—Video Analyzer for Media](./media/customize-language-model/timeline.png) - -1. Select the pencil icon to edit the transcript of your transcription. - - ![Customize language model edit transcription—Video Analyzer for Media](./media/customize-language-model/edits.png) - - Video Analyzer for Media captures all lines that are corrected by you in the transcription of your video and adds them automatically to a text file called "From transcript edits". These edits are used to retrain the specific Language model that was used to index this video. - - The edits that were done in the [widget's](video-indexer-embed-widgets.md) timeline are also included. - - If you didn't specify a Language model when indexing this video, all edits for this video will be stored in a default Language model called "Account adaptations" within the detected language of the video. - - In case multiple edits have been made to the same line, only the last version of the corrected line will be used for updating the Language model. - - > [!NOTE] - > Only textual corrections are used for the customization. Corrections that don't involve actual words (for example, punctuation marks or spaces) aren't included. - -1. You'll see transcript corrections show up in the Language tab of the Content model customization page. - - To look at the "From transcript edits" file for each of your Language models, select it to open it. 
- - ![From transcript edits—Video Analyzer for Media](./media/customize-language-model/from-transcript-edits.png) - -## Next steps - -[Customize language model using APIs](customize-language-model-with-api.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-person-model-overview.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-person-model-overview.md deleted file mode 100644 index a8881246ed736..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-person-model-overview.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Customize a Person model in Azure Video Analyzer for Media (formerly Video Indexer) - Azure -description: This article gives an overview of what is a Person model in Azure Video Analyzer for Media (formerly Video Indexer) and how to customize it. -ms.topic: conceptual -ms.date: 05/15/2019 -ms.author: kumud ---- - -# Customize a Person model in Video Analyzer for Media - -Azure Video Analyzer for Media (formerly Video Indexer) supports celebrity recognition in your videos. The celebrity recognition feature covers approximately one million faces based on commonly requested data source such as IMDB, Wikipedia, and top LinkedIn influencers. Faces that are not recognized by Video Analyzer for Media are still detected but are left unnamed. Customers can build custom Person models and enable Video Analyzer for Media to recognize faces that are not recognized by default. Customers can build these Person models by pairing a person's name with image files of the person's face. - -If your account caters to different use-cases, you can benefit from being able to create multiple Person models per account. For example, if the content in your account is meant to be sorted into different channels, you might want to create a separate Person model for each channel. 
- -> [!NOTE] -> Each Person model supports up to 1 million people and each account has a limit of 50 Person models. - -Once a model is created, you can use it by providing the model ID of a specific Person model when uploading/indexing or reindexing a video. Training a new face for a video, updates the specific custom model that the video was associated with. - -If you do not need the multiple Person model support, do not assign a Person model ID to your video when uploading/indexing or reindexing. In this case, Video Analyzer for Media will use the default Person model in your account. - -You can use the Video Analyzer for Media website to edit faces that were detected in a video and to manage multiple custom Person models in your account, as described in the [Customize a Person model using a website](customize-person-model-with-website.md) topic. You can also use the API, as described in [Customize a Person model using APIs](customize-person-model-with-api.md). diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-person-model-with-api.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-person-model-with-api.md deleted file mode 100644 index 58585c94503e1..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-person-model-with-api.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Customize a Person model with Azure Video Analyzer for Media (formerly Video Indexer) API -titleSuffix: Azure Video Analyzer for Media -description: Learn how to customize a Person model with the Azure Video Analyzer for Media (formerly Video Indexer) API. 
-services: azure-video-analyzer -author: anikaz -manager: johndeu -ms.topic: article -ms.subservice: azure-video-analyzer-media -ms.date: 01/14/2020 -ms.author: kumud ---- - -# Customize a Person model with the Video Analyzer for Media API - -Azure Video Analyzer for Media (formerly Video Indexer) supports face detection and celebrity recognition for video content. The celebrity recognition feature covers about one million faces based on commonly requested data source such as IMDB, Wikipedia, and top LinkedIn influencers. Faces that aren't recognized by the celebrity recognition feature are detected but left unnamed. After you upload your video to Video Analyzer for Media and get results back, you can go back and name the faces that weren't recognized. Once you label a face with a name, the face and name get added to your account's Person model. Video Analyzer for Media will then recognize this face in your future videos and past videos. - -You can use the Video Analyzer for Media API to edit faces that were detected in a video, as described in this topic. You can also use the Video Analyzer for Media website, as described in [Customize Person model using the Video Analyzer for Media website](customize-person-model-with-api.md). - -## Managing multiple Person models - -Video Analyzer for Media supports multiple Person models per account. This feature is currently available only through the Video Analyzer for Media APIs. - -If your account caters to different use-case scenarios, you might want to create multiple Person models per account. For example, if your content is related to sports, you can then create a separate Person model for each sport (football, basketball, soccer, and so on). - -Once a model is created, you can use it by providing the model ID of a specific Person model when uploading/indexing or reindexing a video. Training a new face for a video updates the specific custom model that the video was associated with. 
- -Each account has a limit of 50 Person models. If you don't need the multiple Person model support, don't assign a Person model ID to your video when uploading/indexing or reindexing. In this case, Video Analyzer for Media uses the default custom Person model in your account. - -## Create a new Person model - -To create a new Person model in the specified account, use the [create a person model](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Create-Person-Model) API. - -The response provides the name and generated model ID of the Person model that you just created following the format of the example below. - -```json -{ - "id": "227654b4-912c-4b92-ba4f-641d488e3720", - "name": "Example Person Model" -} -``` - -You then use the **id** value for the **personModelId** parameter when [uploading a video to index](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Upload-Video) or [reindexing a video](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Re-Index-Video). - -## Delete a Person model - -To delete a custom Person model from the specified account, use the [delete a person model](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Delete-Person-Model) API. - -Once the Person model is deleted successfully, the index of your current videos that were using the deleted model will remain unchanged until you reindex them. Upon reindexing, the faces that were named in the deleted model won't be recognized by Video Analyzer for Media in your current videos that were indexed using that model but the faces will still be detected. Your current videos that were indexed using the deleted model will now use your account's default Person model. If faces from the deleted model are also named in your account's default model, those faces will continue to be recognized in the videos. - -There's no returned content when the Person model is deleted successfully. 
- -## Get all Person models - -To get all Person models in the specified account, use the [get a person model](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Person-Models) API. - -The response provides a list of all of the Person models in your account (including the default Person model in the specified account) and each of their names and IDs following the format of the example below. - -```json -[ - { - "id": "59f9c326-b141-4515-abe7-7d822518571f", - "name": "Default" - }, - { - "id": "9ef2632d-310a-4510-92e1-cc70ae0230d4", - "name": "Test" - } -] -``` - -You can choose which model you want to use for a video by using the `id` value of the Person model for the `personModelId` parameter when [uploading a video to index](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Upload-Video) or [reindexing a video](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Re-Index-Video). - -## Update a face - -This command allows you to update a face in your video with a name using the ID of the video and ID of the face. This action then updates the Person model that the video was associated with upon uploading/indexing or reindexing. If no Person model was assigned, it updates the account's default Person model. - -The system then recognizes the occurrences of the same face in your other current videos that share the same Person model. Recognition of the face in your other current videos might take some time to take effect as this is a batch process. - -You can update a face that Video Analyzer for Media recognized as a celebrity with a new name. The new name that you give will take precedence over the built-in celebrity recognition. - -To update the face, use the [update a video face](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Update-Video-Face) API. 
- -Names are unique for Person models, so if you give two different faces in the same Person model the same `name` parameter value, Video Analyzer for Media views the faces as the same person and converges them once you reindex your video. - -## Next steps - -[Customize Person model using the Video Analyzer for Media website](customize-person-model-with-website.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/deploy-with-arm-template.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/deploy-with-arm-template.md deleted file mode 100644 index a92bbaac5fcf6..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/deploy-with-arm-template.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: Deploy Azure Video Analyzer for Media with ARM template -titleSuffix: Azure Video Analyzer for Media (formerly Video Indexer) -description: In this tutorial you will create an Azure Video Analyzer for Media account by using Azure Resource Manager (ARM) template. -ms.topic: tutorial -ms.date: 12/01/2021 -ms.author: juliako ---- - -# Tutorial: deploy Azure Video Analyzer for Media with ARM template - -## Overview - -In this tutorial you will create an Azure Video Analyzer for Media (formerly Video Indexer) account by using Azure Resource Manager (ARM) template (preview). -The resource will be deployed to your subscription and will create the Azure Video Analyzer for Media resource based on parameters defined in the avam.template file. - -> [!NOTE] -> This sample is *not* for connecting an existing Azure Video Analyzer for Media classic account to an ARM-based Video Analyzer for Media account. -> For full documentation on Azure Video Analyzer for Media API, visit the [Developer portal](https://aka.ms/avam-dev-portal) page. -> The current API Version is "2021-10-27-preview". Check this Repo from time to time to get updates on new API Versions. - -## Prerequisites - -* An Azure Media Services (AMS) account. 
You can create one for free through the [Create AMS Account](/azure/media-services/latest/account-create-how-to). - -## Deploy the sample - ----- - -### Option 1: Click the "Deploy To Azure Button", and fill in the missing parameters - -[![Deploy to Azure](https://aka.ms/deploytoazurebutton)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure-Samples%2Fmedia-services-video-indexer%2Fmaster%2FARM-Samples%2FCreate-Account%2Favam.template.json) - ----- - -### Option 2 : Deploy using PowerShell Script - -1. Open The [template file](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/ARM-Samples/Create-Account/avam.template.json) file and inspect its content. -2. Fill in the required parameters (see below) -3. Run the Following PowerShell commands: - - * Create a new Resource group on the same location as your Azure Video Analyzer for Media account, using the [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) cmdlet. - - - ```powershell - New-AzResourceGroup -Name myResourceGroup -Location eastus - ``` - - * Deploy the template to the resource group using the [New-AzResourceGroupDeployment](/powershell/module/az.resources/new-azresourcegroupdeployment) cmdlet. - - ```powershell - New-AzResourceGroupDeployment -ResourceGroupName myResourceGroup -TemplateFile ./avam.template.json - ``` - -> [!NOTE] -> If you would like to work with bicep format, inspect the [bicep file](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/ARM-Samples/Create-Account/avam.template.bicep) on this repo. - -## Parameters - -### name - -* Type: string -* Description: Specifies the name of the new Azure Video Analyzer for Media account. -* required: true - -### location - -* Type: string -* Description: Specifies the Azure location where the Azure Video Analyzer for Media account should be created. 
-* Required: false - -> [!NOTE] -> You need to deploy Your Azure Video Analyzer for Media account in the same location (region) as the associated Azure Media Services(AMS) resource exists. - -### mediaServiceAccountResourceId - -* Type: string -* Description: The Resource ID of the Azure Media Services(AMS) resource. -* Required: true - -### managedIdentityId - -* Type: string -* Description: The Resource ID of the Managed Identity used to grant access between Azure Media Services(AMS) resource and the Azure Video Analyzer for Media account. -* Required: true - -### tags - -* Type: object -* Description: Array of objects that represents custom user tags on the Azure Video Analyzer for Media account - - Required: false - -## Reference documentation - -If you're new to Azure Video Analyzer for Media (formerly Video Indexer), see: - -* [Azure Video Analyzer for Media Documentation](/en-gb/azure/azure-video-analyzer/video-analyzer-for-media-docs/) -* [Azure Video Analyzer for Media Developer Portal](/en-gb/azure/azure-video-analyzer/video-analyzer-for-media-docs/) -* After completing this tutorial, head to other Azure Video Analyzer for Media samples, described on [README.md](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/README.md) - -If you're new to template deployment, see: - -* [Azure Resource Manager documentation](../../azure-resource-manager/index.yml) -* [Deploy Resources with ARM Template](../../azure-resource-manager/templates/deploy-powershell.md) -* [Deploy Resources with Bicep and Azure CLI](../../azure-resource-manager/bicep/deploy-cli.md) - -## Next steps - -[Connect an existing classic paid Video Analyzer for Media account to ARM-based account](connect-classic-account-to-arm.md) \ No newline at end of file diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/faq.yml b/articles/azure-video-analyzer/video-analyzer-for-media-docs/faq.yml deleted file mode 100644 index 41a07f90f007a..0000000000000 --- 
a/articles/azure-video-analyzer/video-analyzer-for-media-docs/faq.yml +++ /dev/null @@ -1,201 +0,0 @@ -### YamlMime:FAQ -metadata: - title: Frequently asked questions about Azure Video Analyzer for Media (formerly Video Indexer) - Azure - titleSuffix: Azure Video Analyzer for Media - description: This article gives answers to frequently asked questions about Azure Video Analyzer for Media (formerly Video Indexer). - services: azure-video-analyzer - author: Juliako - manager: femila - ms.topic: faq - ms.subservice: azure-video-analyzer-media - ms.date: 05/25/2021 - ms.author: juliako -title: Video Analyzer for Media frequently asked questions -summary: This article answers frequently asked questions about Azure Video Analyzer for Media (formerly Video Indexer). - - -sections: - - name: General questions - questions: - - question: What is Video Analyzer for Media? - answer: | - Video Analyzer for Media is an artificial intelligence service that is part of Microsoft Azure Media Services. Video Analyzer for Media provides an orchestration of multiple machine learning models that enable you to easily extract deep insight from a video. To provide advanced and accurate insights, Video Analyzer for Media makes use of multiple channels of the video: audio, speech, and visual. Video Analyzer for Media’s insights may be used in many ways, like improving content discoverability and accessibility, creating new monetization opportunities, or building new experiences that use the insights. Video Analyzer for Media provides a web-based interface for testing, configuration, and customization of models in your account. Developers can use a REST-based API to integrate Video Analyzer for Media into production system. - - - question: What can I do with Video Analyzer for Media? - answer: | - Some of the operations that Video Analyzer for Media can perform on media files include: - - * Identifying and extracting speech and identify speakers. 
- * Identifying and extracting on-screen text in a video. - * Detecting objects in a video file. - * Identifying brands (for example: Microsoft) from audio tracks and on-screen text in a video. - * Detecting and recognizing faces from a database of celebrities and a user-defined database of faces. - * Extracting topics discussed but not necessarily mentioned in audio and video content. - * Creating closed captions or subtitles from the audio track. - - For more information and more Video Analyzer for Media features, see [Overview](video-indexer-overview.md). - - - question: How do I get started with Video Analyzer for Media? - answer: | - Video Analyzer for Media includes a free trial offering that provides you with 600 minutes in the web-based interface and 2,400 minutes via the API. You can [login to the Video Analyzer for Media web-based interface](https://www.videoindexer.ai/) and try it for yourself using any web identity and without having to set up an Azure Subscription. Follow [this easy introduction lab](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/IntroToVideoIndexer.md) to get a better idea of how to use Video Analyzer for Media. - - To index videos and audio files at scale, you can connect Video Analyzer for Media to a paid Microsoft Azure subscription. You can find more information on pricing on the [pricing](https://azure.microsoft.com/pricing/details/cognitive-services/video-indexer/) page. - - You can find more information on getting started in [Get started](video-indexer-get-started.md). - - - question: Do I need coding skills to use Video Analyzer for Media? - answer: | - You can use the Video Analyzer for Media web-based interface to evaluate, configure, and manage your account with **no coding required**. 
When you are ready to develop more complex applications, you can use the [Video Analyzer for Media API](https://api-portal.videoindexer.ai/) to integrate Video Analyzer for Media into your own applications, web sites, or [custom workflows using serverless technologies like Azure Logic Apps](https://azure.microsoft.com/blog/logic-apps-flow-connectors-will-make-automating-video-indexer-simpler-than-ever/) or Azure Functions. - - - question: Do I need machine learning skills to use Video Analyzer for Media? - answer: No, Video Analyzer for Media provides the integration of multiple machine learning models into one pipeline. Indexing a video or audio file via Video Analyzer for Media retrieves a full set of insights extracted on one shared timeline without any machine learning skills or knowledge on algorithms needed on the customer's part. - - - question: What media formats does Video Analyzer for Media support? - answer: | - Video Analyzer for Media supports most common media formats. Refer to the [Azure Media Encoder standard formats](/azure/media-services/latest/encode-media-encoder-standard-formats-reference) list for more details. - - - question: How do I upload a media file into Video Analyzer for Media and what are the limitations? - answer: | - In the Video Analyzer for Media web-based portal, you can upload a media file using the file upload dialog or by pointing to a URL that directly hosts the source file (see [example](https://nimbuscdn-nimbuspm.streaming.mediaservices.windows.net/2b533311-b215-4409-80af-529c3e853622/Ignite-short.mp4)). Any URL that hosts the media content using an iFrame or embed code will not work (see [example](https://www.videoindexer.ai/accounts/7e1282e8-083c-46ab-8c20-84cae3dc289d/videos/5cfa29e152/?t=4.11)). - - For more information, please see this [how-to guide](./upload-index-videos.md). - - - question: How long does it take Video Analyzer for Media to extract insights from media? 
- answer: | - The amount of time it takes to index a video or audio file, both using the Video Analyzer for Media API and the Video Analyzer for Media web-based interface, depends on multiple parameters such as the file length and quality, the number of insights found in the file, the number of [reserved units](/azure/media-services/previous/media-services-scale-media-processing-overview) available, and whether the [streaming endpoint](/azure/media-services/previous/media-services-streaming-endpoints-overview) is enabled or not. We recommend that you run a few test files with your own content and take an average to get a better idea. - - - question: Can I create customized workflows to automate processes with Video Analyzer for Media? - answer: | - Yes, you can integrate Video Analyzer for Media into serverless technologies like Logic Apps, Flow, and [Azure Functions](https://azure.microsoft.com/services/functions/). You can find more details on the [Logic App](https://azure.microsoft.com/services/logic-apps/) and [Flow](https://flow.microsoft.com/en-us/) connectors for Video Analyzer for Media [here](https://azure.microsoft.com/blog/logic-apps-flow-connectors-will-make-automating-video-indexer-simpler-than-ever/). You can see some automation projects done by partners in the [Video Analyzer for Media Samples](https://github.com/Azure-Samples/media-services-video-indexer) repo. - - - question: In which Azure regions is Video Analyzer for Media available? - answer: | - You can see which Azure regions Video Analyzer for Media is available on the [regions](https://azure.microsoft.com/global-infrastructure/services/?products=cognitive-services®ions=all) page. - - - question: Can I customize Video Analyzer for Media models for my specific use case? - answer: | - Yes. In Video Analyzer for Media you can customize some of the available models to better fit your needs. 
- - For example, our Person model supports out-of-the-box 1,000,000 faces of celebrity recognition, but you can also train it to recognize other faces which are not in that database. - - For details, see articles about customizing [Person](customize-person-model-overview.md), [Brands](customize-brands-model-overview.md), and [Language](customize-language-model-overview.md) models. - - - question: Can I edit the videos in my library? - answer: | - Yes. Press the **edit video** button from the library display or the **open in editor** button from the player display to get to the **Projects** tab. You can create a new project and add more videos from your library to edit them together, once you are done you can render your video and download. - - If you want to get insights on your new video, index it with Video Analyzer for Media and it will appear in your library with its insights. - - - question: Can I index multiple audio streams or channels? - answer: If there are multiple audio streams, Video Analyzer for Media takes the first one it encounters and will process only this stream. In any audio stream Video Analyzer for Media processes, it takes the different channels (if present) and processes them together as mono. For streams/channels manipulation you can use ffmpeg commands on the file before indexing it. - - - question: Can a storage account connected to the Media Services account be behind a firewall? - answer: | - Your paid Video Analyzer for Media account uses the specified Media Services account that is connected to a storage account. Currently, to use the connected storage account that is behind firewall, you need to contact Video Analyzer for Media support and they will give the exact directions. - - To open a new support request on Azure portal, navigate to [support request](https://portal.azure.com/#blade/Microsoft_Azure_Support/HelpAndSupportBlade/newsupportrequest). - - - question: What is the SLA for Video Analyzer for Media? 
- answer: | - Azure Media Service’s SLA covers Video Analyzer for Media and can be found on the [SLA](https://azure.microsoft.com/support/legal/sla/media-services/v1_2/) page. The SLA only applies to Video Analyzer for Media paid accounts and does not apply to the free trial. - - - name: Privacy Questions - questions: - - question: Are video and audio files indexed by Video Analyzer for Media stored? - answer: Yes, unless you delete the file from Video Analyzer for Media, either using the Video Analyzer for Media website or API, your video and audio files are stored. For the free trial, the video and audio files that you index are stored in the Azure region East US. Otherwise, your video and audio files are stored in the storage account of your Azure subscription. - - - question: Can I delete my files that are stored in Video Analyzer for Media Portal? - answer: Yes, you can always delete your video and audio files as well as any metadata and insights extracted from them by Video Analyzer for Media. Once you delete a file from Video Analyzer for Media, the file and its metadata and insights are permanently removed from Video Analyzer for Media. However, if you have implemented your own backup solution in Azure storage, the file remains in your Azure storage. - - - question: Can I control user access to my Video Analyzer for Media account? - answer: Yes, only account admins can invite and uninvite people to their accounts, as well as assign who has editing privileges and who has read-only access. - - - question: Who has access to my video and audio files that have been indexed and/or stored by Video Analyzer for Media and the metadata and insights that were extracted? - answer: Your video or audio content that have public as its privacy setting can be accessed by anyone who has the link to your video or audio content and its insights. 
Your video or audio content that have private as its privacy setting can only be accessed by users that were invited to the account of the video or audio content. The privacy setting of your content also applies to the metadata and insights that Video Analyzer for Media extracts. You assign the privacy setting when you upload your video or audio file. You can also change the privacy setting after indexing. - - - question: What access does Microsoft have to my video or audio files that have been indexed and/or stored by Video Analyzer for Media and the metadata and insights that were extracted? - answer: | - Per the [Azure Online Services Terms](https://www.microsoftvolumelicensing.com/DocumentSearch.aspx?Mode=3&DocumentTypeId=31) (OST), you completely own your content, and Microsoft will only access your content and the metadata and insights that Video Analyzer for Media extracts from your content according to the OST and the Microsoft Privacy Statement. - - - question: Are the custom models that I build in my Video Analyzer for Media account available to other accounts? - answer: | - No, the custom models that you create in your account are not available to any other account. Video Analyzer for Media currently allows you to build custom [brands](customize-brands-model-overview.md), [language](customize-language-model-overview.md), and [person](customize-person-model-overview.md) models in your account. These models are only available in the account in which you created the models. - - - question: Is the content indexed by Video Indexer kept within the Azure region where I am using Video Indexer? - answer: | - Yes, the content and its insights are kept within the Azure region (except for Singapore and Brazil South regions) unless you have a manual configuration in your Azure subscription that uses multiple Azure regions. 
- - Customer data in a region is replicated for BCDR reasons to the [paired region](../../availability-zones/cross-region-replication-azure.md#azure-cross-region-replication-pairings-for-all-geographies). - - - question: What is the privacy policy for Video Analyzer for Media? - answer: | - Video Analyzer for Media is covered by the [Microsoft Privacy Statement](https://privacy.microsoft.com/privacystatement). The privacy statement explains the personal data Microsoft processes, how Microsoft processes it, and for what purposes Microsoft processes it. To learn more about privacy, visit the [Microsoft Trust Center](https://www.microsoft.com/trustcenter). - - - question: What certifications does Video Analyzer for Media have? - answer: | - Video Analyzer for Media currently has the SOC certification. To review Video Analyzer for Media's certification, please refer to the [Microsoft Trust Center](https://www.microsoft.com/trustcenter/compliance/complianceofferings?product=Azure). - - - question: What is the difference between private and public videos? - answer: | - When videos are uploaded to Video Analyzer for Media, you can choose from two privacy settings: private and public. Public videos are accessible for anyone, including anonymous and unidentified users. Private ones are restricted solely to the account members. - - - question: I tried to upload a video as public and it was flagged for inappropriate or offensive content, what does that mean? - answer: | - When uploading a video to Video Analyzer for Media, an automatic content analysis is done by algorithms and models in order to make sure no inappropriate content will be presented publicly. If a video is found to be suspicious as containing explicit content, it will not be possible to set it as public. However, the account members can still access it as a private video (view it, download the insights and extracted artifacts, and perform other operations available to account members). 
- - In order to set the video for public access, you can either: - - * Build your own interface layer (such as app or website) and use it to interact with the Video Analyzer for Media service. This way the video remains private in our portal and your users can interact with it through your interface. For example, you can still get the insights or allow viewing of the video in your own interface. - * Request a human review of the content, which would result in removing of the restriction assuming the content is not explicit. - - This option can be explored if the Video Analyzer for Media website is used directly by your users as the interface layer, and for public (unauthenticated) viewing. - - - name: API Questions - questions: - - question: What APIs does Video Analyzer for Media offer? - answer: | - Video Analyzer for Media's APIs allows for indexing, extracting metadata, asset management, translation, embedding, customization of models and more. To find more detailed information on using the Video Analyzer for Media API, refer to the [Video Analyzer for Media Developer Portal](https://api-portal.videoindexer.ai/). - - - question: What client SDKs does Video Analyzer for Media offer? - answer: There are currently no client SDKs offered. The Video Analyzer for Media team is working on the SDKs and plans to deliver them soon. - - - question: How do I get started with Video Analyzer for Media's API? - answer: | - Follow [Tutorial: get started with the Video Analyzer for Media API](video-indexer-use-apis.md). - - - question: What is the difference between the Video Analyzer for Media API and the Azure Media Service v3 API? - answer: | - Currently there are some overlaps in features offered by the Video Analyzer for Media API and the Azure Media Service v3 API. You can find more information on how to compare both services [here](compare-video-indexer-with-media-services-presets.md). - - - question: What is an API access token and why do I need it? 
- answer: | - The Video Analyzer for Media API contains an Authorization API and an Operations API. The Authorizations API contains calls that give you access token. Each call to the Operations API should be associated with an access token, matching the authorization scope of the call. - - Access tokens are needed to use the Video Analyzer for Media APIs for security purposes. This ensures that any calls are coming from you or those who have access permissions to your account.  - - - question: What is the difference between Account access token, User access token, and Video access token? - answer: | - * Account level – account level access tokens let you perform operations on the account level or the video level. For example, upload a video, list all videos, get video insights. - * User level - user level access tokens let you perform operations on the user level. For example, get associated accounts. - * Video level – video level access tokens let you perform operations on a specific video. For example, get video insights, download captions, get widgets, etc. - - - question: How often do I need to get a new access token? When do access tokens expire? - answer: Access tokens expire every hour, so you need to generate a new access token every hour. - - - question: What are the login options to Video Analyzer for Media Developer portal? - answer: | - See a release note regarding [login information](release-notes.md#october-2020). - - Once you register your email account using an identity provider, you cannot use this email account with another identity provider. - - - name: Billing questions - questions: - - question: How much does Video Analyzer for Media cost? - answer: | - Video Analyzer for Media uses a simple pay-as-you-go pricing model based on the duration of the content input that you index. Additional charges may apply for encoding, streaming, storage, network usage, and media reserved units. 
For more information, see the [pricing](https://azure.microsoft.com/pricing/details/cognitive-services/video-indexer/) page. - - - question: When am I billed for using Video Analyzer for Media? - answer: When sending a video to be indexed, the user will define the indexing to be video analysis, audio analysis or both. This will determine which SKUs will be charged. If there is a critical level error during processing, an error code will be returned as a response. In such a case, no billing occurs. A critical error can be caused by a bug in our code or a critical failure in an internal dependency the service has. Errors such as wrong identification or insight extraction are not considered as critical and a response is returned. In any case where a valid (non-error code) response is returned, billing occurs. - - - question: Does Video Analyzer for Media offer a free trial? - answer: Yes, Video Analyzer for Media offers a free trial that gives full service and API functionality. There is a quota of 600 minutes worth of videos for web-based interface users and 2,400 minutes for API users. diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/includes/note-account-ms-uami-same-subscription-and-region.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/includes/note-account-ms-uami-same-subscription-and-region.md deleted file mode 100644 index 4a6ae4c542af7..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/includes/note-account-ms-uami-same-subscription-and-region.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -author: uratzmon -ms.service: video-analyzer for media -ms.topic: include -ms.date: 10/13/2021 -ms.author: uratzmon -ms.custom: ignite-fall-2021 ---- - -> [!NOTE] -> The associated user-assigned managed identity and the media service must be in the same region as the Video Analyzer for Media account. 
diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/includes/regulation.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/includes/regulation.md deleted file mode 100644 index ab6a10de98960..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/includes/regulation.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -author: Juliako -ms.topic: include -ms.date: 04/15/2021 -ms.author: juliako ---- - -> [!Warning] -> On June 11, 2020, Microsoft announced that it will not sell facial recognition technology to police departments in the United States until strong regulation, grounded in human rights, has been enacted. As such, customers may not use facial recognition features or functionality included in Azure Services, such as Face or Azure Video Analyzer for Media (formerly Video Indexer), if a customer is, or is allowing use of such services by or for, a police department in the United States. diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/index.yml b/articles/azure-video-analyzer/video-analyzer-for-media-docs/index.yml deleted file mode 100644 index 9db5ffc14025f..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/index.yml +++ /dev/null @@ -1,77 +0,0 @@ -### YamlMime:Landing - -title: Learn about Azure Video Analyzer for Media (formerly Video Indexer) -summary: Azure Video Analyzer for Media (formerly Video Indexer) is a cloud application, part of Azure Applied AI Services, built on Azure Media Services and Azure Cognitive Services (such as the Face, Translator, Computer Vision, and Speech). It enables you to extract the insights from your videos using Video Analyzer for Media video and audio models. 
- -metadata: - title: Azure Video Analyzer for Media (formerly Video Indexer) documentation - description: Azure Video Analyzer for Media (formerly Video Indexer) is a cloud application, part of Azure Applied AI Services, built on Azure Media Services and Azure Cognitive Services (such as the Face, Translator, Computer Vision, and Speech). It enables you to extract the insights from your videos using Video Analyzer for Media video and audio models. - services: azure-video-analyzer - ms.service: azure-video-analyzer - ms.topic: landing-page # Required - ms.collection: collection - author: Juliako - ms.author: juliako - ms.date: 05/07/2021 #Required; mm/dd/yyyy format. - -# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new - -landingContent: -# Cards and links should be based on top customer tasks or top subjects -# Start card title with a verb - # Card (optional) - - title: About Azure Video Analyzer for Media (formerly Video Indexer) - linkLists: - - linkListType: overview - links: - - text: "What is Azure Video Analyzer for Media?" - url: video-indexer-overview.md - - text: "Compare Media Services v3 presets and Video Analyzer for Media" - url: compare-video-indexer-with-media-services-presets.md - - text: Frequently asked questions - url: faq.yml - - text: User voice - url: https://aka.ms/UserVoiceVI - - linkListType: whats-new - links: - - text: "What's new in Video Analyzer for Media?" 
- url: release-notes.md - - # Card (optional) - - title: Get started - linkLists: - - linkListType: quickstart - links: - - text: Sign up and upload a video - url: video-indexer-get-started.md - - text: Invite users - url: invite-users.md - - linkListType: how-to-guide - links: - - text: Create an account connected to Azure - url: connect-to-azure.md - - text: Use Video Analyzer for Media API - url: video-indexer-use-apis.md - - text: Upload and index your videos - url: upload-index-videos.md - - text: Examine output produced by API - url: video-indexer-output-json-v2.md - - text: Embed Video Analyzer for Media widgets into apps - url: video-indexer-embed-widgets.md - - linkListType: sample - links: - - text: Code samples - url: https://github.com/Azure-Samples/media-services-video-indexer - -# Card - - title: Customize content models - linkLists: - - linkListType: how-to-guide - links: - - text: Customize a brands model - url: customize-brands-model-with-website.md - - text: Customize a language model - url: customize-language-model-with-website.md - - text: Customize a person model - url: customize-person-model-with-website.md - diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/language-identification-model.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/language-identification-model.md deleted file mode 100644 index 2eca81192fa80..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/language-identification-model.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Use Azure Video Analyzer for Media (formerly Video Indexer) to auto identify spoken languages - Azure -description: This article describes how the Azure Video Analyzer for Media (formerly Video Indexer) language identification model is used to automatically identifying the spoken language in a video. 
-ms.topic: conceptual -ms.date: 04/12/2020 -ms.author: ellbe ---- - -# Automatically identify the spoken language with language identification model - -Azure Video Analyzer for Media (formerly Video Indexer) supports automatic language identification (LID), which is the process of automatically identifying the spoken language content from audio and sending the media file to be transcribed in the dominant identified language. - -Currently LID supports: English, Spanish, French, German, Italian, Mandarin Chinese, Japanese, Russian, and Portuguese (Brazilian). - -Make sure to review the [Guidelines and limitations](#guidelines-and-limitations) section below. - -## Choosing auto language identification on indexing - -When indexing or [re-indexing](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Re-Index-Video) a video using the API, choose the `auto detect` option in the `sourceLanguage` parameter. - -When using portal, go to your **Account videos** on the [Video Analyzer for Media](https://www.videoindexer.ai/) home page and hover over the name of the video that you want to re-index. On the right-bottom corner click the re-index button. In the **Re-index video** dialog, choose *Auto detect* from the **Video source language** drop-down box. - -![auto detect](./media/language-identification-model/auto-detect.png) - -## Model output - -Video Analyzer for Media transcribes the video according to the most likely language if the confidence for that language is `> 0.6`. If the language cannot be identified with confidence, it assumes the spoken language is English. - -Model dominant language is available in the insights JSON as the `sourceLanguage` attribute (under root/videos/insights). A corresponding confidence score is also available under the `sourceLanguageConfidence` attribute. - -```json -"insights": { - "version": "1.0.0.0", - "duration": "0:05:30.902", - "sourceLanguage": "fr-FR", - "language": "fr-FR", - "transcript": [...], - . . . 
- "sourceLanguageConfidence": 0.8563 - }, -``` - -## Guidelines and limitations - -* Automatic language identification (LID) supports the following languages: - - English, Spanish, French, German, Italian, Mandarin Chines, Japanese, Russian, and Portuguese (Brazilian). -* Even though Video Analyzer for Media supports Arabic (Modern Standard and Levantine), Hindi, and Korean, these languages are not supported in LID. -* If the audio contains languages other than the supported list above, the result is unexpected. -* If Video Analyzer for Media cannot identify the language with a high enough confidence (`>0.6`), the fallback language is English. -* There is no current support for file with mixed languages audio. If the audio contains mixed languages, the result is unexpected. -* Low-quality audio may impact the model results. -* The model requires at least one minute of speech in the audio. -* The model is designed to recognize a spontaneous conversational speech (not voice commands, singing, etc.). - -## Next steps - -* [Overview](video-indexer-overview.md) -* [Automatically identify and transcribe multi-language content](multi-language-identification-transcription.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/live-stream-analysis.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/live-stream-analysis.md deleted file mode 100644 index 78006fa9e9efc..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/live-stream-analysis.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Live stream analysis using Azure Video Analyzer for Media (formerly Video Indexer) -description: This article shows how to perform a live stream analysis using Azure Video Analyzer for Media (formerly Video Indexer). 
-ms.topic: conceptual -ms.date: 11/13/2019 ---- - -# Live stream analysis with Video Analyzer for Media - -Azure Video Analyzer for Media (formerly Video Indexer) is an Azure service designed to extract deep insights from video and audio files offline. This is to analyze a given media file already created in advance. However, for some use cases it's important to get the media insights from a live feed as quick as possible to unlock operational and other use cases pressed in time. For example, such rich metadata on a live stream could be used by content producers to automate TV production. - -A solution described in this article, allows customers to use Video Analyzer for Media in near real-time resolutions on live feeds. The delay in indexing can be as low as four minutes using this solution, depending on the chunks of data being indexed, the input resolution, the type of content and the compute powered used for this process. - -![The Video Analyzer for Media metadata on the live stream](./media/live-stream-analysis/live-stream-analysis01.png) - -*Figure 1 – Sample player displaying the Video Analyzer for Media metadata on the live stream* - -The [stream analysis solution](https://aka.ms/livestreamanalysis) at hand, uses Azure Functions and two Logic Apps to process a live program from a live channel in Azure Media Services with Video Analyzer for Media and displays the result with Azure Media Player showing the near real-time resulted stream. - -In high level, it is comprised of two main steps. The first step runs every 60 seconds, and takes a subclip of the last 60 seconds played, creates an asset from it and indexes it via Video Analyzer for Media. Then the second step is called once indexing is complete. The insights captured are processed, sent to Azure Cosmos DB, and the subclip indexed is deleted. - -The sample player plays the live stream and gets the insights from Azure Cosmos DB, using a dedicated Azure Function. 
It displays the metadata and thumbnails in sync with the live video. - -![The two logic apps processing the live stream every minute in the cloud](./media/live-stream-analysis/live-stream-analysis02.png) - -*Figure 2 – The two logic apps processing the live stream every minute in the cloud.* - -## Step-by-step guide - -The full code and a step-by-step guide to deploy the results can be found in [GitHub project for Live media analytics with Video Analyzer for Media](https://aka.ms/livestreamanalysis). - -## Next steps - -[Video Analyzer for Media overview](video-indexer-overview.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/logic-apps-connector-tutorial.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/logic-apps-connector-tutorial.md deleted file mode 100644 index 35ad436cd1d99..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/logic-apps-connector-tutorial.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: The Azure Video Analyzer for Media (formerly Video Indexer) connectors with Logic App and Power Automate tutorial. -description: This tutorial shows how to unlock new experiences and monetization opportunities Azure Video Analyzer for Media (formerly Video Indexer) connectors with Logic App and Power Automate. -ms.author: alzam -ms.topic: tutorial #Required -ms.date: 09/21/2020 ---- - -# Tutorial: use Video Analyzer for Media with Logic App and Power Automate - -Azure Video Analyzer for Media (formerly Video Indexer) [REST API](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Delete-Video) supports both server-to-server and client-to-server communication and enables Video Analyzer for Media users to integrate video and audio insights easily into their application logic, unlocking new experiences and monetization opportunities. 
- -To make the integration even easier, we support [Logic Apps](https://azure.microsoft.com/services/logic-apps/) and [Power Automate](https://preview.flow.microsoft.com/connectors/shared_videoindexer-v2/video-indexer-v2/) connectors that are compatible with our API. You can use the connectors to set up custom workflows to effectively index and extract insights from a large amount of video and audio files, without writing a single line of code. Furthermore, using the connectors for your integration gives you better visibility on the health of your workflow and an easy way to debug it.  - -To help you get started quickly with the Video Analyzer for Media connectors, we will do a walkthrough of an example Logic App and Power Automate solution you can set up. This tutorial shows how to set up flows using Logic Apps. However, the editors and capabilities are almost identical in both solutions, thus the diagrams and explanations are applicable to both Logic Apps and Power Automate. - -The "upload and index your video automatically" scenario covered in this tutorial is comprised of two different flows that work together. -* The first flow is triggered when a blob is added or modified in an Azure Storage account. It uploads the new file to Video Analyzer for Media with a callback URL to send a notification once the indexing operation completes. -* The second flow is triggered based on the callback URL and saves the extracted insights back to a JSON file in Azure Storage. This two flow approach is used to support async upload and indexing of larger files effectively. - -This tutorial is using Logic App to show how to: - -> [!div class="checklist"] -> * Set up the file upload flow -> * Set up the JSON extraction flow - -[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] - -## Prerequisites - -* To begin with, you will need a Video Analyzer for Media account along with [access to the APIs via API key](video-indexer-use-apis.md). 
-* You will also need an Azure Storage account. Keep note of the access key for your Storage account. Create two containers – one to store videos in and one to store insights generated by Video Analyzer for Media in. -* Next, you will need to open two separate flows on either Logic Apps or Power Automate (depending on which you are using). - -## Set up the first flow - file upload - -The first flow is triggered whenever a blob is added in your Azure Storage container. Once triggered, it will create a SAS URI that you can use to upload and index the video in Video Analyzer for Media. In this section you will create the following flow. - -![File upload flow](./media/logic-apps-connector-tutorial/file-upload-flow.png) - -To set up the first flow, you will need to provide your Video Analyzer for Media API Key and Azure Storage credentials. - -![Azure blob storage](./media/logic-apps-connector-tutorial/azure-blob-storage.png) - -![Connection name and API key](./media/logic-apps-connector-tutorial/connection-name-api-key.png) - -> [!TIP] -> If you previously connected an Azure Storage account or Video Analyzer for Media account to a Logic App, your connection details are stored and you will be connected automatically.
                You can edit the connection by clicking on **Change connection** at the bottom of an Azure Storage (the storage window) or Video Analyzer for Media (the player window) action. - -Once you can connect to your Azure Storage and Video Analyzer for Media accounts, find and choose the "When a blob is added or modified" trigger in **Logic Apps Designer**. - -Select the container that you will place your video files in. - -![Screenshot shows the When a blob is added or modified dialog box where you can select a container.](./media/logic-apps-connector-tutorial/container.png) - -Next, find and select the "Create SAS URI by path” action. In the dialog for the action, select List of Files Path from the Dynamic content options. - -Also, add a new "Shared Access Protocol" parameter. Choose HttpsOnly for the value of the parameter. - -![SAS uri by path](./media/logic-apps-connector-tutorial/sas-uri-by-path.jpg) - -Fill out [your account location](regions.md) and [account ID](./video-indexer-use-apis.md#account-id) to get the Video Analyzer for Media account token. - -![Get account access token](./media/logic-apps-connector-tutorial/account-access-token.png) - -For “Upload video and index”, fill out the required parameters and Video URL. Select “Add new parameter” and select Callback URL. - -![Upload and index](./media/logic-apps-connector-tutorial/upload-and-index.png) - -You will leave the callback URL empty for now. You’ll add it only after finishing the second flow where the callback URL is created. - -You can use the default value for the other parameters or set them according to your needs. - -Click **Save**, and let’s move on to configure the second flow, to extract the insights once the upload and indexing is completed. - -## Set up the second flow - JSON extraction - -The completion of the uploading and indexing from the first flow will send an HTTP request with the correct callback URL to trigger the second flow. 
Then, it will retrieve the insights generated by Video Analyzer for Media. In this example, it will store the output of your indexing job in your Azure Storage. However, it is up to you what you can do with the output. - -Create the second flow separate from the first one. - -![JSON extraction flow](./media/logic-apps-connector-tutorial/json-extraction-flow.png) - -To set up this flow, you will need to provide your Video Analyzer for Media API Key and Azure Storage credentials again. You will need to update the same parameters as you did for the first flow. - -For your trigger, you will see a HTTP POST URL field. The URL won’t be generated until after you save your flow; however, you will need the URL eventually. We will come back to this. - -Fill out [your account location](regions.md) and [account ID](./video-indexer-use-apis.md#account-id) to get the Video Analyzer for Media account token. - -Go to the “Get Video Index” action and fill out the required parameters. For Video ID, put in the following expression: triggerOutputs()['queries']['id'] - -![Video Analyzer for Media action info](./media/logic-apps-connector-tutorial/video-indexer-action-info.jpg) - -This expression tells the connecter to get the Video ID from the output of your trigger. In this case, the output of your trigger will be the output of “Upload video and index” in your first trigger. - -Go to the “Create blob” action and select the path to the folder in which you will save the insights to. Set the name of the blob you are creating. For Blob content, put in the following expression: body(‘Get_Video_Index’) - -![Create blob action](./media/logic-apps-connector-tutorial/create-blob-action.jpg) - -This expression takes the output of the “Get Video Index” action from this flow. - -Click **Save flow**. - -Once the flow is saved, an HTTP POST URL is created in the trigger. Copy the URL from the trigger. 
- -![Save URL trigger](./media/logic-apps-connector-tutorial/save-url-trigger.png) - -Now, go back to the first flow and paste the URL in the "Upload video and index" action for the Callback URL parameter. - -Make sure both flows are saved, and you’re good to go! - -Try out your newly created Logic App or Power Automate solution by adding a video to your Azure blobs container, and go back a few minutes later to see that the insights appear in the destination folder. - -## Generate captions - -See the following blog for the steps that show [how to generate captions with Video Analyzer for Media and Logic Apps](https://techcommunity.microsoft.com/t5/azure-media-services/generating-captions-with-video-indexer-and-logic-apps/ba-p/1672198). - -The article also shows how to index a video automatically by copying it to OneDrive and how to store the captions generated by Video Analyzer for Media in OneDrive. - -## Clean up resources - -After you are done with this tutorial, feel free to keep this Logic App or Power Automate solution up and running if you need. However, if you do not want to keep this running and do not want to be billed, Turn Off both of your flows if you’re using Power Automate. Disable both of the flows if you’re using Logic Apps. - -## Next steps - -This tutorial showed just one Video Analyzer for Media connectors example. You can use the Video Analyzer for Media connectors for any API call provided by Video Analyzer for Media. For example: upload and retrieve insights, translate the results, get embeddable widgets and even customize your models. Additionally, you can choose to trigger those actions based on different sources like updates to file repositories or emails sent. You can then choose to have the results update to our relevant infrastructure or application or generate any number of action items. 
- -> [!div class="nextstepaction"] -> [Use the Video Analyzer for Media API](video-indexer-use-apis.md) - -For additional resources, refer to [Video Analyzer for Media](/connectors/videoindexer-v2/) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/manage-account-connected-to-azure.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/manage-account-connected-to-azure.md deleted file mode 100644 index ca1c36c4f2535..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/manage-account-connected-to-azure.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: Manage a Azure Video Analyzer for Media (formerly Video Indexer) account -description: Learn how to manage a Azure Video Analyzer for Media (formerly Video Indexer) account connected to Azure. -ms.topic: how-to -ms.date: 01/14/2021 -ms.author: juliako ---- - -# Manage a Video Analyzer for Media account connected to Azure - -This article demonstrates how to manage a Azure Video Analyzer for Media (formerly Video Indexer) account that's connected to your Azure subscription and an Azure Media Services account. - -> [!NOTE] -> You have to be the Video Analyzer for Media account owner to do account configuration adjustments discussed in this topic. - -## Prerequisites - -Connect your Video Analyzer for Media account to Azure, as described in [Connected to Azure](connect-to-azure.md). - -Make sure to follow [Prerequisites](connect-to-azure.md#prerequisites-for-connecting-to-azure) and review [Considerations](connect-to-azure.md#azure-media-services-considerations) in the article. - -## Examine account settings - -This section examines settings of your Video Analyzer for Media account. - -To view settings: - -1. Click on the user icon in the top-right corner and select **Settings**. - - ![Settings in Video Analyzer for Media](./media/manage-account-connected-to-azure/select-settings.png) - -2. On the **Settings** page, select the **Account** tab. 
- -If your Videos Indexer account is connected to Azure, you see the following things: - -* The name of the underlying Azure Media Services account. -* The number of indexing jobs running and queued. -* The number and type of allocated reserved units. - -If your account needs some adjustments, you'll see relevant errors and warnings about your account configuration on the **Settings** page. The messages contain links to exact places in Azure portal where you need to make changes. For more information, see the [errors and warnings](#errors-and-warnings) section that follows. - -## Repair the connection to Azure - -In the **Update connection to Azure Media Services** dialog of your [Video Analyzer for Media](https://www.videoindexer.ai/) page, you're asked to provide values for the following settings: - -|Setting|Description| -|---|---| -|Azure subscription ID|The subscription ID can be retrieved from the Azure portal. Click on **All services** in the left panel and search for "subscriptions". Select **Subscriptions** and choose the desired ID from the list of your subscriptions.| -|Azure Media Services resource group name|The name for the resource group in which you created the Media Services account.| -|Application ID|The Azure AD application ID (with permissions for the specified Media Services account) that you created for this Video Analyzer for Media account.

                To get the app ID, navigate to Azure portal. Under the Media Services account, choose your account and go to **API Access**. Select **Connect to Media Services API with service principal** -> **Azure AD App**. Copy the relevant parameters.| -|Application key|The Azure AD application key associated with your Media Services account that you specified above.

                To get the app key, navigate to Azure portal. Under the Media Services account, choose your account and go to **API Access**. Select **Connect to Media Services API with service principal** -> **Manage application** -> **Certificates & secrets**. Copy the relevant parameters.| - -## Errors and warnings - -If your account needs some adjustments, you see relevant errors and warnings about your account configuration on the **Settings** page. The messages contain links to exact places in Azure portal where you need to make changes. This section gives more details about the error and warning messages. - -* EventGrid - - You have to register the EventGrid resource provider using the Azure portal. In the [Azure portal](https://portal.azure.com/), go to **Subscriptions** > [subscription] > **ResourceProviders** > **Microsoft.EventGrid**. If not in the **Registered** state, select **Register**. It takes a couple of minutes to register. - -* Streaming endpoint - - Make sure the underlying Media Services account has the default **Streaming Endpoint** in a started state. Otherwise, you can't watch videos from this Media Services account or in Video Analyzer for Media. - -* Media reserved units - - You must allocate Media Reserved Units on your Media Service resource in order to index videos. For optimal indexing performance, it's recommended to allocate at least 10 S3 Reserved Units. For pricing information, see the FAQ section of the [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/) page. - -## Next steps - -You can programmatically interact with your trial account or Video Analyzer for Media accounts that are connected to Azure by following the instructions in: [Use APIs](video-indexer-use-apis.md). - -Use the same Azure AD user you used when connecting to Azure. 
diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/manage-multiple-tenants.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/manage-multiple-tenants.md deleted file mode 100644 index f3074f9fddd86..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/manage-multiple-tenants.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Manage multiple tenants with Azure Video Analyzer for Media (formerly Video Indexer) - Azure -description: This article suggests different integration options for managing multiple tenants with Azure Video Analyzer for Media (formerly Video Indexer). -ms.topic: conceptual -ms.date: 05/15/2019 -ms.author: ikbarmen ---- - -# Manage multiple tenants - -This article discusses different options for managing multiple tenants with Azure Video Analyzer for Media (formerly Video Indexer). Choose a method that is most suitable for your scenario: - -* Video Analyzer for Media account per tenant -* Single Video Analyzer for Media account for all tenants -* Azure subscription per tenant - -## Video Analyzer for Media account per tenant - -When using this architecture, a Video Analyzer for Media account is created for each tenant. The tenants have full isolation in the persistent and compute layer. - -![Video Analyzer for Media account per tenant](./media/manage-multiple-tenants/video-indexer-account-per-tenant.png) - -### Considerations - -* Customers do not share storage accounts (unless manually configured by the customer). -* Customers do not share compute (reserved units) and don't impact processing jobs times of one another. -* You can easily remove a tenant from the system by deleting the Video Analyzer for Media account. -* There is no ability to share custom models between tenants. - - Make sure there is no business requirement to share custom models. -* Harder to manage due to multiple Video Analyzer for Media (and associated Media Services) accounts per tenant. 
- -> [!TIP] -> Create an admin user for your system in [Video Indexer Developer Portal](https://api-portal.videoindexer.ai/) and use the Authorization API to provide your tenants the relevant [account access token](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Account-Access-Token). - -## Single Video Analyzer for Media account for all users - -When using this architecture, the customer is responsible for tenants isolation. All tenants have to use a single Video Analyzer for Media account with a single Azure Media Service account. When uploading, searching, or deleting content, the customer will need to filter the proper results for that tenant. - -![Single Video Analyzer for Media account for all users](./media/manage-multiple-tenants/single-video-indexer-account-for-all-users.png) - -With this option, customization models (Person, Language, and Brands) can be shared or isolated between tenants by filtering the models by tenant. - -When [uploading videos](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Upload-Video), you can specify a different partition attribute per tenant. This will allow isolation in the [search API](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Search-Videos). By specifying the partition attribute in the search API you will only get results of the specified partition. - -### Considerations - -* Ability to share content and customization models between tenants. -* One tenant impacts the performance of other tenants. -* Customer needs to build a complex management layer on top of Video Analyzer for Media. - -> [!TIP] -> You can use the [priority](upload-index-videos.md) attribute to prioritize tenants jobs. - -## Azure subscription per tenant - -When using this architecture, each tenant will have their own Azure subscription. For each user, you will create a new Video Analyzer for Media account in the tenant subscription. 
- -![Azure subscription per tenant](./media/manage-multiple-tenants/azure-subscription-per-tenant.png) - -### Considerations - -* This is the only option that enables billing separation. -* This integration has more management overhead than Video Analyzer for Media account per tenant. If billing is not a requirement, it is recommended to use one of the other options described in this article. - -## Next steps - -[Overview](video-indexer-overview.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/regions.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/regions.md deleted file mode 100644 index 722b477361763..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/regions.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Regions in which Azure Video Analyzer for Media (formerly Video Indexer) is available -titleSuffix: Azure Video Analyzer for Media -description: This article talks about Azure regions in which Azure Video Analyzer for Media (formerly Video Indexer) is available. -services: azure-video-analyzer -author: Juliako -manager: femila -ms.topic: article -ms.subservice: azure-video-analyzer-media -ms.date: 09/14/2020 -ms.author: juliako ---- - -# Azure regions in which Video Analyzer for Media exists - -Azure Video Analyzer for Media (formerly Video Indexer) APIs contain a **location** parameter that you should set to the Azure region to which the call should be routed. This must be an [Azure region in which Video Analyzer for Media is available](https://azure.microsoft.com/global-infrastructure/services/?products=cognitive-services®ions=all). - -## Locations - -The `location` parameter must be given the Azure region code name as its value. If you are using Video Analyzer for Media in preview mode, you should put `"trial"` as the value. `trial` is the default value for the `location` parameter. 
Otherwise, to get the code name of the Azure region that your account is in and that your call should be routed to, you can use the Azure portal or run a [Azure CLI](/cli/azure) command. - -### Azure portal - -1. Sign in on the [Video Analyzer for Media](https://www.videoindexer.ai/) website. -1. Select **User accounts** from the top-right corner of the page. -1. Find the location of your account in the top-right corner. - - > [!div class="mx-imgBorder"] - > :::image type="content" source="./media/location/location1.png" alt-text="Location"::: - -### CLI command - -```azurecli-interactive -az account list-locations -``` - -Once you run the line shown above, you get a list of all Azure regions. Navigate to the Azure region that has the *displayName* you are looking for, and use its *name* value for the **location** parameter. - -For example, for the Azure region West US 2 (displayed below), you will use "westus2" for the **location** parameter. - -```json - { - "displayName": "West US 2", - "id": "/subscriptions/00000000-0000-0000-0000-000000000000/locations/westus2", - "latitude": "47.233", - "longitude": "-119.852", - "name": "westus2", - "subscriptionId": null - } -``` - -## Next steps - -- [Customize Language model using APIs](customize-language-model-with-api.md) -- [Customize Brands model using APIs](customize-brands-model-with-api.md) -- [Customize Person model using APIs](customize-person-model-with-api.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/release-notes.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/release-notes.md deleted file mode 100644 index 21554ab612e71..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/release-notes.md +++ /dev/null @@ -1,582 +0,0 @@ ---- -title: Azure Video Analyzer for Media (formerly Video Indexer) release notes | Microsoft Docs -description: To stay up-to-date with the most recent developments, this article provides you with the latest updates 
on Azure Video Analyzer for Media (formerly Video Indexer). -ms.topic: article -ms.custom: references_regions -ms.date: 04/07/2022 -ms.author: juliako ---- - -# Video Analyzer for Media release notes - ->Get notified about when to revisit this page for updates by copying and pasting this URL: `https://docs.microsoft.com/api/search/rss?search=%22Azure+Media+Services+Video+Indexer+release+notes%22&locale=en-us` into your RSS feed reader. - -To stay up-to-date with the most recent Azure Video Analyzer for Media (former Video Indexer) developments, this article provides you with information about: - -* [Important notice](#upcoming-critical-changes) about planned changes -* The latest releases -* Known issues -* Bug fixes -* Deprecated functionality - -## Upcoming critical changes - -> [!Important] -> This section describes a critical upcoming change for the `Upload-Video` API. - - -### Upload-Video API - -In the past, the `Upload-Video` API was tolerant to calls to upload a video from a URL where an empty multipart form body was provided in the C# code, such as: - -```csharp -var content = new MultipartFormDataContent(); -var uploadRequestResult = await client.PostAsync($"{apiUrl}/{accountInfo.Location}/Accounts/{accountInfo.Id}/Videos?{queryParams}", content); -``` - -In the coming weeks, our service will fail requests of this type. - -In order to upload a video from a URL, change your code to send null in the request body: - -```csharp -var uploadRequestResult = await client.PostAsync($"{apiUrl}/{accountInfo.Location}/Accounts/{accountInfo.Id}/Videos?{queryParams}", null); -``` - -## March 2022 release updates - -### Closed Captioning files now support including speakers’ attributes - -Video Analyzer for Media enables you to include speakers' characteristic based on a closed captioning file that you choose to download. 
To include the speakers’ attributes, select Downloads -> Closed Captions -> choose the closed captioning downloadable file format (SRT, VTT, TTML, TXT, or CSV) and check **Include speakers** checkbox. - -### Improvements to the widget offering - -The following improvements were made: - -* Video Analyzer for Media widgets support more than 1 locale in a widget's parameter. -* The Insights widgets support initial search parameters and multiple sorting options. -* The Insights widgets also include a confirmation step before deleting a face to avoid mistakes. -* The widget customization now supports width as strings (for example 100%, 100vw). - -## February 2022 - -### Public preview of Video Analyzer for Media account management based on ARM in Government cloud - -Video Analyzer for Media website is now supporting account management based on ARM in public preview (see, [November 2021 release note](#november-2021)). - -### Leverage open-source code to create ARM based account - -Added new code samples including HTTP calls to use Video Analyzer for Media create, read, update and delete (CRUD) ARM API for solution developers. See [this sample](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/ARM-Samples/Create-Account -). - -## January 2022 - -### Improved audio effects detection - -The audio effects detection capability was improved to have a better detection rate over the following classes: - -* Crowd reactions (cheering, clapping, and booing), -* Gunshot or explosion, -* Laughter - -For more information, see [Audio effects detection](audio-effects-detection.md). - -### New source languages support for STT, translation, and search on the website - -Video Analyzer for Media introduces source languages support for STT (speech-to-text), translation, and search in Hebrew (he-IL), Portuguese (pt-PT), and Persian (fa-IR) on the [Video Analyzer for Media](https://www.videoindexer.ai/) website. 
-It means transcription, translation, and search features are also supported for these languages in Video Analyzer for Media web applications and widgets. - -## December 2021 - -### The projects feature is now GA - -The projects feature is now GA and ready for productive use. There is no pricing impact related to the "Preview to GA" transition. See [Add video clips to your projects](use-editor-create-project.md). - -### New source languages support for STT, translation, and search on API level - -Video Analyzer for Media introduces source languages support for STT (speech-to-text), translation, and search in Hebrew (he-IL), Portuguese (pt-PT), and Persian (fa-IR) on the API level. - -### Matched person detection capability - -When indexing a video through our advanced video settings, you can view the new matched person detection capability. If there are people observed in your media file, you can now view the specific person who matched each of them through the media player. - -## November 2021 - -### Public preview of Video Analyzer for Media account management based on ARM - -Azure Video Analyzer for Media introduces a public preview of Azure Resource Manager (ARM) based account management. You can leverage ARM-based Video Analyzer for Media APIs to create, edit, and delete an account from the [Azure portal](https://portal.azure.com/#home). - -> [!NOTE] -> The Government cloud includes support for CRUD ARM based accounts from Video Analyzer for Media API and from the Azure portal. -> -> There is currently no support from the Video Analyzer for Media [website](https://www.videoindexer.ai). - -For more information go to [create a Video Analyzer for Media account](https://techcommunity.microsoft.com/t5/azure-ai/azure-video-analyzer-for-media-is-now-available-as-an-azure/ba-p/2912422). - -### People’s clothing detection - -When indexing a video through the advanced video settings, you can view the new **People’s clothing detection** capability. 
If there are people detected in your media file, you can now view the clothing type they are wearing through the media player. - -### Face bounding box (preview) - -You can now turn on a bounding box for detected faces during indexing of the media file. The face bounding box feature is available when indexing your file by choosing the **standard**, **basic**, or **advanced** indexing presets. - -You can enable the bounding boxes through the player. - -## October 2021 - -### Embed widgets in your app using Azure Video Analyzer for Media package - -Use the new Azure Video Analyzer for Media (AVAM) `@azure/video-analyzer-for-media-widgets` npm package to add `insights` widgets to your app and customize it according to your needs. - -The new AVAM package enables you to easily embed and communicate between our widgets and your app, instead of adding an `iframe` element to embed the insights widget. Learn more in [Embed and customize Video Analyzer for Media widgets in your app](https://techcommunity.microsoft.com/t5/azure-media-services/embed-and-customize-azure-video-analyzer-for-media-widgets-in/ba-p/2847063).  - -## August 2021 - -### Re-index video or audio files - -There is now an option to re-index video or audio files that have failed during the indexing process. - -### Improve accessibility support - -Fixed bugs related to CSS, theming and accessibility: - -* high contrast -* account settings and insights views in the [portal](https://www.videoindexer.ai). - -## July 2021 - -### Automatic Scaling of Media Reserved Units - -Starting August 1st 2021, Azure Video Analyzer for Media (formerly Video Indexer) enabled [Media Reserved Units (MRUs)](/azure/media-services/latest/concept-media-reserved-units) auto scaling by [Azure Media Services](/azure/media-services/latest/media-services-overview), as a result you do not need to manage them through Azure Video Analyzer for Media. 
That will allow price optimization, for example price reduction in many cases, based on your business needs as it is being auto scaled. - -## June 2021 - -### Video Analyzer for Media deployed in six new regions - -You can now create a Video Analyzer for Media paid account in France Central, Central US, Brazil South, West Central US, Korea Central, and Japan West regions. - -## May 2021 - -### New source languages support for speech-to-text (STT), translation, and search - -Video Analyzer for Media now supports STT, translation, and search in Chinese (Cantonese) ('zh-HK'), Dutch (Netherlands) ('Nl-NL'), Czech ('Cs-CZ'), Polish ('Pl-PL'), Swedish (Sweden) ('Sv-SE'), Norwegian('nb-NO'), Finnish('fi-FI'), Canadian French ('fr-CA'), Thai('th-TH'), -Arabic: (United Arab Emirates) ('ar-AE', 'ar-EG'), (Iraq) ('ar-IQ'), (Jordan) ('ar-JO'), (Kuwait) ('ar-KW'), (Lebanon) ('ar-LB'), (Oman) ('ar-OM'), (Qatar) ('ar-QA'), (Palestinian Authority) ('ar-PS'), (Syria) ('ar-SY'), and Turkish('tr-TR'). - -These languages are available in both API and Video Analyzer for Media website. Select the language from the combobox under **Video source language**. - -### New theme for Azure Video Analyzer for Media - -New theme is available: 'Azure' along with the 'light' and 'dark themes. To select a theme, click on the gear icon in the top-right corner of the website, find themes under **User settings**. - -### New open-source code you can leverage - -Three new Git-Hub projects are available at our [GitHub repository](https://github.com/Azure-Samples/media-services-video-indexer): - -* Code to help you leverage the newly added [widget customization](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/Embedding%20widgets). -* Solution to help you add [custom search](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/VideoSearchWithAutoMLVision) to your video libraries. 
-* Solution to help you add [de-duplication](https://github.com/Azure-Samples/media-services-video-indexer/commit/6b828f598f5bf61ce1b6dbcbea9e8b87ba11c7b1) to your video libraries. - -### New option to toggle bounding boxes (for observed people) on the player - -When indexing a video through our advanced video settings, you can view our new observed people capabilities. If there are people detected in your media file, you can enable a bounding box on the detected person through the media player. - -## April 2021 - -The Video Indexer service was renamed to Azure Video Analyzer for Media. - -### Improved upload experience in the portal - -Video Analyzer for Media has a new upload experience in the [portal](https://www.videoindexer.ai). To upload your media file, press the **Upload** button from the **Media files** tab. - -### New developer portal is available in gov-cloud - -[Video Analyzer for Media Developer Portal](https://api-portal.videoindexer.ai) is now also available in Azure for US Government. - -### Observed people tracing (preview) - -Azure Video Analyzer for Media now detects observed people in videos and provides information such as the location of the person in the video frame and the exact timestamp (start, end) when a person appears. The API returns the bounding box coordinates (in pixels) for each person instance detected, including its confidence. - -For example, if a video contains a person, the detect operation will list the person appearances together with their coordinates in the video frames. You can use this functionality to determine the person path in a video. It also lets you determine whether there are multiple instances of the same person in a video. - -The newly added observed people tracing feature is available when indexing your file by choosing the **Advanced option** -> **Advanced video** or **Advanced video + audio** preset (under Video + audio indexing). Standard and basic indexing presets will not include this new advanced model. 
- -When you choose to see Insights of your video on the Video Analyzer for Media website, the Observed People Tracing will show up on the page with all detected people thumbnails. You can choose a thumbnail of a person and see where the person appears in the video player. - -The feature is also available in the JSON file generated by Video Analyzer for Media. For more information, see [Trace observed people in a video](observed-people-tracing.md). - -### Detected acoustic events with **Audio Effects Detection** (preview) - -You can now see the detected acoustic events in the closed captions file. The file can be downloaded from the Video Analyzer for Media portal and is available as an artifact in the GetArtifact API. - -**Audio Effects Detection** (preview) component detects various acoustics events and classifies them into different acoustic categories (such as Gunshot, Screaming, Crowd Reaction and more). For more information, see [Audio effects detection](audio-effects-detection.md). - -## March 2021 - -### Audio analysis - -Audio analysis is available now in additional new bundle of audio features at different price point. The new **Basic Audio** analysis preset provides a low-cost option to only extract speech transcription, translation and format output captions and subtitles. The **Basic Audio** preset will produce two separate meters on your bill, including a line for transcription and a separate line for caption and subtitle formatting. More information on the pricing, see the [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/) page. - -The newly added bundle is available when indexing or re-indexing your file by choosing the **Advanced option** -> **Basic Audio** preset (under the **Video + audio indexing** drop-down box). 
- -### New developer portal - -Video Analyzer for Media has a new [Developer Portal](https://api-portal.videoindexer.ai/), try out the new Video Analyzer for Media APIs and find all the relevant resources in one place: [GitHub repository](https://github.com/Azure-Samples/media-services-video-indexer), [Stack overflow](https://stackoverflow.com/questions/tagged/video-indexer), [Video Analyzer for Media tech community](https://techcommunity.microsoft.com/t5/azure-media-services/bg-p/AzureMediaServices/label-name/Video%20Indexer) with relevant blog posts, [Video Analyzer for Media FAQs](faq.yml), [User Voice](https://feedback.azure.com/d365community/forum/09041fae-0b25-ec11-b6e6-000d3a4f0858) to provide your feedback and suggest features, and ['CodePen' link](https://codepen.io/videoindexer) with widgets code samples. - -### Advanced customization capabilities for insight widget - -SDK is now available to embed Video Analyzer for Media's insights widget in your own service and customize its style and data. The SDK supports the standard Video Analyzer for Media insights widget and a fully customizable insights widget. Code sample is available in [Video Analyzer for Media GitHub repository](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/Embedding%20widgets/widget-customization). With this advanced customization capabilities, solution developer can apply custom styling and bring customer’s own AI data and present that in the insight widget (with or without Video Analyzer for Media insights). 
- -### Video Analyzer for Media deployed in the US North Central, US West and Canada Central - -You can now create a Video Analyzer for Media paid account in the US North Central, US West and Canada Central regions. - -### New source languages support for speech-to-text (STT), translation and search - -Video Analyzer for Media now supports STT, translation and search in Danish ('da-DK'), Norwegian('nb-NO'), Swedish('sv-SE'), Finnish('fi-FI'), Canadian French ('fr-CA'), Thai('th-TH'), Arabic ('ar-BH', 'ar-EG', 'ar-IQ', 'ar-JO', 'ar-KW', 'ar-LB', 'ar-OM', 'ar-QA', 'ar-SA', and 'ar-SY'), and Turkish('tr-TR'). Those languages are available in both API and Video Analyzer for Media website. - -### Search by Topic in Video Analyzer for Media Website - -You can now use the search feature, at the top of the [Video Analyzer for Media website](https://www.videoindexer.ai/account/login) page, to search for videos with specific topics. - -## February 2021 - -### Multiple account owners - -Account owner role was added to Video Analyzer for Media. You can add, change, and remove users; change their role. For details on how to share an account, see [Invite users](invite-users.md). - -### Audio event detection (public preview) - -> [!NOTE] -> This feature is only available in trial accounts. - -Video Analyzer for Media now detects the following audio effects in the non-speech segments of the content: gunshot, glass shatter, alarm, siren, explosion, dog bark, screaming, laughter, crowd reactions (cheering, clapping, and booing) and Silence. - -The newly added audio effects feature is available when indexing your file by choosing the **Advanced option** -> **Advanced audio** preset (under Video + audio indexing). Standard indexing will only include **silence** and **crowd reaction**. - -The **clapping** event type that was included in the previous audio effects model, is now extracted as part of the **crowd reaction** event type. 
- -When you choose to see **Insights** of your video on the [Video Analyzer for Media](https://www.videoindexer.ai/) website, the Audio Effects show up on the page. - -:::image type="content" source="./media/release-notes/audio-detection.png" alt-text="Audio event detection"::: - -### Named entities enhancement - -The extracted list of people and location was extended and updated in general. - -In addition, the model now includes people and locations in-context which are not famous, like a ‘Sam’ or ‘Home’ in the video. - -## January 2021 - -### Video Analyzer for Media is deployed on US Government cloud - -You can now create a Video Analyzer for Media paid account on US government cloud in Virginia and Arizona regions. -Video Analyzer for Media free trial offering isn't available in the mentioned region. For more information go to Video Analyzer for Media Documentation. - -### Video Analyzer for Media deployed in the India Central region - -You can now create a Video Analyzer for Media paid account in the India Central region. - -### New Dark Mode for the Video Analyzer for Media website experience - -The Video Analyzer for Media website experiences is now available in dark mode. -To enable the dark mode open the settings panel and toggle on the **Dark Mode** option. - -:::image type="content" source="./media/release-notes/dark-mode.png" alt-text="Dark mode setting"::: - -## December 2020 - -### Video Analyzer for Media deployed in the Switzerland West and Switzerland North - -You can now create a Video Analyzer for Media paid account in the Switzerland West and Switzerland North regions. - -## October 2020 - -### Animated character identification improvements - -Video Analyzer for Media supports detection, grouping, and recognition of characters in animated content via integration with Cognitive Services custom vision. 
We added a major improvement to this AI algorithm in the detection and characters recognition, as a result insight accuracy and identified characters are significantly improved. - -### Planned Video Analyzer for Media website authentication changes - -Starting March 1st 2021, you no longer will be able to sign up and sign in to the [Video Analyzer for Media website](https://www.videoindexer.ai/) [developer portal](video-indexer-use-apis.md) using Facebook or LinkedIn. - -You will be able to sign up and sign in using one of these providers: Azure AD, Microsoft, and Google. - -> [!NOTE] -> The Video Analyzer for Media accounts connected to LinkedIn and Facebook will not be accessible after March 1st 2021. -> -> You should [invite](invite-users.md) an Azure AD, Microsoft, or Google email you own to the Video Analyzer for Media account so you will still have access. You can add an additional owner of supported providers, as described in [invite](invite-users.md).
                -> Alternatively, you can create a paid account and migrate the data. - -## August 2020 - -### Mobile design for the Video Analyzer for Media website - -The Video Analyzer for Media website experience is now supporting mobile devices. The user experience is responsive to adapt to your mobile screen size (excluding customization UIs). - -### Accessibility improvements and bug fixes - -As part of WCAG (Web Content Accessibility guidelines), the Video Analyzer for Media website experiences is aligned with grade C, as part of Microsoft Accessibility standards. Several bugs and improvements related to keyboard navigation, programmatic access, and screen reader were solved. - -## July 2020 - -### GA for multi-language identification - -Multi-language identification is moved from preview to GA and ready for productive use. - -There is no pricing impact related to the "Preview to GA" transition. - -### Video Analyzer for Media website improvements - -#### Adjustments in the video gallery - -New search bar for deep insights search with additional filtering capabilities was added. Search results were also enhanced. - -New list view with ability to sort and manage video archive with multiple files. - -#### New panel for easy selection and configuration - -Side panel for easy selection and user configuration was added, allowing simple and quick account creation and sharing as well as setting configuration. - -Side panel is also used for user preferences and help. - -## June 2020 - -### Search by topics - -You can now use the search API to search for videos with specific topics (API only). - -Topics is added as part of the `textScope` (optional parameter). See [API](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Search-Videos) for details. - -### Labels enhancement - -The label tagger was upgraded and now includes more visual labels that can be identified. 
- -## May 2020 - -### Video Analyzer for Media deployed in the East US - -You can now create a Video Analyzer for Media paid account in the East US region. - -### Video Analyzer for Media URL - -Video Analyzer for Media regional endpoints were all unified to start only with www. No action item is required. - -From now on, you reach www.videoindexer.ai whether it is for embedding widgets or logging into Video Analyzer for Media web applications. - -Also wus.videoindexer.ai would be redirected to www. More information is available in [Embed Video Analyzer for Media widgets in your apps](video-indexer-embed-widgets.md). - -## April 2020 - -### New widget parameters capabilities - -The **Insights** widget includes new parameters: `language` and `control`. - -The **Player** widget has a new `locale` parameter. Both `locale` and `language` parameters control the player’s language. - -For more information, see the [widget types](video-indexer-embed-widgets.md#widget-types) section. - -### New player skin - -A new player skin launched with updated design. - -### Prepare for upcoming changes - -* Today, the following APIs return an account object: - - * [Create-Paid-Account](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Create-Paid-Account) - * [Get-Account](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Account) - * [Get-Accounts-Authorization](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Accounts-Authorization) - * [Get-Accounts-With-Token](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Accounts-With-Token) - - The Account object has a `Url` field pointing to the location of the [Video Analyzer for Media website](https://www.videoindexer.ai/). -For paid accounts the `Url` field is currently pointing to an internal URL instead of the public website. 
-In the coming weeks we will change it and return the [Video Analyzer for Media website](https://www.videoindexer.ai/) URL for all accounts (trial and paid). - - Do not use the internal URLs, you should be using the [Video Analyzer for Media public APIs](https://api-portal.videoindexer.ai/). -* If you are embedding Video Analyzer for Media URLs in your applications and the URLs are not pointing to the [Video Analyzer for Media website](https://www.videoindexer.ai/) or the Video Analyzer for Media API endpoint (`https://api.videoindexer.ai`) but rather to a regional endpoint (for example, `https://wus2.videoindexer.ai`), regenerate the URLs. - - You can do it by either: - - * Replacing the URL with a URL pointing to the Video Analyzer for Media widget APIs (for example, the [insights widget](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Video-Insights-Widget)) - * Using the Video Analyzer for Media website to generate a new embedded URL: - - Press **Play** to get to your video's page -> click the **</> Embed** button -> copy the URL into your application: - - The regional URLs are not supported and will be blocked in the coming weeks. - -## January 2020 - -### Custom language support for additional languages - -Video Analyzer for Media now supports custom language models for `ar-SY` , `en-UK`, and `en-AU` (API only). - -### Delete account timeframe action update - -Delete account action now deletes the account within 90 days instead of 48 hours. - -### New Video Analyzer for Media GitHub repository - -A new Video Analyzer for Media GitHub with different projects, getting started guides and code samples is now available: -https://github.com/Azure-Samples/media-services-video-indexer - -### Swagger update - -Video Analyzer for Media unified **authentications** and **operations** into a single [Video Analyzer for Media OpenAPI Specification (swagger)](https://api-portal.videoindexer.ai/api-details#api=Operations&operation). 
Developers can find the APIs in [Video Analyzer for Media Developer Portal](https://api-portal.videoindexer.ai/). - -## December 2019 - -### Update transcript with the new API - -Update a specific section in the transcript using the [Update-Video-Index](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Update-Video-Index) API. - -### Fix account configuration from the Video Analyzer for Media portal - -You can now update Media Services connection configuration in order to self-help with issues like: - -* incorrect Azure Media Services resource -* password changes -* Media Services resources were moved between subscriptions - -To fix the account configuration, in the Video Analyzer for Media portal navigate to Settings > Account tab (as owner). - -### Configure the custom vision account - -Configure the custom vision account on paid accounts using the Video Analyzer for Media portal (previously, this was only supported by API). To do that, sign in to the Video Analyzer for Media portal, choose Model Customization > Animated characters > Configure. - -### Scenes, shots and keyframes – now in one insight pane - -Scenes, shots, and keyframes are now merged into one insight for easier consumption and navigation. When you select the desired scene you can see what shots and keyframes it consists of. - -### Notification about a long video name - -When a video name is longer than 80 characters, Video Analyzer for Media shows a descriptive error on upload. - -### Streaming endpoint is disabled notification - -When streaming endpoint is disabled, Video Analyzer for Media will show a descriptive error on the player page. 
- -### Error handling improvement - -Status code 409 will now be returned from [Re-Index Video](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Re-Index-Video) and [Update Video Index](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Update-Video-Index) APIs in case a video is actively indexed, to prevent overriding the current re-index changes by accident. - -## November 2019 - -* Korean custom language models support - - Video Analyzer for Media now supports custom language models in Korean (`ko-KR`) in both the API and portal. -* New languages supported for speech-to-text (STT) - - Video Analyzer for Media APIs now support STT in Arabic Levantine (ar-SY), English UK dialect (en-GB), and English Australian dialect (en-AU). - - For video upload, we replaced zh-HANS to zh-CN, both are supported but zh-CN is recommended and more accurate. - -## October 2019 - -* Search for animated characters in the gallery - - When indexing animated characters, you can now search for them in the account’s video gallery. For more information, see [Animated characters recognition](animated-characters-recognition.md). - -## September 2019 - -Multiple advancements announced at IBC 2019: - -* Animated character recognition (public preview) - - Ability to detect, group, and recognize characters in animated content, via integration with custom vision. For more information, see [Animated character detection](animated-characters-recognition.md). -* Multi-language identification (public preview) - - Detect segments in multiple languages in the audio track and create a multilingual transcript based on them. Initial support: English, Spanish, German and French. For more information, see [Automatically identify and transcribe multi-language content](multi-language-identification-transcription.md). 
-* Named entity extraction for People and Location - - Extracts brands, locations, and people from speech and visual text via natural language processing (NLP). -* Editorial shot type classification - - Tagging of shots with editorial types such as close up, medium shot, two shot, indoor, outdoor etc. For more information, see [Editorial shot type detection](scenes-shots-keyframes.md#editorial-shot-type-detection). -* Topic inferencing enhancement - now covering level 2 - - The topic inferencing model now supports deeper granularity of the IPTC taxonomy. Read full details at [Azure Media Services new AI-powered innovation](https://azure.microsoft.com/blog/azure-media-services-new-ai-powered-innovation/). - -## August 2019 updates - -### Video Analyzer for Media deployed in UK South - -You can now create a Video Analyzer for Media paid account in the UK south region. - -### New Editorial Shot Type insights available - -New tags added to video shots provides editorial “shot types” to identify them with common editorial phrases used in the content creation workflow such as: extreme closeup, closeup, wide, medium, two shot, outdoor, indoor, left face and right face (Available in the JSON). - -### New People and Locations entities extraction available - -Video Analyzer for Media identifies named locations and people via natural language processing (NLP) from the video’s OCR and transcription. Video Analyzer for Media uses machine learning algorithm to recognize when specific locations (for example, the Eiffel Tower) or people (for example, John Doe) are being called out in a video. - -### Keyframes extraction in native resolution - -Keyframes extracted by Video Analyzer for Media are available in the original resolution of the video. - -### GA for training custom face models from images - -Training faces from images moved from Preview mode to GA (available via API and in the portal). - -> [!NOTE] -> There is no pricing impact related to the "Preview to GA" transition. 
- -### Hide gallery toggle option - -User can choose to hide the gallery tab from the portal (similar to hiding the samples tab). - -### Maximum URL size increased - -Support for URL query string of 4096 (instead of 2048) on indexing a video. - -### Support for multi-lingual projects - -Projects can now be created based on videos indexed in different languages (API only). - -## July 2019 - -### Editor as a widget - -The Video Analyzer for Media AI-editor is now available as a widget to be embedded in customer applications. - -### Update custom language model from closed caption file from the portal - -Customers can provide VTT, SRT, and TTML file formats as input for language models in the customization page of the portal. - -## June 2019 - -### Video Analyzer for Media deployed to Japan East - -You can now create a Video Analyzer for Media paid account in the Japan East region. - -### Create and repair account API (Preview) - -Added a new API that enables you to [update the Azure Media Service connection endpoint or key](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Update-Paid-Account-Azure-Media-Services). - -### Improve error handling on upload - -A descriptive message is returned in case of misconfiguration of the underlying Azure Media Services account. - -### Player timeline Keyframes preview - -You can now see an image preview for each time on the player's timeline. - -### Editor semi-select - -You can now see a preview of all the insights that are selected as a result of choosing a specific insight timeframe in the editor. - -## May 2019 - -### Update custom language model from closed caption file - -[Create custom language model](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Create-Language-Model) and [Update custom language models](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Update-Language-Model) APIs now support VTT, SRT, and TTML file formats as input for language models. 
- -When calling the [Update Video transcript API](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Update-Video-Transcript), the transcript is added automatically. The training model associated with the video is updated automatically as well. For information on how to customize and train your language models, see [Customize a Language model with Video Analyzer for Media](customize-language-model-overview.md). - -### New download transcript formats – TXT and CSV - -In addition to the closed captioning format already supported (SRT, VTT, and TTML), Video Analyzer for Media now supports downloading the transcript in TXT and CSV formats. - -## Next steps - -[Overview](video-indexer-overview.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/scenes-shots-keyframes.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/scenes-shots-keyframes.md deleted file mode 100644 index 2ca0699d3926a..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/scenes-shots-keyframes.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -title: Azure Video Analyzer for Media (formerly Video Indexer) scenes, shots, and keyframes -description: This topic gives an overview of the Azure Video Analyzer for Media (formerly Video Indexer) scenes, shots, and keyframes. -ms.topic: how-to -ms.date: 07/05/2019 -ms.author: juliako ---- - -# Scenes, shots, and keyframes - -Azure Video Analyzer for Media (formerly Video Indexer) supports segmenting videos into temporal units based on structural and semantic properties. This capability enables customers to easily browse, manage, and edit their video content based on varying granularities. For example, based on scenes, shots, and keyframes, described in this topic. - -![Scenes, shots, and keyframes](./media/scenes-shots-keyframes/scenes-shots-keyframes.png) - -## Scene detection - -Video Analyzer for Media determines when a scene changes in video based on visual cues. 
A scene depicts a single event and it is composed of a series of consecutive shots, which are semantically related. A scene thumbnail is the first keyframe of its underlying shot. Video Analyzer for Media segments a video into scenes based on color coherence across consecutive shots and retrieves the beginning and end time of each scene. Scene detection is considered a challenging task as it involves quantifying semantic aspects of videos. - -> [!NOTE] -> Applicable to videos that contain at least 3 scenes. - -## Shot detection - -Video Analyzer for Media determines when a shot changes in the video based on visual cues, by tracking both abrupt and gradual transitions in the color scheme of adjacent frames. The shot's metadata includes a start and end time, as well as the list of keyframes included in that shot. The shots are consecutive frames taken from the same camera at the same time. - -## Keyframe detection - -Video Analyzer for Media selects the frame(s) that best represent each shot. Keyframes are the representative frames selected from the entire video based on aesthetic properties (for example, contrast and stableness). Video Analyzer for Media retrieves a list of keyframe IDs as part of the shot's metadata, based on which customers can extract the keyframe as a high resolution image. - -### Extracting Keyframes - -To extract high-resolution keyframes for your video, you must first upload and index the video. - -![Keyframes](./media/scenes-shots-keyframes/extracting-keyframes.png) - -#### With the Video Analyzer for Media website - -To extract keyframes using the Video Analyzer for Media website, upload and index your video. Once the indexing job is complete, click on the **Download** button and select **Artifacts (ZIP)**. This will download the artifacts folder to your computer. - -![Screenshot that shows the "Download" drop-down with "Artifacts" selected.](./media/scenes-shots-keyframes/extracting-keyframes2.png) - -Unzip and open the folder. 
In the *_KeyframeThumbnail* folder, and you will find all of the keyframes that were extracted from your video. - -#### With the Video Analyzer for Media API - -To get keyframes using the Video Indexer API, upload and index your video using the [Upload Video](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Upload-Video) call. Once the indexing job is complete, call [Get Video Index](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Video-Index). This will give you all of the insights that Video Indexer extracted from your content in a JSON file. - -You will get a list of keyframe IDs as part of each shot's metadata. - -```json -"shots":[ - { - "id":0, - "keyFrames":[ - { - "id":0, - "instances":[ - { - "thumbnailId":"00000000-0000-0000-0000-000000000000", - "start":"0:00:00.209", - "end":"0:00:00.251", - "duration":"0:00:00.042" - } - ] - }, - { - "id":1, - "instances":[ - { - "thumbnailId":"00000000-0000-0000-0000-000000000000", - "start":"0:00:04.755", - "end":"0:00:04.797", - "duration":"0:00:00.042" - } - ] - } - ], - "instances":[ - { - "start":"0:00:00", - "end":"0:00:06.34", - "duration":"0:00:06.34" - } - ] - }, - -] -``` - -You will now need to run each of these keyframe IDs on the [Get Thumbnails](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Video-Thumbnail) call. This will download each of the keyframe images to your computer. - -## Editorial shot type detection - -Keyframes are associated with shots in the output JSON. - -The shot type associated with an individual shot in the insights JSON represents its editorial type. You may find these shot type characteristics useful when editing videos into clips, trailers, or when searching for a specific style of keyframe for artistic purposes. The different types are determined based on analysis of the first keyframe of each shot. Shots are identified by the scale, size, and location of the faces appearing in their first keyframe. 
- -The shot size and scale are determined based on the distance between the camera and the faces appearing in the frame. Using these properties, Video Analyzer for Media detects the following shot types: - -* Wide: shows an entire person’s body. -* Medium: shows a person's upper-body and face. -* Close up: mainly shows a person’s face. -* Extreme close-up: shows a person’s face filling the screen. - -Shot types can also be determined by location of the subject characters with respect to the center of the frame. This property defines the following shot types in Video Analyzer for Media: - -* Left face: a person appears in the left side of the frame. -* Center face: a person appears in the central region of the frame. -* Right face: a person appears in the right side of the frame. -* Outdoor: a person appears in an outdoor setting. -* Indoor: a person appears in an indoor setting. - -Additional characteristics: - -* Two shots: shows two persons’ faces of medium size. -* Multiple faces: more than two persons. - - -## Next steps - -[Examine the Video Analyzer for Media output produced by the API](video-indexer-output-json-v2.md#scenes) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/toc.yml b/articles/azure-video-analyzer/video-analyzer-for-media-docs/toc.yml deleted file mode 100644 index 99bc3d8da6dc0..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/toc.yml +++ /dev/null @@ -1,145 +0,0 @@ -- name: Video Analyzer for Media documentation - href: ./index.yml -- name: Overview - items: - - name: What is Video Analyzer for Media? 
- href: video-indexer-overview.md - - name: About Video Analyzer - href: ../video-analyzer-docs/overview.md - - name: Language support - href: language-support.md -- name: Quickstarts - expanded: true - items: - - name: Get started - href: video-indexer-get-started.md - - name: Invite users - href: invite-users.md -- name: Tutorials - items: - - name: Create a new ARM account (Preview) - href: create-video-analyzer-for-media-account.md - - name: Create a new account - href: connect-to-azure.md - - name: Use Video Analyzer for Media API - href: video-indexer-use-apis.md - - name: Logic Apps connector - href: logic-apps-connector-tutorial.md - - name: Deploy using ARM template - href: deploy-with-arm-template.md - - name: Index Video from OneDrive - href: odrv-download.md -- name: Samples - items: - - name: Video Analyzer for Media samples - href: https://github.com/Azure-Samples/media-services-video-indexer -- name: Concepts - items: - - name: Overview - href: concepts-overview.md - - name: Compare Video Analyzer for Media and Media Services presets - href: compare-video-indexer-with-media-services-presets.md - - name: Manage multiple tenants - href: manage-multiple-tenants.md - - name: Language identification model - href: language-identification-model.md - - name: Live stream analysis - href: live-stream-analysis.md - - name: Observed people in a video - href: observed-people-tracing.md - - name: Matched person - href: matched-person.md - - name: People's detected clothing - href: detected-clothing.md - - name: Audio effects detection - href: audio-effects-detection.md - - name: Customizing content models - items: - - name: Overview - href: customize-content-models-overview.md - - name: Animated characters - href: animated-characters-recognition.md - - name: Brands - href: customize-brands-model-overview.md - - name: Language - href: customize-language-model-overview.md - - name: Person - href: customize-person-model-overview.md -- name: How to guides - items: - - 
name: Connect an existing account to ARM (Preview) - href: connect-classic-account-to-arm.md - - name: Manage account connected to Azure - href: manage-account-connected-to-azure.md - - name: Upload and index videos - href: upload-index-videos.md - - name: Examine Video Analyzer for Media output - href: video-indexer-output-json-v2.md - - name: Find exact moments within videos - displayName: search - href: video-indexer-search.md - - name: Detect scenes, shots, keyframes - href: scenes-shots-keyframes.md - - name: Identify and transcribe multi-language content - href: multi-language-identification-transcription.md - - name: View and edit insights - href: video-indexer-view-edit.md - - name: Use editor to create projects - href: use-editor-create-project.md - - name: Embed widgets into your application - href: video-indexer-embed-widgets.md - - name: Considerations when using Video Analyzer for Media at scale - href: considerations-when-use-at-scale.md - - name: Disaster recovery - href: video-indexer-disaster-recovery.md - - name: Customize content models - items: - - name: Animated characters - href: animated-characters-recognition-how-to.md - - name: Person - items: - - name: using the website - href: customize-person-model-with-website.md - - name: using the API - href: customize-person-model-with-api.md - - name: Brands - items: - - name: using the website - href: customize-brands-model-with-website.md - - name: using the API - href: customize-brands-model-with-api.md - - name: Language - items: - - name: using the website - href: customize-language-model-with-website.md - - name: using the API - href: customize-language-model-with-api.md -- name: Reference - items: - - name: Video Analyzer for Media API - href: https://api-portal.videoindexer.ai/ - - name: Video Analyzer for Media ARM REST API - href: /rest/api/videoindexer/accounts?branch=videoindex -- name: Resources - items: - - name: Azure Roadmap - href: 
https://azure.microsoft.com/roadmap/?category=web-mobile - - name: Pricing - href: https://azure.microsoft.com/pricing/details/media-services/ - - name: Regional availability - href: https://azure.microsoft.com/global-infrastructure/services/ - - name: Regions - displayName: location - href: regions.md - - name: FAQ - href: faq.yml - - name: Compliance - href: https://gallery.technet.microsoft.com/Overview-of-Azure-c1be3942 - - name: Release notes - href: release-notes.md - - name: Stack Overflow - href: https://stackoverflow.com/search?q=video-indexer - - name: User voice - href: https://aka.ms/UserVoiceVI - - name: Blogs - href: https://azure.microsoft.com/blog/tag/video-indexer/ diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-disaster-recovery.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-disaster-recovery.md deleted file mode 100644 index 7ec39b9545d58..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-disaster-recovery.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Azure Video Analyzer for Media (formerly Video Indexer) failover and disaster recovery -titleSuffix: Azure Video Analyzer for Media -description: Learn how to failover to a secondary Azure Video Analyzer for Media (formerly Video Indexer) account if a regional datacenter failure or disaster occurs. -services: azure-video-analyzer -documentationcenter: '' -author: juliako -manager: femila -editor: '' -ms.workload: -ms.topic: article -ms.subservice: azure-video-analyzer-media -ms.custom: -ms.date: 07/29/2019 -ms.author: juliako ---- -# Video Analyzer for Media failover and disaster recovery - -Azure Video Analyzer for Media (formerly Video Indexer) doesn't provide instant failover of the service if there's a regional datacenter outage or failure. 
This article explains how to configure your environment for a failover to ensure optimal availability for apps and minimized recovery time if a disaster occurs. - -We recommend that you configure business continuity disaster recovery (BCDR) across regional pairs to benefit from Azure's isolation and availability policies. For more information, see [Azure paired regions](../../availability-zones/cross-region-replication-azure.md). - -## Prerequisites - -An Azure subscription. If you don't have an Azure subscription yet, sign up for [Azure free trial](https://azure.microsoft.com/free/). - -## Failover to a secondary account - -To implement BCDR, you need to have two Video Analyzer for Media accounts to handle redundancy. - -1. Create two Video Analyzer for Media accounts connected to Azure (see [Create a Video Analyzer for Media account](connect-to-azure.md)). Create one account for your primary region and the other in the paired Azure region. -1. If there's a failure in your primary region, switch to indexing using the secondary account. - -> [!TIP] -> You can automate BCDR by setting up activity log alerts for service health notifications as per [Create activity log alerts on service notifications](../../service-health/alerts-activity-log-service-notifications-portal.md). - -For information about using multiple tenants, see [Manage multiple tenants](manage-multiple-tenants.md). To implement BCDR, choose one of these two options: [Video Analyzer for Media account per tenant](./manage-multiple-tenants.md#video-analyzer-for-media-account-per-tenant) or [Azure subscription per tenant](./manage-multiple-tenants.md#azure-subscription-per-tenant). - -## Next steps - -[Manage a Video Analyzer for Media account connected to Azure](manage-account-connected-to-azure.md). 
diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-get-started.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-get-started.md deleted file mode 100644 index 28635b4f53691..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-get-started.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Sign up for Azure Video Analyzer for Media (formerly Video Indexer) and upload your first video - Azure -description: Learn how to sign up and upload your first video using the Azure Video Analyzer for Media (formerly Video Indexer) portal. -ms.topic: quickstart -ms.subservice: azure-video-analyzer-media -ms.date: 01/25/2021 -ms.author: juliako -ms.custom: mode-other ---- - -# Quickstart: How to sign up and upload your first video - -This getting started quickstart shows how to sign in to the Azure Video Analyzer for Media (formerly Video Indexer) website and how to upload your first video. - -When creating a Video Analyzer for Media account, you can choose a free trial account (where you get a certain number of free indexing minutes) or a paid option (where you are not limited by the quota). With free trial, Video Analyzer for Media provides up to 600 minutes of free indexing to website users and up to 2400 minutes of free indexing to API users. With paid option, you create a Video Analyzer for Media account that is [connected to your Azure subscription and an Azure Media Services account](connect-to-azure.md). You pay for minutes indexed, for more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/). - -## Sign up for Video Analyzer for Media - -To start developing with Video Analyzer for Media, browse to the [Video Analyzer for Media](https://www.videoindexer.ai/) website and sign up. - -Once you start using Video Analyzer for Media, all your stored data and uploaded content are encrypted at rest with a Microsoft managed key. 
- -> [!NOTE] -> Review [planned Video Analyzer for Media website authentication changes](./release-notes.md#planned-video-analyzer-for-media-website-authenticatication-changes). - -## Upload a video using the Video Analyzer for Media website - -### Supported file formats for Video Analyzer for Media - -See the [input container/file formats](/azure/media-services/latest/encode-media-encoder-standard-formats-reference) article for a list of file formats that you can use with Video Analyzer for Media. - -### Upload a video - -1. Sign in on the [Video Analyzer for Media](https://www.videoindexer.ai/) website. -1. To upload a video, press the **Upload** button or link. - - > [!NOTE] - > The name of the video must be no greater than 80 characters. - - > [!div class="mx-imgBorder"] - > :::image type="content" source="./media/video-indexer-get-started/video-indexer-upload.png" alt-text="Upload"::: -1. Once your video has been uploaded, Video Analyzer for Media starts indexing and analyzing the video. You see the progress. - - > [!div class="mx-imgBorder"] - > :::image type="content" source="./media/video-indexer-get-started/progress.png" alt-text="Progress of the upload"::: -1. Once Video Analyzer for Media is done analyzing, you will get an email with a link to your video and a short description of what was found in your video. For example: people, spoken and written words, topics, and named entities. -1. You can later find your video in the library list and perform different operations. For example: search, re-index, edit. - - > [!div class="mx-imgBorder"] - > :::image type="content" source="./media/video-indexer-get-started/uploaded.png" alt-text="Uploaded the upload"::: - -## Supported browsers - -For more information, see [supported browsers](video-indexer-overview.md#supported-browsers). - -## See also - -See [Upload and index videos](upload-index-videos.md) for more details. 
- -After you upload and index a video, you can start using [Video Analyzer for Media website](video-indexer-view-edit.md) or [Video Analyzer for Media Developer Portal](video-indexer-use-apis.md) to see the insights of the video. - -[Start using APIs](video-indexer-use-apis.md) - -## Next steps - -For detailed introduction please visit our [introduction lab](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/IntroToVideoIndexer.md). - -At the end of the workshop you will have a good understanding of the kind of information that can be extracted from video and audio content, you will be more prepared to identify opportunities related to content intelligence, pitch video AI on Azure, and demo several scenarios on Video Analyzer for Media. diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-overview.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-overview.md deleted file mode 100644 index de23400cac761..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-overview.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -title: What is Azure Video Analyzer for Media (formerly Video Indexer)? -description: This article gives an overview of the Azure Video Analyzer for Media (formerly Video Indexer) service. -ms.topic: overview -ms.date: 02/15/2022 -ms.author: juliako ---- - -# What is Video Analyzer for Media? - -[!INCLUDE [regulation](./includes/regulation.md)] - -Azure Video Analyzer for Media (formerly Video Indexer) is a cloud application, part of Azure Applied AI Services, built on Azure Media Services and Azure Cognitive Services (such as the Face, Translator, Computer Vision, and Speech). It enables you to extract the insights from your videos using Video Analyzer for Media video and audio models. - -To start extracting insights with Video Analyzer for Media, you need to create an account and upload videos. 
When you upload your videos to Video Analyzer for Media, it analyzes both visuals and audio by running different AI models. As Video Analyzer for Media analyzes your video, the insights are extracted by the AI models. - -When you create a Video Analyzer for Media account and connect it to Media Services, the media and metadata files are stored in the Azure storage account associated with that Media Services account. For more information, see [Create a Video Analyzer for Media account connected to Azure](connect-to-azure.md). - -The following diagram is an illustration and not a technical explanation of how Video Analyzer for Media works in the backend. - -> [!div class="mx-imgBorder"] -> :::image type="content" source="./media/video-indexer-overview/model-chart.png" alt-text="Video Analyzer for Media flow diagram"::: - -## Compliance, Privacy and Security - -As an important reminder, you must comply with all applicable laws in your use of Video Analyzer for Media, and you may not use Video Analyzer for Media or any Azure service in a manner that violates the rights of others, or that may be harmful to others. - -Before uploading any video/image to Video Analyzer for Media, You must have all the proper rights to use the video/image, including, where required by law, all the necessary consents from individuals (if any) in the video/image, for the use, processing, and storage of their data in Video Analyzer for Media and Azure. Some jurisdictions may impose special legal requirements for the collection, online processing and storage of certain categories of data, such as biometric data. Before using Video Analyzer for Media and Azure for the processing and storage of any data subject to special legal requirements, You must ensure compliance with any such legal requirements that may apply to You. 
- -To learn about compliance, privacy and security in Video Analyzer for Media please visit the Microsoft [Trust Center](https://www.microsoft.com/TrustCenter/CloudServices/Azure/default.aspx). For Microsoft's privacy obligations, data handling and retention practices, including how to delete your data, please review Microsoft's [Privacy Statement](https://privacy.microsoft.com/PrivacyStatement), the [Online Services Terms](https://www.microsoft.com/licensing/product-licensing/products?rtc=1) ("OST") and [Data Processing Addendum](https://www.microsoftvolumelicensing.com/DocumentSearch.aspx?Mode=3&DocumentTypeId=67) ("DPA"). By using Video Analyzer for Media, you agree to be bound by the OST, DPA and the Privacy Statement. - -## What can I do with Video Analyzer for Media? - -Video Analyzer for Media's insights can be applied to many scenarios, among them are: - -* *Deep search*: Use the insights extracted from the video to enhance the search experience across a video library. For example, indexing spoken words and faces can enable the search experience of finding moments in a video where a person spoke certain words or when two people were seen together. Search based on such insights from videos is applicable to news agencies, educational institutes, broadcasters, entertainment content owners, enterprise LOB apps, and in general to any industry that has a video library that users need to search against. -* *Content creation*: Create trailers, highlight reels, social media content, or news clips based on the insights Video Analyzer for Media extracts from your content. Keyframes, scenes markers, and timestamps for the people and label appearances make the creation process much smoother and easier, and allows you to get to the parts of the video you need for the content you're creating. 
-* *Accessibility*: Whether you want to make your content available for people with disabilities or if you want your content to be distributed to different regions using different languages, you can use the transcription and translation provided by Video Analyzer for Media in multiple languages. -* *Monetization*: Video Analyzer for Media can help increase the value of videos. For example, industries that rely on ad revenue (news media, social media, and so on) can deliver relevant ads by using the extracted insights as additional signals to the ad server. -* *Content moderation*: Use textual and visual content moderation models to keep your users safe from inappropriate content and validate that the content you publish matches your organization's values. You can automatically block certain videos or alert your users about the content. -* *Recommendations*: Video insights can be used to improve user engagement by highlighting the relevant video moments to users. By tagging each video with additional metadata, you can recommend to users the most relevant videos and highlight the parts of the video that will match their needs. - -## Features - -The following list shows the insights you can retrieve from your videos using Video Analyzer for Media video and audio models: - -### Video insights - -* **Face detection**: Detects and groups faces appearing in the video. -* **Celebrity identification**: Video Analyzer for Media automatically identifies over 1 million celebrities—like world leaders, actors, actresses, athletes, researchers, business, and tech leaders across the globe. The data about these celebrities can also be found on various websites (IMDB, Wikipedia, and so on). -* **Account-based face identification**: Video Analyzer for Media trains a model for a specific account. It then recognizes faces in the video based on the trained model. 
For more information, see [Customize a Person model from the Video Analyzer for Media website](customize-person-model-with-website.md) and [Customize a Person model with the Video Analyzer for Media API](customize-person-model-with-api.md). -* **Thumbnail extraction for faces** ("best face"): Automatically identifies the best captured face in each group of faces (based on quality, size, and frontal position) and extracts it as an image asset. -* **Visual text recognition** (OCR): Extracts text that's visually displayed in the video. -* **Visual content moderation**: Detects adult and/or racy visuals. -* **Labels identification**: Identifies visual objects and actions displayed. -* **Scene segmentation**: Determines when a scene changes in video based on visual cues. A scene depicts a single event and it's composed by a series of consecutive shots, which are semantically related. -* **Shot detection**: Determines when a shot changes in video based on visual cues. A shot is a series of frames taken from the same motion-picture camera. For more information, see [Scenes, shots, and keyframes](scenes-shots-keyframes.md). -* **Black frame detection**: Identifies black frames presented in the video. -* **Keyframe extraction**: Detects stable keyframes in a video. -* **Rolling credits**: Identifies the beginning and end of the rolling credits in the end of TV shows and movies. -* **Animated characters detection** (preview): Detection, grouping, and recognition of characters in animated content via integration with [Cognitive Services custom vision](https://azure.microsoft.com/services/cognitive-services/custom-vision-service/). For more information, see [Animated character detection](animated-characters-recognition.md). -* **Editorial shot type detection**: Tagging shots based on their type (like wide shot, medium shot, close up, extreme close up, two shot, multiple people, outdoor and indoor, and so on). 
For more information, see [Editorial shot type detection](scenes-shots-keyframes.md#editorial-shot-type-detection). -* **Observed People Tracking** (preview): detects observed people in videos and provides information such as the location of the person in the video frame (using bounding boxes) and the exact timestamp (start, end) and confidence when a person appears. For more information, see [Trace observed people in a video](observed-people-tracing.md). - * **People's detected clothing**: detects the clothing types of people appearing in the video and provides information such as long or short sleeves, long or short pants and skirt or dress. The detected clothing is associated with the people wearing it and the exact timestamp (start,end) along with a confidence level for the detection are provided. -* **Matched person**: matches between people that were observed in the video with the corresponding faces detected. The matching between the observed people and the faces contains a confidence level. - -### Audio insights - -* **Audio transcription**: Converts speech to text over 50 languages and allows extensions. Supported languages include English US, English United Kingdom, English Australia, Spanish, Spanish(Mexico), French, French(Canada), German, Italian, Mandarin Chinese, Chinese (Cantonese, Traditional), Chinese (Simplified), Japanese, Russian, Portuguese, Hindi, Czech, Dutch, Polish, Danish, Norwegian, Finnish, Swedish, Thai, Turkish, Korean, Arabic(Egypt), Arabic(Syrian Arab Republic), Arabic(Israel), Arabic(Iraq), Arabic(Jordan), Arabic(Kuwait), Arabic(Lebanon), Arabic(Oman), Arabic(Qatar), Arabic(Saudi Arabia), Arabic(United Arab Emirates), Arabic(Palestinian Authority) and Arabic Modern Standard (Bahrain). -* **Automatic language detection**: Automatically identifies the dominant spoken language. Supported languages include English, Spanish, French, German, Italian, Mandarin Chinese, Japanese, Russian, and Portuguese. 
If the language can't be identified with confidence, Video Analyzer for Media assumes the spoken language is English. For more information, see [Language identification model](language-identification-model.md). -* **Multi-language speech identification and transcription**: Automatically identifies the spoken language in different segments from audio. It sends each segment of the media file to be transcribed and then combines the transcription back to one unified transcription. For more information, see [Automatically identify and transcribe multi-language content](multi-language-identification-transcription.md). -* **Closed captioning**: Creates closed captioning in three formats: VTT, TTML, SRT. -* **Two channel processing**: Auto detects separate transcript and merges to single timeline. -* **Noise reduction**: Clears up telephony audio or noisy recordings (based on Skype filters). -* **Transcript customization** (CRIS): Trains custom speech to text models to create industry-specific transcripts. For more information, see [Customize a Language model from the Video Analyzer for Media website](customize-language-model-with-website.md) and [Customize a Language model with the Video Analyzer for Media APIs](customize-language-model-with-api.md). -* **Speaker enumeration**: Maps and understands which speaker spoke which words and when. Sixteen speakers can be detected in a single audio-file. -* **Speaker statistics**: Provides statistics for speakers' speech ratios. -* **Textual content moderation**: Detects explicit text in the audio transcript. -* **Audio effects** (preview): Detects the following audio effects in the non-speech segments of the content: Gunshot, Glass shatter, Alarm, Siren, Explosion, Dog Bark, Screaming, Laughter, Crowd reactions (cheering, clapping, and booing) and Silence. Note: the full set of events is available only when choosing ‘Advanced Audio Analysis’ in upload preset, otherwise only ‘Silence’ and ‘Crowd reaction’ will be available. 
-* **Emotion detection**: Identifies emotions based on speech (what's being said) and voice tonality (how it's being said). The emotion could be joy, sadness, anger, or fear. -* **Translation**: Creates translations of the audio transcript to 54 different languages. -* **Audio effects detection** (preview): Detects the following audio effects in the non-speech segments of the content: alarm or siren, dog barking, crowd reactions (cheering, clapping, and booing), gunshot or explosion, laughter, breaking glass, and silence. - - The detected acoustic events are in the closed captions file. The file can be downloaded from the Video Analyzer for Media portal. For more information, see [Audio effects detection](audio-effects-detection.md). - - > [!NOTE] - > The full set of events is available only when you choose **Advanced Audio Analysis** when uploading a file, in upload preset. By default, only silence is detected. - -### Audio and video insights (multi-channels) - -When indexing by one channel, partial result for those models will be available. - -* **Keywords extraction**: Extracts keywords from speech and visual text. -* **Named entities extraction**: Extracts brands, locations, and people from speech and visual text via natural language processing (NLP). -* **Topic inference**: Extracts topics based on various keywords (i.e. keywords 'Stock Exchange', 'Wall Street' will produce the topic 'Economics'). The model uses three different ontologies ([IPTC](https://iptc.org/standards/media-topics/), [Wikipedia](https://www.wikipedia.org/) and the Video Indexer hierarchical topic ontology). The model uses transcription (spoken words), OCR content (visual text), and celebrities recognized in the video using the Video Indexer facial recognition model. -* **Artifacts**: Extracts rich set of "next level of details" artifacts for each of the models. -* **Sentiment analysis**: Identifies positive, negative, and neutral sentiments from speech and visual text. 
- -## How can I get started with Video Analyzer for Media? - -You can access Video Analyzer for Media capabilities in three ways: - -* Video Analyzer for Media portal: An easy to use solution that lets you evaluate the product, manage the account, and customize models. - - For more information about the portal, see [Get started with the Video Analyzer for Media website](video-indexer-get-started.md). - -* API integration: All of Video Analyzer for Media's capabilities are available through a REST API, which lets you integrate the solution into your apps and infrastructure. - - To get started as a developer, see [Use Video Analyzer for Media REST API](video-indexer-use-apis.md). - -* Embeddable widget: Lets you embed the Video Analyzer for Media insights, player, and editor experiences into your app. - - For more information, see [Embed visual widgets in your application](video-indexer-embed-widgets.md). - -If you're using the website, the insights are added as metadata and are visible in the portal. If you're using APIs, the insights are available as a JSON file. - -## Supported browsers - -The following list shows the supported browsers that you can use for the Video Analyzer for Media website and for your apps that embed the widgets. The list also shows the minimum supported browser version: - -- Edge, version: 16 -- Firefox, version: 54 -- Chrome, version: 58 -- Safari, version: 11 -- Opera, version: 44 -- Opera Mobile, version: 59 -- Android Browser, version: 81 -- Samsung Browser, version: 7 -- Chrome for Android, version: 87 -- Firefox for Android, version: 83 - -## Next steps - -You're ready to get started with Video Analyzer for Media. For more information, see the following articles: - -- [Get started with the Video Analyzer for Media website](video-indexer-get-started.md). -- [Process content with Video Analyzer for Media REST API](video-indexer-use-apis.md). -- [Embed visual widgets in your application](video-indexer-embed-widgets.md). 
diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-search.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-search.md deleted file mode 100644 index eb60de97526fd..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-search.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Search for exact moments in videos with Azure Video Analyzer for Media (formerly Video Indexer) -description: Learn how to search for exact moments in videos using Azure Video Analyzer for Media (formerly Video Indexer). -ms.topic: how-to -ms.date: 11/23/2019 -ms.author: juliako ---- - -# Search for exact moments in videos with Video Analyzer for Media - -This topic shows you how to use the Azure Video Analyzer for Media (formerly Video Indexer) website to search for exact moments in videos. - -1. Go to the [Video Analyzer for Media](https://www.videoindexer.ai/) website and sign in. -1. Specify the search keywords and the search will be performed among all videos in your account's library. - - You can filter your search by selecting **Filters**. In below example, we search for "Microsoft" that appears as an on-screen text only (OCR). - - :::image type="content" source="./media/video-indexer-search/filter.png" alt-text="Filter, text only"::: -1. Press **Search** to see the result. - - :::image type="content" source="./media/video-indexer-search/results.png" alt-text="Video search result"::: - - If you select one of the results, the player brings you to that exact moment in the video. -1. View and search the summarized insights of the video by clicking **Play** on the video or selecting one of your original search results. - - You can view, search, edit the **insights**. When you select one of the insights, the player brings you to that exact moment in the video. 
- - :::image type="content" source="./media/video-indexer-search/insights.png" alt-text="View, search and edit the insights of the video"::: - - If you embed the video through Video Analyzer for Media widgets, you can achieve the player/insights view and synchronization in your app. For more information, see [Embed Video Analyzer for Media widgets into your app](video-indexer-embed-widgets.md). -1. You can view, search, and edit the transcripts by clicking on the **Timeline** tab. - - :::image type="content" source="./media/video-indexer-search/timeline.png" alt-text="View, search and edit the transcripts of the video"::: - - To edit the text, select **Edit** from the top-right corner and change the text as you need. - - You can also translate and download the transcripts by selecting the appropriate option from the top-right corner. - -## Embed, download, create projects - -You can embed your video by selecting **Embed** under your video. For details, see [Embed visual widgets in your application](video-indexer-embed-widgets.md). - -You can download the source video, insights of the video, transcripts by clicking **Download** under your video. - -You can create a clip based on your video of specific lines and moments by clicking **Open in editor**. Then editing the video, and saving the project. For details, see [Use your videos' deep insights](use-editor-create-project.md). 
- -:::image type="content" source="./media/video-indexer-search/embed-download-create-projects.png" alt-text="Embed, download, create projects of the video"::: - -## Next steps - -[Process content with Video Analyzer for Media REST API](video-indexer-use-apis.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-use-apis.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-use-apis.md deleted file mode 100644 index 3924984b2a7b9..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-use-apis.md +++ /dev/null @@ -1,216 +0,0 @@ ---- -title: Use the Azure Video Analyzer for Media (formerly Video Indexer) API -description: This article describes how to get started with Azure Video Analyzer for Media (formerly Video Indexer) API. -ms.date: 01/07/2021 -ms.topic: tutorial -ms.custom: devx-track-csharp ---- - -# Tutorial: Use the Video Analyzer for Media API - -Azure Video Analyzer for Media (formerly Video Indexer) consolidates various audio and video artificial intelligence (AI) technologies offered by Microsoft into one integrated service, making development simpler. The APIs are designed to enable developers to focus on consuming Media AI technologies without worrying about scale, global reach, availability, and reliability of cloud platforms. You can use the API to upload your files, get detailed video insights, get URLs of embeddable insight and player widgets, and more. - -When creating a Video Analyzer for Media account, you can choose a free trial account (where you get a certain number of free indexing minutes) or a paid option (where you're not limited by the quota). With a free trial, Video Analyzer for Media provides up to 600 minutes of free indexing to website users and up to 2400 minutes of free indexing to API users. 
With a paid option, you create a Video Analyzer for Media account that's [connected to your Azure subscription and an Azure Media Services account](connect-to-azure.md). You pay for minutes indexed, for more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/). - -This article shows how the developers can take advantage of the [Video Analyzer for Media API](https://api-portal.videoindexer.ai/). - -## Subscribe to the API - -1. Sign in to [Video Analyzer for Media Developer Portal](https://api-portal.videoindexer.ai/). - - Review a release note regarding [login information](release-notes.md#october-2020). - - ![Sign in to Video Analyzer for Media Developer Portal](./media/video-indexer-use-apis/sign-in.png) - - > [!Important] - > * You must use the same provider you used when you signed up for Video Analyzer for Media. - > * Personal Google and Microsoft (Outlook/Live) accounts can only be used for trial accounts. Accounts connected to Azure require Azure AD. - > * There can be only one active account per email. If a user tries to sign in with user@gmail.com for LinkedIn and later with user@gmail.com for Google, the latter will display an error page, saying the user already exists. - -2. Subscribe. - - Select the [Products](https://api-portal.videoindexer.ai/products) tab. Then, select Authorization and subscribe. - - ![Products tab in Video Indexer Developer Portal](./media/video-indexer-use-apis/authorization.png) - - > [!NOTE] - > New users are automatically subscribed to Authorization. - - After you subscribe, you can find your subscription under **Products** -> **Authorization**. In the subscription page, you will find the primary and secondary keys. The keys should be protected. The keys should only be used by your server code. They shouldn't be available on the client side (.js, .html, and so on). 
- - ![Subscription and keys in Video Indexer Developer Portal](./media/video-indexer-use-apis/subscriptions.png) - -> [!TIP] -> Video Analyzer for Media user can use a single subscription key to connect to multiple Video Analyzer for Media accounts. You can then link these Video Analyzer for Media accounts to different Media Services accounts. - -## Obtain access token using the Authorization API - -Once you subscribe to the Authorization API, you can obtain access tokens. These access tokens are used to authenticate against the Operations API. - -Each call to the Operations API should be associated with an access token, matching the authorization scope of the call. - -- User level: User level access tokens let you perform operations on the **user** level. For example, get associated accounts. -- Account level: Account level access tokens let you perform operations on the **account** level or the **video** level. For example, upload video, list all videos, get video insights, and so on. -- Video level: Video level access tokens let you perform operations on a specific **video**. For example, get video insights, download captions, get widgets, and so on. - -You can control the permission level of tokens in two ways: - -* For **Account** tokens, you can use the **Get Account Access Token With Permission** API and specify the permission type (**Reader**/**Contributor**/**MyAccessManager**/**Owner**). -* For all types of tokens (including **Account** tokens), you can specify **allowEdit=true/false**. **false** is the equivalent of a **Reader** permission (read-only) and **true** is the equivalent of a **Contributor** permission (read-write). - -For most server-to-server scenarios, you'll probably use the same **account** token since it covers both **account** operations and **video** operations. 
However, if you're planning to make client side calls to Video Analyzer for Media (for example, from JavaScript), you would want to use a **video** access token to prevent clients from getting access to the entire account. That's also the reason that when embedding Video Analyzer for Media client code in your client (for example, using **Get Insights Widget** or **Get Player Widget**), you must provide a **video** access token. - -To make things easier, you can use the **Authorization** API > **GetAccounts** to get your accounts without obtaining a user token first. You can also ask to get the accounts with valid tokens, enabling you to skip an additional call to get an account token. - -Access tokens expire after 1 hour. Make sure your access token is valid before using the Operations API. If it expires, call the Authorization API again to get a new access token. - -You're ready to start integrating with the API. Find [the detailed description of each Video Analyzer for Media REST API](https://api-portal.videoindexer.ai/). - -## Account ID - -The Account ID parameter is required in all operational API calls. Account ID is a GUID that can be obtained in one of the following ways: - -* Use the **Video Analyzer for Media website** to get the Account ID: - - 1. Browse to the [Video Analyzer for Media](https://www.videoindexer.ai/) website and sign in. - 2. Browse to the **Settings** page. - 3. Copy the account ID. - - ![Video Analyzer for Media settings and account ID](./media/video-indexer-use-apis/account-id.png) - -* Use **Video Analyzer for Media Developer Portal** to programmatically get the Account ID. - - Use the [Get account](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Account) API. - - > [!TIP] - > You can generate access tokens for the accounts by defining `generateAccessTokens=true`. - -* Get the account ID from the URL of a player page in your account. 
- - When you watch a video, the ID appears after the `accounts` section and before the `videos` section. - - ``` - https://www.videoindexer.ai/accounts/00000000-f324-4385-b142-f77dacb0a368/videos/d45bf160b5/ - ``` - -## Recommendations - -This section lists some recommendations when using Video Analyzer for Media API. - -- If you're planning to upload a video, it's recommended to place the file in some public network location (for example, an Azure Blob Storage account). Get the link to the video and provide the URL as the upload file param. - - The URL provided to Video Analyzer for Media must point to a media (audio or video) file. An easy verification for the URL (or SAS URL) is to paste it into a browser, if the file starts playing/downloading, it's likely a good URL. If the browser is rendering some visualization, it's likely not a link to a file but to an HTML page. - -- When you call the API that gets video insights for the specified video, you get a detailed JSON output as the response content. [See details about the returned JSON in this topic](video-indexer-output-json-v2.md). - -## Code sample - -The following C# code snippet demonstrates the usage of all the Video Analyzer for Media APIs together. 
- -```csharp -var apiUrl = "https://api.videoindexer.ai"; -var accountId = "..."; -var location = "westus2"; // replace with the account's location, or with “trial” if this is a trial account -var apiKey = "..."; - -System.Net.ServicePointManager.SecurityProtocol = System.Net.ServicePointManager.SecurityProtocol | System.Net.SecurityProtocolType.Tls12; - -// create the http client -var handler = new HttpClientHandler(); -handler.AllowAutoRedirect = false; -var client = new HttpClient(handler); -client.DefaultRequestHeaders.Add("Ocp-Apim-Subscription-Key", apiKey); - -// obtain account access token -var accountAccessTokenRequestResult = client.GetAsync($"{apiUrl}/auth/{location}/Accounts/{accountId}/AccessToken?allowEdit=true").Result; -var accountAccessToken = accountAccessTokenRequestResult.Content.ReadAsStringAsync().Result.Replace("\"", ""); - -client.DefaultRequestHeaders.Remove("Ocp-Apim-Subscription-Key"); - -// upload a video -var content = new MultipartFormDataContent(); -Debug.WriteLine("Uploading..."); -// get the video from URL -var videoUrl = "VIDEO_URL"; // replace with the video URL - -// as an alternative to specifying video URL, you can upload a file. 
-// remove the videoUrl parameter from the query string below and add the following lines: - //FileStream video =File.OpenRead(Globals.VIDEOFILE_PATH); - //byte[] buffer = new byte[video.Length]; - //video.Read(buffer, 0, buffer.Length); - //content.Add(new ByteArrayContent(buffer)); - -var uploadRequestResult = client.PostAsync($"{apiUrl}/{location}/Accounts/{accountId}/Videos?accessToken={accountAccessToken}&name=some_name&description=some_description&privacy=private&partition=some_partition&videoUrl={videoUrl}", content).Result; -var uploadResult = uploadRequestResult.Content.ReadAsStringAsync().Result; - -// get the video id from the upload result -var videoId = JsonConvert.DeserializeObject(uploadResult)["id"]; -Debug.WriteLine("Uploaded"); -Debug.WriteLine("Video ID: " + videoId); - -// obtain video access token -client.DefaultRequestHeaders.Add("Ocp-Apim-Subscription-Key", apiKey); -var videoTokenRequestResult = client.GetAsync($"{apiUrl}/auth/{location}/Accounts/{accountId}/Videos/{videoId}/AccessToken?allowEdit=true").Result; -var videoAccessToken = videoTokenRequestResult.Content.ReadAsStringAsync().Result.Replace("\"", ""); - -client.DefaultRequestHeaders.Remove("Ocp-Apim-Subscription-Key"); - -// wait for the video index to finish -while (true) -{ - Thread.Sleep(10000); - - var videoGetIndexRequestResult = client.GetAsync($"{apiUrl}/{location}/Accounts/{accountId}/Videos/{videoId}/Index?accessToken={videoAccessToken}&language=English").Result; - var videoGetIndexResult = videoGetIndexRequestResult.Content.ReadAsStringAsync().Result; - - var processingState = JsonConvert.DeserializeObject(videoGetIndexResult)["state"]; - - Debug.WriteLine(""); - Debug.WriteLine("State:"); - Debug.WriteLine(processingState); - - // job is finished - if (processingState != "Uploaded" && processingState != "Processing") - { - Debug.WriteLine(""); - Debug.WriteLine("Full JSON:"); - Debug.WriteLine(videoGetIndexResult); - break; - } -} - -// search for the video -var 
searchRequestResult = client.GetAsync($"{apiUrl}/{location}/Accounts/{accountId}/Videos/Search?accessToken={accountAccessToken}&id={videoId}").Result; -var searchResult = searchRequestResult.Content.ReadAsStringAsync().Result; -Debug.WriteLine(""); -Debug.WriteLine("Search:"); -Debug.WriteLine(searchResult); - -// get insights widget url -var insightsWidgetRequestResult = client.GetAsync($"{apiUrl}/{location}/Accounts/{accountId}/Videos/{videoId}/InsightsWidget?accessToken={videoAccessToken}&widgetType=Keywords&allowEdit=true").Result; -var insightsWidgetLink = insightsWidgetRequestResult.Headers.Location; -Debug.WriteLine("Insights Widget url:"); -Debug.WriteLine(insightsWidgetLink); - -// get player widget url -var playerWidgetRequestResult = client.GetAsync($"{apiUrl}/{location}/Accounts/{accountId}/Videos/{videoId}/PlayerWidget?accessToken={videoAccessToken}").Result; -var playerWidgetLink = playerWidgetRequestResult.Headers.Location; -Debug.WriteLine(""); -Debug.WriteLine("Player Widget url:"); -Debug.WriteLine(playerWidgetLink); -``` - -## Clean up resources - -After you are done with this tutorial, delete resources that you are not planning to use. - -## See also - -- [Video Analyzer for Media overview](video-indexer-overview.md) -- [Regions](https://azure.microsoft.com/global-infrastructure/services/?products=cognitive-services) - -## Next steps - -- [Examine details of the output JSON](video-indexer-output-json-v2.md) -- Check out the [sample code](https://github.com/Azure-Samples/media-services-video-indexer) that demonstrates important aspect of uploading and indexing a video. Following the code will give you a good idea of how to use our API for basic functionalities. Make sure to read the inline comments and notice our best practices advices. 
- diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-view-edit.md b/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-view-edit.md deleted file mode 100644 index b07d877d53ad0..0000000000000 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-view-edit.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: View and edit Azure Video Analyzer for Media (formerly Video Indexer) insights -titlesuffix: Azure Video Analyzer for Media -description: This article demonstrates how to view and edit Azure Video Analyzer for Media (formerly Video Indexer) insights. -services: azure-video-analyzer -author: Juliako -manager: femila -ms.topic: article -ms.subservice: azure-video-analyzer-media -ms.date: 05/15/2019 -ms.author: juliako ---- - -# View and edit Video Analyzer for Media insights - -This topic shows you how to view and edit the Azure Video Analyzer for Media (formerly Video Indexer) insights of a video. - -1. Browse to the [Video Analyzer for Media](https://www.videoindexer.ai/) website and sign in. -2. Find a video from which you want to create your Video Analyzer for Media insights. For more information, see [Find exact moments within videos](video-indexer-search.md). -3. Press **Play**. - - The page shows the video's summarized insights. - - ![Insights](./media/video-indexer-view-edit/video-indexer-summarized-insights.png) - -4. View the summarized insights of the video. - - Summarized insights show an aggregated view of the data: faces, keywords, sentiments. For example, you can see the faces of people and the time ranges each face appears in and the % of the time it is shown. - - The player and the insights are synchronized. For example, if you click a keyword or the transcript line, the player brings you to that moment in the video. You can achieve the player/insights view and synchronization in your application. 
For more information, see [Embed Azure Indexer widgets into your application](video-indexer-embed-widgets.md). - -## Next steps - -[Use your videos' deep insights](use-editor-create-project.md) - -## See also - -[Video Analyzer for Media overview](video-indexer-overview.md) - diff --git a/articles/azure-video-indexer/.openpublishing.redirection.azure-video-indexer.json b/articles/azure-video-indexer/.openpublishing.redirection.azure-video-indexer.json new file mode 100644 index 0000000000000..7d8bc8afd5f02 --- /dev/null +++ b/articles/azure-video-indexer/.openpublishing.redirection.azure-video-indexer.json @@ -0,0 +1,244 @@ +{ + "redirections": [ + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/add-contributor-role-on-the-media-service.md", + "redirect_url": "/azure/azure-video-indexer/add-contributor-role-on-the-media-service", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/animated-characters-recognition-how-to.md", + "redirect_url": "/azure/azure-video-indexer/animated-characters-recognition-how-to", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/animated-characters-recognition.md", + "redirect_url": "/azure/azure-video-indexer/animated-characters-recognition", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/audio-effects-detection.md", + "redirect_url": "/azure/azure-video-indexer/audio-effects-detection", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/compare-video-indexer-with-media-services-presets.md", + "redirect_url": "/azure/azure-video-indexer/compare-video-indexer-with-media-services-presets", + "redirect_document_id": false + }, + { + "source_path_from_root": 
"/articles/azure-video-analyzer/video-analyzer-for-media-docs/concepts-overview.md", + "redirect_url": "/azure/azure-video-indexer/concepts-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/connect-classic-account-to-arm.md", + "redirect_url": "/azure/azure-video-indexer/connect-classic-account-to-arm", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/connect-to-azure.md", + "redirect_url": "/azure/azure-video-indexer/connect-to-azure", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/considerations-when-use-at-scale.md", + "redirect_url": "/azure/azure-video-indexer/considerations-when-use-at-scale", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/create-video-analyzer-for-media-account.md", + "redirect_url": "/azure/azure-video-indexer/create-video-analyzer-for-media-account", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-brands-model-overview.md", + "redirect_url": "/azure/azure-video-indexer/customize-brands-model-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-brands-model-with-api.md", + "redirect_url": "/azure/azure-video-indexer/customize-brands-model-with-api", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-brands-model-with-website.md", + "redirect_url": "/azure/azure-video-indexer/customize-brands-model-with-website", + "redirect_document_id": false + }, + { + "source_path_from_root": 
"/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-content-models-overview.md", + "redirect_url": "/azure/azure-video-indexer/customize-content-models-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-language-model-overview.md", + "redirect_url": "/azure/azure-video-indexer/customize-language-model-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-language-model-with-api.md", + "redirect_url": "/azure/azure-video-indexer/customize-language-model-with-api", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-language-model-with-website.md", + "redirect_url": "/azure/azure-video-indexer/customize-language-model-with-website", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-person-model-overview.md", + "redirect_url": "/azure/azure-video-indexer/customize-person-model-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-person-model-with-api.md", + "redirect_url": "/azure/azure-video-indexer/customize-person-model-with-api", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-person-model-with-website.md", + "redirect_url": "/azure/azure-video-indexer/customize-person-model-with-website", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/deploy-with-arm-template.md", + "redirect_url": "/azure/azure-video-indexer/deploy-with-arm-template", + "redirect_document_id": false + }, + { + "source_path_from_root": 
"/articles/azure-video-analyzer/video-analyzer-for-media-docs/detected-clothing.md", + "redirect_url": "/azure/azure-video-indexer/detected-clothing", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/faq.yml", + "redirect_url": "/azure/azure-video-indexer/faq", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/index.yml", + "redirect_url": "/azure/azure-video-indexer", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/invite-users.md", + "redirect_url": "/azure/azure-video-indexer/invite-users", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/language-identification-model.md", + "redirect_url": "/azure/azure-video-indexer/language-identification-model", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/language-support.md", + "redirect_url": "/azure/azure-video-indexer/language-support", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/live-stream-analysis.md", + "redirect_url": "/azure/azure-video-indexer/live-stream-analysis", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/logic-apps-connector-tutorial.md", + "redirect_url": "/azure/azure-video-indexer/logic-apps-connector-tutorial", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/manage-account-connected-to-azure.md", + "redirect_url": "/azure/azure-video-indexer/manage-account-connected-to-azure", + "redirect_document_id": false + }, + { + "source_path_from_root": 
"/articles/azure-video-analyzer/video-analyzer-for-media-docs/manage-multiple-tenants.md", + "redirect_url": "/azure/azure-video-indexer/manage-multiple-tenants", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/matched-person.md", + "redirect_url": "/azure/azure-video-indexer/matched-person", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/multi-language-identification-transcription.md", + "redirect_url": "/azure/azure-video-indexer/multi-language-identification-transcription", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/observed-people-tracing.md", + "redirect_url": "/azure/azure-video-indexer/observed-people-tracing", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/odrv-download.md", + "redirect_url": "/azure/azure-video-indexer/odrv-download", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/regions.md", + "redirect_url": "/azure/azure-video-indexer/regions", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/release-notes.md", + "redirect_url": "/azure/azure-video-indexer/release-notes", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/scenes-shots-keyframes.md", + "redirect_url": "/azure/azure-video-indexer/scenes-shots-keyframes", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/upload-index-videos.md", + "redirect_url": "/azure/azure-video-indexer/upload-index-videos", + "redirect_document_id": false + }, + { + 
"source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/use-editor-create-project.md", + "redirect_url": "/azure/azure-video-indexer/use-editor-create-project", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-disaster-recovery.md", + "redirect_url": "/azure/azure-video-indexer/video-indexer-disaster-recovery", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-embed-widgets.md", + "redirect_url": "/azure/azure-video-indexer/video-indexer-embed-widgets", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-get-started.md", + "redirect_url": "/azure/azure-video-indexer/video-indexer-get-started", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-output-json-v2.md", + "redirect_url": "/azure/azure-video-indexer/video-indexer-output-json-v2", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-overview.md", + "redirect_url": "/azure/azure-video-indexer/video-indexer-overview", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-search.md", + "redirect_url": "/azure/azure-video-indexer/video-indexer-search", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-use-apis.md", + "redirect_url": "/azure/azure-video-indexer/video-indexer-use-apis", + "redirect_document_id": false + }, + { + "source_path_from_root": "/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-view-edit.md", + 
"redirect_url": "/azure/azure-video-indexer/video-indexer-view-edit", + "redirect_document_id": false + }, + ] +} diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/add-contributor-role-on-the-media-service.md b/articles/azure-video-indexer/add-contributor-role-on-the-media-service.md similarity index 81% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/add-contributor-role-on-the-media-service.md rename to articles/azure-video-indexer/add-contributor-role-on-the-media-service.md index 60ed4c57eaa01..5d557ea70f3da 100644 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/add-contributor-role-on-the-media-service.md +++ b/articles/azure-video-indexer/add-contributor-role-on-the-media-service.md @@ -12,14 +12,14 @@ ms.custom: ignite-fall-2021 This article describes how to assign contributor role on the Media Services. > [!NOTE] -> If you are creating your Azure Video Analyzer for Media through the Azure portal UI, the selected Managed identity will be automatically assigned with a contributor permission on the selected Media Service account. +> If you are creating your Azure Video Indexer through the Azure portal UI, the selected Managed identity will be automatically assigned with a contributor permission on the selected Media Service account. ## Prerequisites 1. Azure Media Services (AMS) 2. User-assigned managed identity > [!NOTE] -> You'll need an Azure subscription where you have access to both the [Contributor][docs-role-contributor] role and the [User Access Administrator][docs-role-administrator] role to the Azure Media Services and the User-assigned managed identity. If you don't have the right permissions, ask your account administrator to grant you those permissions. The associated Azure Media Services must be in the same region as the Video Analyzer for Media account. 
+> You'll need an Azure subscription where you have access to both the [Contributor][docs-role-contributor] role and the [User Access Administrator][docs-role-administrator] role to the Azure Media Services and the User-assigned managed identity. If you don't have the right permissions, ask your account administrator to grant you those permissions. The associated Azure Media Services must be in the same region as the Azure Video Indexer account. ## Add Contributor role on the Media Services @@ -43,5 +43,5 @@ This article describes how to assign contributor role on the Media Services. 1. To assign the role, click **Review + assign** -[docs-role-contributor]: ../../role-based-access-control/built-in-roles.md#contributor -[docs-role-administrator]: ../../role-based-access-control/built-in-roles.md#user-access-administrator +[docs-role-contributor]: ../role-based-access-control/built-in-roles.md#contributor +[docs-role-administrator]: ../role-based-access-control/built-in-roles.md#user-access-administrator diff --git a/articles/azure-video-indexer/animated-characters-recognition-how-to.md b/articles/azure-video-indexer/animated-characters-recognition-how-to.md new file mode 100644 index 0000000000000..679ee7551a826 --- /dev/null +++ b/articles/azure-video-indexer/animated-characters-recognition-how-to.md @@ -0,0 +1,203 @@ +--- +title: Animated character detection with Azure Video Indexer (formerly Azure Video Analyzer for Media) how to +description: This how to demonstrates how to use animated character detection with Azure Video Indexer (formerly Azure Video Analyzer for Media). 
+services: azure-video-analyzer +author: Juliako +manager: femila + +ms.custom: references_regions +ms.topic: how-to +ms.date: 12/07/2020 +ms.author: juliako +--- + +# Use the animated character detection (preview) with portal and API + +Azure Video Indexer (formerly Azure Video Analyzer for Media) supports detection, grouping, and recognition of characters in animated content, this functionality is available through the Azure portal and through API. Review [this overview](animated-characters-recognition.md) topic. + +This article demonstrates to how to use the animated character detection with the Azure portal and the Azure Video Indexer API. + +## Use the animated character detection with portal + +In the trial accounts the Custom Vision integration is managed by Azure Video Indexer, you can start creating and using the animated characters model. If using the trial account, you can skip the following ("Connect your Custom Vision account") section. + +### Connect your Custom Vision account (paid accounts only) + +If you own an Azure Video Indexer paid account, you need to connect a Custom Vision account first. If you don't have a Custom Vision account already, please create one. For more information, see [Custom Vision](../cognitive-services/custom-vision-service/overview.md). + +> [!NOTE] +> Both accounts need to be in the same region. The Custom Vision integration is currently not supported in the Japan region. + +Paid accounts that have access to their Custom Vision account can see the models and tagged images there. Learn more about [improving your classifier in Custom Vision](../cognitive-services/custom-vision-service/getting-started-improving-your-classifier.md). + +Note that the training of the model should be done only via Azure Video Indexer, and not via the Custom Vision website. 
+ +#### Connect a Custom Vision account with API + +Follow these steps to connect your Custom Vision account to Azure Video Indexer, or to change the Custom Vision account that is currently connected to Azure Video Indexer: + +1. Browse to [www.customvision.ai](https://www.customvision.ai) and login. +1. Copy the keys for the Training and Prediction resources: + + > [!NOTE] + > To provide all the keys you need to have two separate resources in Custom Vision, one for training and one for prediction. +1. Provide other information: + + * Endpoint + * Prediction resource ID +1. Browse and sign in to the [Azure Video Indexer](https://vi.microsoft.com/). +1. Click on the question mark on the top-right corner of the page and choose **API Reference**. +1. Make sure you are subscribed to API Management by clicking **Products** tab. If you have an API connected you can continue to the next step, otherwise, subscribe. +1. On the developer portal, click the **Complete API Reference** and browse to **Operations**. +1. Select **Connect Custom Vision Account (PREVIEW)** and click **Try it**. +1. Fill in the required fields as well as the access token and click **Send**. + + For more information about how to get the Video Indexer access token go to the [developer portal](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Account-Access-Token), and see the [relevant documentation](video-indexer-use-apis.md#obtain-access-token-using-the-authorization-api). +1. Once the call returns a 200 OK response, your account is connected. +1. To verify your connection, browse to the [Azure Video Indexer](https://vi.microsoft.com/) portal: +1. Click on the **Content model customization** button in the top-right corner. +1. Go to the **Animated characters** tab. +1. Once you click on Manage models in Custom Vision, you will be transferred to the Custom Vision account you just connected. 
+ +> [!NOTE] +> Currently, only models that were created via Azure Video Indexer are supported. Models that are created through Custom Vision will not be available. In addition, the best practice is to edit models that were created through Azure Video Indexer only through the Azure Video Indexer platform, since changes made through Custom Vision may cause unintended results. + +### Create an animated characters model + +1. Browse to the [Azure Video Indexer](https://vi.microsoft.com/) website and sign in. +1. To customize a model in your account, select the **Content model customization** button on the left of the page. + + > [!div class="mx-imgBorder"] + > :::image type="content" source="./media/content-model-customization/content-model-customization.png" alt-text="Customize content model in Azure Video Indexer "::: +1. Go to the **Animated characters** tab in the model customization section. +1. Click on **Add model**. +1. Name you model and click enter to save the name. + +> [!NOTE] +> The best practice is to have one custom vision model for each animated series. + +### Index a video with an animated model + +For the initial training, upload at least two videos. Each should be preferably longer than 15 minutes, before expecting good recognition model. If you have shorter episodes, we recommend uploading at least 30 minutes of video content before training. This will allow you to merge groups that belong to the same character from different scenes and backgrounds, and therefore increase the chance it will detect the character in the following episodes you index. To train a model on multiple videos (episodes) you need to index them all with the same animation model. + +1. Click on the **Upload** button. +1. Choose a video to upload (from a file or a URL). +1. Click on **Advanced options**. +1. Under **People / Animated characters** choose **Animation models**. +1. 
If you have one model it will be chosen automatically, and if you have multiple models you can choose the relevant one out of the dropdown menu. +1. Click on upload. +1. Once the video is indexed, you will see the detected characters in the **Animated characters** section in the **Insights** pane. + +Before tagging and training the model, all animated characters will be named “Unknown #X”. After you train the model they will also be recognized. + +### Customize the animated characters models + +1. Name the characters in Azure Video Indexer. + + 1. After the model created character group, it is recommended to review these groups in Custom Vision. + 1. To tag an animated character in your video, go to the **Insights** tab and click on the **Edit** button on the top-right corner of the window. + 1. In the **Insights** pane, click on any of the detected animated characters and change their names from "Unknown #X" to a temporary name (or the name that was previously assigned to the character). + 1. After typing in the new name, click on the check icon next to the new name. This saves the new name in the model in Azure Video Indexer. +1. Paid accounts only: Review the groups in Custom Vision + + > [!NOTE] + > Paid accounts that have access to their Custom Vision account can see the models and tagged images there. Learn more about [improving your classifier in Custom Vision](../cognitive-services/custom-vision-service/getting-started-improving-your-classifier.md). It’s important to note that training of the model should be done only via Azure Video Indexer (as described in this topic), and not via the Custom Vision website. + + 1. Go to the **Custom Models** page in Azure Video Indexer and choose the **Animated characters** tab. + 1. Click on the Edit button for the model you are working on to manage it in Custom Vision. + 1. Review each character group: + + * If the group contains unrelated images it is recommended to delete these in the Custom Vision website. 
+ * If there are images that belong to a different character, change the tag on these specific images by clicking on the image, adding the right tag and deleting the wrong tag. + * If the group is not correct, meaning it contains mainly non-character images or images from multiple characters, you can delete it in the Custom Vision website or in Azure Video Indexer insights. + * The grouping algorithm will sometimes split your characters to different groups. It is therefore recommended to give all the groups that belong to the same character the same name (in Azure Video Indexer Insights), which will immediately cause all these groups to appear as one in the Custom Vision website. + 1. Once the group is refined, make sure the initial name you tagged it with reflects the character in the group. +1. Train the model + + 1. After you finish editing all the names you want, you need to train the model. + 1. Once a character is trained into the model, it will be recognized in the next video indexed with that model. + 1. Open the customization page and click on the **Animated characters** tab and then click on the **Train** button to train your model. In order to keep the connection between Video + +Indexer and the model, don't train the model in the Custom Vision website (paid accounts have access to Custom Vision website), only in Azure Video Indexer. +Once trained, any video that will be indexed or reindexed with that model will recognize the trained characters. + +## Delete an animated character and the model + +1. Delete an animated character. + + 1. To delete an animated character in your video insights, go to the **Insights** tab and click on the **Edit** button on the top-right corner of the window. + 1. Choose the animated character and then click on the **Delete** button under their name. + + > [!NOTE] + > This will delete the insight from this video but will not affect the model. +1. Delete a model. + + 1. 
Click on the **Content model customization** button on the top menu and go to the **Animated characters** tab. + 1. Click on the ellipsis icon to the right of the model you wish to delete and then on the delete button. + + * Paid account: the model will be disconnected from Azure Video Indexer and you will not be able to reconnect it. + * Trial account: the model will be deleted from Customs vision as well. + + > [!NOTE] + > In a trial account, you only have one model you can use. After you delete it, you can’t train other models. + +## Use the animated character detection with API + +1. Connect a Custom Vision account. + + If you own an Azure Video Indexer paid account, you need to connect a Custom Vision account first.
                + If you don’t have a Custom Vision account already, please create one. For more information, see [Custom Vision](../cognitive-services/custom-vision-service/overview.md). + + [Connect your Custom Vision account using API](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Connect-Custom-Vision-Account). +1. Create an animated characters model. + + Use the [create animation model](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Create-Animation-Model) API. +1. Index or re-index a video. + + Use the [re-indexing](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Re-Index-Video) API. +1. Customize the animated characters models. + + Use the [train animation model](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Train-Animation-Model) API. + +### View the output + +See the animated characters in the generated JSON file. + +```json +"animatedCharacters": [ + { + "videoId": "e867214582", + "confidence": 0, + "thumbnailId": "00000000-0000-0000-0000-000000000000", + "seenDuration": 201.5, + "seenDurationRatio": 0.3175, + "isKnownCharacter": true, + "id": 4, + "name": "Bunny", + "appearances": [ + { + "startTime": "0:00:52.333", + "endTime": "0:02:02.6", + "startSeconds": 52.3, + "endSeconds": 122.6 + }, + { + "startTime": "0:02:40.633", + "endTime": "0:03:16.6", + "startSeconds": 160.6, + "endSeconds": 196.6 + }, + ] + }, +] +``` + +## Limitations + +* Currently, the "animation identification" capability is not supported in East-Asia region. +* Characters that appear to be small or far in the video may not be identified properly if the video's quality is poor. +* The recommendation is to use a model per set of animated characters (for example per an animated series). 
+ +## Next steps + +[Azure Video Indexer overview](video-indexer-overview.md) diff --git a/articles/azure-video-indexer/animated-characters-recognition.md b/articles/azure-video-indexer/animated-characters-recognition.md new file mode 100644 index 0000000000000..9d469565f2def --- /dev/null +++ b/articles/azure-video-indexer/animated-characters-recognition.md @@ -0,0 +1,51 @@ +--- +title: Animated character detection with Azure Video Indexer (formerly Azure Video Analyzer for Media) +description: This topic demonstrates how to use animated character detection with Azure Video Indexer (formerly Azure Video Analyzer for Media). +ms.topic: conceptual +ms.date: 11/19/2019 +ms.author: juliako +--- + +# Animated character detection (preview) + +Azure Video Indexer (formerly Azure Video Analyzer for Media) supports detection, grouping, and recognition of characters in animated content via integration with [Cognitive Services custom vision](https://azure.microsoft.com/services/cognitive-services/custom-vision-service/). This functionality is available both through the portal and through the API. + +After uploading an animated video with a specific animation model, Azure Video Indexer extracts keyframes, detects animated characters in these frames, groups similar character, and chooses the best sample. Then, it sends the grouped characters to Custom Vision that identifies characters based on the models it was trained on. + +Before you start training your model, the characters are detected namelessly. As you add names and train the model the Azure Video Indexer will recognize the characters and name them accordingly. + +## Flow diagram + +The following diagram demonstrates the flow of the animated character detection process. + +![Flow diagram](./media/animated-characters-recognition/flow.png) + +## Accounts + +Depending on a type of your Azure Video Indexer account, different feature sets are available. 
For information on how to connect your account to Azure, see [Create an Azure Video Indexer account connected to Azure](connect-to-azure.md). + +* Trial account: Azure Video Indexer uses an internal Custom Vision account to create a model and connect it to your Azure Video Indexer account. +* Paid account: you connect your Custom Vision account to your Azure Video Indexer account (if you don’t already have one, you need to create an account first). + +### Trial vs. paid + +|Functionality|Trial|Paid| +|---|---|---| +|Custom Vision account|Managed behind the scenes by Azure Video Indexer. |Your Custom Vision account is connected to Azure Video Indexer.| +|Number of animation models|One|Up to 100 models per account (Custom Vision limitation).| +|Training the model|Azure Video Indexer trains the model for new characters and additional examples of existing characters.|The account owner trains the model when they are ready to make changes.| +|Advanced options in Custom Vision|No access to the Custom Vision portal.|You can adjust the models yourself in the Custom Vision portal.| + +## Use the animated character detection with portal and API + +For details, see [Use the animated character detection with portal and API](animated-characters-recognition-how-to.md). + +## Limitations + +* Currently, the "animation identification" capability is not supported in East-Asia region. +* Characters that appear to be small or far in the video may not be identified properly if the video's quality is poor. +* The recommendation is to use a model per set of animated characters (for example per an animated series). 
+ +## Next steps + +[Azure Video Indexer overview](video-indexer-overview.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/audio-effects-detection.md b/articles/azure-video-indexer/audio-effects-detection.md similarity index 85% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/audio-effects-detection.md rename to articles/azure-video-indexer/audio-effects-detection.md index d99ec91278551..f75106fe67654 100644 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/audio-effects-detection.md +++ b/articles/azure-video-indexer/audio-effects-detection.md @@ -1,6 +1,6 @@ --- title: Audio effects detection -description: Audio Effects Detection is one of Azure Video Analyzer for Media AI capabilities. It can detects a various of acoustics events and classify them into different acoustic categories (for example, gunshot, screaming, crowd reaction and more). +description: Audio Effects Detection is one of Azure Video Indexer AI capabilities that detects various acoustics events and classifies them into different acoustic categories (for example, gunshot, screaming, crowd reaction and more). ms.topic: conceptual ms.date: 01/04/2022 ms.author: juliako @@ -8,7 +8,7 @@ ms.author: juliako # Audio effects detection (preview) -**Audio effects detection** is one of Azure Video Analyzer for Media AI capabilities. It can detects a various of acoustics events and classify them into different acoustic categories (such as dog barking, crowd reactions, laugher and more). +**Audio effects detection** is one of Azure Video Indexer AI capabilities that detects various acoustics events and classifies them into different acoustic categories (such as dog barking, crowd reactions, laugher and more). Some scenarios where this feature is useful: @@ -18,7 +18,7 @@ Some scenarios where this feature is useful: ## Supported audio categories -**Audio effect detection** can detect and classify 7 different categories. 
In the next table, you can find the different categories split in to the different presets, divided to **Standard** and **Advanced**. For more information, see [pricing](https://azure.microsoft.com/pricing/details/media-services/). +**Audio effect detection** can detect and classify 7 different categories. In the next table, you can find the different categories split in to the different presets, divided to **Standard** and **Advanced**. For more information, see [pricing](https://azure.microsoft.com/pricing/details/azure/media-services/). |Indexing type |Standard indexing| Advanced indexing| |---|---|---| @@ -107,7 +107,7 @@ Audio Effects in closed captions file will be retrieved with the following logic ## Adding audio effects in closed caption files -Audio effects can be added to the closed captions files supported by Azure Video Analyzer for Media via the [Get video captions API](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Video-Captions) by choosing true in the `includeAudioEffects` parameter or via the video.ai portal experience by selecting **Download** -> **Closed Captions** -> **Include Audio Effects**. +Audio effects can be added to the closed captions files supported by Azure Video Indexer via the [Get video captions API](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Video-Captions) by choosing true in the `includeAudioEffects` parameter or via the video.ai portal experience by selecting **Download** -> **Closed Captions** -> **Include Audio Effects**. > [!div class="mx-imgBorder"] > :::image type="content" source="./media/audio-effects-detection/close-caption.jpg" alt-text="Audio Effects in CC"::: @@ -119,7 +119,7 @@ Audio effects can be added to the closed captions files supported by Azure Video * The audio effects are detected when present in non-speech segments only. * The model is optimized for cases where there is no loud background music. 
-* Low quality audio may impact the detection results . +* Low quality audio may impact the detection results. * Minimal non-speech section duration is 2 seconds. * Music that is characterized with repetitive and/or linearly scanned frequency can be mistakenly classified as Alarm or siren. * The model is currently optimized for natural and non-synthetic gunshot and explosions sounds. diff --git a/articles/azure-video-indexer/compare-video-indexer-with-media-services-presets.md b/articles/azure-video-indexer/compare-video-indexer-with-media-services-presets.md new file mode 100644 index 0000000000000..7266f0b19512d --- /dev/null +++ b/articles/azure-video-indexer/compare-video-indexer-with-media-services-presets.md @@ -0,0 +1,31 @@ +--- +title: Comparison of Azure Video Indexer (formerly Azure Video Analyzer for Media) and Azure Media Services v3 presets +description: This article compares Azure Video Indexer (formerly Azure Video Analyzer for Media) capabilities and Azure Media Services v3 presets. +ms.topic: conceptual +ms.date: 02/24/2020 +ms.author: juliako + +--- + +# Compare Azure Media Services v3 presets and Azure Video Indexer + +This article compares the capabilities of **Azure Video Indexer (formerly Video Indexer) APIs** and **Media Services v3 APIs**. + +Currently, there is an overlap between features offered by the [Azure Video Indexer APIs](https://api-portal.videoindexer.ai/) and the [Media Services v3 APIs](https://github.com/Azure/azure-rest-api-specs/blob/master/specification/mediaservices/resource-manager/Microsoft.Media/stable/2018-07-01/Encoding.json). The following table offers the current guideline for understanding the differences and similarities. + +## Compare + +|Feature|Azure Video Indexer APIs |Video Analyzer and Audio Analyzer Presets
                in Media Services v3 APIs| +|---|---|---| +|Media Insights|[Enhanced](video-indexer-output-json-v2.md) |[Fundamentals](/azure/media-services/latest/analyze-video-audio-files-concept)| +|Experiences|See the full list of supported features:
                [Overview](video-indexer-overview.md)|Returns video insights only| +|Billing|[Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/#analytics)|[Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/#analytics)| +|Compliance|For the most current compliance updates, visit [Azure Compliance Offerings.pdf](https://gallery.technet.microsoft.com/Overview-of-Azure-c1be3942/file/178110/23/Microsoft%20Azure%20Compliance%20Offerings.pdf) and search for "Azure Video Indexer" to see if it complies with a certificate of interest.|For the most current compliance updates, visit [Azure Compliance Offerings.pdf](https://gallery.technet.microsoft.com/Overview-of-Azure-c1be3942/file/178110/23/Microsoft%20Azure%20Compliance%20Offerings.pdf) and search for "Media Services" to see if it complies with a certificate of interest.| +|Free Trial|East US|Not available| +|Region availability|See [Cognitive Services availability by region](https://azure.microsoft.com/global-infrastructure/services/?products=cognitive-services)|See [Media Services availability by region](https://azure.microsoft.com/global-infrastructure/services/?products=media-services).| + +## Next steps + +[Azure Video Indexer overview](video-indexer-overview.md) + +[Media Services v3 overview](/azure/media-services/latest/media-services-overview) diff --git a/articles/azure-video-indexer/concepts-overview.md b/articles/azure-video-indexer/concepts-overview.md new file mode 100644 index 0000000000000..fe18914d18552 --- /dev/null +++ b/articles/azure-video-indexer/concepts-overview.md @@ -0,0 +1,80 @@ +--- +title: Azure Video Indexer (formerly Azure Video Analyzer for Media) concepts - Azure +description: This article gives a brief overview of Azure Video Indexer (formerly Azure Video Analyzer for Media) terminology and concepts. 
+ms.topic: conceptual +ms.date: 01/19/2021 +ms.author: juliako +--- + + +# Azure Video Indexer concepts + +This article gives a brief overview of Azure Video Indexer (formerly Azure Video Analyzer for Media) terminology and concepts. + +## Audio/video/combined insights + +When you upload your videos to Azure Video Indexer, it analyses both visuals and audio by running different AI models. As Azure Video Indexer analyzes your video, the insights that are extracted by the AI models. For more information, see [overview](video-indexer-overview.md). + +## Confidence scores + +The confidence score indicates the confidence in an insight. It is a number between 0.0 and 1.0. The higher the score- the greater the confidence in the answer. For example, + +```json +"transcript":[ +{ + "id":1, + "text":"Well, good morning everyone and welcome to", + "confidence":0.8839, + "speakerId":1, + "language":"en-US", + "instances":[ + { + "adjustedStart":"0:00:10.21", + "adjustedEnd":"0:00:12.81", + "start":"0:00:10.21", + "end":"0:00:12.81" + } + ] +}, +``` + +## Content moderation + +Use textual and visual content moderation models to keep your users safe from inappropriate content and validate that the content you publish matches your organization's values. You can automatically block certain videos or alert your users about the content. For more information, see [Insights: visual and textual content moderation](video-indexer-output-json-v2.md#visualcontentmoderation). + +## Blocks + +Blocks are meant to make it easier to go through the data. For example, block might be broken down based on when speakers change or there is a long pause. + +## Project and editor + +The [Azure Video Indexer](https://www.videoindexer.ai/) website enables you to use your video's deep insights to: find the right media content, locate the parts that you’re interested in, and use the results to create an entirely new project. 
Once created, the project can be rendered and downloaded from Azure Video Indexer and be used in your own editing applications or downstream workflows. + +Some scenarios where you may find this feature useful are: + +* Creating movie highlights for trailers. +* Using old clips of videos in news casts. +* Creating shorter content for social media. + +For more information, see [Use editor to create projects](use-editor-create-project.md). + +## Keyframes + +Azure Video Indexer selects the frame(s) that best represent each shot. Keyframes are the representative frames selected from the entire video based on aesthetic properties (for example, contrast and stableness). For more information, see [Scenes, shots, and keyframes](scenes-shots-keyframes.md). + +## time range vs. adjusted time range + +TimeRange is the time range in the original video. AdjustedTimeRange is the time range relative to the current playlist. Since you can create a playlist from different lines of different videos, you can take a 1-hour video and use just 1 line from it, for example, 10:00-10:15. In that case, you will have a playlist with 1 line, where the time range is 10:00-10:15 but the adjustedTimeRange is 00:00-00:15. + +## Widgets + +Azure Video Indexer supports embedding widgets in your apps. For more information, see [Embed Azure Video Indexer widgets in your apps](video-indexer-embed-widgets.md). + +## Summarized insights + +Summarized insights contain an aggregated view of the data: faces, topics, emotions. For example, instead of going over each of the thousands of time ranges and checking which faces are in it, the summarized insights contains all the faces and for each one, the time ranges it appears in and the % of the time it is shown. 
+ +## Next steps + +- [overview](video-indexer-overview.md) +- [Insights](video-indexer-output-json-v2.md) diff --git a/articles/azure-video-indexer/connect-classic-account-to-arm.md b/articles/azure-video-indexer/connect-classic-account-to-arm.md new file mode 100644 index 0000000000000..9f9558849938a --- /dev/null +++ b/articles/azure-video-indexer/connect-classic-account-to-arm.md @@ -0,0 +1,104 @@ +--- +title: Connect a classic Azure Video Indexer account to ARM +description: This topic explains how to connect an existing classic paid Azure Video Indexer account to an ARM-based account +ms.topic: how-to +ms.author: itnorman +ms.date: 10/19/2021 +ms.custom: ignite-fall-2021 +--- + +# Connect an existing classic paid Azure Video Indexer account to ARM-based account + +This article details how to connect an existing classic paid Azure Video Indexer account to an Azure Resource Manager (ARM) based account. +Today, Azure Video Indexer (formerly Azure Video Analyzer for Media), is a GA(general availability) product that is not an ARM resource on Azure. +In this article, we will go through options on connecting your **existing** Azure Video Indexer account to [ARM][docs-arm-overview]. + +## Prerequisites + +* Unlimited paid Azure Video Indexer account (classic account). + + * To perform the connect to the ARM (Azure Resource Manager) action, you should have owner's permissions on the Azure Video Indexer account. +* Azure Subscription. +* User assigned managed identity (can be created along the flow). + +## Transition state + +Connecting a classic account to be ARM-based triggers a 30 days of a transition state. 
In the transition state, an existing account can be accessed by generating an access token using both: + +* Access token [generated through API Management](https://aka.ms/avam-dev-portal)(classic way) +* Access token [generated through ARM](/rest/api/videoindexer/generate/access-token) + +The transition state moves all account management functionality to be managed by ARM and will be handled by [Azure RBAC][docs-rbac-overview]. + +The [invite users](invite-users.md) feature in the Azure Video Indexer portal gets disabled. The invited users on this account lose their access to the Azure Video Indexer account Media in the portal. +However, this can be resolved by assigning the right role-assignment to these users through Azure RBAC, see [How to assign RBAC][docs-rbac-assignment]. + +Only the account owner, who performed the connect action, is automatically assigned as the owner on the connected account. When [Azure policies][docs-governance-policy] are enforced, they override the settings on the account. + +If users are not added through Azure RBAC to the account after 30 days, they will lose access through API as well as Azure Video Indexer portal. +After the transition state ends, users will only be able to generate a valid access token through through ARM, making Azure RBAC the exclusive way to manage role-based access control on the account. + +> [!NOTE] +> If there are invited users you wish to remove access from, do it before connecting the account to ARM. + +Before the end of the 30 days of transition state, you can remove access from users through the Azure Video Indexer portal on the account settings page. + +## Get started + +### Browse to [Azure Video Indexer portal](https://aka.ms/vi-portal-link) + +1. Sign in using your Azure AD account. +1. On the top right bar press *User account* to open the side pane account list. +1. Select the Azure Video Indexer classic account you wish to connect to ARM (classic accounts will be tagged with a *classic tag*). +1. 
Click **Settings**. + + ![account-settings](media/connect-classic-account-to-arm/user-account-settings.png) +1. Click **Connect to an ARM-based account**. + + ![connect-button-portal](media/connect-classic-account-to-arm/connect-button.png) +1. Sign to Azure portal. +1. The Azure Video Indexer create blade will open. +1. In the **Create Azure Video Indexer account** section enter required values. + + * If you followed the steps the fields should be auto-populated, make sure to validate the eligible values. + + ![connect-to-arm](media/connect-classic-account-to-arm/connect-blade-new.png) + + | Name | Description | + | ---|---| + |**Subscription**| The subscription currently contains the classic account and other related resources such as the Media Services.| + |**Resource Group**|Select an existing resource or create a new one. The resource group must be the same location as the classic account being connected| + |**Azure Video Indexer account** (radio button)| Select the *"Connecting an existing classic account"*.| + |**Existing account ID**| Enter the ID of existing Azure Video Indexer classic account.| + |**Resource name**|Enter the name of the new Azure Video Indexer account. Default value would be the same name the account had as classic.| + |**Location**|The geographic region can't be changed in the connect process, the connected account must stay in the same region. | + |**Media Services account name**|The original Media Services account name that was associated with classic account.| + |**User-assigned managed identity**|Select a user-assigned managed identity, or create a new one. Azure Video Indexer account will use it to access the Media services. The user-assignment managed identity will be assigned the roles of Contributor for the Media Service account.| +1. Click **Review + create** at the bottom of the form. 
+ +## After connecting to ARM is complete + +After successfully connecting your account to ARM, it is recommended to make sure your account management APIs are replaced with [Azure Video Indexer REST API](/rest/api/videoindexer/accounts?branch=videoindex). +As mentioned in the beginning of this article, during the 30 days of the transition state, “[Get-access-token](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Account-Access-Token)” will be supported side by side the ARM-based “[Generate-Access token](/rest/api/videoindexer/generate/access-token)”. +Make sure to change to the new "Generate-Access token" by updating all your solutions that use the API. + +APIs to be changed: + +- Get Access token for each scope: Account, Project & Video. +- Get account – the account’s details. +- Get accounts – List of all account in a region. +- Create paid account – would create a classic account. + +For a full description of [Azure Video Indexer REST API](/rest/api/videoindexer/accounts?branch=videoindex) calls and documentation, follow the link. + +For code sample generating an access token through ARM see [C# code sample](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/ApiUsage/ArmBased/Program.cs). + +### Next steps + +Learn how to [Upload a video using C#](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/ApiUsage/ArmBased). 
+ + +[docs-arm-overview]: ../azure-resource-manager/management/overview.md +[docs-rbac-overview]: ../role-based-access-control/overview.md +[docs-rbac-assignment]: ../role-based-access-control/role-assignments-portal.md +[docs-governance-policy]: ../governance/policy/overview.md diff --git a/articles/azure-video-indexer/connect-to-azure.md b/articles/azure-video-indexer/connect-to-azure.md new file mode 100644 index 0000000000000..3da5bc7071504 --- /dev/null +++ b/articles/azure-video-indexer/connect-to-azure.md @@ -0,0 +1,231 @@ +--- +title: Create an Azure Video Indexer (formerly Azure Video Analyzer for Media) account connected to Azure +description: Learn how to create an Azure Video Indexer (formerly Azure Video Analyzer for Media) account connected to Azure. +ms.topic: tutorial +ms.date: 10/19/2021 +ms.author: itnorman +ms.custom: ignite-fall-2021 +--- + +# Create an Azure Video Indexer account + +When creating an Azure Video Indexer (formerly Azure Video Analyzer for Media) account, you can choose a free trial account (where you get a certain number of free indexing minutes) or a paid option (where you're not limited by the quota). With a free trial, Azure Video Indexer provides up to 600 minutes of free indexing to users and up to 2400 minutes of free indexing to users that subscribe to the Video Analyzer API on the [developer portal](https://aka.ms/avam-dev-portal). With the paid options, Azure Video Indexer offers two types of accounts: classic accounts(General Availability), and ARM-based accounts(Public Preview). Main difference between the two is account management platform. While classic accounts are built on the API Management, ARM-based accounts management is built on Azure, enables to apply access control to all services with role-based access control (Azure RBAC) natively. + +* You can create an Azure Video Indexer **classic** account through our [API](https://aka.ms/avam-dev-portal). 
+* You can create an Azure Video Indexer **ARM-based** account through one of the following: + + 1. [Azure Video Indexer portal](https://aka.ms/vi-portal-link) + 2. [Azure portal](https://portal.azure.com/#home) + 3. [QuickStart ARM template](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/ARM-Samples/Create-Account) + +To read more on how to create a **new ARM-Based** Azure Video Indexer account, read this [article](create-video-analyzer-for-media-account.md) + +## How to create classic accounts +This article shows how to create an Azure Video Indexer classic account. The topic provides steps for connecting to Azure using the automatic (default) flow. It also shows how to connect to Azure manually (advanced). + +If you are moving from a *trial* to *paid ARM-Based* Azure Video Indexer account, you can choose to copy all of the videos and model customization to the new account, as discussed in the [Import your content from the trial account](#import-your-content-from-the-trial-account) section. + +The article also covers [Linking an Azure Video Indexer account to Azure Government](#azure-video-indexer-in-azure-government). + +## Prerequisites for connecting to Azure + +* An Azure subscription. + + If you don't have an Azure subscription yet, sign up for [Azure Free Trial](https://azure.microsoft.com/free/). +* An Azure Active Directory (Azure AD) domain. + + If you don't have an Azure AD domain, create this domain with your Azure subscription. For more information, see [Managing custom domain names in your Azure AD](../active-directory/enterprise-users/domains-manage.md) +* A user in your Azure AD domain with an **Application administrator** role. You'll use this member when connecting your Azure Video Indexer account to Azure. + + This user should be an Azure AD user with a work or school account. Don't use a personal account, such as outlook.com, live.com, or hotmail.com. 
+ + ![all Azure AD users](./media/create-account/all-aad-users.png) + +### Additional prerequisites for automatic flow + +* A user and member in your Azure AD domain. + + You'll use this member when connecting your Azure Video Indexer account to Azure. + + This user should be a member in your Azure subscription with either an **Owner** role, or both **Contributor** and **User Access Administrator** roles. A user can be added twice, with two roles. Once with Contributor and once with user Access Administrator. For more information, see [View the access a user has to Azure resources](../role-based-access-control/check-access.md). + + ![access control](./media/create-account/access-control-iam.png) + +### Additional prerequisites for manual flow + +* Register the Event Grid resource provider using the Azure portal. + + In the [Azure portal](https://portal.azure.com/), go to **Subscriptions**->[subscription]->**ResourceProviders**. + + Search for **Microsoft.Media** and **Microsoft.EventGrid**. If not in the "Registered" state, click **Register**. It takes a couple of minutes to register. + + ![EventGrid](./media/create-account/event-grid.png) + +## Connect to Azure manually (advanced option) + +If the connection to Azure failed, you can attempt to troubleshoot the problem by connecting manually. + +> [!NOTE] +> It's mandatory to have the following three accounts in the same region: the Azure Video Indexer account that you're connecting with the Media Services account, as well as the Azure storage account connected to the same Media Services account. + +### Create and configure a Media Services account + +1. Use the [Azure](https://portal.azure.com/) portal to create an Azure Media Services account, as described in [Create an account](/azure/azure/media-services/previous/media-services-portal-create-account). + + Make sure the Media Services account was created with the classic APIs. 
+ + ![Media Services classic API](./media/create-account/enable-classic-api.png) + + + When creating a storage account for your Media Services account, select **StorageV2** for account kind and **Geo-redundant** (GRS) for replication fields. + + ![New AMS account](./media/create-account/create-new-ams-account.png) + + > [!NOTE] + > Make sure to write down the Media Services resource and account names. You'll need them for the steps in the next section. + +1. Before you can play your videos in the Azure Video Indexer web app, you must start the default **Streaming Endpoint** of the new Media Services account. + + In the new Media Services account, select **Streaming endpoints**. Then select the streaming endpoint and press start. + + ![Streaming endpoints](./media/create-account/create-ams-account-se.png) +4. For Azure Video Indexer to authenticate with Media Services API, an AD app needs to be created. The following steps guide you through the Azure AD authentication process described in [Get started with Azure AD authentication by using the Azure portal](/azure/azure/media-services/previous/media-services-portal-get-started-with-aad): + + 1. In the new Media Services account, select **API access**. + 2. Select [Service principal authentication method](/azure/azure/media-services/previous/media-services-portal-get-started-with-aad). + 3. Get the client ID and client secret + + After you select **Settings**->**Keys**, add **Description**, press **Save**, and the key value gets populated. + + If the key expires, the account owner will have to contact Azure Video Indexer support to renew the key. + + > [!NOTE] + > Make sure to write down the key value and the Application ID. You'll need it for the steps in the next section. + +### Connect manually + +In the **Create a new account on an Azure subscription** dialog of your [Azure Video Indexer](https://www.videoindexer.ai/) page, select the **Switch to manual configuration** link. 
+ +In the dialog, provide the following information: + +|Setting|Description| +|---|---| +|Azure Video Indexer account region|The name of the Azure Video Indexer account region. For better performance and lower costs, it's highly recommended to specify the name of the region where the Azure Media Services resource and Azure Storage account are located. | +|Azure AD tenant|The name of the Azure AD tenant, for example "contoso.onmicrosoft.com". The tenant information can be retrieved from the Azure portal. Place your cursor over the name of the signed-in user in the top-right corner. Find the name to the right of **Domain**.| +|Subscription ID|The Azure subscription under which this connection should be created. The subscription ID can be retrieved from the Azure portal. Select **All services** in the left panel, and search for "subscriptions". Select **Subscriptions** and choose the desired ID from the list of your subscriptions.| +|Azure Media Services resource group name|The name for the resource group in which you created the Media Services account.| +|Media service resource name|The name of the Azure Media Services account that you created in the previous section.| +|Application ID|The Azure AD application ID (with permissions for the specified Media Services account) that you created in the previous section.| +|Application key|The Azure AD application key that you created in the previous section. | + +### Import your content from the *trial* account + +When creating a new **ARM-Based** account, you have an option to import your content from the *trial* account into the new **ARM-Based** account free of charge. +> [!NOTE] +> * Import from trial can be performed only once per trial account. +> * The target ARM-Based account needs to be created and available before import is assigned. +> * Target ARM-Based account has to be an empty account (never indexed any media files). + +To import your data, follow the steps: + 1. 
Go to [Azure Video Indexer portal](https://aka.ms/vi-portal-link) + 2. Select your trial account and go to the *account settings* page + 3. Click the *Import content to an ARM-based account* + 4. From the dropdown menu choose the ARM-based account you wish to import the data to. + * If the account ID isn't showing, you can copy and paste the account ID from Azure portal or the account list, on the side blade in the Azure Video Indexer Portal. + 5. Click **Import content** + +![import](./media/create-account/import-steps.png) + + +All media and content model customizations will be copied from the *trial* account into the new ARM-Based account. + + +> [!NOTE] +> +> The *trial* account is not availagle on the Azure Government cloud. + +## Azure Media Services considerations + +The following Azure Media Services related considerations apply: + +* If you plan to connect to an existing Media Services account, make sure the Media Services account was created with the classic APIs. + + ![Media Services classic API](./media/create-account/enable-classic-api.png) +* If you connect to an existing Media Services account, Azure Video Indexer doesn't change the existing media **Reserved Units** configuration. + + You might need to adjust the type and number of Media Reserved Units according to your planned load. Keep in mind that if your load is high and you don't have enough units or speed, videos processing can result in timeout failures. +* If you connect to a new Media Services account, Azure Video Indexer automatically starts the default **Streaming Endpoint** in it: + + ![Media Services streaming endpoint](./media/create-account/ams-streaming-endpoint.png) + + Streaming endpoints have a considerable startup time. Therefore, it may take several minutes from the time you connected your account to Azure until your videos can be streamed and watched in the Azure Video Indexer web app. 
+* If you connect to an existing Media Services account, Azure Video Indexer doesn't change the default Streaming Endpoint configuration. If there's no running **Streaming Endpoint**, you can't watch videos from this Media Services account or in Azure Video Indexer. +* If you connect automatically, Azure Video Indexer sets the media **Reserved Units** to 10 S3 units: + + ![Media Services reserved units](./media/create-account/ams-reserved-units.png) + +## Automate creation of the Azure Video Indexer account + +To automate the creation of the account is a two steps process: + +1. Use Azure Resource Manager to create an Azure Media Services account + Azure AD application. + + See an example of the [Media Services account creation template](https://github.com/Azure-Samples/media-services-v3-arm-templates). +1. Call [Create-Account with the Media Services and Azure AD application](https://videoindexer.ai.azure.us/account/login?source=apim). + +## Azure Video Indexer in Azure Government + +### Prerequisites for connecting to Azure Government + +- An Azure subscription in [Azure Government](../azure-government/index.yml). +- An Azure AD account in Azure Government. +- All pre-requirements of permissions and resources as described above in [Prerequisites for connecting to Azure](#prerequisites-for-connecting-to-azure). Make sure to check [Additional prerequisites for automatic flow](#additional-prerequisites-for-automatic-flow) and [Additional prerequisites for manual flow](#additional-prerequisites-for-manual-flow). + +### Create new account via the Azure Government portal + +> [!NOTE] +> The Azure Government cloud does not include a *trial* experience of Azure Video Indexer. + +To create a paid account via the Azure Video Indexer portal: + +1. Go to https://videoindexer.ai.azure.us +1. Log in with your Azure Government Azure AD account. +1. 
If you do not have any Azure Video Indexer accounts in Azure Government that you are an owner or a contributor to, you will get an empty experience from which you can start creating your account. + + The rest of the flow is as described in above, only the regions to select from will be Government regions in which Azure Video Indexer is available + + If you already are a contributor or an admin of an existing one or more Azure Video Indexer accounts in Azure Government, you will be taken to that account and from there you can start a following steps for creating an additional account if needed, as described above. + +### Create new account via the API on Azure Government + +To create a paid account in Azure Government, follow the instructions in [Create-Paid-Account](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Create-Paid-Account). This API end point only includes Government cloud regions. + +### Limitations of Azure Video Indexer on Azure Government + +* No manual content moderation available in Government cloud. + + In the public cloud when content is deemed offensive based on a content moderation, the customer can ask for a human to look at that content and potentially revert that decision. +* No trial accounts. +* Bing description - in Gov cloud we will not present a description of celebrities and named entities identified. This is a UI capability only. + +## Clean up resources + +After you are done with this tutorial, delete resources that you are not planning to use. + +### Delete an Azure Video Indexer account + +If you want to delete an Azure Video Indexer account, you can delete the account from the Azure Video Indexer website. To delete the account, you must be the owner. + +Select the account -> **Settings** -> **Delete this account**. + +The account will be permanently deleted in 90 days. 
+ +## Firewall + +See [Storage account that is behind a firewall](faq.yml#can-a-storage-account-connected-to-the-media-services-account-be-behind-a-firewall). + +## Next steps + +You can programmatically interact with your trial account and/or with your Azure Video Indexer accounts that are connected to Azure by following the instructions in: [Use APIs](video-indexer-use-apis.md). + +You should use the same Azure AD user you used when connecting to Azure. diff --git a/articles/azure-video-indexer/considerations-when-use-at-scale.md b/articles/azure-video-indexer/considerations-when-use-at-scale.md new file mode 100644 index 0000000000000..2e7c7d9d619f4 --- /dev/null +++ b/articles/azure-video-indexer/considerations-when-use-at-scale.md @@ -0,0 +1,86 @@ +--- +title: Things to consider when using Azure Video Indexer (formerly Azure Video Analyzer for Media) at scale - Azure +description: This topic explains what things to consider when using Azure Video Indexer (formerly Azure Video Analyzer for Media) at scale. +ms.topic: how-to +ms.date: 11/13/2020 +ms.author: juliako +--- + +# Things to consider when using Azure Video Indexer at scale + +When using Azure Video Indexer (formerly Azure Video Analyzer for Media) to index videos and your archive of videos is growing, consider scaling. + +This article answers questions like: + +* Are there any technological constraints I need to take into account? +* Is there a smart and efficient way of doing it? +* Can I prevent spending excess money in the process? + +The article provides six best practices of how to use Azure Video Indexer at scale. + +## When uploading videos consider using a URL over byte array + +Azure Video Indexer does give you the choice to upload videos from URL or directly by sending the file as a byte array, the latter comes with some constraints. 
For more information, see [uploading considerations and limitations)](upload-index-videos.md#uploading-considerations-and-limitations) + +First, it has file size limitations. The size of the byte array file is limited to 2 GB compared to the 30-GB upload size limitation while using URL. + +Second, consider just some of the issues that can affect your performance and hence your ability to scale: + +* Sending files using multi-part means high dependency on your network, +* service reliability, +* connectivity, +* upload speed, +* lost packets somewhere in the world wide web. + +:::image type="content" source="./media/considerations-when-use-at-scale/first-consideration.png" alt-text="First consideration for using Azure Video Indexer at scale"::: + +When you upload videos using URL, you just need to provide a path to the location of a media file and Video Indexer takes care of the rest (see the `videoUrl` field in the [upload video](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Upload-Video) API). + +> [!TIP] +> Use the `videoUrl` optional parameter of the upload video API. + +To see an example of how to upload videos using URL, check out [this example](upload-index-videos.md#code-sample). Or, you can use [AzCopy](../storage/common/storage-use-azcopy-v10.md) for a fast and reliable way to get your content to a storage account from which you can submit it to Azure Video Indexer using [SAS URL](../storage/common/storage-sas-overview.md). Azure Video Indexer recommends using *readonly* SAS URLs. + +## Automatic Scaling of Media Reserved Units + +Starting August 1st 2021, Azure Video Indexer enabled [Reserved Units](/azure/azure/media-services/latest/concept-media-reserved-units)(MRUs) auto scaling by [Azure Media Services](/azure/azure/media-services/latest/media-services-overview) (AMS), as a result you do not need to manage them through Azure Video Indexer. That will allow price optimization, e.g. 
price reduction in many cases, based on your business needs as it is being auto scaled. + +## Respect throttling + +Azure Video Indexer is built to deal with indexing at scale, and when you want to get the most out of it you should also be aware of the system's capabilities and design your integration accordingly. You don't want to send an upload request for a batch of videos just to discover that some of the movies didn't upload and you are receiving an HTTP 429 response code (too many requests). It can happen due to the fact that you sent more requests than the [limit of movies per minute we support](upload-index-videos.md#uploading-considerations-and-limitations). Azure Video Indexer adds a `retry-after` header in the HTTP response, the header specifies when you should attempt your next retry. Make sure you respect it before trying your next request. + +:::image type="content" source="./media/considerations-when-use-at-scale/respect-throttling.jpg" alt-text="Design your integration well, respect throttling"::: + +## Use callback URL + +We recommend that instead of polling the status of your request constantly from the second you sent the upload request, you can add a [callback URL](upload-index-videos.md#callbackurl), and wait for Azure Video Indexer to update you. As soon as there is any status change in your upload request, you get a POST notification to the URL you specified. + +You can add a callback URL as one of the parameters of the [upload video API](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Upload-Video). Check out the code samples in [GitHub repo](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/). + +For callback URL you can also use Azure Functions, a serverless event-driven platform that can be triggered by HTTP and implement a following flow. 
+ +### callBack URL definition + +[!INCLUDE [callback url](./includes/callback-url.md)] + +## Use the right indexing parameters for you + +When making decisions related to using Azure Video Indexer at scale, look at how to get the most out of it with the right parameters for your needs. Think about your use case, by defining different parameters you can save money and make the indexing process for your videos faster. + +Before uploading and indexing your video read this short [documentation](upload-index-videos.md), check the [indexingPreset](upload-index-videos.md#indexingpreset) and [streamingPreset](upload-index-videos.md#streamingpreset) to get a better idea of what your options are. + +For example, don’t set the preset to streaming if you don't plan to watch the video, don't index video insights if you only need audio insights. + +## Index in optimal resolution, not highest resolution + +You might be asking, what video quality do you need for indexing your videos? + +In many cases, indexing performance has almost no difference between HD (720P) videos and 4K videos. Eventually, you’ll get almost the same insights with the same confidence. The higher the quality of the movie you upload means the higher the file size, and this leads to higher computing power and time needed to upload the video. + +For example, for the face detection feature, a higher resolution can help with the scenario where there are many small but contextually important faces. However, this will come with a quadratic increase in runtime and an increased risk of false positives. + +Therefore, we recommend you to verify that you get the right results for your use case and to first test it locally. Upload the same video in 720P and in 4K and compare the insights you get. 
+ +## Next steps + +[Examine the Azure Video Indexer output produced by API](video-indexer-output-json-v2.md) diff --git a/articles/azure-video-indexer/create-video-analyzer-for-media-account.md b/articles/azure-video-indexer/create-video-analyzer-for-media-account.md new file mode 100644 index 0000000000000..5ae93e457729d --- /dev/null +++ b/articles/azure-video-indexer/create-video-analyzer-for-media-account.md @@ -0,0 +1,83 @@ +--- +title: Create an Azure Video Indexer account +description: This topic explains how to create an account for Azure Video Indexer. +ms.topic: tutorial +ms.author: itnorman +ms.date: 10/13/2021 +ms.custom: ignite-fall-2021 +--- + +# Get started with Azure Video Indexer in Azure portal + +This Quickstart walks you through the steps to get started with Azure Video Indexer. You will create an Azure Video Indexer account and its accompanying resources by using the Azure portal. + +To start using Azure Video Indexer, you will need to create an Azure Video Indexer account. The account needs to be associated with a [Media Services][docs-ms] resource and a [User-assigned managed identity][docs-uami]. The managed identity will need to have Contributor permissions role on the Media Services. + +## Prerequisites +> [!NOTE] +> You'll need an Azure subscription where you have access to both the Contributor role and the User Access Administrator role to the resource group under which you will create new resources, and Contributor role on both Azure Media Services and the User-assigned managed identity. If you don't have the right permissions, ask your account administrator to grant you those permissions. The associated Azure Media Services must be in the same region as the Azure Video Indexer account. + + +## Azure portal + +### Create an Azure Video Indexer account in the Azure portal + +1. Sign into the [Azure portal](https://portal.azure.com/). +1. Using the search bar at the top, enter **"Azure Video Indexer"**. +1. 
Click on *Azure Video Indexer* under *Services*. + + ![Image of search bar](media/create-video-analyzer-for-media-account/search-bar1.png) + +1. Click **Create**. +1. In the **Create an Azure Video Indexer resource** section enter required values. + + ![Image of create account](media/create-video-analyzer-for-media-account/create-account-blade.png) + + +| Name | Description | +| ---|---| +|**Subscription**|Choose the subscription that you are using to create the Azure Video Indexer account.| +|**Resource Group**|Choose a resource group where you are creating the Azure Video Indexer account, or select **Create new** to create a resource group.| +|**Azure Video Indexer account**|Select *Create a new account* option.| +|**Resource name**|Enter the name of the new Azure Video Indexer account, the name can contain letters, numbers and dashes with no spaces.| +|**Location**|Select the geographic region that will be used to deploy the Azure Video Indexer account. The location matches the **resource group location** you chose, if you'd like to change the selected location change the selected resource group or create a new one in the preferred location. [Azure region in which Azure Video Indexer is available](https://azure.microsoft.com/global-infrastructure/services/?products=cognitive-services®ions=all)| +|**Media Services account name**|Select a Media Services that the new Azure Video Indexer account will use to process the videos. You can select an existing Media Services or you can create a new one. The Media Services must be in the same location you selected.| +|**User-assigned managed identity**|Select a user-assigned managed identity that the new Azure Video Indexer account will use to access the Media Services. You can select an existing user-assigned managed identity or you can create a new one. The user-assignment managed identity will be assigned the role of Contributor role on the Media Services.| + +1. Click **Review + create** at the bottom of the form. 
+ +### Review deployed resource + +You can use the Azure portal to validate the Azure Video Indexer account and other resources that were created. After the deployment is finished, select **Go to resource** to see your new Azure Video Indexer account. + +### Overview + +![Image of overview](media/create-video-analyzer-for-media-account/overview-screenshot.png) + +Click on *Explore Azure Video Indexer's portal* to view your new account on the [Azure Video Indexer portal](https://aka.ms/vi-portal-link) + +### Management API + +![Image of Generate-access-token](media/create-video-analyzer-for-media-account/generate-access-token.png) + +Use the *Management API* tab to manually generate access tokens for the account. +This token can be used to authenticate API calls for this account. Each token is valid for one hour. + +Choose the following: +* Permission type: **Contributor** or **Reader** +* Scope: **Account**, **Project** or **Video** + * For **Project** or **Video** you should also insert the matching ID +* Click **Generate** + +--- + +### Next steps + +Learn how to [Upload a video using C#](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/ApiUsage/ArmBased). 
+ + + +[docs-uami]: ../active-directory/managed-identities-azure-resources/overview.md +[docs-ms]: /azure/media-services/latest/media-services-overview +[docs-role-contributor]: ../../role-based-access-control/built-in-roles.md#contributor +[docs-contributor-on-ms]: ./add-contributor-role-on-the-media-service.md diff --git a/articles/azure-video-indexer/customize-brands-model-overview.md b/articles/azure-video-indexer/customize-brands-model-overview.md new file mode 100644 index 0000000000000..3649f0723f869 --- /dev/null +++ b/articles/azure-video-indexer/customize-brands-model-overview.md @@ -0,0 +1,30 @@ +--- +title: Customize a Brands model in Azure Video Indexer (formerly Azure Video Analyzer for Media) - Azure +description: This article gives an overview of what is a Brands model in Azure Video Indexer (formerly Azure Video Analyzer for Media) and how to customize it. + +ms.topic: conceptual +ms.date: 12/15/2019 +ms.author: juliako +--- + +# Customize a Brands model in Azure Video Indexer + +Azure Video Indexer (formerly Azure Video Analyzer for Media) supports brand detection from speech and visual text during indexing and reindexing of video and audio content. The brand detection feature identifies mentions of products, services, and companies suggested by Bing's brands database. For example, if Microsoft is mentioned in a video or audio content or if it shows up in visual text in a video, Azure Video Indexer detects it as a brand in the content. Brands are disambiguated from other terms using context. + +Brand detection is useful in a wide variety of business scenarios such as contents archive and discovery, contextual advertising, social media analysis, retail compete analysis, and many more. Azure Video Indexer brand detection enables you to index brand mentions in speech and visual text, using Bing's brands database as well as with customization by building a custom Brands model for each Azure Video Indexer account. 
The custom Brands model feature allows you to select whether or not Azure Video Indexer will detect brands from the Bing brands database, exclude certain brands from being detected (essentially creating a list of unapproved brands), and include brands that should be part of your model that might not be in Bing's brands database (essentially creating a list of approved brands). The custom Brands model that you create will only be available in the account in which you created the model. + +## Out of the box detection example + +In the "Microsoft Build 2017 Day 2" presentation, the brand "Microsoft Windows" appears multiple times. Sometimes in the transcript, sometimes as visual text and never verbatim. Azure Video Indexer detects with high precision that a term is indeed a brand based on the context, covering over 90k brands out of the box, and constantly updating. At 02:25, Azure Video Indexer detects the brand from speech and then again at 02:40 from visual text, which is part of the Windows logo. + +![Brands overview](./media/content-model-customization/brands-overview.png) + +Talking about Windows in the context of construction will not detect the word "Windows" as a brand, and same for Box, Apple, Fox, etc., based on advanced Machine Learning algorithms that know how to disambiguate from context. Brand Detection works for all our supported languages. 
+ +## Next steps + +To bring your own brands, check out these topics: + +[Customize Brands model using APIs](customize-brands-model-with-api.md) + +[Customize Brands model using the website](customize-brands-model-with-website.md) diff --git a/articles/azure-video-indexer/customize-brands-model-with-api.md b/articles/azure-video-indexer/customize-brands-model-with-api.md new file mode 100644 index 0000000000000..f98fe1eb63d9e --- /dev/null +++ b/articles/azure-video-indexer/customize-brands-model-with-api.md @@ -0,0 +1,188 @@ +--- +title: Customize a Brands model with Azure Video Indexer (formerly Azure Video Analyzer for Media) API +description: Learn how to customize a Brands model with the Azure Video Indexer (formerly Azure Video Analyzer for Media) API. +services: azure-video-analyzer +author: anikaz +manager: johndeu +ms.topic: article +ms.date: 01/14/2020 +ms.author: kumud +--- + +# Customize a Brands model with the Azure Video Indexer API + +Azure Video Indexer (formerly Azure Video Analyzer for Media) supports brand detection from speech and visual text during indexing and reindexing of video and audio content. The brand detection feature identifies mentions of products, services, and companies suggested by Bing's brands database. For example, if Microsoft is mentioned in video or audio content or if it shows up in visual text in a video, Azure Video Indexer detects it as a brand in the content. A custom Brands model allows you to exclude certain brands from being detected and include brands that should be part of your model that might not be in Bing's brands database. For more information, see [Overview](customize-brands-model-overview.md). + +> [!NOTE] +> If your video was indexed prior to adding a brand, you need to reindex it. + +You can use the Azure Video Indexer APIs to create, use, and edit custom Brands models detected in a video, as described in this topic. 
You can also use the Azure Video Indexer website, as described in [Customize Brands model using the Azure Video Indexer website](customize-brands-model-with-website.md). + +## Create a Brand + +The [create a brand](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Create-Brand) API creates a new custom brand and adds it to the custom Brands model for the specified account. + +> [!NOTE] +> Setting `enabled` (in the body) to true puts the brand in the *Include* list for Azure Video Indexer to detect. Setting `enabled` to false puts the brand in the *Exclude* list, so Azure Video Indexer won't detect it. + +Some other parameters that you can set in the body: + +* The `referenceUrl` value can be any reference website for the brand, such as a link to its Wikipedia page. +* The `tags` value is a list of tags for the brand. This tag shows up in the brand's *Category* field in the Azure Video Indexer website. For example, the brand "Azure" can be tagged or categorized as "Cloud". + +### Response + +The response provides information on the brand that you just created following the format of the example below. + +```json +{ + "referenceUrl": "https://en.wikipedia.org/wiki/Example", + "id": 97974, + "name": "Example", + "accountId": "SampleAccountId", + "lastModifierUserName": "SampleUserName", + "created": "2018-04-25T14:59:52.7433333", + "lastModified": "2018-04-25T14:59:52.7433333", + "enabled": true, + "description": "This is an example", + "tags": [ + "Tag1", + "Tag2" + ] +} +``` + +## Delete a Brand + +The [delete a brand](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Delete-Brand) API removes a brand from the custom Brands model for the specified account. The account is specified in the `accountId` parameter. Once called successfully, the brand will no longer be in the *Include* or *Exclude* brands lists. + +### Response + +There's no returned content when the brand is deleted successfully. 
+ +## Get a specific Brand + +The [get a brand](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Brand) API lets you search for the details of a brand in the custom Brands model for the specified account using the brand ID. + +### Response + +The response provides information on the brand that you searched (using brand ID) following the format of the example below. + +```json +{ + "referenceUrl": "https://en.wikipedia.org/wiki/Example", + "id": 128846, + "name": "Example", + "accountId": "SampleAccountId", + "lastModifierUserName": "SampleUserName", + "created": "2018-01-06T13:51:38.3666667", + "lastModified": "2018-01-11T13:51:38.3666667", + "enabled": true, + "description": "This is an example", + "tags": [ + "Tag1", + "Tag2" + ] +} +``` + +> [!NOTE] +> `enabled` being set to `true` signifies that the brand is in the *Include* list for Azure Video Indexer to detect, and `enabled` being false signifies that the brand is in the *Exclude* list, so Azure Video Indexer won't detect it. + +## Update a specific brand + +The [update a brand](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Update-Brand) API lets you search for the details of a brand in the custom Brands model for the specified account using the brand ID. + +### Response + +The response provides the updated information on the brand that you updated following the format of the example below. 
+ +```json +{ + "referenceUrl": null, + "id": 97974, + "name": "Example", + "accountId": "SampleAccountId", + "lastModifierUserName": "SampleUserName", + "Created": "2018-04-25T14:59:52.7433333", + "lastModified": "2018-04-25T15:37:50.67", + "enabled": false, + "description": "This is an update example", + "tags": [ + "Tag1", + "NewTag2" + ] +} +``` + +## Get all of the Brands + +The [get all brands](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Brands) API returns all of the brands in the custom Brands model for the specified account regardless of whether the brand is meant to be in the *Include* or *Exclude* brands list. + +### Response + +The response provides a list of all of the brands in your account and each of their details following the format of the example below. + +```json +[ + { + "ReferenceUrl": null, + "id": 97974, + "name": "Example", + "accountId": "AccountId", + "lastModifierUserName": "UserName", + "Created": "2018-04-25T14:59:52.7433333", + "LastModified": "2018-04-25T14:59:52.7433333", + "enabled": true, + "description": "This is an example", + "tags": ["Tag1", "Tag2"] + }, + { + "ReferenceUrl": null, + "id": 97975, + "name": "Example2", + "accountId": "AccountId", + "lastModifierUserName": "UserName", + "Created": "2018-04-26T14:59:52.7433333", + "LastModified": "2018-04-26T14:59:52.7433333", + "enabled": false, + "description": "This is another example", + "tags": ["Tag1", "Tag2"] + } +] +``` + +> [!NOTE] +> The brand named *Example* is in the *Include* list for Azure Video Indexer to detect, and the brand named *Example2* is in the *Exclude* list, so Azure Video Indexer won't detect it. + +## Get Brands model settings + +The [get brands settings](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Brands-Model-Settings) API returns the Brands model settings in the specified account. The Brands model settings represent whether detection from the Bing brands database is enabled or not. 
If Bing brands aren't enabled, Azure Video Indexer will only detect brands from the custom Brands model of the specified account. + +### Response + +The response shows whether Bing brands are enabled following the format of the example below. + +```json +{ + "state": true, + "useBuiltIn": true +} +``` + +> [!NOTE] +> `useBuiltIn` being set to true represents that Bing brands are enabled. If `useBuiltin` is false, Bing brands are disabled. The `state` value can be ignored because it has been deprecated. + +## Update Brands model settings + +The [update brands](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Update-Brands-Model-Settings) API updates the Brands model settings in the specified account. The Brands model settings represent whether detection from the Bing brands database is enabled or not. If Bing brands aren't enabled, Azure Video Indexer will only detect brands from the custom Brands model of the specified account. + +The `useBuiltIn` flag set to true means that Bing brands are enabled. If `useBuiltin` is false, Bing brands are disabled. + +### Response + +There's no returned content when the Brands model setting is updated successfully. + +## Next steps + +[Customize Brands model using website](customize-brands-model-with-website.md) diff --git a/articles/azure-video-indexer/customize-brands-model-with-website.md b/articles/azure-video-indexer/customize-brands-model-with-website.md new file mode 100644 index 0000000000000..dfd274748c2d9 --- /dev/null +++ b/articles/azure-video-indexer/customize-brands-model-with-website.md @@ -0,0 +1,99 @@ +--- +title: Customize a Brands model with the Azure Video Indexer (formerly Azure Video Analyzer for Media) website +description: Learn how to customize a Brands model with the Azure Video Indexer (formerly Azure Video Analyzer for Media) website. 
+services: azure-video-analyzer +author: anikaz +manager: johndeu +ms.topic: article +ms.date: 12/15/2019 +ms.author: kumud +--- + +# Customize a Brands model with the Azure Video Indexer website + +Azure Video Indexer (formerly Azure Video Analyzer for Media) supports brand detection from speech and visual text during indexing and reindexing of video and audio content. The brand detection feature identifies mentions of products, services, and companies suggested by Bing's brands database. For example, if Microsoft is mentioned in video or audio content or if it shows up in visual text in a video, Azure Video Indexer detects it as a brand in the content. + +A custom Brands model allows you to: + +- select if you want Azure Video Indexer to detect brands from the Bing brands database. +- select if you want Azure Video Indexer to exclude certain brands from being detected (essentially creating a deny list of brands). +- select if you want Azure Video Indexer to include brands that should be part of your model that might not be in Bing's brands database (essentially creating an accept list of brands). + +For a detailed overview, see this [Overview](customize-brands-model-overview.md). + +You can use the Azure Video Indexer website to create, use, and edit custom Brands models detected in a video, as described in this topic. You can also use the API, as described in [Customize Brands model using APIs](customize-brands-model-with-api.md). + +> [!NOTE] +> If your video was indexed prior to adding a brand, you need to reindex it. You will find **Re-index** item in the drop-down menu associated with the video. Select **Advanced options** -> **Brand categories** and check **All brands**. + +## Edit Brands model settings + +You have the option to set whether or not you want brands from the Bing brands database to be detected. To set this option, you need to edit the settings of your Brands model. Follow these steps: + +1. 
Go to the [Azure Video Indexer](https://www.videoindexer.ai/) website and sign in. +1. To customize a model in your account, select the **Content model customization** button on the left of the page. + + > [!div class="mx-imgBorder"] + > :::image type="content" source="./media/content-model-customization/content-model-customization.png" alt-text="Customize content model in Azure Video Indexer "::: +1. To edit brands, select the **Brands** tab. + + > [!div class="mx-imgBorder"] + > :::image type="content" source="./media/customize-brand-model/customize-brand-model.png" alt-text="Screenshot shows the Brands tab of the Content model customization dialog box"::: +1. Check the **Show brands suggested by Bing** option if you want Azure Video Indexer to detect brands suggested by Bing—leave the option unchecked if you don't. + +## Include brands in the model + +The **Include brands** section represents custom brands that you want Azure Video Indexer to detect, even if they aren't suggested by Bing. + +### Add a brand to include list + +1. Select **+ Create new brand**. + + Provide a name (required), category (optional), description (optional), and reference URL (optional). + The category field is meant to help you tag your brands. This field shows up as the brand's *tags* when using the Azure Video Indexer APIs. For example, the brand "Azure" can be tagged or categorized as "Cloud". + + The reference URL field can be any reference website for the brand (like a link to its Wikipedia page). + +2. Select **Save** and you'll see that the brand has been added to the **Include brands** list. + +### Edit a brand on the include list + +1. Select the pencil icon next to the brand that you want to edit. + + You can update the category, description, or reference URL of a brand. You can't change the name of a brand because names of brands are unique. If you need to change the brand name, delete the entire brand (see next section) and create a new brand with the new name. + +2. 
Select the **Update** button to update the brand with the new information. + +### Delete a brand on the include list + +1. Select the trash icon next to the brand that you want to delete. +2. Select **Delete** and the brand will no longer appear in your *Include brands* list. + +## Exclude brands from the model + +The **Exclude brands** section represents the brands that you don't want Azure Video Indexer to detect. + +### Add a brand to exclude list + +1. Select **+ Create new brand.** + + Provide a name (required), category (optional). + +2. Select **Save** and you'll see that the brand has been added to the *Exclude brands* list. + +### Edit a brand on the exclude list + +1. Select the pencil icon next to the brand that you want to edit. + + You can only update the category of a brand. You can't change the name of a brand because names of brands are unique. If you need to change the brand name, delete the entire brand (see next section) and create a new brand with the new name. + +2. Select the **Update** button to update the brand with the new information. + +### Delete a brand on the exclude list + +1. Select the trash icon next to the brand that you want to delete. +2. Select **Delete** and the brand will no longer appear in your *Exclude brands* list. + +## Next steps + +[Customize Brands model using APIs](customize-brands-model-with-api.md) diff --git a/articles/azure-video-indexer/customize-content-models-overview.md b/articles/azure-video-indexer/customize-content-models-overview.md new file mode 100644 index 0000000000000..362d531a92ac4 --- /dev/null +++ b/articles/azure-video-indexer/customize-content-models-overview.md @@ -0,0 +1,39 @@ +--- +title: Customizing content models in Azure Video Indexer (formerly Azure Video Analyzer for Media) +description: This article gives links to the conceptual articles that explain the benefits of each type of customization. 
This article also links to how-to guides that show how you can implement the customization of each model. +ms.topic: conceptual +ms.date: 06/26/2019 +ms.author: kumud +--- + +# Customizing content models in Azure Video Indexer + +Azure Video Indexer (formerly Azure Video Analyzer for Media) allows you to customize some of its models to be adapted to your specific use case. These models include [brands](customize-brands-model-overview.md), [language](customize-language-model-overview.md), and [person](customize-person-model-overview.md). You can easily customize these models using the Azure Video Indexer website or API. + +This article gives links to articles that explain the benefits of each type of customization. The article also links to how-to guides that show how you can implement the customization of each model. + +## Animated characters + +* [Animated character detection](animated-characters-recognition.md) + +## Brands model + +* [Customizing the brands model overview](customize-brands-model-overview.md) +* [Customizing the brands model using the Azure Video Indexer website](customize-brands-model-with-website.md) +* [Customizing the brands model using the Azure Video Indexer API](customize-brands-model-with-api.md) + +## Language model + +* [Customizing language models overview](customize-language-model-overview.md) +* [Customizing language models using the Azure Video Indexer website](customize-language-model-with-website.md) +* [Customizing language models using the Azure Video Indexer API](customize-language-model-with-api.md) + +## Person model + +* [Customizing person models overview](customize-person-model-overview.md) +* [Customizing person models using the Azure Video Indexer website](customize-person-model-with-website.md) +* [Customizing person models using the Azure Video Indexer API](customize-person-model-with-api.md) + +## Next steps + +[Azure Video Indexer overview](video-indexer-overview.md) diff --git 
a/articles/azure-video-indexer/customize-language-model-overview.md b/articles/azure-video-indexer/customize-language-model-overview.md new file mode 100644 index 0000000000000..3415eee9755c4 --- /dev/null +++ b/articles/azure-video-indexer/customize-language-model-overview.md @@ -0,0 +1,38 @@ +--- +title: Customize a Language model in Azure Video Indexer (formerly Azure Video Analyzer for Media) - Azure +description: This article gives an overview of what is a Language model in Azure Video Indexer (formerly Azure Video Analyzer for Media) and how to customize it. +author: Juliako +manager: femila +ms.topic: conceptual +ms.author: juliako +ms.date: 02/02/2022 +--- + +# Customize a Language model with Azure Video Indexer + +Azure Video Indexer (formerly Azure Video Analyzer for Media) supports automatic speech recognition through integration with the Microsoft [Custom Speech Service](https://azure.microsoft.com/services/cognitive-services/custom-speech-service/). You can customize the Language model by uploading adaptation text, namely text from the domain whose vocabulary you'd like the engine to adapt to. Once you train your model, new words appearing in the adaptation text will be recognized, assuming default pronunciation, and the Language model will learn new probable sequences of words. See the list of supported by Azure Video Indexer languages in [supported langues](language-support.md). + +Let's take a word that is highly specific, like "Kubernetes" (in the context of Azure Kubernetes service), as an example. Since the word is new to Azure Video Indexer, it is recognized as "communities". You need to train the model to recognize it as "Kubernetes". In other cases, the words exist, but the Language model is not expecting them to appear in a certain context. For example, "container service" is not a 2-word sequence that a non-specialized Language model would recognize as a specific set of words. 
+ +You have the option to upload words without context in a list in a text file. This is considered partial adaptation. Alternatively, you can upload text file(s) of documentation or sentences related to your content for better adaptation. + +You can use the Azure Video Indexer APIs or the website to create and edit custom Language models, as described in topics in the [Next steps](#next-steps) section of this topic. + +## Best practices for custom Language models + +Azure Video Indexer learns based on probabilities of word combinations, so to learn best: + +* Give enough real examples of sentences as they would be spoken. +* Put only one sentence per line, not more. Otherwise the system will learn probabilities across sentences. +* It is okay to put one word as a sentence to boost the word against others, but the system learns best from full sentences. +* When introducing new words or acronyms, if possible, give as many examples of usage in a full sentence to give as much context as possible to the system. +* Try to put several adaptation options, and see how they work for you. +* Avoid repetition of the exact same sentence multiple times. It may create bias against the rest of the input. +* Avoid including uncommon symbols (~, # @ % &) as they will get discarded. The sentences in which they appear will also get discarded. +* Avoid putting too large inputs, such as hundreds of thousands of sentences, because doing so will dilute the effect of boosting. 
+ +## Next steps + +[Customize Language model using APIs](customize-language-model-with-api.md) + +[Customize Language model using the website](customize-language-model-with-website.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-language-model-with-api.md b/articles/azure-video-indexer/customize-language-model-with-api.md similarity index 89% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-language-model-with-api.md rename to articles/azure-video-indexer/customize-language-model-with-api.md index b4f5db65264c8..43112ad7b4347 100644 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-language-model-with-api.md +++ b/articles/azure-video-indexer/customize-language-model-with-api.md @@ -1,23 +1,21 @@ --- -title: Customize a Language model with Azure Video Analyzer for Media (formerly Video Indexer) API -titlesuffix: Azure Video Analyzer for Media -description: Learn how to customize a Language model with the Azure Video Analyzer for Media (formerly Video Indexer) API. +title: Customize a Language model with Azure Video Indexer (formerly Azure Video Analyzer for Media) API +description: Learn how to customize a Language model with the Azure Video Indexer (formerly Azure Video Analyzer for Media) API. services: azure-video-analyzer author: anikaz manager: johndeu ms.topic: article -ms.subservice: azure-video-analyzer-media ms.date: 02/04/2020 ms.author: kumud --- -# Customize a Language model with the Video Analyzer for Media API +# Customize a Language model with the Azure Video Indexer API -Azure Video Analyzer for Media (formerly Video Indexer) lets you create custom Language models to customize speech recognition by uploading adaptation text, namely text from the domain whose vocabulary you'd like the engine to adapt to. Once you train your model, new words appearing in the adaptation text will be recognized. 
+Azure Video Indexer (formerly Azure Video Analyzer for Media) lets you create custom Language models to customize speech recognition by uploading adaptation text, namely text from the domain whose vocabulary you'd like the engine to adapt to. Once you train your model, new words appearing in the adaptation text will be recognized. -For a detailed overview and best practices for custom Language models, see [Customize a Language model with Video Analyzer for Media](customize-language-model-overview.md). +For a detailed overview and best practices for custom Language models, see [Customize a Language model with Azure Video Indexer](customize-language-model-overview.md). -You can use the Video Analyzer for Media APIs to create and edit custom Language models in your account, as described in this topic. You can also use the website, as described in [Customize Language model using the Video Analyzer for Media website](customize-language-model-with-api.md). +You can use the Azure Video Indexer APIs to create and edit custom Language models in your account, as described in this topic. You can also use the website, as described in [Customize Language model using the Azure Video Indexer website](customize-language-model-with-website.md). ## Create a Language model @@ -99,11 +97,11 @@ The response provides metadata on the newly trained Language model along with me } ``` 
+The returned `id` is a unique ID used to distinguish between language models, while `languageModelId` is used both for [uploading a video to index](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Upload-Video) and [reindexing a video](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Re-Index-Video) APIs (also known as `linguisticModelId` in Azure Video Indexer upload/reindex APIs). ## Delete a Language model -The [delete a language model](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Delete-Language-Model) API deletes a custom Language model from the specified account. Any video that was using the deleted Language model will keep the same index until you reindex the video. If you reindex the video, you can assign a new Language model to the video. Otherwise, Video Analyzer for Media will use its default model to reindex the video. +The [delete a language model](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Delete-Language-Model) API deletes a custom Language model from the specified account. Any video that was using the deleted Language model will keep the same index until you reindex the video. If you reindex the video, you can assign a new Language model to the video. Otherwise, Azure Video Indexer will use its default model to reindex the video. ### Response diff --git a/articles/azure-video-indexer/customize-language-model-with-website.md b/articles/azure-video-indexer/customize-language-model-with-website.md new file mode 100644 index 0000000000000..6fbe015b3fa52 --- /dev/null +++ b/articles/azure-video-indexer/customize-language-model-with-website.md @@ -0,0 +1,134 @@ +--- +title: Customize Language model with Azure Video Indexer (formerly Azure Video Analyzer for Media) website +description: Learn how to customize a Language model with the Azure Video Indexer (formerly Azure Video Analyzer for Media) website. 
+services: azure-video-analyzer +author: anikaz +manager: johndeu +ms.topic: article +ms.date: 08/10/2020 +ms.author: kumud +--- + +# Customize a Language model with the Azure Video Indexer website + +Azure Video Indexer (formerly Azure Video Analyzer for Media) lets you create custom Language models to customize speech recognition by uploading adaptation text, namely text from the domain whose vocabulary you'd like the engine to adapt to. Once you train your model, new words appearing in the adaptation text will be recognized. + +For a detailed overview and best practices for custom language models, see [Customize a Language model with Azure Video Indexer](customize-language-model-overview.md). + +You can use the Azure Video Indexer website to create and edit custom Language models in your account, as described in this topic. You can also use the API, as described in [Customize Language model using APIs](customize-language-model-with-api.md). + +## Create a Language model + +1. Go to the [Azure Video Indexer](https://www.videoindexer.ai/) website and sign in. +1. To customize a model in your account, select the **Content model customization** button on the left of the page. + + > [!div class="mx-imgBorder"] + > :::image type="content" source="./media/customize-language-model/model-customization.png" alt-text="Customize content model in Azure Video Indexer "::: +1. Select the **Language** tab. + + You see a list of supported languages. +1. Under the language that you want, select **Add model**. +1. Type in the name for the Language model and hit enter. + + This step creates the model and gives the option to upload text files to the model. +1. To add a text file, select **Add file**. Your file explorer will open. +1. Navigate to and select the text file. You can add multiple text files to a Language model. + + You can also add a text file by selecting the **...** button on the right side of the Language model and selecting **Add file**. +1. 
Once you're done uploading the text files, select the green **Train** option. + +The training process can take a few minutes. Once the training is done, you see **Trained** next to the model. You can preview, download, and delete the file from the model. + +> [!div class="mx-imgBorder"] +> :::image type="content" source="./media/customize-language-model/customize-language-model.png" alt-text="Train the model"::: + +### Using a Language model on a new video + +To use your Language model on a new video, do one of the following actions: + +* Select the **Upload** button on the top of the page. + + ![Upload button Azure Video Indexer](./media/customize-language-model/upload.png) +* Drop your audio or video file or browse for your file. + +You're given the option to select the **Video source language**. Select the drop-down and select a Language model that you created from the list. It should say the language of your Language model and the name that you gave it in parentheses. For example: + +![Choose video source language—Reindex a video with Azure Video Indexer](./media/customize-language-model/reindex.png) + +Select the **Upload** option in the bottom of the page, and your new video will be indexed using your Language model. + +### Using a Language model to reindex + +To use your Language model to reindex a video in your collection, follow these steps: + +1. Sign in to the [Azure Video Indexer](https://www.videoindexer.ai/) home page. +1. Click on **...** button on the video and select **Re-index**. +1. You're given the option to select the **Video source language** to reindex your video with. Select the drop-down and select a Language model that you created from the list. It should say the language of your language model and the name that you gave it in parentheses. +1. Select the **Re-index** button and your video will be reindexed using your Language model. 
+ +## Edit a Language model + +You can edit a Language model by changing its name, adding files to it, and deleting files from it. + +If you add or delete files from the Language model, you'll have to train the model again by selecting the green **Train** option. + +### Rename the Language model + +You can change the name of the Language model by selecting the ellipsis (**...**) button on the right side of the Language model and selecting **Rename**. + +Type in the new name and hit enter. + +### Add files + +To add a text file, select **Add file**. Your file explorer will open. + +Navigate to and select the text file. You can add multiple text files to a Language model. + +You can also add a text file by selecting the ellipsis (**...**) button on the right side of the Language model and selecting **Add file**. + +### Delete files + +To delete a file from the Language model, select the ellipsis (**...**) button on the right side of the text file and select **Delete**. A new window pops up telling you that the deletion can't be undone. Select the **Delete** option in the new window. + +This action removes the file completely from the Language model. + +## Delete a Language model + +To delete a Language model from your account, select the ellipsis (**...**) button on the right side of the Language model and select **Delete**. + +A new window pops up telling you that the deletion can't be undone. Select the **Delete** option in the new window. + +This action removes the Language model completely from your account. Any video that was using the deleted Language model will keep the same index until you reindex the video. If you reindex the video, you can assign a new Language model to the video. Otherwise, Azure Video Indexer will use its default model to reindex the video. 
+ +## Customize Language models by correcting transcripts + +Azure Video Indexer supports automatic customization of Language models based on the actual corrections users make to the transcriptions of their videos. + +1. To make corrections to a transcript, open up the video that you want to edit from your Account Videos. Select the **Timeline** tab. + + ![Customize language model timeline tab—Azure Video Indexer](./media/customize-language-model/timeline.png) + +1. Select the pencil icon to edit the transcript of your transcription. + + ![Customize language model edit transcription—Azure Video Indexer](./media/customize-language-model/edits.png) + + Azure Video Indexer captures all lines that are corrected by you in the transcription of your video and adds them automatically to a text file called "From transcript edits". These edits are used to retrain the specific Language model that was used to index this video. + + The edits that were done in the [widget's](video-indexer-embed-widgets.md) timeline are also included. + + If you didn't specify a Language model when indexing this video, all edits for this video will be stored in a default Language model called "Account adaptations" within the detected language of the video. + + In case multiple edits have been made to the same line, only the last version of the corrected line will be used for updating the Language model. + + > [!NOTE] + > Only textual corrections are used for the customization. Corrections that don't involve actual words (for example, punctuation marks or spaces) aren't included. + +1. You'll see transcript corrections show up in the Language tab of the Content model customization page. + + To look at the "From transcript edits" file for each of your Language models, select it to open it. 
+ + ![From transcript edits—Azure Video Indexer](./media/customize-language-model/from-transcript-edits.png) + +## Next steps + +[Customize language model using APIs](customize-language-model-with-api.md) diff --git a/articles/azure-video-indexer/customize-person-model-overview.md b/articles/azure-video-indexer/customize-person-model-overview.md new file mode 100644 index 0000000000000..53e932b3f63f6 --- /dev/null +++ b/articles/azure-video-indexer/customize-person-model-overview.md @@ -0,0 +1,22 @@ +--- +title: Customize a Person model in Azure Video Indexer (formerly Azure Video Analyzer for Media) - Azure +description: This article gives an overview of what is a Person model in Azure Video Indexer (formerly Azure Video Analyzer for Media) and how to customize it. +ms.topic: conceptual +ms.date: 05/15/2019 +ms.author: kumud +--- + +# Customize a Person model in Azure Video Indexer + +Azure Video Indexer (formerly Azure Video Analyzer for Media) supports celebrity recognition in your videos. The celebrity recognition feature covers approximately one million faces based on commonly requested data source such as IMDB, Wikipedia, and top LinkedIn influencers. Faces that are not recognized by Azure Video Indexer are still detected but are left unnamed. Customers can build custom Person models and enable Azure Video Indexer to recognize faces that are not recognized by default. Customers can build these Person models by pairing a person's name with image files of the person's face. + +If your account caters to different use-cases, you can benefit from being able to create multiple Person models per account. For example, if the content in your account is meant to be sorted into different channels, you might want to create a separate Person model for each channel. + +> [!NOTE] +> Each Person model supports up to 1 million people and each account has a limit of 50 Person models. 
+ +Once a model is created, you can use it by providing the model ID of a specific Person model when uploading/indexing or reindexing a video. Training a new face for a video, updates the specific custom model that the video was associated with. + +If you do not need the multiple Person model support, do not assign a Person model ID to your video when uploading/indexing or reindexing. In this case, Azure Video Indexer will use the default Person model in your account. + +You can use the Azure Video Indexer website to edit faces that were detected in a video and to manage multiple custom Person models in your account, as described in the [Customize a Person model using a website](customize-person-model-with-website.md) topic. You can also use the API, as described in [Customize a Person model using APIs](customize-person-model-with-api.md). diff --git a/articles/azure-video-indexer/customize-person-model-with-api.md b/articles/azure-video-indexer/customize-person-model-with-api.md new file mode 100644 index 0000000000000..4d89f91951902 --- /dev/null +++ b/articles/azure-video-indexer/customize-person-model-with-api.md @@ -0,0 +1,86 @@ +--- +title: Customize a Person model with Azure Video Indexer (formerly Azure Video Analyzer for Media) API +description: Learn how to customize a Person model with the Azure Video Indexer (formerly Azure Video Analyzer for Media) API. +services: azure-video-analyzer +author: anikaz +manager: johndeu +ms.topic: article +ms.date: 01/14/2020 +ms.author: kumud +--- + +# Customize a Person model with the Azure Video Indexer API + +Azure Video Indexer (formerly Azure Video Analyzer for Media) supports face detection and celebrity recognition for video content. The celebrity recognition feature covers about one million faces based on commonly requested data source such as IMDB, Wikipedia, and top LinkedIn influencers. Faces that aren't recognized by the celebrity recognition feature are detected but left unnamed. 
After you upload your video to Azure Video Indexer and get results back, you can go back and name the faces that weren't recognized. Once you label a face with a name, the face and name get added to your account's Person model. Azure Video Indexer will then recognize this face in your future videos and past videos. + +You can use the Azure Video Indexer API to edit faces that were detected in a video, as described in this topic. You can also use the Azure Video Indexer website, as described in [Customize Person model using the Azure Video Indexer website](customize-person-model-with-website.md). + +## Managing multiple Person models + +Azure Video Indexer supports multiple Person models per account. This feature is currently available only through the Azure Video Indexer APIs. + +If your account caters to different use-case scenarios, you might want to create multiple Person models per account. For example, if your content is related to sports, you can then create a separate Person model for each sport (football, basketball, soccer, and so on). + +Once a model is created, you can use it by providing the model ID of a specific Person model when uploading/indexing or reindexing a video. Training a new face for a video updates the specific custom model that the video was associated with. + +Each account has a limit of 50 Person models. If you don't need the multiple Person model support, don't assign a Person model ID to your video when uploading/indexing or reindexing. In this case, Azure Video Indexer uses the default custom Person model in your account. + +## Create a new Person model + +To create a new Person model in the specified account, use the [create a person model](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Create-Person-Model) API. + +The response provides the name and generated model ID of the Person model that you just created following the format of the example below. 
+ +```json +{ + "id": "227654b4-912c-4b92-ba4f-641d488e3720", + "name": "Example Person Model" +} +``` + +You then use the **id** value for the **personModelId** parameter when [uploading a video to index](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Upload-Video) or [reindexing a video](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Re-Index-Video). + +## Delete a Person model + +To delete a custom Person model from the specified account, use the [delete a person model](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Delete-Person-Model) API. + +Once the Person model is deleted successfully, the index of your current videos that were using the deleted model will remain unchanged until you reindex them. Upon reindexing, the faces that were named in the deleted model won't be recognized by Azure Video Indexer in your current videos that were indexed using that model but the faces will still be detected. Your current videos that were indexed using the deleted model will now use your account's default Person model. If faces from the deleted model are also named in your account's default model, those faces will continue to be recognized in the videos. + +There's no returned content when the Person model is deleted successfully. + +## Get all Person models + +To get all Person models in the specified account, use the [get a person model](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Person-Models) API. + +The response provides a list of all of the Person models in your account (including the default Person model in the specified account) and each of their names and IDs following the format of the example below. 
+ +```json +[ + { + "id": "59f9c326-b141-4515-abe7-7d822518571f", + "name": "Default" + }, + { + "id": "9ef2632d-310a-4510-92e1-cc70ae0230d4", + "name": "Test" + } +] +``` + +You can choose which model you want to use for a video by using the `id` value of the Person model for the `personModelId` parameter when [uploading a video to index](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Upload-Video) or [reindexing a video](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Re-Index-Video). + +## Update a face + +This command allows you to update a face in your video with a name using the ID of the video and ID of the face. This action then updates the Person model that the video was associated with upon uploading/indexing or reindexing. If no Person model was assigned, it updates the account's default Person model. + +The system then recognizes the occurrences of the same face in your other current videos that share the same Person model. Recognition of the face in your other current videos might take some time to take effect as this is a batch process. + +You can update a face that Azure Video Indexer recognized as a celebrity with a new name. The new name that you give will take precedence over the built-in celebrity recognition. + +To update the face, use the [update a video face](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Update-Video-Face) API. + +Names are unique for Person models, so if you give two different faces in the same Person model the same `name` parameter value, Azure Video Indexer views the faces as the same person and converges them once you reindex your video. 
+ +## Next steps + +[Customize Person model using the Azure Video Indexer website](customize-person-model-with-website.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-person-model-with-website.md b/articles/azure-video-indexer/customize-person-model-with-website.md similarity index 80% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-person-model-with-website.md rename to articles/azure-video-indexer/customize-person-model-with-website.md index 404c0e3ebdac5..77e9d8d30e935 100644 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/customize-person-model-with-website.md +++ b/articles/azure-video-indexer/customize-person-model-with-website.md @@ -1,25 +1,23 @@ --- -title: Customize a Person model with Azure Video Analyzer for Media (formerly Video Indexer) website -titleSuffix: Azure Video Analyzer for Media -description: Learn how to customize a Person model with the Azure Video Analyzer for Media (formerly Video Indexer) website. +title: Customize a Person model with Azure Video Indexer (formerly Azure Video Analyzer for Media) website +description: Learn how to customize a Person model with the Azure Video Indexer (formerly Azure Video Analyzer for Media) website. services: azure-video-analyzer author: Juliako manager: femila ms.topic: article -ms.subservice: azure-video-analyzer-media ms.date: 12/16/2020 ms.author: juliako --- -# Customize a Person model with the Video Analyzer for Media website +# Customize a Person model with the Azure Video Indexer website -Azure Video Analyzer for Media (formerly Video Indexer) supports celebrity recognition for video content. The celebrity recognition feature covers approximately one million faces based on commonly requested data source such as IMDB, Wikipedia, and top LinkedIn influencers. For a detailed overview, see [Customize a Person model in Video Analyzer for Media](customize-person-model-overview.md). 
+Azure Video Indexer (formerly Azure Video Analyzer for Media) supports celebrity recognition for video content. The celebrity recognition feature covers approximately one million faces based on commonly requested data source such as IMDB, Wikipedia, and top LinkedIn influencers. For a detailed overview, see [Customize a Person model in Azure Video Indexer](customize-person-model-overview.md). -You can use the Video Analyzer for Media website to edit faces that were detected in a video, as described in this topic. You can also use the API, as described in [Customize a Person model using APIs](customize-person-model-with-api.md). +You can use the Azure Video Indexer website to edit faces that were detected in a video, as described in this topic. You can also use the API, as described in [Customize a Person model using APIs](customize-person-model-with-api.md). ## Central management of Person models in your account -1. To view, edit, and delete the Person models in your account, browse to the Video Analyzer for Media website and sign in. +1. To view, edit, and delete the Person models in your account, browse to the Azure Video Indexer website and sign in. 1. Select the content model customization button on the left of the page. > [!div class="mx-imgBorder"] @@ -45,16 +43,16 @@ You can use the Video Analyzer for Media website to edit faces that were detecte ## Add a new person to a Person model > [!NOTE] -> Video Analyzer for Media allows you to add multiple people with the same name in a Person model. However, it's recommended you give unique names to each person in your model for usability and clarity. +> Azure Video Indexer allows you to add multiple people with the same name in a Person model. However, it's recommended you give unique names to each person in your model for usability and clarity. 1. To add a new face to a Person model, select the list menu button next to the Person model that you want to add the face to. 1. Select **+ Add person** from the menu. 
A pop-up will prompt you to fill out the Person's details. Type in the name of the person and select the check button. - You can then choose from your file explorer or drag and drop the face images of the face. Video Analyzer for Media will take all standard image file types (ex: JPG, PNG, and more). + You can then choose from your file explorer or drag and drop the face images of the face. Azure Video Indexer will take all standard image file types (ex: JPG, PNG, and more). - Video Analyzer for Media can detect occurrences of this person in the future videos that you index and the current videos that you had already indexed, using the Person model to which you added this new face to. Recognition of the person in your current videos might take some time to take effect, as this is a batch process. + Azure Video Indexer can detect occurrences of this person in the future videos that you index and the current videos that you had already indexed, using the Person model to which you added this new face to. Recognition of the person in your current videos might take some time to take effect, as this is a batch process. ## Rename a Person model @@ -126,11 +124,11 @@ To use your Person model on a new video, do the following steps: 1. Select the drop-down and select the Person model that you created. 1. Select the **Upload** option in the bottom of the page, and your new video will be indexed using your Person model. -If you don't specify a Person model during the upload, Video Analyzer for Media will index the video using the Default Person model in your account. +If you don't specify a Person model during the upload, Azure Video Indexer will index the video using the Default Person model in your account. ## Use a Person model to reindex a video -To use a Person model to reindex a video in your collection, go to your account videos on the Video Analyzer for Media home page and hover over the name of the video that you want to reindex. 
+To use a Person model to reindex a video in your collection, go to your account videos on the Azure Video Indexer home page and hover over the name of the video that you want to reindex. You see options to edit, delete, and reindex your video. @@ -157,9 +155,9 @@ If you don't assign a Person model to the video during upload, your edit is save ### Edit a face > [!NOTE] -> If a Person model has two or more different people with the same name, you won't be able to tag that name within the videos that use that Person model. You'll only be able to make changes to people that share that name in the People tab of the content model customization page in Video Analyzer for Media. For this reason, it's recommended that you give unique names to each person in your Person model. +> If a Person model has two or more different people with the same name, you won't be able to tag that name within the videos that use that Person model. You'll only be able to make changes to people that share that name in the People tab of the content model customization page in Azure Video Indexer. For this reason, it's recommended that you give unique names to each person in your Person model. -1. Browse to the Video Analyzer for Media website and sign in. +1. Browse to the Azure Video Indexer website and sign in. 1. Search for a video you want to view and edit in your account. 1. To edit a face in your video, go to the Insights tab and select the pencil icon on the top-right corner of the window. diff --git a/articles/azure-video-indexer/deploy-with-arm-template.md b/articles/azure-video-indexer/deploy-with-arm-template.md new file mode 100644 index 0000000000000..b18adfa7133e4 --- /dev/null +++ b/articles/azure-video-indexer/deploy-with-arm-template.md @@ -0,0 +1,109 @@ +--- +title: Deploy Azure Video Indexer with ARM template +description: In this tutorial you will create an Azure Video Indexer account by using Azure Resource Manager (ARM) template. 
+ms.topic: tutorial +ms.date: 12/01/2021 +ms.author: juliako +--- + +# Tutorial: deploy Azure Video Indexer with ARM template + +## Overview + +In this tutorial you will create an Azure Video Indexer (formerly Azure Video Analyzer for Media) account by using Azure Resource Manager (ARM) template (preview). +The resource will be deployed to your subscription and will create the Azure Video Indexer resource based on parameters defined in the avam.template file. + +> [!NOTE] +> This sample is *not* for connecting an existing Azure Video Indexer classic account to an ARM-based Azure Video Indexer account. +> For full documentation on Azure Video Indexer API, visit the [Developer portal](https://aka.ms/avam-dev-portal) page. +> The current API Version is "2021-10-27-preview". Check this Repo from time to time to get updates on new API Versions. + +## Prerequisites + +* An Azure Media Services (AMS) account. You can create one for free through the [Create AMS Account](/azure/media-services/latest/account-create-how-to). + +## Deploy the sample + +---- + +### Option 1: Click the "Deploy To Azure Button", and fill in the missing parameters + +[![Deploy to Azure](https://aka.ms/deploytoazurebutton)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure-Samples%2Fmedia-services-video-indexer%2Fmaster%2FARM-Samples%2FCreate-Account%2Favam.template.json) + +---- + +### Option 2 : Deploy using PowerShell Script + +1. Open the [template file](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/ARM-Samples/Create-Account/avam.template.json) and inspect its content. +2. Fill in the required parameters (see below) +3. Run the Following PowerShell commands: + + * Create a new Resource group on the same location as your Azure Video Indexer account, using the [New-AzResourceGroup](/powershell/module/az.resources/new-azresourcegroup) cmdlet. 
+ + + ```powershell + New-AzResourceGroup -Name myResourceGroup -Location eastus + ``` + + * Deploy the template to the resource group using the [New-AzResourceGroupDeployment](/powershell/module/az.resources/new-azresourcegroupdeployment) cmdlet. + + ```powershell + New-AzResourceGroupDeployment -ResourceGroupName myResourceGroup -TemplateFile ./avam.template.json + ``` + +> [!NOTE] +> If you would like to work with bicep format, inspect the [bicep file](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/ARM-Samples/Create-Account/avam.template.bicep) on this repo. + +## Parameters + +### name + +* Type: string +* Description: Specifies the name of the new Azure Video Indexer account. +* required: true + +### location + +* Type: string +* Description: Specifies the Azure location where the Azure Video Indexer account should be created. +* Required: false + +> [!NOTE] +> You need to deploy Your Azure Video Indexer account in the same location (region) as the associated Azure Media Services(AMS) resource exists. + +### mediaServiceAccountResourceId + +* Type: string +* Description: The Resource ID of the Azure Media Services(AMS) resource. +* Required: true + +### managedIdentityId + +* Type: string +* Description: The Resource ID of the Managed Identity used to grant access between Azure Media Services(AMS) resource and the Azure Video Indexer account. 
+* Required: true + +### tags + +* Type: object +* Description: Array of objects that represents custom user tags on the Azure Video Indexer account + + Required: false + +## Reference documentation + +If you're new to Azure Video Indexer (formerly Azure Video Analyzer for Media), see: + +* [Azure Video Indexer Documentation](/azure/azure-video-indexer) +* [Azure Video Indexer Developer Portal](https://api-portal.videoindexer.ai/) +* After completing this tutorial, head to other Azure Video Indexer samples, described on [README.md](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/README.md) + +If you're new to template deployment, see: + +* [Azure Resource Manager documentation](../azure-resource-manager/index.yml) +* [Deploy Resources with ARM Template](../azure-resource-manager/templates/deploy-powershell.md) +* [Deploy Resources with Bicep and Azure CLI](../azure-resource-manager/bicep/deploy-cli.md) + +## Next steps + +[Connect an existing classic paid Azure Video Indexer account to ARM-based account](connect-classic-account-to-arm.md) \ No newline at end of file diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/detected-clothing.md b/articles/azure-video-indexer/detected-clothing.md similarity index 80% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/detected-clothing.md rename to articles/azure-video-indexer/detected-clothing.md index a93bc93641706..9f2df094aff28 100644 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/detected-clothing.md +++ b/articles/azure-video-indexer/detected-clothing.md @@ -9,7 +9,7 @@ ms.author: juliako # People's detected clothing (preview) -Video Analyzer for Media detects clothing associated with the person wearing it in the video and provides information such as the type of clothing detected and the timestamp of the appearance (start, end). The API returns the detection confidence level. 
+Azure Video Indexer detects clothing associated with the person wearing it in the video and provides information such as the type of clothing detected and the timestamp of the appearance (start, end). The API returns the detection confidence level. Two examples where this feature could be useful: @@ -20,17 +20,17 @@ The newly added clothing detection feature is available when indexing your file :::image type="content" source="./media/detected-clothing/index-video.png" alt-text="This screenshot represents an indexing video option"::: -When you choose to see **Insights** of your video on the [Video Analyzer for Media](https://www.videoindexer.ai/) (former Video Indexer) website, the People's detected clothing could be viewed from the **Observed People** tracing insight. When choosing a thumbnail of a person the detected clothing became available. +When you choose to see **Insights** of your video on the [Azure Video Indexer](https://www.videoindexer.ai/) (former Video Indexer) website, the People's detected clothing could be viewed from the **Observed People** tracing insight. When choosing a thumbnail of a person the detected clothing became available. :::image type="content" source="./media/detected-clothing/observed-people.png" alt-text="Observed people screenshot"::: -If you are interested to view People's detected clothing in the Timeline of your video on the Video Analyzer for Media website, go to **View** -> **Show Insights** and select the **All** option or **View** -> **Custom View** and select **Observed People**. +If you are interested to view People's detected clothing in the Timeline of your video on the Azure Video Indexer website, go to **View** -> **Show Insights** and select the **All** option or **View** -> **Custom View** and select **Observed People**. 
:::image type="content" source="./media/detected-clothing/observed-person.png" alt-text="Observed person screenshot"::: -Searching for a specific clothing to return all the observed people wearing it is enabled using the search bar of either the **Insights** or from the **Timeline** of your video on the Video Analyzer for Media website . +Searching for a specific clothing to return all the observed people wearing it is enabled using the search bar of either the **Insights** or from the **Timeline** of your video on the Azure Video Indexer website . -The following JSON response illustrates what Video Analyzer for Media returns when tracing observed people having detected clothing associated: +The following JSON response illustrates what Azure Video Indexer returns when tracing observed people having detected clothing associated: ```json "observedPeople": [ diff --git a/articles/azure-video-indexer/faq.yml b/articles/azure-video-indexer/faq.yml new file mode 100644 index 0000000000000..d61873834b49b --- /dev/null +++ b/articles/azure-video-indexer/faq.yml @@ -0,0 +1,199 @@ +### YamlMime:FAQ +metadata: + title: Frequently asked questions about Azure Video Indexer (formerly Azure Video Analyzer for Media) - Azure + description: This article gives answers to frequently asked questions about Azure Video Indexer (formerly Azure Video Analyzer for Media). + services: azure-video-analyzer + author: Juliako + manager: femila + ms.topic: faq + ms.date: 05/25/2021 + ms.author: juliako +title: Azure Video Indexer frequently asked questions +summary: This article answers frequently asked questions about Azure Video Indexer (formerly Azure Video Analyzer for Media). + + +sections: + - name: General questions + questions: + - question: What is Azure Video Indexer? + answer: | + Azure Video Indexer is an artificial intelligence service that is part of Microsoft Azure Media Services. 
Azure Video Indexer provides an orchestration of multiple machine learning models that enable you to easily extract deep insight from a video. To provide advanced and accurate insights, Azure Video Indexer makes use of multiple channels of the video: audio, speech, and visual. Azure Video Indexer’s insights may be used in many ways, like improving content discoverability and accessibility, creating new monetization opportunities, or building new experiences that use the insights. Azure Video Indexer provides a web-based interface for testing, configuration, and customization of models in your account. Developers can use a REST-based API to integrate Azure Video Indexer into production system. + + - question: What can I do with Azure Video Indexer? + answer: | + Some of the operations that Azure Video Indexer can perform on media files include: + + * Identifying and extracting speech and identify speakers. + * Identifying and extracting on-screen text in a video. + * Detecting objects in a video file. + * Identify brands (for example: Microsoft) from audio tracks and on-screen text in a video. + * Detecting and recognizing faces from a database of celebrities and a user-defined database of faces. + * Extracting topics discussed but not necessarily mentioned in audio and video content. + * Creating closed captions or subtitles from the audio track. + + For more information and more Azure Video Indexer features, see [Overview](video-indexer-overview.md). + + - question: How do I get started with Azure Video Indexer? + answer: | + Azure Video Indexer includes a free trial offering that provides you with 600 minutes in the web-based interface and 2,400 minutes via the API. You can [login to the Azure Video Indexer web-based interface](https://www.videoindexer.ai/) and try it for yourself using any web identity and without having to set up an Azure Subscription. 
Follow [this easy introduction lab](https://github.com/Azure-Samples/media-services-video-indexer/blob/master/IntroToVideoIndexer.md) to get a better idea of how to use Azure Video Indexer. + + To index videos and audio files at scale, you can connect Azure Video Indexer to a paid Microsoft Azure subscription. You can find more information on pricing on the [pricing](https://azure.microsoft.com/pricing/details/cognitive-services/video-indexer/) page. + + You can find more information on getting started in [Get started](video-indexer-get-started.md). + + - question: Do I need coding skills to use Azure Video Indexer? + answer: | + You can use the Azure Video Indexer web-based interface to evaluate, configure, and manage your account with **no coding required**. When you are ready to develop more complex applications, you can use the [Azure Video Indexer API](https://api-portal.videoindexer.ai/) to integrate Azure Video Indexer into your own applications, web sites, or [custom workflows using serverless technologies like Azure Logic Apps](https://azure.microsoft.com/blog/logic-apps-flow-connectors-will-make-automating-video-indexer-simpler-than-ever/) or Azure Functions. + + - question: Do I need machine learning skills to use Azure Video Indexer? + answer: No, Azure Video Indexer provides the integration of multiple machine learning models into one pipeline. Indexing a video or audio file via Azure Video Indexer retrieves a full set of insights extracted on one shared timeline without any machine learning skills or knowledge on algorithms needed on the customer's part. + + - question: What media formats does Azure Video Indexer support? + answer: | + Azure Video Indexer supports most common media formats. Refer to the [Azure Media Encoder standard formats](/azure/media-services/latest/encode-media-encoder-standard-formats-reference) list for more details. + + - question: How do I upload a media file into Azure Video Indexer and what are the limitations? 
+ answer: | + In the Azure Video Indexer web-based portal, you can upload a media file using the file upload dialog or by pointing to a URL that directly hosts the source file (see [example](https://nimbuscdn-nimbuspm.streaming.mediaservices.windows.net/2b533311-b215-4409-80af-529c3e853622/Ignite-short.mp4)). Any URL that hosts the media content using an iFrame or embed code will not work (see [example](https://www.videoindexer.ai/accounts/7e1282e8-083c-46ab-8c20-84cae3dc289d/videos/5cfa29e152/?t=4.11)). + + For more information, please see this [how-to guide](./upload-index-videos.md). + + - question: How long does it take Azure Video Indexer to extract insights from media? + answer: | + The amount of time it takes to index a video or audio file, both using the Azure Video Indexer API and the Azure Video Indexer web-based interface, depends on multiple parameters such as the file length and quality, the number of insights found in the file, the number of [reserved units](/azure/media-services/previous/media-services-scale-media-processing-overview) available, and whether the [streaming endpoint](/azure/media-services/previous/media-services-streaming-endpoints-overview) is enabled or not. We recommend that you run a few test files with your own content and take an average to get a better idea. + + - question: Can I create customized workflows to automate processes with Azure Video Indexer? + answer: | + Yes, you can integrate Azure Video Indexer into serverless technologies like Logic Apps, Flow, and [Azure Functions](https://azure.microsoft.com/services/functions/). You can find more details on the [Logic App](https://azure.microsoft.com/services/logic-apps/) and [Flow](https://flow.microsoft.com/en-us/) connectors for Azure Video Indexer [here](https://azure.microsoft.com/blog/logic-apps-flow-connectors-will-make-automating-video-indexer-simpler-than-ever/). 
You can see some automation projects done by partners in the [Azure Video Indexer Samples](https://github.com/Azure-Samples/media-services-video-indexer) repo. + + - question: In which Azure regions is Azure Video Indexer available? + answer: | + You can see in which Azure regions Azure Video Indexer is available on the [regions](https://azure.microsoft.com/global-infrastructure/services/?products=cognitive-services&regions=all) page. + + - question: Can I customize Azure Video Indexer models for my specific use case? + answer: | + Yes. In Azure Video Indexer you can customize some of the available models to better fit your needs. + + For example, our Person model supports out-of-the-box 1,000,000 faces of celebrity recognition, but you can also train it to recognize other faces which are not in that database. + + For details, see articles about customizing [Person](customize-person-model-overview.md), [Brands](customize-brands-model-overview.md), and [Language](customize-language-model-overview.md) models. + + - question: Can I edit the videos in my library? + answer: | + Yes. Press the **edit video** button from the library display or the **open in editor** button from the player display to get to the **Projects** tab. You can create a new project and add more videos from your library to edit them together. Once you are done, you can render your video and download it. + + If you want to get insights on your new video, index it with Azure Video Indexer and it will appear in your library with its insights. + + - question: Can I index multiple audio streams or channels? + answer: If there are multiple audio streams, Azure Video Indexer takes the first one it encounters and will process only this stream. In any audio stream Azure Video Indexer processes, it takes the different channels (if present) and processes them together as mono. For streams/channels manipulation you can use ffmpeg commands on the file before indexing it.
+ + - question: Can a storage account connected to the Media Services account be behind a firewall? + answer: | + Your paid Azure Video Indexer account uses the specified Media Services account that is connected to a storage account. Currently, to use the connected storage account that is behind firewall, you need to contact Azure Video Indexer support and they will give the exact directions. + + To open a new support request on Azure portal, navigate to [support request](https://portal.azure.com/#blade/Microsoft_Azure_Support/HelpAndSupportBlade/newsupportrequest). + + - question: What is the SLA for Azure Video Indexer? + answer: | + Azure Media Service’s SLA covers Azure Video Indexer and can be found on the [SLA](https://azure.microsoft.com/support/legal/sla/azure/media-services/v1_2/) page. The SLA only applies to Azure Video Indexer paid accounts and does not apply to the free trial. + + - name: Privacy Questions + questions: + - question: Are video and audio files indexed by Azure Video Indexer stored? + answer: Yes, unless you delete the file from Azure Video Indexer, either using the Azure Video Indexer website or API, your video and audio files are stored. For the free trial, the video and audio files that you index are stored in the Azure region East US. Otherwise, your video and audio files are stored in the storage account of your Azure subscription. + + - question: Can I delete my files that are stored in Azure Video Indexer Portal? + answer: Yes, you can always delete your video and audio files as well as any metadata and insights extracted from them by Azure Video Indexer. Once you delete a file from Azure Video Indexer, the file and its metadata and insights are permanently removed from Azure Video Indexer. However, if you have implemented your own backup solution in Azure storage, the file remains in your Azure storage. + + - question: Can I control user access to my Azure Video Indexer account? 
+ answer: Yes, only account admins can invite and uninvite people to their accounts, as well as assign who has editing privileges and who has read-only access. + + - question: Who has access to my video and audio files that have been indexed and/or stored by Azure Video Indexer and the metadata and insights that were extracted? + answer: Your video or audio content that have public as its privacy setting can be accessed by anyone who has the link to your video or audio content and its insights. Your video or audio content that have private as its privacy setting can only be accessed by users that were invited to the account of the video or audio content. The privacy setting of your content also applies to the metadata and insights that Azure Video Indexer extracts. You assign the privacy setting when you upload your video or audio file. You can also change the privacy setting after indexing. + + - question: What access does Microsoft have to my video or audio files that have been indexed and/or stored by Azure Video Indexer and the metadata and insights that were extracted? + answer: | + Per the [Azure Online Services Terms](https://www.microsoftvolumelicensing.com/DocumentSearch.aspx?Mode=3&DocumentTypeId=31) (OST), you completely own your content, and Microsoft will only access your content and the metadata and insights that Azure Video Indexer extracts from your content according to the OST and the Microsoft Privacy Statement. + + - question: Are the custom models that I build in my Azure Video Indexer account available to other accounts? + answer: | + No, the custom models that you create in your account are not available to any other account. Azure Video Indexer currently allows you to build custom [brands](customize-brands-model-overview.md), [language](customize-language-model-overview.md), and [person](customize-person-model-overview.md) models in your account. These models are only available in the account in which you created the models. 
+ + - question: Is the content indexed by Video Indexer kept within the Azure region where I am using Video Indexer? + answer: | + Yes, the content and its insights are kept within the Azure region (except for Singapore and Brazil South regions) unless you have a manual configuration in your Azure subscription that uses multiple Azure regions. + + Customer data in a region is replicated for BCDR reasons to the [paired region](../availability-zones/cross-region-replication-azure.md#azure-cross-region-replication-pairings-for-all-geographies). + + - question: What is the privacy policy for Azure Video Indexer? + answer: | + Azure Video Indexer is covered by the [Microsoft Privacy Statement](https://privacy.microsoft.com/privacystatement). The privacy statement explains the personal data Microsoft processes, how Microsoft processes it, and for what purposes Microsoft processes it. To learn more about privacy, visit the [Microsoft Trust Center](https://www.microsoft.com/trustcenter). + + - question: What certifications does Azure Video Indexer have? + answer: | + Azure Video Indexer currently has the SOC certification. To review Azure Video Indexer's certification, please refer to the [Microsoft Trust Center](https://www.microsoft.com/trustcenter/compliance/complianceofferings?product=Azure). + + - question: What is the difference between private and public videos? + answer: | + When videos are uploaded to Azure Video Indexer, you can choose from two privacy settings: private and public. Public videos are accessible for anyone, including anonymous and unidentified users. Private ones are restricted solely to the account members. + + - question: I tried to upload a video as public and it was flagged for inappropriate or offensive content, what does that mean? + answer: | + When uploading a video to Azure Video Indexer, an automatic content analysis is done by algorithms and models in order to make sure no inappropriate content will be presented publicly. 
If a video is suspected of containing explicit content, it will not be possible to set it as public. However, the account members can still access it as a private video (view it, download the insights and extracted artifacts, and perform other operations available to account members). + + In order to set the video for public access, you can either: + + * Build your own interface layer (such as app or website) and use it to interact with the Azure Video Indexer service. This way the video remains private in our portal and your users can interact with it through your interface. For example, you can still get the insights or allow viewing of the video in your own interface. + * Request a human review of the content, which would result in removal of the restriction assuming the content is not explicit. + + This option can be explored if the Azure Video Indexer website is used directly by your users as the interface layer, and for public (unauthenticated) viewing. + + - name: API Questions + questions: + - question: What APIs does Azure Video Indexer offer? + answer: | + Azure Video Indexer's APIs allow for indexing, extracting metadata, asset management, translation, embedding, customization of models and more. To find more detailed information on using the Azure Video Indexer API, refer to the [Azure Video Indexer Developer Portal](https://api-portal.videoindexer.ai/). + + - question: What client SDKs does Azure Video Indexer offer? + answer: There are currently no client SDKs offered. The Azure Video Indexer team is working on the SDKs and plans to deliver them soon. + + - question: How do I get started with Azure Video Indexer's API? + answer: | + Follow [Tutorial: get started with the Azure Video Indexer API](video-indexer-use-apis.md). + + - question: What is the difference between the Azure Video Indexer API and the Azure Media Service v3 API?
+ answer: | + Currently there are some overlaps in features offered by the Azure Video Indexer API and the Azure Media Service v3 API. You can find more information on how to compare both services [here](compare-video-indexer-with-media-services-presets.md). + + - question: What is an API access token and why do I need it? + answer: | + The Azure Video Indexer API contains an Authorization API and an Operations API. The Authorizations API contains calls that give you access token. Each call to the Operations API should be associated with an access token, matching the authorization scope of the call. + + Access tokens are needed to use the Azure Video Indexer APIs for security purposes. This ensures that any calls are coming from you or those who have access permissions to your account.  + + - question: What is the difference between Account access token, User access token, and Video access token? + answer: | + * Account level – account level access tokens let you perform operations on the account level or the video level. For example, upload a video, list all videos, get video insights. + * User level - user level access tokens let you perform operations on the user level. For example, get associated accounts. + * Video level – video level access tokens let you perform operations on a specific video. For example, get video insights, download captions, get widgets, etc. + + - question: How often do I need to get a new access token? When do access tokens expire? + answer: Access tokens expire every hour, so you need to generate a new access token every hour. + + - question: What are the login options to Azure Video Indexer Developer portal? + answer: | + See a release note regarding [login information](release-notes.md#october-2020). + + Once you register your email account using an identity provider, you cannot use this email account with another identity provider. + + - name: Billing questions + questions: + - question: How much does Azure Video Indexer cost? 
+ answer: | + Azure Video Indexer uses a simple pay-as-you-go pricing model based on the duration of the content input that you index. Additional charges may apply for encoding, streaming, storage, network usage, and media reserved units. For more information, see the [pricing](https://azure.microsoft.com/pricing/details/cognitive-services/video-indexer/) page. + + - question: When am I billed for using Azure Video Indexer? + answer: When sending a video to be indexed, the user will define the indexing to be video analysis, audio analysis or both. This will determine which SKUs will be charged. If there is a critical level error during processing, an error code will be returned as a response. In such a case, no billing occurs. A critical error can be caused by a bug in our code or a critical failure in an internal dependency the service has. Errors such as wrong identification or insight extraction are not considered as critical and a response is returned. In any case where a valid (non-error code) response is returned, billing occurs. + + - question: Does Azure Video Indexer offer a free trial? + answer: Yes, Azure Video Indexer offers a free trial that gives full service and API functionality. There is a quota of 600 minutes worth of videos for web-based interface users and 2,400 minutes for API users. 
diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/includes/callback-url.md b/articles/azure-video-indexer/includes/callback-url.md similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/includes/callback-url.md rename to articles/azure-video-indexer/includes/callback-url.md diff --git a/articles/azure-video-indexer/includes/note-account-ms-uami-same-subscription-and-region.md b/articles/azure-video-indexer/includes/note-account-ms-uami-same-subscription-and-region.md new file mode 100644 index 0000000000000..35195300b2070 --- /dev/null +++ b/articles/azure-video-indexer/includes/note-account-ms-uami-same-subscription-and-region.md @@ -0,0 +1,10 @@ +--- +author: uratzmon +ms.topic: include +ms.date: 10/13/2021 +ms.author: uratzmon +ms.custom: ignite-fall-2021 +--- + +> [!NOTE] +> The associated user-assigned managed identity and the media service must be in the same region as the Azure Video Indexer account. diff --git a/articles/azure-video-indexer/includes/regulation.md b/articles/azure-video-indexer/includes/regulation.md new file mode 100644 index 0000000000000..fa72f05237b62 --- /dev/null +++ b/articles/azure-video-indexer/includes/regulation.md @@ -0,0 +1,9 @@ +--- +author: Juliako +ms.topic: include +ms.date: 04/15/2021 +ms.author: juliako +--- + +> [!Warning] +> On June 11, 2020, Microsoft announced that it will not sell facial recognition technology to police departments in the United States until strong regulation, grounded in human rights, has been enacted. As such, customers may not use facial recognition features or functionality included in Azure Services, such as Face or Azure Video Indexer (formerly Azure Video Analyzer for Media), if a customer is, or is allowing use of such services by or for, a police department in the United States.
diff --git a/articles/azure-video-indexer/index.yml b/articles/azure-video-indexer/index.yml new file mode 100644 index 0000000000000..dccf0a3e78733 --- /dev/null +++ b/articles/azure-video-indexer/index.yml @@ -0,0 +1,77 @@ +### YamlMime:Landing + +title: Learn about Azure Video Indexer (formerly Azure Video Analyzer for Media) +summary: Azure Video Indexer (formerly Azure Video Analyzer for Media) is a cloud application, part of Azure Applied AI Services, built on Azure Media Services and Azure Cognitive Services (such as the Face, Translator, Computer Vision, and Speech). It enables you to extract the insights from your videos using Azure Video Indexer video and audio models. + +metadata: + title: Azure Video Indexer (formerly Azure Video Analyzer for Media) documentation + description: Azure Video Indexer (formerly Azure Video Analyzer for Media) is a cloud application, part of Azure Applied AI Services, built on Azure Media Services and Azure Cognitive Services (such as the Face, Translator, Computer Vision, and Speech). It enables you to extract the insights from your videos using Azure Video Indexer video and audio models. + services: azure-video-indexer + ms.service: azure-video-indexer + ms.topic: landing-page # Required + ms.collection: collection + author: Juliako + ms.author: juliako + ms.date: 05/07/2021 #Required; mm/dd/yyyy format. + +# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | tutorial | video | whats-new + +landingContent: +# Cards and links should be based on top customer tasks or top subjects +# Start card title with a verb + # Card (optional) + - title: About Azure Video Indexer (formerly Azure Video Analyzer for Media) + linkLists: + - linkListType: overview + links: + - text: "What is Azure Video Indexer?" 
+ url: video-indexer-overview.md + - text: "Compare Media Services v3 presets and Azure Video Indexer" + url: compare-video-indexer-with-media-services-presets.md + - text: Frequently asked questions + url: faq.yml + - text: User voice + url: https://aka.ms/UserVoiceVI + - linkListType: whats-new + links: + - text: "What's new in Azure Video Indexer?" + url: release-notes.md + + # Card (optional) + - title: Get started + linkLists: + - linkListType: quickstart + links: + - text: Sign up and upload a video + url: video-indexer-get-started.md + - text: Invite users + url: invite-users.md + - linkListType: how-to-guide + links: + - text: Create an account connected to Azure + url: connect-to-azure.md + - text: Use Azure Video Indexer API + url: video-indexer-use-apis.md + - text: Upload and index your videos + url: upload-index-videos.md + - text: Examine output produced by API + url: video-indexer-output-json-v2.md + - text: Embed Azure Video Indexer widgets into apps + url: video-indexer-embed-widgets.md + - linkListType: sample + links: + - text: Code samples + url: https://github.com/Azure-Samples/media-services-video-indexer + +# Card + - title: Customize content models + linkLists: + - linkListType: how-to-guide + links: + - text: Customize a brands model + url: customize-brands-model-with-website.md + - text: Customize a language model + url: customize-language-model-with-website.md + - text: Customize a person model + url: customize-person-model-with-website.md + diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/invite-users.md b/articles/azure-video-indexer/invite-users.md similarity index 77% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/invite-users.md rename to articles/azure-video-indexer/invite-users.md index 13672c9d2c8ab..d812e03a718c1 100644 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/invite-users.md +++ b/articles/azure-video-indexer/invite-users.md @@ -1,14 +1,14 @@ --- -title: 
Invite users to Azure Video Analyzer for Media (former Video Analyzer for Media) - Azure -description: This article shows how to invite users to Azure Video Analyzer for Media (former Video Analyzer for Media). +title: Invite users to Azure Video Indexer (former Azure Video Indexer) - Azure +description: This article shows how to invite users to Azure Video Indexer (former Azure Video Indexer). ms.topic: quickstart ms.date: 09/14/2021 ms.custom: mode-other --- -# Quickstart: Invite users to Video Analyzer for Media +# Quickstart: Invite users to Azure Video Indexer -To collaborate with your colleagues, you can invite them to your Azure Video Analyzer for Media (formerly Video Indexer) account. +To collaborate with your colleagues, you can invite them to your Azure Video Indexer (formerly Azure Video Analyzer for Media) account. > [!NOTE] > Only the account’s admin can add or remove users.
                @@ -16,11 +16,11 @@ To collaborate with your colleagues, you can invite them to your Azure Video Ana ## Invite new users -1. Sign in on the [Video Analyzer for Media](https://www.videoindexer.ai/) website. Make sure you are connected with an admin account. +1. Sign in on the [Azure Video Indexer](https://www.videoindexer.ai/) website. Make sure you are connected with an admin account. 1. If you are the admin, you see the **Share account** button in the top-right corner. Click on the button and you can invite users. :::image type="content" source="./media/invite-users/share-account.png" alt-text="Share your account"::: -1. In the **Share this account with others** dialog, enter an email addresses of a person you want to invite to your Video Analyzer for Media account: +1. In the **Share this account with others** dialog, enter an email addresses of a person you want to invite to your Azure Video Indexer account: :::image type="content" source="./media/invite-users/share-account-others.png" alt-text="Invite users to this account"::: 1. After you press **Invite**, the person will be added to the list of pending invites.
                You can choose from two options for each invitee who didn't yet join the account: **remove invitation** or **copy invitation URL**. @@ -49,10 +49,10 @@ In addition to bringing up the **Share this account with others** dialog by clic ## Next steps -You can now use the [Video Analyzer for Media website](video-indexer-view-edit.md) or [Video Analyzer for Media Developer Portal](video-indexer-use-apis.md) to see the insights of the video. +You can now use the [Azure Video Indexer website](video-indexer-view-edit.md) or [Azure Video Indexer Developer Portal](video-indexer-use-apis.md) to see the insights of the video. ## See also -- [Video Analyzer for Media overview](video-indexer-overview.md) +- [Azure Video Indexer overview](video-indexer-overview.md) - [How to sign up and upload your first video](video-indexer-get-started.md) - [Start using APIs](video-indexer-use-apis.md) diff --git a/articles/azure-video-indexer/language-identification-model.md b/articles/azure-video-indexer/language-identification-model.md new file mode 100644 index 0000000000000..6a1b0d090e664 --- /dev/null +++ b/articles/azure-video-indexer/language-identification-model.md @@ -0,0 +1,59 @@ +--- +title: Use Azure Video Indexer (formerly Azure Video Analyzer for Media) to auto identify spoken languages - Azure +description: This article describes how the Azure Video Indexer (formerly Azure Video Analyzer for Media) language identification model is used to automatically identifying the spoken language in a video. +ms.topic: conceptual +ms.date: 04/12/2020 +ms.author: ellbe +--- + +# Automatically identify the spoken language with language identification model + +Azure Video Indexer (formerly Azure Video Analyzer for Media) supports automatic language identification (LID), which is the process of automatically identifying the spoken language content from audio and sending the media file to be transcribed in the dominant identified language. 
+ +Currently LID supports: English, Spanish, French, German, Italian, Mandarin Chinese, Japanese, Russian, and Portuguese (Brazilian). + +Make sure to review the [Guidelines and limitations](#guidelines-and-limitations) section below. + +## Choosing auto language identification on indexing + +When indexing or [re-indexing](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Re-Index-Video) a video using the API, choose the `auto detect` option in the `sourceLanguage` parameter. + +When using the portal, go to your **Account videos** on the [Azure Video Indexer](https://www.videoindexer.ai/) home page and hover over the name of the video that you want to re-index. On the right-bottom corner click the re-index button. In the **Re-index video** dialog, choose *Auto detect* from the **Video source language** drop-down box. + +![auto detect](./media/language-identification-model/auto-detect.png) + +## Model output + +Azure Video Indexer transcribes the video according to the most likely language if the confidence for that language is `> 0.6`. If the language cannot be identified with confidence, it assumes the spoken language is English. + +Model dominant language is available in the insights JSON as the `sourceLanguage` attribute (under root/videos/insights). A corresponding confidence score is also available under the `sourceLanguageConfidence` attribute. + +```json +"insights": { + "version": "1.0.0.0", + "duration": "0:05:30.902", + "sourceLanguage": "fr-FR", + "language": "fr-FR", + "transcript": [...], + . . . + "sourceLanguageConfidence": 0.8563 + }, +``` + +## Guidelines and limitations + +* Automatic language identification (LID) supports the following languages: + + English, Spanish, French, German, Italian, Mandarin Chinese, Japanese, Russian, and Portuguese (Brazilian). +* Even though Azure Video Indexer supports Arabic (Modern Standard and Levantine), Hindi, and Korean, these languages are not supported in LID.
+* If the audio contains languages other than the supported list above, the result is unexpected. +* If Azure Video Indexer cannot identify the language with a high enough confidence (`>0.6`), the fallback language is English. +* There is no current support for file with mixed languages audio. If the audio contains mixed languages, the result is unexpected. +* Low-quality audio may impact the model results. +* The model requires at least one minute of speech in the audio. +* The model is designed to recognize a spontaneous conversational speech (not voice commands, singing, etc.). + +## Next steps + +* [Overview](video-indexer-overview.md) +* [Automatically identify and transcribe multi-language content](multi-language-identification-transcription.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/language-support.md b/articles/azure-video-indexer/language-support.md similarity index 95% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/language-support.md rename to articles/azure-video-indexer/language-support.md index cd16885236237..6ea21fb945398 100644 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/language-support.md +++ b/articles/azure-video-indexer/language-support.md @@ -1,6 +1,6 @@ --- -title: Language support in Azure Video Analyzer for Media -description: This article provides a comprehensive list of language support by service features in Azure Video Analyzer for Media (formerly Video Indexer). +title: Language support in Azure Video Indexer +description: This article provides a comprehensive list of language support by service features in Azure Video Indexer (formerly Azure Video Analyzer for Media). 
author: Juliako manager: femila ms.topic: conceptual @@ -8,13 +8,13 @@ ms.author: juliako ms.date: 04/07/2022 --- -# Language support in Video Analyzer for Media +# Language support in Azure Video Indexer -This article provides a comprehensive list of language support by service features in Azure Video Analyzer for Media (formerly Video Indexer). For the list and definitions of all the features, see [Overview](video-indexer-overview.md). +This article provides a comprehensive list of language support by service features in Azure Video Indexer (formerly Azure Video Analyzer for Media). For the list and definitions of all the features, see [Overview](video-indexer-overview.md). ## General language support -This section describes language support in Video Analyzer for Media. +This section describes language support in Azure Video Indexer. - Transcription (source language of the video/audio file) - Language identification (LID) @@ -106,7 +106,7 @@ This section describes language support in Video Analyzer for Media. ## Language support in frontend experiences -The following table describes language support in the Video Analyzer for Media frontend experiences. +The following table describes language support in the Azure Video Indexer frontend experiences. 
* portal - the portal column lists supported languages for the [web portal](https://aka.ms/vi-portal-link) * widgets - the [widgets](video-indexer-embed-widgets.md) column lists supported languages for translating the index file diff --git a/articles/azure-video-indexer/live-stream-analysis.md b/articles/azure-video-indexer/live-stream-analysis.md new file mode 100644 index 0000000000000..e2185ed2b70d2 --- /dev/null +++ b/articles/azure-video-indexer/live-stream-analysis.md @@ -0,0 +1,34 @@ +--- +title: Live stream analysis using Azure Video Indexer (formerly Azure Video Analyzer for Media) +description: This article shows how to perform a live stream analysis using Azure Video Indexer (formerly Azure Video Analyzer for Media). +ms.topic: conceptual +ms.date: 11/13/2019 +--- + +# Live stream analysis with Azure Video Indexer + +Azure Video Indexer (formerly Azure Video Analyzer for Media) is an Azure service designed to extract deep insights from video and audio files offline. This is to analyze a given media file already created in advance. However, for some use cases it's important to get the media insights from a live feed as quick as possible to unlock operational and other use cases pressed in time. For example, such rich metadata on a live stream could be used by content producers to automate TV production. + +A solution described in this article, allows customers to use Azure Video Indexer in near real-time resolutions on live feeds. The delay in indexing can be as low as four minutes using this solution, depending on the chunks of data being indexed, the input resolution, the type of content and the compute powered used for this process. 
+ +![The Azure Video Indexer metadata on the live stream](./media/live-stream-analysis/live-stream-analysis01.png) + +*Figure 1 – Sample player displaying the Azure Video Indexer metadata on the live stream* + +The [stream analysis solution](https://aka.ms/livestreamanalysis) at hand, uses Azure Functions and two Logic Apps to process a live program from a live channel in Azure Media Services with Azure Video Indexer and displays the result with Azure Media Player showing the near real-time resulted stream. + +In high level, it is comprised of two main steps. The first step runs every 60 seconds, and takes a subclip of the last 60 seconds played, creates an asset from it and indexes it via Azure Video Indexer. Then the second step is called once indexing is complete. The insights captured are processed, sent to Azure Cosmos DB, and the subclip indexed is deleted. + +The sample player plays the live stream and gets the insights from Azure Cosmos DB, using a dedicated Azure Function. It displays the metadata and thumbnails in sync with the live video. + +![The two logic apps processing the live stream every minute in the cloud](./media/live-stream-analysis/live-stream-analysis02.png) + +*Figure 2 – The two logic apps processing the live stream every minute in the cloud.* + +## Step-by-step guide + +The full code and a step-by-step guide to deploy the results can be found in [GitHub project for Live media analytics with Azure Video Indexer](https://aka.ms/livestreamanalysis). 
+ +## Next steps + +[Azure Video Indexer overview](video-indexer-overview.md) diff --git a/articles/azure-video-indexer/logic-apps-connector-tutorial.md b/articles/azure-video-indexer/logic-apps-connector-tutorial.md new file mode 100644 index 0000000000000..782cb05c56244 --- /dev/null +++ b/articles/azure-video-indexer/logic-apps-connector-tutorial.md @@ -0,0 +1,131 @@ +--- +title: The Azure Video Indexer (formerly Azure Video Analyzer for Media) connectors with Logic App and Power Automate tutorial. +description: This tutorial shows how to unlock new experiences and monetization opportunities Azure Video Indexer (formerly Azure Video Analyzer for Media) connectors with Logic App and Power Automate. +ms.author: alzam +ms.topic: tutorial #Required +ms.date: 09/21/2020 +--- + +# Tutorial: use Azure Video Indexer with Logic App and Power Automate + +Azure Video Indexer (formerly Azure Video Analyzer for Media) [REST API](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Delete-Video) supports both server-to-server and client-to-server communication and enables Azure Video Indexer users to integrate video and audio insights easily into their application logic, unlocking new experiences and monetization opportunities. + +To make the integration even easier, we support [Logic Apps](https://azure.microsoft.com/services/logic-apps/) and [Power Automate](https://preview.flow.microsoft.com/connectors/shared_videoindexer-v2/video-indexer-v2/) connectors that are compatible with our API. You can use the connectors to set up custom workflows to effectively index and extract insights from a large amount of video and audio files, without writing a single line of code. Furthermore, using the connectors for your integration gives you better visibility on the health of your workflow and an easy way to debug it.  
+ +To help you get started quickly with the Azure Video Indexer connectors, we will do a walkthrough of an example Logic App and Power Automate solution you can set up. This tutorial shows how to set up flows using Logic Apps. However, the editors and capabilities are almost identical in both solutions, thus the diagrams and explanations are applicable to both Logic Apps and Power Automate. + +The "upload and index your video automatically" scenario covered in this tutorial consists of two different flows that work together. +* The first flow is triggered when a blob is added or modified in an Azure Storage account. It uploads the new file to Azure Video Indexer with a callback URL to send a notification once the indexing operation completes. +* The second flow is triggered based on the callback URL and saves the extracted insights back to a JSON file in Azure Storage. This two-flow approach is used to support async upload and indexing of larger files effectively. + +This tutorial uses Logic Apps to show how to: + +> [!div class="checklist"] +> * Set up the file upload flow +> * Set up the JSON extraction flow + +[!INCLUDE [quickstarts-free-trial-note](../../includes/quickstarts-free-trial-note.md)] + +## Prerequisites + +* To begin with, you will need an Azure Video Indexer account along with [access to the APIs via API key](video-indexer-use-apis.md). +* You will also need an Azure Storage account. Keep note of the access key for your Storage account. Create two containers – one to store videos in and one to store insights generated by Azure Video Indexer in. +* Next, you will need to open two separate flows on either Logic Apps or Power Automate (depending on which you are using). + +## Set up the first flow - file upload + +The first flow is triggered whenever a blob is added in your Azure Storage container. Once triggered, it will create a SAS URI that you can use to upload and index the video in Azure Video Indexer. 
In this section you will create the following flow. + +![File upload flow](./media/logic-apps-connector-tutorial/file-upload-flow.png) + +To set up the first flow, you will need to provide your Azure Video Indexer API Key and Azure Storage credentials. + +![Azure blob storage](./media/logic-apps-connector-tutorial/azure-blob-storage.png) + +![Connection name and API key](./media/logic-apps-connector-tutorial/connection-name-api-key.png) + +> [!TIP] +> If you previously connected an Azure Storage account or Azure Video Indexer account to a Logic App, your connection details are stored and you will be connected automatically.
                You can edit the connection by clicking on **Change connection** at the bottom of an Azure Storage (the storage window) or Azure Video Indexer (the player window) action. + +Once you can connect to your Azure Storage and Azure Video Indexer accounts, find and choose the "When a blob is added or modified" trigger in **Logic Apps Designer**. + +Select the container that you will place your video files in. + +![Screenshot shows the When a blob is added or modified dialog box where you can select a container.](./media/logic-apps-connector-tutorial/container.png) + +Next, find and select the "Create SAS URI by path” action. In the dialog for the action, select List of Files Path from the Dynamic content options. + +Also, add a new "Shared Access Protocol" parameter. Choose HttpsOnly for the value of the parameter. + +![SAS uri by path](./media/logic-apps-connector-tutorial/sas-uri-by-path.jpg) + +Fill out [your account location](regions.md) and [account ID](./video-indexer-use-apis.md#account-id) to get the Azure Video Indexer account token. + +![Get account access token](./media/logic-apps-connector-tutorial/account-access-token.png) + +For “Upload video and index”, fill out the required parameters and Video URL. Select “Add new parameter” and select Callback URL. + +![Upload and index](./media/logic-apps-connector-tutorial/upload-and-index.png) + +You will leave the callback URL empty for now. You’ll add it only after finishing the second flow where the callback URL is created. + +You can use the default value for the other parameters or set them according to your needs. + +Click **Save**, and let’s move on to configure the second flow, to extract the insights once the upload and indexing is completed. + +## Set up the second flow - JSON extraction + +The completion of the uploading and indexing from the first flow will send an HTTP request with the correct callback URL to trigger the second flow. 
Then, it will retrieve the insights generated by Azure Video Indexer. In this example, it will store the output of your indexing job in your Azure Storage. However, what you do with the output is up to you. + +Create the second flow separate from the first one. + +![JSON extraction flow](./media/logic-apps-connector-tutorial/json-extraction-flow.png) + +To set up this flow, you will need to provide your Azure Video Indexer API Key and Azure Storage credentials again. You will need to update the same parameters as you did for the first flow. + +For your trigger, you will see an HTTP POST URL field. The URL won’t be generated until after you save your flow; however, you will need the URL eventually. We will come back to this. + +Fill out [your account location](regions.md) and [account ID](./video-indexer-use-apis.md#account-id) to get the Azure Video Indexer account token. + +Go to the “Get Video Index” action and fill out the required parameters. For Video ID, put in the following expression: triggerOutputs()['queries']['id'] + +![Azure Video Indexer action info](./media/logic-apps-connector-tutorial/video-indexer-action-info.jpg) + +This expression tells the connector to get the Video ID from the output of your trigger. In this case, the output of your trigger will be the output of “Upload video and index” in your first trigger. + +Go to the “Create blob” action and select the path to the folder in which you will save the insights. Set the name of the blob you are creating. For Blob content, put in the following expression: body(‘Get_Video_Index’) + +![Create blob action](./media/logic-apps-connector-tutorial/create-blob-action.jpg) + +This expression takes the output of the “Get Video Index” action from this flow. + +Click **Save flow**. + +Once the flow is saved, an HTTP POST URL is created in the trigger. Copy the URL from the trigger. 
+ +![Save URL trigger](./media/logic-apps-connector-tutorial/save-url-trigger.png) + +Now, go back to the first flow and paste the URL in the "Upload video and index" action for the Callback URL parameter. + +Make sure both flows are saved, and you’re good to go! + +Try out your newly created Logic App or Power Automate solution by adding a video to your Azure blobs container, and go back a few minutes later to see that the insights appear in the destination folder. + +## Generate captions + +See the following blog for the steps that show [how to generate captions with Azure Video Indexer and Logic Apps](https://techcommunity.microsoft.com/t5/azure-media-services/generating-captions-with-video-indexer-and-logic-apps/ba-p/1672198). + +The article also shows how to index a video automatically by copying it to OneDrive and how to store the captions generated by Azure Video Indexer in OneDrive. + +## Clean up resources + +After you are done with this tutorial, feel free to keep this Logic App or Power Automate solution up and running if you need. However, if you do not want to keep this running and do not want to be billed, Turn Off both of your flows if you’re using Power Automate. Disable both of the flows if you’re using Logic Apps. + +## Next steps + +This tutorial showed just one Azure Video Indexer connectors example. You can use the Azure Video Indexer connectors for any API call provided by Azure Video Indexer. For example: upload and retrieve insights, translate the results, get embeddable widgets and even customize your models. Additionally, you can choose to trigger those actions based on different sources like updates to file repositories or emails sent. You can then choose to have the results update to our relevant infrastructure or application or generate any number of action items. 
+ +> [!div class="nextstepaction"] +> [Use the Azure Video Indexer API](video-indexer-use-apis.md) + +For additional resources, refer to [Azure Video Indexer](/connectors/videoindexer-v2/) diff --git a/articles/azure-video-indexer/manage-account-connected-to-azure.md b/articles/azure-video-indexer/manage-account-connected-to-azure.md new file mode 100644 index 0000000000000..1f43756f08b81 --- /dev/null +++ b/articles/azure-video-indexer/manage-account-connected-to-azure.md @@ -0,0 +1,73 @@ +--- +title: Manage an Azure Video Indexer (formerly Azure Video Analyzer for Media) account +description: Learn how to manage an Azure Video Indexer (formerly Azure Video Analyzer for Media) account connected to Azure. +ms.topic: how-to +ms.date: 01/14/2021 +ms.author: juliako +--- + +# Manage an Azure Video Indexer account connected to Azure + +This article demonstrates how to manage an Azure Video Indexer (formerly Azure Video Analyzer for Media) account that's connected to your Azure subscription and an Azure Media Services account. + +> [!NOTE] +> You have to be the Azure Video Indexer account owner to do account configuration adjustments discussed in this topic. + +## Prerequisites + +Connect your Azure Video Indexer account to Azure, as described in [Connected to Azure](connect-to-azure.md). + +Make sure to follow [Prerequisites](connect-to-azure.md#prerequisites-for-connecting-to-azure) and review [Considerations](connect-to-azure.md#azure-media-services-considerations) in the article. + +## Examine account settings + +This section examines settings of your Azure Video Indexer account. + +To view settings: + +1. Click on the user icon in the top-right corner and select **Settings**. + + ![Settings in Azure Video Indexer](./media/manage-account-connected-to-azure/select-settings.png) + +2. On the **Settings** page, select the **Account** tab. 
+ +If your Azure Video Indexer account is connected to Azure, you see the following things: + +* The name of the underlying Azure Media Services account. +* The number of indexing jobs running and queued. +* The number and type of allocated reserved units. + +If your account needs some adjustments, you'll see relevant errors and warnings about your account configuration on the **Settings** page. The messages contain links to the exact places in the Azure portal where you need to make changes. For more information, see the [errors and warnings](#errors-and-warnings) section that follows. + +## Repair the connection to Azure + +In the **Update connection to Azure Media Services** dialog of your [Azure Video Indexer](https://www.videoindexer.ai/) page, you're asked to provide values for the following settings: + +|Setting|Description| +|---|---| +|Azure subscription ID|The subscription ID can be retrieved from the Azure portal. Click on **All services** in the left panel and search for "subscriptions". Select **Subscriptions** and choose the desired ID from the list of your subscriptions.| +|Azure Media Services resource group name|The name for the resource group in which you created the Media Services account.| +|Application ID|The Azure AD application ID (with permissions for the specified Media Services account) that you created for this Azure Video Indexer account.

                To get the app ID, navigate to Azure portal. Under the Media Services account, choose your account and go to **API Access**. Select **Connect to Media Services API with service principal** -> **Azure AD App**. Copy the relevant parameters.| +|Application key|The Azure AD application key associated with your Media Services account that you specified above.

                To get the app key, navigate to Azure portal. Under the Media Services account, choose your account and go to **API Access**. Select **Connect to Media Services API with service principal** -> **Manage application** -> **Certificates & secrets**. Copy the relevant parameters.| + +## Errors and warnings + +If your account needs some adjustments, you see relevant errors and warnings about your account configuration on the **Settings** page. The messages contain links to exact places in Azure portal where you need to make changes. This section gives more details about the error and warning messages. + +* Event Grid + + You have to register the Event Grid resource provider using the Azure portal. In the [Azure portal](https://portal.azure.com/), go to **Subscriptions** > [subscription] > **ResourceProviders** > **Microsoft.EventGrid**. If not in the **Registered** state, select **Register**. It takes a couple of minutes to register. + +* Streaming endpoint + + Make sure the underlying Media Services account has the default **Streaming Endpoint** in a started state. Otherwise, you can't watch videos from this Media Services account or in Azure Video Indexer. + +* Media reserved units + + You must allocate Media Reserved Units on your Media Service resource in order to index videos. For optimal indexing performance, it's recommended to allocate at least 10 S3 Reserved Units. For pricing information, see the FAQ section of the [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/) page. + +## Next steps + +You can programmatically interact with your trial account or Azure Video Indexer accounts that are connected to Azure by following the instructions in: [Use APIs](video-indexer-use-apis.md). + +Use the same Azure AD user you used when connecting to Azure. 
diff --git a/articles/azure-video-indexer/manage-multiple-tenants.md b/articles/azure-video-indexer/manage-multiple-tenants.md new file mode 100644 index 0000000000000..05014a0b1b6d0 --- /dev/null +++ b/articles/azure-video-indexer/manage-multiple-tenants.md @@ -0,0 +1,68 @@ +--- +title: Manage multiple tenants with Azure Video Indexer (formerly Azure Video Analyzer for Media) - Azure +description: This article suggests different integration options for managing multiple tenants with Azure Video Indexer (formerly Azure Video Analyzer for Media). +ms.topic: conceptual +ms.date: 05/15/2019 +ms.author: ikbarmen +--- + +# Manage multiple tenants + +This article discusses different options for managing multiple tenants with Azure Video Indexer (formerly Azure Video Analyzer for Media). Choose a method that is most suitable for your scenario: + +* Azure Video Indexer account per tenant +* Single Azure Video Indexer account for all tenants +* Azure subscription per tenant + +## Azure Video Indexer account per tenant + +When using this architecture, an Azure Video Indexer account is created for each tenant. The tenants have full isolation in the persistent and compute layer. + +![Azure Video Indexer account per tenant](./media/manage-multiple-tenants/video-indexer-account-per-tenant.png) + +### Considerations + +* Customers don't share storage accounts (unless manually configured by the customer). +* Customers don't share compute (reserved units) and don't impact processing jobs times of one another. +* You can easily remove a tenant from the system by deleting the Azure Video Indexer account. +* There's no ability to share custom models between tenants. + + Make sure there's no business requirement to share custom models. +* Harder to manage due to multiple Azure Video Indexer (and associated Media Services) accounts per tenant. 
+ +> [!TIP] +> Create an admin user for your system in [Video Indexer Developer Portal](https://api-portal.videoindexer.ai/) and use the Authorization API to provide your tenants the relevant [account access token](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Account-Access-Token). + +## Single Azure Video Indexer account for all users + +When using this architecture, the customer is responsible for tenants isolation. All tenants have to use a single Azure Video Indexer account with a single Azure Media Service account. When uploading, searching, or deleting content, the customer will need to filter the proper results for that tenant. + +![Single Azure Video Indexer account for all users](./media/manage-multiple-tenants/single-video-indexer-account-for-all-users.png) + +With this option, customization models (Person, Language, and Brands) can be shared or isolated between tenants by filtering the models by tenant. + +When [uploading videos](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Upload-Video), you can specify a different partition attribute per tenant. This will allow isolation in the [search API](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Search-Videos). By specifying the partition attribute in the search API you'll only get results of the specified partition. + +### Considerations + +* Ability to share content and customization models between tenants. +* One tenant impacts the performance of other tenants. +* Customer needs to build a complex management layer on top of Azure Video Indexer. + +> [!TIP] +> You can use the [priority](upload-index-videos.md) attribute to prioritize tenants jobs. + +## Azure subscription per tenant + +When using this architecture, each tenant will have their own Azure subscription. For each user, you'll create a new Azure Video Indexer account in the tenant subscription. 
+ +![Azure subscription per tenant](./media/manage-multiple-tenants/azure-subscription-per-tenant.png) + +### Considerations + +* This is the only option that enables billing separation. +* This integration has more management overhead than Azure Video Indexer account per tenant. If billing isn't a requirement, it's recommended to use one of the other options described in this article. + +## Next steps + +[Overview](video-indexer-overview.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/matched-person.md b/articles/azure-video-indexer/matched-person.md similarity index 79% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/matched-person.md rename to articles/azure-video-indexer/matched-person.md index fd00362466926..7cf0496531526 100644 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/matched-person.md +++ b/articles/azure-video-indexer/matched-person.md @@ -8,7 +8,7 @@ ms.author: juliako # Matched person (preview) -Azure Video Analyzer for Media matches observed people that were detected in the video with the corresponding faces ("People" insight). To produce the matching algorithm, the bounding boxes for both the faces and the observed people are assigned spatially along the video. The API returns the confidence level of each matching. +Azure Video Indexer matches observed people that were detected in the video with the corresponding faces ("People" insight). To produce the matching algorithm, the bounding boxes for both the faces and the observed people are assigned spatially along the video. The API returns the confidence level of each matching. 
The following are some scenarios that benefit from this feature: @@ -25,20 +25,20 @@ The **Matched person** feature is available when indexing your file by choosing > [!div class="mx-imgBorder"] > :::image type="content" source="./media/matched-person/index-matched-person-feature.png" alt-text="Advanced video or Advanced video + audio preset"::: -To view the Matched person on the [Video Analyzer for Media](https://www.videoindexer.ai/) website, go to **View** -> **Show Insights** -> select the **All** option or **View** -> **Custom View** -> **Mapped Faces**. +To view the Matched person on the [Azure Video Indexer](https://www.videoindexer.ai/) website, go to **View** -> **Show Insights** -> select the **All** option or **View** -> **Custom View** -> **Mapped Faces**. -When you choose to see insights of your video on the [Video Analyzer for Media](https://www.videoindexer.ai/) website, the matched person could be viewed from the **Observed People tracing** insight. When choosing a thumbnail of a person the matched person became available. +When you choose to see insights of your video on the [Azure Video Indexer](https://www.videoindexer.ai/) website, the matched person could be viewed from the **Observed People tracing** insight. When choosing a thumbnail of a person the matched person became available. > [!div class="mx-imgBorder"] > :::image type="content" source="./media/matched-person/from-observed-people.png" alt-text="View matched people from the Observed People insight"::: If you would like to view people's detected clothing in the **Timeline** of your video on the [Video Indexer website](https://www.videoindexer.ai/), go to **View** -> **Show Insights** and select the **All option** or **View** -> **Custom View** -> **Observed People**. -Searching for a specific person by name, returning all the appearances of the specific person is enables using the search bar of the Insights of your video on the Video Analyzer for Media. 
+Searching for a specific person by name, returning all the appearances of the specific person is enables using the search bar of the Insights of your video on the Azure Video Indexer. ## JSON code sample -The following JSON response illustrates what Video Analyzer for Media returns when tracing observed people having Mapped person associated: +The following JSON response illustrates what Azure Video Indexer returns when tracing observed people having Mapped person associated: ```json "observedPeople": [ diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/animated-characters-recognition/content-model-customization-tab.png b/articles/azure-video-indexer/media/animated-characters-recognition/content-model-customization-tab.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/animated-characters-recognition/content-model-customization-tab.png rename to articles/azure-video-indexer/media/animated-characters-recognition/content-model-customization-tab.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/animated-characters-recognition/content-model-customization.png b/articles/azure-video-indexer/media/animated-characters-recognition/content-model-customization.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/animated-characters-recognition/content-model-customization.png rename to articles/azure-video-indexer/media/animated-characters-recognition/content-model-customization.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/animated-characters-recognition/flow.png b/articles/azure-video-indexer/media/animated-characters-recognition/flow.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/animated-characters-recognition/flow.png rename to articles/azure-video-indexer/media/animated-characters-recognition/flow.png diff --git 
a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/audio-effects-detection/audio-effects.jpg b/articles/azure-video-indexer/media/audio-effects-detection/audio-effects.jpg similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/audio-effects-detection/audio-effects.jpg rename to articles/azure-video-indexer/media/audio-effects-detection/audio-effects.jpg diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/audio-effects-detection/close-caption.jpg b/articles/azure-video-indexer/media/audio-effects-detection/close-caption.jpg similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/audio-effects-detection/close-caption.jpg rename to articles/azure-video-indexer/media/audio-effects-detection/close-caption.jpg diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/audio-effects-detection/index-audio-effect.png b/articles/azure-video-indexer/media/audio-effects-detection/index-audio-effect.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/audio-effects-detection/index-audio-effect.png rename to articles/azure-video-indexer/media/audio-effects-detection/index-audio-effect.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/banner/banner.png b/articles/azure-video-indexer/media/banner/banner.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/banner/banner.png rename to articles/azure-video-indexer/media/banner/banner.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/banner/banner.svg b/articles/azure-video-indexer/media/banner/banner.svg similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/banner/banner.svg rename to articles/azure-video-indexer/media/banner/banner.svg diff --git 
a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/connect-classic-account-to-arm/connect-blade-new.png b/articles/azure-video-indexer/media/connect-classic-account-to-arm/connect-blade-new.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/connect-classic-account-to-arm/connect-blade-new.png rename to articles/azure-video-indexer/media/connect-classic-account-to-arm/connect-blade-new.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/connect-classic-account-to-arm/connect-blade.png b/articles/azure-video-indexer/media/connect-classic-account-to-arm/connect-blade.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/connect-classic-account-to-arm/connect-blade.png rename to articles/azure-video-indexer/media/connect-classic-account-to-arm/connect-blade.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/connect-classic-account-to-arm/connect-button.png b/articles/azure-video-indexer/media/connect-classic-account-to-arm/connect-button.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/connect-classic-account-to-arm/connect-button.png rename to articles/azure-video-indexer/media/connect-classic-account-to-arm/connect-button.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/connect-classic-account-to-arm/user-account-settings.png b/articles/azure-video-indexer/media/connect-classic-account-to-arm/user-account-settings.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/connect-classic-account-to-arm/user-account-settings.png rename to articles/azure-video-indexer/media/connect-classic-account-to-arm/user-account-settings.png diff --git 
a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/considerations-when-use-at-scale/first-consideration.png b/articles/azure-video-indexer/media/considerations-when-use-at-scale/first-consideration.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/considerations-when-use-at-scale/first-consideration.png rename to articles/azure-video-indexer/media/considerations-when-use-at-scale/first-consideration.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/considerations-when-use-at-scale/reserved-units.jpg b/articles/azure-video-indexer/media/considerations-when-use-at-scale/reserved-units.jpg similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/considerations-when-use-at-scale/reserved-units.jpg rename to articles/azure-video-indexer/media/considerations-when-use-at-scale/reserved-units.jpg diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/considerations-when-use-at-scale/respect-throttling.jpg b/articles/azure-video-indexer/media/considerations-when-use-at-scale/respect-throttling.jpg similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/considerations-when-use-at-scale/respect-throttling.jpg rename to articles/azure-video-indexer/media/considerations-when-use-at-scale/respect-throttling.jpg diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/considerations-when-use-at-scale/second-consideration.jpg b/articles/azure-video-indexer/media/considerations-when-use-at-scale/second-consideration.jpg similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/considerations-when-use-at-scale/second-consideration.jpg rename to articles/azure-video-indexer/media/considerations-when-use-at-scale/second-consideration.jpg diff --git 
a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/content-model-customization/brands-overview.png b/articles/azure-video-indexer/media/content-model-customization/brands-overview.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/content-model-customization/brands-overview.png rename to articles/azure-video-indexer/media/content-model-customization/brands-overview.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/content-model-customization/content-model-customization.png b/articles/azure-video-indexer/media/content-model-customization/content-model-customization.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/content-model-customization/content-model-customization.png rename to articles/azure-video-indexer/media/content-model-customization/content-model-customization.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/access-control-iam.png b/articles/azure-video-indexer/media/create-account/access-control-iam.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/access-control-iam.png rename to articles/azure-video-indexer/media/create-account/access-control-iam.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/all-aad-users.png b/articles/azure-video-indexer/media/create-account/all-aad-users.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/all-aad-users.png rename to articles/azure-video-indexer/media/create-account/all-aad-users.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/ams-reserved-units.png b/articles/azure-video-indexer/media/create-account/ams-reserved-units.png similarity index 100% rename from 
articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/ams-reserved-units.png rename to articles/azure-video-indexer/media/create-account/ams-reserved-units.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/ams-streaming-endpoint.png b/articles/azure-video-indexer/media/create-account/ams-streaming-endpoint.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/ams-streaming-endpoint.png rename to articles/azure-video-indexer/media/create-account/ams-streaming-endpoint.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/connect-to-azure.png b/articles/azure-video-indexer/media/create-account/connect-to-azure.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/connect-to-azure.png rename to articles/azure-video-indexer/media/create-account/connect-to-azure.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/connect-vi-to-azure-subscription.png b/articles/azure-video-indexer/media/create-account/connect-vi-to-azure-subscription.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/connect-vi-to-azure-subscription.png rename to articles/azure-video-indexer/media/create-account/connect-vi-to-azure-subscription.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/create-ams-account-se.png b/articles/azure-video-indexer/media/create-account/create-ams-account-se.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/create-ams-account-se.png rename to articles/azure-video-indexer/media/create-account/create-ams-account-se.png diff --git 
a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/create-new-ams-account.png b/articles/azure-video-indexer/media/create-account/create-new-ams-account.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/create-new-ams-account.png rename to articles/azure-video-indexer/media/create-account/create-new-ams-account.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/create-unlimited-account.png b/articles/azure-video-indexer/media/create-account/create-unlimited-account.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/create-unlimited-account.png rename to articles/azure-video-indexer/media/create-account/create-unlimited-account.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/enable-classic-api.png b/articles/azure-video-indexer/media/create-account/enable-classic-api.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/enable-classic-api.png rename to articles/azure-video-indexer/media/create-account/enable-classic-api.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/event-grid.png b/articles/azure-video-indexer/media/create-account/event-grid.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/event-grid.png rename to articles/azure-video-indexer/media/create-account/event-grid.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/import-steps.png b/articles/azure-video-indexer/media/create-account/import-steps.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/import-steps.png rename to 
articles/azure-video-indexer/media/create-account/import-steps.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/new-account-on-azure-subscription.png b/articles/azure-video-indexer/media/create-account/new-account-on-azure-subscription.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/new-account-on-azure-subscription.png rename to articles/azure-video-indexer/media/create-account/new-account-on-azure-subscription.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/new-account.png b/articles/azure-video-indexer/media/create-account/new-account.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-account/new-account.png rename to articles/azure-video-indexer/media/create-account/new-account.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-video-analyzer-for-media-account/DocsAddRole.png b/articles/azure-video-indexer/media/create-video-analyzer-for-media-account/DocsAddRole.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-video-analyzer-for-media-account/DocsAddRole.png rename to articles/azure-video-indexer/media/create-video-analyzer-for-media-account/DocsAddRole.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-video-analyzer-for-media-account/create-account-blade.png b/articles/azure-video-indexer/media/create-video-analyzer-for-media-account/create-account-blade.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-video-analyzer-for-media-account/create-account-blade.png rename to articles/azure-video-indexer/media/create-video-analyzer-for-media-account/create-account-blade.png diff --git 
a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-video-analyzer-for-media-account/generate-access-token.png b/articles/azure-video-indexer/media/create-video-analyzer-for-media-account/generate-access-token.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-video-analyzer-for-media-account/generate-access-token.png rename to articles/azure-video-indexer/media/create-video-analyzer-for-media-account/generate-access-token.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-video-analyzer-for-media-account/overview-screenshot.png b/articles/azure-video-indexer/media/create-video-analyzer-for-media-account/overview-screenshot.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-video-analyzer-for-media-account/overview-screenshot.png rename to articles/azure-video-indexer/media/create-video-analyzer-for-media-account/overview-screenshot.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-video-analyzer-for-media-account/search-bar1.png b/articles/azure-video-indexer/media/create-video-analyzer-for-media-account/search-bar1.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/create-video-analyzer-for-media-account/search-bar1.png rename to articles/azure-video-indexer/media/create-video-analyzer-for-media-account/search-bar1.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-brand-model/add-brand.png b/articles/azure-video-indexer/media/customize-brand-model/add-brand.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-brand-model/add-brand.png rename to articles/azure-video-indexer/media/customize-brand-model/add-brand.png diff --git 
a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-brand-model/customize-brand-model.png b/articles/azure-video-indexer/media/customize-brand-model/customize-brand-model.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-brand-model/customize-brand-model.png rename to articles/azure-video-indexer/media/customize-brand-model/customize-brand-model.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/add-new-face.png b/articles/azure-video-indexer/media/customize-face-model/add-new-face.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/add-new-face.png rename to articles/azure-video-indexer/media/customize-face-model/add-new-face.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/add-new-person.png b/articles/azure-video-indexer/media/customize-face-model/add-new-person.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/add-new-person.png rename to articles/azure-video-indexer/media/customize-face-model/add-new-person.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/content-model-customization-people-tab.png b/articles/azure-video-indexer/media/customize-face-model/content-model-customization-people-tab.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/content-model-customization-people-tab.png rename to articles/azure-video-indexer/media/customize-face-model/content-model-customization-people-tab.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/content-model-customization.png 
b/articles/azure-video-indexer/media/customize-face-model/content-model-customization.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/content-model-customization.png rename to articles/azure-video-indexer/media/customize-face-model/content-model-customization.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/delete-face.png b/articles/azure-video-indexer/media/customize-face-model/delete-face.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/delete-face.png rename to articles/azure-video-indexer/media/customize-face-model/delete-face.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/delete-person.png b/articles/azure-video-indexer/media/customize-face-model/delete-person.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/delete-person.png rename to articles/azure-video-indexer/media/customize-face-model/delete-person.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/edit-face.png b/articles/azure-video-indexer/media/customize-face-model/edit-face.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/edit-face.png rename to articles/azure-video-indexer/media/customize-face-model/edit-face.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/manage-people.png b/articles/azure-video-indexer/media/customize-face-model/manage-people.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/manage-people.png rename to articles/azure-video-indexer/media/customize-face-model/manage-people.png diff --git 
a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/reindex.png b/articles/azure-video-indexer/media/customize-face-model/reindex.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/reindex.png rename to articles/azure-video-indexer/media/customize-face-model/reindex.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/rename-person.png b/articles/azure-video-indexer/media/customize-face-model/rename-person.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/rename-person.png rename to articles/azure-video-indexer/media/customize-face-model/rename-person.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/upload.png b/articles/azure-video-indexer/media/customize-face-model/upload.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-face-model/upload.png rename to articles/azure-video-indexer/media/customize-face-model/upload.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/customize-language-model.png b/articles/azure-video-indexer/media/customize-language-model/customize-language-model.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/customize-language-model.png rename to articles/azure-video-indexer/media/customize-language-model/customize-language-model.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/customize.png b/articles/azure-video-indexer/media/customize-language-model/customize.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/customize.png 
rename to articles/azure-video-indexer/media/customize-language-model/customize.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/edits.png b/articles/azure-video-indexer/media/customize-language-model/edits.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/edits.png rename to articles/azure-video-indexer/media/customize-language-model/edits.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/from-transcript-edits.png b/articles/azure-video-indexer/media/customize-language-model/from-transcript-edits.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/from-transcript-edits.png rename to articles/azure-video-indexer/media/customize-language-model/from-transcript-edits.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/model-customization.png b/articles/azure-video-indexer/media/customize-language-model/model-customization.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/model-customization.png rename to articles/azure-video-indexer/media/customize-language-model/model-customization.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/preview-model.png b/articles/azure-video-indexer/media/customize-language-model/preview-model.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/preview-model.png rename to articles/azure-video-indexer/media/customize-language-model/preview-model.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/reindex.png 
b/articles/azure-video-indexer/media/customize-language-model/reindex.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/reindex.png rename to articles/azure-video-indexer/media/customize-language-model/reindex.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/timeline.png b/articles/azure-video-indexer/media/customize-language-model/timeline.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/timeline.png rename to articles/azure-video-indexer/media/customize-language-model/timeline.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/train-model.png b/articles/azure-video-indexer/media/customize-language-model/train-model.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/train-model.png rename to articles/azure-video-indexer/media/customize-language-model/train-model.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/upload.png b/articles/azure-video-indexer/media/customize-language-model/upload.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/customize-language-model/upload.png rename to articles/azure-video-indexer/media/customize-language-model/upload.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/detected-clothing/index-video.png b/articles/azure-video-indexer/media/detected-clothing/index-video.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/detected-clothing/index-video.png rename to articles/azure-video-indexer/media/detected-clothing/index-video.png diff --git 
a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/detected-clothing/observed-people.png b/articles/azure-video-indexer/media/detected-clothing/observed-people.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/detected-clothing/observed-people.png rename to articles/azure-video-indexer/media/detected-clothing/observed-people.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/detected-clothing/observed-person.png b/articles/azure-video-indexer/media/detected-clothing/observed-person.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/detected-clothing/observed-person.png rename to articles/azure-video-indexer/media/detected-clothing/observed-person.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/index/AMSIcons-media_analytics.svg b/articles/azure-video-indexer/media/index/AMSIcons-media_analytics.svg similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/index/AMSIcons-media_analytics.svg rename to articles/azure-video-indexer/media/index/AMSIcons-media_analytics.svg diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/invite-users/invite-more-people.png b/articles/azure-video-indexer/media/invite-users/invite-more-people.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/invite-users/invite-more-people.png rename to articles/azure-video-indexer/media/invite-users/invite-more-people.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/invite-users/invite-msg.png b/articles/azure-video-indexer/media/invite-users/invite-msg.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/invite-users/invite-msg.png rename to articles/azure-video-indexer/media/invite-users/invite-msg.png diff 
--git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/invite-users/invites-pending.png b/articles/azure-video-indexer/media/invite-users/invites-pending.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/invite-users/invites-pending.png rename to articles/azure-video-indexer/media/invite-users/invites-pending.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/invite-users/joined-invitee-options.png b/articles/azure-video-indexer/media/invite-users/joined-invitee-options.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/invite-users/joined-invitee-options.png rename to articles/azure-video-indexer/media/invite-users/joined-invitee-options.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/invite-users/manage-roles.png b/articles/azure-video-indexer/media/invite-users/manage-roles.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/invite-users/manage-roles.png rename to articles/azure-video-indexer/media/invite-users/manage-roles.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/invite-users/remove-users.png b/articles/azure-video-indexer/media/invite-users/remove-users.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/invite-users/remove-users.png rename to articles/azure-video-indexer/media/invite-users/remove-users.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/invite-users/settings.png b/articles/azure-video-indexer/media/invite-users/settings.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/invite-users/settings.png rename to articles/azure-video-indexer/media/invite-users/settings.png diff --git 
a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/invite-users/share-account-others.png b/articles/azure-video-indexer/media/invite-users/share-account-others.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/invite-users/share-account-others.png rename to articles/azure-video-indexer/media/invite-users/share-account-others.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/invite-users/share-account.png b/articles/azure-video-indexer/media/invite-users/share-account.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/invite-users/share-account.png rename to articles/azure-video-indexer/media/invite-users/share-account.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/language-identification-model/auto-detect.png b/articles/azure-video-indexer/media/language-identification-model/auto-detect.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/language-identification-model/auto-detect.png rename to articles/azure-video-indexer/media/language-identification-model/auto-detect.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/live-stream-analysis/live-stream-analysis01.png b/articles/azure-video-indexer/media/live-stream-analysis/live-stream-analysis01.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/live-stream-analysis/live-stream-analysis01.png rename to articles/azure-video-indexer/media/live-stream-analysis/live-stream-analysis01.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/live-stream-analysis/live-stream-analysis02.png b/articles/azure-video-indexer/media/live-stream-analysis/live-stream-analysis02.png similarity index 100% rename from 
articles/azure-video-analyzer/video-analyzer-for-media-docs/media/live-stream-analysis/live-stream-analysis02.png rename to articles/azure-video-indexer/media/live-stream-analysis/live-stream-analysis02.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/live-stream-analysis/live-stream-analysis03.png b/articles/azure-video-indexer/media/live-stream-analysis/live-stream-analysis03.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/live-stream-analysis/live-stream-analysis03.png rename to articles/azure-video-indexer/media/live-stream-analysis/live-stream-analysis03.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/location/location1.png b/articles/azure-video-indexer/media/location/location1.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/location/location1.png rename to articles/azure-video-indexer/media/location/location1.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/account-access-token.png b/articles/azure-video-indexer/media/logic-apps-connector-tutorial/account-access-token.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/account-access-token.png rename to articles/azure-video-indexer/media/logic-apps-connector-tutorial/account-access-token.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/azure-blob-storage.png b/articles/azure-video-indexer/media/logic-apps-connector-tutorial/azure-blob-storage.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/azure-blob-storage.png rename to articles/azure-video-indexer/media/logic-apps-connector-tutorial/azure-blob-storage.png diff --git 
a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/connection-name-api-key.png b/articles/azure-video-indexer/media/logic-apps-connector-tutorial/connection-name-api-key.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/connection-name-api-key.png rename to articles/azure-video-indexer/media/logic-apps-connector-tutorial/connection-name-api-key.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/container.png b/articles/azure-video-indexer/media/logic-apps-connector-tutorial/container.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/container.png rename to articles/azure-video-indexer/media/logic-apps-connector-tutorial/container.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/create-blob-action.jpg b/articles/azure-video-indexer/media/logic-apps-connector-tutorial/create-blob-action.jpg similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/create-blob-action.jpg rename to articles/azure-video-indexer/media/logic-apps-connector-tutorial/create-blob-action.jpg diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/file-upload-flow.png b/articles/azure-video-indexer/media/logic-apps-connector-tutorial/file-upload-flow.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/file-upload-flow.png rename to articles/azure-video-indexer/media/logic-apps-connector-tutorial/file-upload-flow.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/json-extraction-flow.png 
b/articles/azure-video-indexer/media/logic-apps-connector-tutorial/json-extraction-flow.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/json-extraction-flow.png rename to articles/azure-video-indexer/media/logic-apps-connector-tutorial/json-extraction-flow.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/sas-uri-by-path.jpg b/articles/azure-video-indexer/media/logic-apps-connector-tutorial/sas-uri-by-path.jpg similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/sas-uri-by-path.jpg rename to articles/azure-video-indexer/media/logic-apps-connector-tutorial/sas-uri-by-path.jpg diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/save-url-trigger.png b/articles/azure-video-indexer/media/logic-apps-connector-tutorial/save-url-trigger.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/save-url-trigger.png rename to articles/azure-video-indexer/media/logic-apps-connector-tutorial/save-url-trigger.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/upload-and-index.png b/articles/azure-video-indexer/media/logic-apps-connector-tutorial/upload-and-index.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/upload-and-index.png rename to articles/azure-video-indexer/media/logic-apps-connector-tutorial/upload-and-index.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/video-indexer-action-info.jpg b/articles/azure-video-indexer/media/logic-apps-connector-tutorial/video-indexer-action-info.jpg similarity index 100% 
rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/logic-apps-connector-tutorial/video-indexer-action-info.jpg rename to articles/azure-video-indexer/media/logic-apps-connector-tutorial/video-indexer-action-info.jpg diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/manage-account-connected-to-azure/autoscale-reserved-units.png b/articles/azure-video-indexer/media/manage-account-connected-to-azure/autoscale-reserved-units.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/manage-account-connected-to-azure/autoscale-reserved-units.png rename to articles/azure-video-indexer/media/manage-account-connected-to-azure/autoscale-reserved-units.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/manage-account-connected-to-azure/select-settings.png b/articles/azure-video-indexer/media/manage-account-connected-to-azure/select-settings.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/manage-account-connected-to-azure/select-settings.png rename to articles/azure-video-indexer/media/manage-account-connected-to-azure/select-settings.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/manage-multiple-tenants/azure-subscription-per-tenant.png b/articles/azure-video-indexer/media/manage-multiple-tenants/azure-subscription-per-tenant.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/manage-multiple-tenants/azure-subscription-per-tenant.png rename to articles/azure-video-indexer/media/manage-multiple-tenants/azure-subscription-per-tenant.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/manage-multiple-tenants/single-video-indexer-account-for-all-users.png b/articles/azure-video-indexer/media/manage-multiple-tenants/single-video-indexer-account-for-all-users.png similarity 
index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/manage-multiple-tenants/single-video-indexer-account-for-all-users.png rename to articles/azure-video-indexer/media/manage-multiple-tenants/single-video-indexer-account-for-all-users.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/manage-multiple-tenants/video-indexer-account-per-tenant.png b/articles/azure-video-indexer/media/manage-multiple-tenants/video-indexer-account-per-tenant.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/manage-multiple-tenants/video-indexer-account-per-tenant.png rename to articles/azure-video-indexer/media/manage-multiple-tenants/video-indexer-account-per-tenant.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/matched-person/from-observed-people.png b/articles/azure-video-indexer/media/matched-person/from-observed-people.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/matched-person/from-observed-people.png rename to articles/azure-video-indexer/media/matched-person/from-observed-people.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/matched-person/index-matched-person-feature.png b/articles/azure-video-indexer/media/matched-person/index-matched-person-feature.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/matched-person/index-matched-person-feature.png rename to articles/azure-video-indexer/media/matched-person/index-matched-person-feature.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/multi-language-identification-transcription/portal-experience.png b/articles/azure-video-indexer/media/multi-language-identification-transcription/portal-experience.png similarity index 100% rename from 
articles/azure-video-analyzer/video-analyzer-for-media-docs/media/multi-language-identification-transcription/portal-experience.png rename to articles/azure-video-indexer/media/multi-language-identification-transcription/portal-experience.png diff --git a/articles/azure-video-indexer/media/network-security/nsg-service-tag.png b/articles/azure-video-indexer/media/network-security/nsg-service-tag.png new file mode 100644 index 0000000000000..b05a91312671b Binary files /dev/null and b/articles/azure-video-indexer/media/network-security/nsg-service-tag.png differ diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/observed-people-tracing/youtube-trailer.png b/articles/azure-video-indexer/media/observed-people-tracing/youtube-trailer.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/observed-people-tracing/youtube-trailer.png rename to articles/azure-video-indexer/media/observed-people-tracing/youtube-trailer.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/release-notes/audio-detection.png b/articles/azure-video-indexer/media/release-notes/audio-detection.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/release-notes/audio-detection.png rename to articles/azure-video-indexer/media/release-notes/audio-detection.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/release-notes/dark-mode.png b/articles/azure-video-indexer/media/release-notes/dark-mode.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/release-notes/dark-mode.png rename to articles/azure-video-indexer/media/release-notes/dark-mode.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/scenes-shots-keyframes/extracting-keyframes.png b/articles/azure-video-indexer/media/scenes-shots-keyframes/extracting-keyframes.png similarity 
index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/scenes-shots-keyframes/extracting-keyframes.png rename to articles/azure-video-indexer/media/scenes-shots-keyframes/extracting-keyframes.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/scenes-shots-keyframes/extracting-keyframes2.png b/articles/azure-video-indexer/media/scenes-shots-keyframes/extracting-keyframes2.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/scenes-shots-keyframes/extracting-keyframes2.png rename to articles/azure-video-indexer/media/scenes-shots-keyframes/extracting-keyframes2.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/scenes-shots-keyframes/scenes-shots-keyframes.png b/articles/azure-video-indexer/media/scenes-shots-keyframes/scenes-shots-keyframes.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/scenes-shots-keyframes/scenes-shots-keyframes.png rename to articles/azure-video-indexer/media/scenes-shots-keyframes/scenes-shots-keyframes.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-embed-widgets/video-indexer-widget01.png b/articles/azure-video-indexer/media/video-indexer-embed-widgets/video-indexer-widget01.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-embed-widgets/video-indexer-widget01.png rename to articles/azure-video-indexer/media/video-indexer-embed-widgets/video-indexer-widget01.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-embed-widgets/video-indexer-widget02.png b/articles/azure-video-indexer/media/video-indexer-embed-widgets/video-indexer-widget02.png similarity index 100% rename from 
articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-embed-widgets/video-indexer-widget02.png rename to articles/azure-video-indexer/media/video-indexer-embed-widgets/video-indexer-widget02.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-embed-widgets/video-indexer-widget03.png b/articles/azure-video-indexer/media/video-indexer-embed-widgets/video-indexer-widget03.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-embed-widgets/video-indexer-widget03.png rename to articles/azure-video-indexer/media/video-indexer-embed-widgets/video-indexer-widget03.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-get-started/avam-enter-file-url.png b/articles/azure-video-indexer/media/video-indexer-get-started/avam-enter-file-url.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-get-started/avam-enter-file-url.png rename to articles/azure-video-indexer/media/video-indexer-get-started/avam-enter-file-url.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-get-started/avam-odrv-embed-generate.png b/articles/azure-video-indexer/media/video-indexer-get-started/avam-odrv-embed-generate.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-get-started/avam-odrv-embed-generate.png rename to articles/azure-video-indexer/media/video-indexer-get-started/avam-odrv-embed-generate.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-get-started/avam-odrv-embed.png b/articles/azure-video-indexer/media/video-indexer-get-started/avam-odrv-embed.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-get-started/avam-odrv-embed.png 
rename to articles/azure-video-indexer/media/video-indexer-get-started/avam-odrv-embed.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-get-started/avam-odrv-url.png b/articles/azure-video-indexer/media/video-indexer-get-started/avam-odrv-url.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-get-started/avam-odrv-url.png rename to articles/azure-video-indexer/media/video-indexer-get-started/avam-odrv-url.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-get-started/progress.png b/articles/azure-video-indexer/media/video-indexer-get-started/progress.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-get-started/progress.png rename to articles/azure-video-indexer/media/video-indexer-get-started/progress.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-get-started/uploaded.png b/articles/azure-video-indexer/media/video-indexer-get-started/uploaded.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-get-started/uploaded.png rename to articles/azure-video-indexer/media/video-indexer-get-started/uploaded.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-get-started/video-indexer-upload.png b/articles/azure-video-indexer/media/video-indexer-get-started/video-indexer-upload.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-get-started/video-indexer-upload.png rename to articles/azure-video-indexer/media/video-indexer-get-started/video-indexer-upload.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-output-json/video-indexer-summarized-insights.png 
b/articles/azure-video-indexer/media/video-indexer-output-json/video-indexer-summarized-insights.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-output-json/video-indexer-summarized-insights.png rename to articles/azure-video-indexer/media/video-indexer-output-json/video-indexer-summarized-insights.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-overview/model-chart.png b/articles/azure-video-indexer/media/video-indexer-overview/model-chart.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-overview/model-chart.png rename to articles/azure-video-indexer/media/video-indexer-overview/model-chart.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-search/embed-download-create-projects.png b/articles/azure-video-indexer/media/video-indexer-search/embed-download-create-projects.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-search/embed-download-create-projects.png rename to articles/azure-video-indexer/media/video-indexer-search/embed-download-create-projects.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-search/filter.png b/articles/azure-video-indexer/media/video-indexer-search/filter.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-search/filter.png rename to articles/azure-video-indexer/media/video-indexer-search/filter.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-search/insights.png b/articles/azure-video-indexer/media/video-indexer-search/insights.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-search/insights.png rename to 
articles/azure-video-indexer/media/video-indexer-search/insights.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-search/results.png b/articles/azure-video-indexer/media/video-indexer-search/results.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-search/results.png rename to articles/azure-video-indexer/media/video-indexer-search/results.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-search/timeline.png b/articles/azure-video-indexer/media/video-indexer-search/timeline.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-search/timeline.png rename to articles/azure-video-indexer/media/video-indexer-search/timeline.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-use-apis/account-id.png b/articles/azure-video-indexer/media/video-indexer-use-apis/account-id.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-use-apis/account-id.png rename to articles/azure-video-indexer/media/video-indexer-use-apis/account-id.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-use-apis/authorization.png b/articles/azure-video-indexer/media/video-indexer-use-apis/authorization.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-use-apis/authorization.png rename to articles/azure-video-indexer/media/video-indexer-use-apis/authorization.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-use-apis/sign-in.png b/articles/azure-video-indexer/media/video-indexer-use-apis/sign-in.png similarity index 100% rename from 
articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-use-apis/sign-in.png rename to articles/azure-video-indexer/media/video-indexer-use-apis/sign-in.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-use-apis/subscriptions.png b/articles/azure-video-indexer/media/video-indexer-use-apis/subscriptions.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-use-apis/subscriptions.png rename to articles/azure-video-indexer/media/video-indexer-use-apis/subscriptions.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/add-all.png b/articles/azure-video-indexer/media/video-indexer-view-edit/add-all.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/add-all.png rename to articles/azure-video-indexer/media/video-indexer-view-edit/add-all.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/custom-vision.png b/articles/azure-video-indexer/media/video-indexer-view-edit/custom-vision.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/custom-vision.png rename to articles/azure-video-indexer/media/video-indexer-view-edit/custom-vision.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/filter-options.png b/articles/azure-video-indexer/media/video-indexer-view-edit/filter-options.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/filter-options.png rename to articles/azure-video-indexer/media/video-indexer-view-edit/filter-options.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/include.png 
b/articles/azure-video-indexer/media/video-indexer-view-edit/include.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/include.png rename to articles/azure-video-indexer/media/video-indexer-view-edit/include.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/insights.png b/articles/azure-video-indexer/media/video-indexer-view-edit/insights.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/insights.png rename to articles/azure-video-indexer/media/video-indexer-view-edit/insights.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/new-project-edit-name.png b/articles/azure-video-indexer/media/video-indexer-view-edit/new-project-edit-name.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/new-project-edit-name.png rename to articles/azure-video-indexer/media/video-indexer-view-edit/new-project-edit-name.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/new-project.png b/articles/azure-video-indexer/media/video-indexer-view-edit/new-project.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/new-project.png rename to articles/azure-video-indexer/media/video-indexer-view-edit/new-project.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/preview.png b/articles/azure-video-indexer/media/video-indexer-view-edit/preview.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/preview.png rename to articles/azure-video-indexer/media/video-indexer-view-edit/preview.png diff --git 
a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/rearrange.png b/articles/azure-video-indexer/media/video-indexer-view-edit/rearrange.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/rearrange.png rename to articles/azure-video-indexer/media/video-indexer-view-edit/rearrange.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/render-download.png b/articles/azure-video-indexer/media/video-indexer-view-edit/render-download.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/render-download.png rename to articles/azure-video-indexer/media/video-indexer-view-edit/render-download.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/rendering-done.png b/articles/azure-video-indexer/media/video-indexer-view-edit/rendering-done.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/rendering-done.png rename to articles/azure-video-indexer/media/video-indexer-view-edit/rendering-done.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/search-try-cognitive-services.png b/articles/azure-video-indexer/media/video-indexer-view-edit/search-try-cognitive-services.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/search-try-cognitive-services.png rename to articles/azure-video-indexer/media/video-indexer-view-edit/search-try-cognitive-services.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/video-editor.png b/articles/azure-video-indexer/media/video-indexer-view-edit/video-editor.png similarity index 100% rename from 
articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/video-editor.png rename to articles/azure-video-indexer/media/video-indexer-view-edit/video-editor.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/video-indexer-summarized-insights.png b/articles/azure-video-indexer/media/video-indexer-view-edit/video-indexer-summarized-insights.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/video-indexer-summarized-insights.png rename to articles/azure-video-indexer/media/video-indexer-view-edit/video-indexer-summarized-insights.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/visual-text.png b/articles/azure-video-indexer/media/video-indexer-view-edit/visual-text.png similarity index 100% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/media/video-indexer-view-edit/visual-text.png rename to articles/azure-video-indexer/media/video-indexer-view-edit/visual-text.png diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/multi-language-identification-transcription.md b/articles/azure-video-indexer/multi-language-identification-transcription.md similarity index 82% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/multi-language-identification-transcription.md rename to articles/azure-video-indexer/multi-language-identification-transcription.md index f5d6e54ba523a..bb6eacc4f7524 100644 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/multi-language-identification-transcription.md +++ b/articles/azure-video-indexer/multi-language-identification-transcription.md @@ -1,25 +1,23 @@ --- -title: Automatically identify and transcribe multi-language content with Azure Video Analyzer for Media (formerly Video Indexer) -titleSuffix: Azure Video Analyzer for Media -description: This 
topic demonstrates how to automatically identify and transcribe multi-language content with Azure Video Analyzer for Media (formerly Video Indexer). +title: Automatically identify and transcribe multi-language content with Azure Video Indexer (formerly Azure Video Analyzer for Media) +description: This topic demonstrates how to automatically identify and transcribe multi-language content with Azure Video Indexer (formerly Azure Video Analyzer for Media). services: azure-video-analyzer author: Juliako manager: femila ms.topic: article -ms.subservice: azure-video-analyzer-media ms.date: 09/01/2019 ms.author: juliako --- # Automatically identify and transcribe multi-language content -Azure Video Analyzer for Media (formerly Video Indexer) supports automatic language identification and transcription in multi-language content. This process involves automatically identifying the spoken language in different segments from audio, sending each segment of the media file to be transcribed and combine the transcription back to one unified transcription. +Azure Video Indexer (formerly Azure Video Analyzer for Media) supports automatic language identification and transcription in multi-language content. This process involves automatically identifying the spoken language in different segments from audio, sending each segment of the media file to be transcribed and combine the transcription back to one unified transcription. ## Choosing multilingual identification on indexing with portal You can choose **multi-language detection** when uploading and indexing your video. Alternatively, you can choose **multi-language detection** when re-indexing your video. The following steps describe how to reindex: -1. Browse to the [Video Analyzer for Media](https://vi.microsoft.com/) website and sign in. +1. Browse to the [Azure Video Indexer](https://vi.microsoft.com/) website and sign in. 1. Go to the **Library** page and hover over the name of the video that you want to reindex. 1. 
On the right-bottom corner, click the **Re-index video** button. 1. In the **Re-index video** dialog, choose **multi-language detection** from the **Video source language** drop-down box. @@ -85,4 +83,4 @@ Additionally, each instance in the transcription section will include the langua ## Next steps -[Video Analyzer for Media overview](video-indexer-overview.md) +[Azure Video Indexer overview](video-indexer-overview.md) diff --git a/articles/azure-video-indexer/network-security.md b/articles/azure-video-indexer/network-security.md new file mode 100644 index 0000000000000..2fa208e0aaea9 --- /dev/null +++ b/articles/azure-video-indexer/network-security.md @@ -0,0 +1,45 @@ +--- +title: How to enable network security +description: This article gives an overview of the Azure Video Indexer (formerly Video Analyzer for Media) network security options. +ms.topic: article +ms.date: 04/11/2022 +ms.author: juliako +--- + +# NSG service tags for Azure Video Indexer + +Azure Video Indexer (formerly Video Analyzer for Media) is a service hosted on Azure. In some architecture cases the service needs to interact with other services in order to index video files (that is, a Storage Account) or when a customer orchestrates indexing jobs against our API endpoint using their own service hosted on Azure (i.e AKS, Web Apps, Logic Apps, Functions). Customers who would like to limit access to their resources on a network level can use [Network Security Groups with Service Tags](https://docs.microsoft.com/azure/virtual-network/service-tags-overview). A service tag represents a group of IP address prefixes from a given Azure service, in this case Azure Video Indexer. Microsoft manages the address prefixes grouped by the service tag and automatically updates the service tag as addresses change in our backend, minimizing the complexity of frequent updates to network security rules by the customer. 
+ +## Get started with service tags + +Currently we support the global service tag option for using service tags in your network security groups: + +**Use a single global AzureVideoAnalyzerForMedia service tag**: This option opens your virtual network to all IP addresses that the Azure Video Indexer service uses across all regions we offer our service. This method will allow for all IP addresses owned and used by Azure Video Indexer to reach your network resources behind the NSG. + +> [!NOTE] +> Currently we do not support IPs allocated to our services in the Switzerland North Region. These will be added soon. If your account is located in this region you cannot use Service Tags in your NSG today since these IPs are not in the Service Tag list and will be rejected by the NSG rule. + +## Use a single global Azure Video Indexer service tag + +The easiest way to begin using service tags with your Azure Video Indexer account is to add the global tag `AzureVideoAnalyzerForMedia` to an NSG rule. + +1. From the [Azure portal](https://portal.azure.com/), select your network security group. +1. Under **Settings**, select **Inbound security rules**, and then select **+ Add**. +1. From the **Source** drop-down list, select **Service Tag**. +1. From the **Source service tag** drop-down list, select **AzureVideoAnalyzerForMedia**. + +:::image type="content" source="./media/network-security/nsg-service-tag.png" alt-text="Add a service tag from the Azure portal"::: + +This tag contains the IP addresses of Azure Video Indexer services for all regions where available. The tag will ensure that your resource can communicate with the Azure Video Indexer services no matter where it's created. + +## Using Azure CLI + +You can also use Azure CLI to create a new or update an existing NSG rule and add the **AzureVideoAnalyzerForMedia** service tag using the `--source-address-prefixes`. 
For a full list of CLI commands and parameters see [az network nsg](https://docs.microsoft.com/cli/azure/network/nsg/rule?view=azure-cli-latest) + +Example of a security rule using service tags. For more details, visit https://aka.ms/servicetags + +`az network nsg rule create -g MyResourceGroup --nsg-name MyNsg -n MyNsgRuleWithTags --priority 400 --source-address-prefixes AzureVideoAnalyzerForMedia --destination-address-prefixes '*' --destination-port-ranges '*' --direction Inbound --access Allow --protocol Tcp --description "Allow from VideoAnalyzerForMedia"` + +## Next steps + +[Disaster recovery](video-indexer-disaster-recovery.md) \ No newline at end of file diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/observed-people-tracing.md b/articles/azure-video-indexer/observed-people-tracing.md similarity index 92% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/observed-people-tracing.md rename to articles/azure-video-indexer/observed-people-tracing.md index 6fc9cce2e1636..b41987bd41147 100644 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/observed-people-tracing.md +++ b/articles/azure-video-indexer/observed-people-tracing.md @@ -9,7 +9,7 @@ ms.author: juliako # Trace observed people in a video (preview) -Azure Video Analyzer for Media (formerly Video Indexer) detects observed people in videos and provides information such as the location of the person in the video frame and the exact timestamp (start, end) when a person appears. The API returns the bounding box coordinates (in pixels) for each person instance detected, including detection confidence. +Azure Video Indexer (formerly Azure Video Analyzer for Media) detects observed people in videos and provides information such as the location of the person in the video frame and the exact timestamp (start, end) when a person appears. 
The API returns the bounding box coordinates (in pixels) for each person instance detected, including detection confidence. Some scenarios where this feature could be useful: diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/odrv-download.md b/articles/azure-video-indexer/odrv-download.md similarity index 90% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/odrv-download.md rename to articles/azure-video-indexer/odrv-download.md index 7ba07d4c5318b..c25656bc73aab 100644 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/odrv-download.md +++ b/articles/azure-video-indexer/odrv-download.md @@ -1,21 +1,21 @@ --- -title: Index videos stored on OneDrive - Azure Video Analyzer for media -description: Learn how to index videos stored on OneDrive by using Azure Video Analyzer for Media (formerly Video Indexer). +title: Index videos stored on OneDrive - Azure Video Indexer +description: Learn how to index videos stored on OneDrive by using Azure Video Indexer (formerly Azure Video Analyzer for Media). ms.topic: article ms.date: 12/17/2021 --- # Index your videos stored on OneDrive -This article shows how to index videos stored on OneDrive by using the Azure Video Analyzer for Media (formerly Video Indexer) website. +This article shows how to index videos stored on OneDrive by using the Azure Video Indexer (formerly Azure Video Analyzer for Media) website. ## Supported file formats -For a list of file formats that you can use with Video Analyzer for Media, see [Standard Encoder formats and codecs](/azure/media-services/latest/encode-media-encoder-standard-formats-reference). +For a list of file formats that you can use with Azure Video Indexer, see [Standard Encoder formats and codecs](/azure/media-services/latest/encode-media-encoder-standard-formats-reference). ## Index a video by using the website -1. Sign into the [Video Analyzer for Media](https://www.videoindexer.ai/) website, and then select **Upload**. +1.
Sign into the [Azure Video Indexer](https://www.videoindexer.ai/) website, and then select **Upload**. > [!div class="mx-imgBorder"] > :::image type="content" source="./media/video-indexer-get-started/video-indexer-upload.png" alt-text="Screenshot that shows the Upload button."::: @@ -40,16 +40,16 @@ For a list of file formats that you can use with Video Analyzer for Media, see [ `https://onedrive.live.com/download?cid=5BC591B7C713B04F&resid=5DC518B6B713C40F%2110126&authkey=HnsodidN_50oA3lLfk` -1. Now enter this URL in the Azure Video Analyzer for Media portal in the URL field. +1. Now enter this URL in the Azure Video Indexer portal in the URL field. > [!div class="mx-imgBorder"] > :::image type="content" source="./media/video-indexer-get-started/avam-odrv-url.png" alt-text="Screenshot that shows the onedrive url field."::: -After your video is downloaded from OneDrive, Video Analyzer for Media starts indexing and analyzing the video. +After your video is downloaded from OneDrive, Azure Video Indexer starts indexing and analyzing the video. > [!div class="mx-imgBorder"] > :::image type="content" source="./media/video-indexer-get-started/progress.png" alt-text="Screenshot that shows the progress of an upload."::: -Once Video Analyzer for Media is done analyzing, you will receive an email with a link to your indexed video. The email also includes a short description of what was found in your video (for example: people, topics, optical character recognition). +Once Azure Video Indexer is done analyzing, you will receive an email with a link to your indexed video. The email also includes a short description of what was found in your video (for example: people, topics, optical character recognition). ## Upload and index a video by using the API @@ -57,11 +57,11 @@ You can use the [Upload Video](https://api-portal.videoindexer.ai/api-details#ap ### Configurations and parameters -This section describes some of the optional parameters and when to set them. 
For the most up-to-date info about parameters, see the [Video Analyzer for Media portal](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Upload-Video). +This section describes some of the optional parameters and when to set them. For the most up-to-date info about parameters, see the [Azure Video Indexer portal](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Upload-Video). #### externalID -Use this parameter to specify an ID that will be associated with the video. The ID can be applied to integration into an external video content management (VCM) system. The videos that are in the Video Analyzer for Media portal can be searched via the specified external ID. +Use this parameter to specify an ID that will be associated with the video. The ID can be applied to integration into an external video content management (VCM) system. The videos that are in the Azure Video Indexer portal can be searched via the specified external ID. #### callbackUrl @@ -69,7 +69,7 @@ Use this parameter to specify a callback URL. [!INCLUDE [callback url](./includes/callback-url.md)] -Video Analyzer for Media returns any existing parameters provided in the original URL. The URL must be encoded. +Azure Video Indexer returns any existing parameters provided in the original URL. The URL must be encoded. #### indexingPreset @@ -89,46 +89,46 @@ Use this parameter to define an AI bundle that you want to apply on your audio o > [!NOTE] > The preceding advanced presets include models that are in public preview. When these models reach general availability, there might be implications for the price. -Video Analyzer for Media covers up to two tracks of audio. If the file has more audio tracks, they're treated as one track. If you want to index the tracks separately, you need to extract the relevant audio file and index it as `AudioOnly`. +Azure Video Indexer covers up to two tracks of audio. 
If the file has more audio tracks, they're treated as one track. If you want to index the tracks separately, you need to extract the relevant audio file and index it as `AudioOnly`. -Price depends on the selected indexing option. For more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/). +Price depends on the selected indexing option. For more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/). #### priority -Video Analyzer for Media indexes videos according to their priority. Use the `priority` parameter to specify the index priority. The following values are valid: `Low`, `Normal` (default), and `High`. +Azure Video Indexer indexes videos according to their priority. Use the `priority` parameter to specify the index priority. The following values are valid: `Low`, `Normal` (default), and `High`. This parameter is supported only for paid accounts. #### streamingPreset -After your video is uploaded, Video Analyzer for Media optionally encodes the video. It then proceeds to indexing and analyzing the video. When Video Analyzer for Media is done analyzing, you get a notification with the video ID. +After your video is uploaded, Azure Video Indexer optionally encodes the video. It then proceeds to indexing and analyzing the video. When Azure Video Indexer is done analyzing, you get a notification with the video ID. When you're using the [Upload Video](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Upload-Video) or [Re-Index Video](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Re-Index-Video) API, one of the optional parameters is `streamingPreset`. If you set `streamingPreset` to `Default`, `SingleBitrate`, or `AdaptiveBitrate`, the encoding process is triggered. After the indexing and encoding jobs are done, the video is published so you can also stream your video. 
The streaming endpoint from which you want to stream the video must be in the **Running** state. -For `SingleBitrate`, the standard encoder cost will apply for the output. If the video height is greater than or equal to 720, Video Analyzer for Media encodes it as 1280 x 720. Otherwise, it's encoded as 640 x 468. -The default setting is [content-aware encoding](/azure/media-services/latest/encode-content-aware-concept). +For `SingleBitrate`, the standard encoder cost will apply for the output. If the video height is greater than or equal to 720, Azure Video Indexer encodes it as 1280 x 720. Otherwise, it's encoded as 640 x 468. +The default setting is [content-aware encoding](/azure/azure/media-services/latest/encode-content-aware-concept). If you only want to index your video and not encode it, set `streamingPreset` to `NoStreaming`. #### videoUrl -This parameter specifies the URL of the video or audio file to be indexed. If the `videoUrl` parameter is not specified, Video Analyzer for Media expects you to pass the file as multipart/form body content. +This parameter specifies the URL of the video or audio file to be indexed. If the `videoUrl` parameter is not specified, Azure Video Indexer expects you to pass the file as multipart/form body content. ### Code sample -The following C# code snippets demonstrate the usage of all the Video Analyzer for Media APIs together. +The following C# code snippets demonstrate the usage of all the Azure Video Indexer APIs together. ### [Classic account](#tab/With-classic-account/) After you copy the following code into your development platform, you'll need to provide two parameters: -* API key (`apiKey`): Your personal API management subscription key. It allows you to get an access token in order to perform operations on your Video Analyzer for Media account. +* API key (`apiKey`): Your personal API management subscription key. 
It allows you to get an access token in order to perform operations on your Azure Video Indexer account. To get your API key: - 1. Go to the [Video Analyzer for Media portal](https://api-portal.videoindexer.ai/). + 1. Go to the [Azure Video Indexer portal](https://api-portal.videoindexer.ai/). 1. Sign in. 1. Go to **Products** > **Authorization** > **Authorization subscription**. 1. Copy the **Primary key** value. @@ -371,7 +371,7 @@ namespace VideoIndexerArm public static async Task Main(string[] args) { - // Build Azure Video Analyzer for Media resource provider client that has access token through Azure Resource Manager + // Build Azure Video Indexer resource provider client that has access token through Azure Resource Manager var videoIndexerResourceProviderClient = await VideoIndexerResourceProviderClient.BuildVideoIndexerResourceProviderClient(); // Get account details @@ -381,7 +381,7 @@ namespace VideoIndexerArm Console.WriteLine($"account id: {accountId}"); Console.WriteLine($"account location: {accountLocation}"); - // Get account-level access token for Azure Video Analyzer for Media + // Get account-level access token for Azure Video Indexer var accessTokenRequest = new AccessTokenRequest { PermissionType = AccessTokenPermission.Contributor, @@ -654,7 +654,7 @@ The upload operation might return the following status codes: - The byte array option times out after 30 minutes. - The URL provided in the `videoURL` parameter must be encoded. - Indexing Media Services assets has the same limitation as indexing from a URL. -- Video Analyzer for Media has a duration limit of 4 hours for a single file. +- Azure Video Indexer has a duration limit of 4 hours for a single file. - The URL must be accessible (for example, a public URL). If it's a private URL, the access token must be provided in the request. 
@@ -674,4 +674,4 @@ For information about a storage account that's behind a firewall, see the [FAQ]( ## Next steps -[Examine the Azure Video Analyzer for Media output produced by an API](video-indexer-output-json-v2.md) +[Examine the Azure Video Indexer output produced by an API](video-indexer-output-json-v2.md) diff --git a/articles/azure-video-indexer/regions.md b/articles/azure-video-indexer/regions.md new file mode 100644 index 0000000000000..7b31f83801cf5 --- /dev/null +++ b/articles/azure-video-indexer/regions.md @@ -0,0 +1,54 @@ +--- +title: Regions in which Azure Video Indexer (formerly Azure Video Analyzer for Media) is available +description: This article talks about Azure regions in which Azure Video Indexer (formerly Azure Video Analyzer for Media) is available. +services: azure-video-analyzer +author: Juliako +manager: femila +ms.topic: article +ms.date: 09/14/2020 +ms.author: juliako +--- + +# Azure regions in which Azure Video Indexer exists + +Azure Video Indexer (formerly Azure Video Analyzer for Media) APIs contain a **location** parameter that you should set to the Azure region to which the call should be routed. This must be an [Azure region in which Azure Video Indexer is available](https://azure.microsoft.com/global-infrastructure/services/?products=cognitive-services®ions=all). + +## Locations + +The `location` parameter must be given the Azure region code name as its value. If you are using Azure Video Indexer in preview mode, you should put `"trial"` as the value. `trial` is the default value for the `location` parameter. Otherwise, to get the code name of the Azure region that your account is in and that your call should be routed to, you can use the Azure portal or run a [Azure CLI](/cli/azure) command. + +### Azure portal + +1. Sign in on the [Azure Video Indexer](https://www.videoindexer.ai/) website. +1. Select **User accounts** from the top-right corner of the page. +1. Find the location of your account in the top-right corner. 
+ + > [!div class="mx-imgBorder"] + > :::image type="content" source="./media/location/location1.png" alt-text="Location"::: + +### CLI command + +```azurecli-interactive +az account list-locations +``` + +Once you run the line shown above, you get a list of all Azure regions. Navigate to the Azure region that has the *displayName* you are looking for, and use its *name* value for the **location** parameter. + +For example, for the Azure region West US 2 (displayed below), you will use "westus2" for the **location** parameter. + +```json + { + "displayName": "West US 2", + "id": "/subscriptions/00000000-0000-0000-0000-000000000000/locations/westus2", + "latitude": "47.233", + "longitude": "-119.852", + "name": "westus2", + "subscriptionId": null + } +``` + +## Next steps + +- [Customize Language model using APIs](customize-language-model-with-api.md) +- [Customize Brands model using APIs](customize-brands-model-with-api.md) +- [Customize Person model using APIs](customize-person-model-with-api.md) diff --git a/articles/azure-video-indexer/release-notes.md b/articles/azure-video-indexer/release-notes.md new file mode 100644 index 0000000000000..29d337fa6b690 --- /dev/null +++ b/articles/azure-video-indexer/release-notes.md @@ -0,0 +1,590 @@ +--- +title: Azure Video Indexer (formerly Azure Video Analyzer for Media) release notes | Microsoft Docs +description: To stay up-to-date with the most recent developments, this article provides you with the latest updates on Azure Video Indexer (formerly Azure Video Analyzer for Media). +ms.topic: article +ms.custom: references_regions +ms.date: 04/27/2022 +ms.author: juliako +--- + +# Azure Video Indexer release notes + +>Get notified about when to revisit this page for updates by copying and pasting this URL: `https://docs.microsoft.com/api/search/rss?search=%22Azure+Media+Services+Video+Indexer+release+notes%22&locale=en-us` into your RSS feed reader. 
+ +To stay up-to-date with the most recent Azure Video Indexer (former Video Indexer) developments, this article provides you with information about: + +* [Important notice](#upcoming-critical-changes) about planned changes +* The latest releases +* Known issues +* Bug fixes +* Deprecated functionality + +## Upcoming critical changes + +> [!Important] +> This section describes a critical upcoming change for the `Upload-Video` API. + + +### Upload-Video API + +In the past, the `Upload-Video` API was tolerant to calls to upload a video from a URL where an empty multipart form body was provided in the C# code, such as: + +```csharp +var content = new MultipartFormDataContent(); +var uploadRequestResult = await client.PostAsync($"{apiUrl}/{accountInfo.Location}/Accounts/{accountInfo.Id}/Videos?{queryParams}", content); +``` + +In the coming weeks, our service will fail requests of this type. + +In order to upload a video from a URL, change your code to send null in the request body: + +```csharp +var uploadRequestResult = await client.PostAsync($"{apiUrl}/{accountInfo.Location}/Accounts/{accountInfo.Id}/Videos?{queryParams}", null); +``` + +## April 2022 release updates + +### Renamed **Azure Video Analyzer for Media** back to **Azure Video Indexer** + +As of today, Azure Video analyzer for Media product name is **Azure Video Indexer** and all product related assets (web portal, marketing materials). It is a backward compatible change that has no implication on APIs and links. **Azure Video Indexer**'s new logo: + +:::image type="content" source="../applied-ai-services/media/video-indexer.svg" alt-text="New logo"::: + +## March 2022 + +### Closed Captioning files now support including speakers’ attributes + +Azure Video Indexer enables you to include speakers' characteristic based on a closed captioning file that you choose to download. 
To include the speakers’ attributes, select Downloads -> Closed Captions -> choose the closed captioning downloadable file format (SRT, VTT, TTML, TXT, or CSV) and check **Include speakers** checkbox. + +### Improvements to the widget offering + +The following improvements were made: + +* Azure Video Indexer widgets support more than 1 locale in a widget's parameter. +* The Insights widgets support initial search parameters and multiple sorting options. +* The Insights widgets also include a confirmation step before deleting a face to avoid mistakes. +* The widget customization now supports width as strings (for example 100%, 100vw). + +## February 2022 + +### Public preview of Azure Video Indexer account management based on ARM in Government cloud + +Azure Video Indexer website is now supporting account management based on ARM in public preview (see, [November 2021 release note](#november-2021)). + +### Leverage open-source code to create ARM based account + +Added new code samples including HTTP calls to use Azure Video Indexer create, read, update and delete (CRUD) ARM API for solution developers. See [this sample](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/ARM-Samples/Create-Account +). + +## January 2022 + +### Improved audio effects detection + +The audio effects detection capability was improved to have a better detection rate over the following classes: + +* Crowd reactions (cheering, clapping, and booing), +* Gunshot or explosion, +* Laughter + +For more information, see [Audio effects detection](audio-effects-detection.md). + +### New source languages support for STT, translation, and search on the website + +Azure Video Indexer introduces source languages support for STT (speech-to-text), translation, and search in Hebrew (he-IL), Portuguese (pt-PT), and Persian (fa-IR) on the [Azure Video Indexer](https://www.videoindexer.ai/) website. 
+It means transcription, translation, and search features are also supported for these languages in Azure Video Indexer web applications and widgets. + +## December 2021 + +### The projects feature is now GA + +The projects feature is now GA and ready for productive use. There is no pricing impact related to the "Preview to GA" transition. See [Add video clips to your projects](use-editor-create-project.md). + +### New source languages support for STT, translation, and search on API level + +Azure Video Indexer introduces source languages support for STT (speech-to-text), translation, and search in Hebrew (he-IL), Portuguese (pt-PT), and Persian (fa-IR) on the API level. + +### Matched person detection capability + +When indexing a video through our advanced video settings, you can view the new matched person detection capability. If there are people observed in your media file, you can now view the specific person who matched each of them through the media player. + +## November 2021 + +### Public preview of Azure Video Indexer account management based on ARM + +Azure Video Indexer introduces a public preview of Azure Resource Manager (ARM) based account management. You can leverage ARM-based Azure Video Indexer APIs to create, edit, and delete an account from the [Azure portal](https://portal.azure.com/#home). + +> [!NOTE] +> The Government cloud includes support for CRUD ARM based accounts from Azure Video Indexer API and from the Azure portal. +> +> There is currently no support from the Azure Video Indexer [website](https://www.videoindexer.ai). + +For more information go to [create an Azure Video Indexer account](https://techcommunity.microsoft.com/t5/azure-ai/azure-video-analyzer-for-media-is-now-available-as-an-azure/ba-p/2912422). + +### People’s clothing detection + +When indexing a video through the advanced video settings, you can view the new **People’s clothing detection** capability. 
If there are people detected in your media file, you can now view the clothing type they are wearing through the media player. + +### Face bounding box (preview) + +You can now turn on a bounding box for detected faces during indexing of the media file. The face bounding box feature is available when indexing your file by choosing the **standard**, **basic**, or **advanced** indexing presets. + +You can enable the bounding boxes through the player. + +## October 2021 + +### Embed widgets in your app using Azure Video Indexer package + +Use the new Azure Video Indexer (AVAM) `@azure/video-analyzer-for-media-widgets` npm package to add `insights` widgets to your app and customize it according to your needs. + +The new AVAM package enables you to easily embed and communicate between our widgets and your app, instead of adding an `iframe` element to embed the insights widget. Learn more in [Embed and customize Azure Video Indexer widgets in your app](https://techcommunity.microsoft.com/t5/azure-media-services/embed-and-customize-azure-video-analyzer-for-media-widgets-in/ba-p/2847063).  + +## August 2021 + +### Re-index video or audio files + +There is now an option to re-index video or audio files that have failed during the indexing process. + +### Improve accessibility support + +Fixed bugs related to CSS, theming and accessibility: + +* high contrast +* account settings and insights views in the [portal](https://www.videoindexer.ai). + +## July 2021 + +### Automatic Scaling of Media Reserved Units + +Starting August 1st 2021, Azure Video Analyzer for Media (formerly Video Indexer) enabled [Media Reserved Units (MRUs)](/azure/azure/media-services/latest/concept-media-reserved-units) auto scaling by [Azure Media Services](/azure/azure/media-services/latest/media-services-overview), as a result you do not need to manage them through Azure Video Analyzer for Media. 
That will allow price optimization, for example price reduction in many cases, based on your business needs as it is being auto scaled. + +## June 2021 + +### Azure Video Indexer deployed in six new regions + +You can now create an Azure Video Indexer paid account in France Central, Central US, Brazil South, West Central US, Korea Central, and Japan West regions. + +## May 2021 + +### New source languages support for speech-to-text (STT), translation, and search + +Azure Video Indexer now supports STT, translation, and search in Chinese (Cantonese) ('zh-HK'), Dutch (Netherlands) ('Nl-NL'), Czech ('Cs-CZ'), Polish ('Pl-PL'), Swedish (Sweden) ('Sv-SE'), Norwegian('nb-NO'), Finnish('fi-FI'), Canadian French ('fr-CA'), Thai('th-TH'), +Arabic: (United Arab Emirates) ('ar-AE', 'ar-EG'), (Iraq) ('ar-IQ'), (Jordan) ('ar-JO'), (Kuwait) ('ar-KW'), (Lebanon) ('ar-LB'), (Oman) ('ar-OM'), (Qatar) ('ar-QA'), (Palestinian Authority) ('ar-PS'), (Syria) ('ar-SY'), and Turkish('tr-TR'). + +These languages are available in both API and Azure Video Indexer website. Select the language from the combobox under **Video source language**. + +### New theme for Azure Video Indexer + +New theme is available: 'Azure' along with the 'light' and 'dark themes. To select a theme, click on the gear icon in the top-right corner of the website, find themes under **User settings**. + +### New open-source code you can leverage + +Three new Git-Hub projects are available at our [GitHub repository](https://github.com/Azure-Samples/media-services-video-indexer): + +* Code to help you leverage the newly added [widget customization](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/Embedding%20widgets). +* Solution to help you add [custom search](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/VideoSearchWithAutoMLVision) to your video libraries. 
+* Solution to help you add [de-duplication](https://github.com/Azure-Samples/media-services-video-indexer/commit/6b828f598f5bf61ce1b6dbcbea9e8b87ba11c7b1) to your video libraries. + +### New option to toggle bounding boxes (for observed people) on the player + +When indexing a video through our advanced video settings, you can view our new observed people capabilities. If there are people detected in your media file, you can enable a bounding box on the detected person through the media player. + +## April 2021 + +The Video Indexer service was renamed to Azure Video Indexer. + +### Improved upload experience in the portal + +Azure Video Indexer has a new upload experience in the [portal](https://www.videoindexer.ai). To upload your media file, press the **Upload** button from the **Media files** tab. + +### New developer portal in available in gov-cloud + +[Azure Video Indexer Developer Portal](https://api-portal.videoindexer.ai) is now also available in Azure for US Government. + +### Observed people tracing (preview) + +Azure Video Indexer now detects observed people in videos and provides information such as the location of the person in the video frame and the exact timestamp (start, end) when a person appears. The API returns the bounding box coordinates (in pixels) for each person instance detected, including its confidence. + +For example, if a video contains a person, the detect operation will list the person appearances together with their coordinates in the video frames. You can use this functionality to determine the person path in a video. It also lets you determine whether there are multiple instances of the same person in a video. + +The newly added observed people tracing feature is available when indexing your file by choosing the **Advanced option** -> **Advanced video** or **Advanced video + audio** preset (under Video + audio indexing). Standard and basic indexing presets will not include this new advanced model. 
+ +When you choose to see Insights of your video on the Azure Video Indexer website, the Observed People Tracing will show up on the page with all detected people thumbnails. You can choose a thumbnail of a person and see where the person appears in the video player. + +The feature is also available in the JSON file generated by Azure Video Indexer. For more information, see [Trace observed people in a video](observed-people-tracing.md). + +### Detected acoustic events with **Audio Effects Detection** (preview) + +You can now see the detected acoustic events in the closed captions file. The file can be downloaded from the Azure Video Indexer portal and is available as an artifact in the GetArtifact API. + +**Audio Effects Detection** (preview) component detects various acoustics events and classifies them into different acoustic categories (such as Gunshot, Screaming, Crowd Reaction and more). For more information, see [Audio effects detection](audio-effects-detection.md). + +## March 2021 + +### Audio analysis + +Audio analysis is available now in additional new bundle of audio features at different price point. The new **Basic Audio** analysis preset provides a low-cost option to only extract speech transcription, translation and format output captions and subtitles. The **Basic Audio** preset will produce two separate meters on your bill, including a line for transcription and a separate line for caption and subtitle formatting. More information on the pricing, see the [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/) page. + +The newly added bundle is available when indexing or re-indexing your file by choosing the **Advanced option** -> **Basic Audio** preset (under the **Video + audio indexing** drop-down box). 
+ +### New developer portal + +Azure Video Indexer has a new [Developer Portal](https://api-portal.videoindexer.ai/), try out the new Azure Video Indexer APIs and find all the relevant resources in one place: [GitHub repository](https://github.com/Azure-Samples/media-services-video-indexer), [Stack overflow](https://stackoverflow.com/questions/tagged/video-indexer), [Azure Video Indexer tech community](https://techcommunity.microsoft.com/t5/azure-media-services/bg-p/AzureMediaServices/label-name/Video%20Indexer) with relevant blog posts, [Azure Video Indexer FAQs](faq.yml), [User Voice](https://feedback.azure.com/d365community/forum/09041fae-0b25-ec11-b6e6-000d3a4f0858) to provide your feedback and suggest features, and ['CodePen' link](https://codepen.io/videoindexer) with widgets code samples. + +### Advanced customization capabilities for insight widget + +SDK is now available to embed Azure Video Indexer's insights widget in your own service and customize its style and data. The SDK supports the standard Azure Video Indexer insights widget and a fully customizable insights widget. Code sample is available in [Azure Video Indexer GitHub repository](https://github.com/Azure-Samples/media-services-video-indexer/tree/master/Embedding%20widgets/widget-customization). With this advanced customization capabilities, solution developer can apply custom styling and bring customer’s own AI data and present that in the insight widget (with or without Azure Video Indexer insights). 
+ +### Azure Video Indexer deployed in the US North Central , US West and Canada Central + +You can now create an Azure Video Indexer paid account in the US North Central, US West and Canada Central regions + +### New source languages support for speech-to-text (STT), translation and search + +Azure Video Indexer now support STT, translation and search in Danish ('da-DK'), Norwegian('nb-NO'), Swedish('sv-SE'), Finnish('fi-FI'), Canadian French ('fr-CA'), Thai('th-TH'), Arabic ('ar-BH', 'ar-EG', 'ar-IQ', 'ar-JO', 'ar-KW', 'ar-LB', 'ar-OM', 'ar-QA', 'ar-S', and 'ar-SY'), and Turkish('tr-TR'). Those languages are available in both API and Azure Video Indexer website. + +### Search by Topic in Azure Video Indexer Website + +You can now use the search feature, at the top of the [Azure Video Indexer website](https://www.videoindexer.ai/account/login) page, to search for videos with specific topics. + +## February 2021 + +### Multiple account owners + +Account owner role was added to Azure Video Indexer. You can add, change, and remove users; change their role. For details on how to share an account, see [Invite users](invite-users.md). + +### Audio event detection (public preview) + +> [!NOTE] +> This feature is only available in trial accounts. + +Azure Video Indexer now detects the following audio effects in the non-speech segments of the content: gunshot, glass shatter, alarm, siren, explosion, dog bark, screaming, laughter, crowd reactions (cheering, clapping, and booing) and Silence. + +The newly added audio affects feature is available when indexing your file by choosing the **Advanced option** -> **Advanced audio** preset (under Video + audio indexing). Standard indexing will only include **silence** and **crowd reaction**. + +The **clapping** event type that was included in the previous audio effects model, is now extracted a part of the **crowd reaction** event type. 
+ +When you choose to see **Insights** of your video on the [Azure Video Indexer](https://www.videoindexer.ai/) website, the Audio Effects show up on the page. + +:::image type="content" source="./media/release-notes/audio-detection.png" alt-text="Audio event detection"::: + +### Named entities enhancement + +The extracted list of people and location was extended and updated in general. + +In addition, the model now includes people and locations in-context which are not famous, like a ‘Sam’ or ‘Home’ in the video. + +## January 2021 + +### Azure Video Indexer is deployed on US Government cloud + +You can now create an Azure Video Indexer paid account on US government cloud in Virginia and Arizona regions. +Azure Video Indexer free trial offering isn't available in the mentioned region. For more information go to Azure Video Indexer Documentation. + +### Azure Video Indexer deployed in the India Central region + +You can now create an Azure Video Indexer paid account in the India Central region. + +### New Dark Mode for the Azure Video Indexer website experience + +The Azure Video Indexer website experiences is now available in dark mode. +To enable the dark mode open the settings panel and toggle on the **Dark Mode** option. + +:::image type="content" source="./media/release-notes/dark-mode.png" alt-text="Dark mode setting"::: + +## December 2020 + +### Azure Video Indexer deployed in the Switzerland West and Switzerland North + +You can now create an Azure Video Indexer paid account in the Switzerland West and Switzerland North regions. + +## October 2020 + +### Animated character identification improvements + +Azure Video Indexer supports detection, grouping, and recognition of characters in animated content via integration with Cognitive Services custom vision. We added a major improvement to this AI algorithm in the detection and characters recognition, as a result insight accuracy and identified characters are significantly improved. 
### Planned Azure Video Indexer website authentication changes
                +> Alternatively, you can create a paid account and migrate the data. + +## August 2020 + +### Mobile design for the Azure Video Indexer website + +The Azure Video Indexer website experience is now supporting mobile devices. The user experience is responsive to adapt to your mobile screen size (excluding customization UIs). + +### Accessibility improvements and bug fixes + +As part of WCAG (Web Content Accessibility guidelines), the Azure Video Indexer website experiences is aligned with grade C, as part of Microsoft Accessibility standards. Several bugs and improvements related to keyboard navigation, programmatic access, and screen reader were solved. + +## July 2020 + +### GA for multi-language identification + +Multi-language identification is moved from preview to GA and ready for productive use. + +There is no pricing impact related to the "Preview to GA" transition. + +### Azure Video Indexer website improvements + +#### Adjustments in the video gallery + +New search bar for deep insights search with additional filtering capabilities was added. Search results were also enhanced. + +New list view with ability to sort and manage video archive with multiple files. + +#### New panel for easy selection and configuration + +Side panel for easy selection and user configuration was added, allowing simple and quick account creation and sharing as well as setting configuration. + +Side panel is also used for user preferences and help. + +## June 2020 + +### Search by topics + +You can now use the search API to search for videos with specific topics (API only). + +Topics is added as part of the `textScope` (optional parameter). See [API](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Search-Videos) for details. + +### Labels enhancement + +The label tagger was upgraded and now includes more visual labels that can be identified. 
+ +## May 2020 + +### Azure Video Indexer deployed in the East US + +You can now create an Azure Video Indexer paid account in the East US region. + +### Azure Video Indexer URL + +Azure Video Indexer regional endpoints were all unified to start only with www. No action item is required. + +From now on, you reach www.videoindexer.ai whether it is for embedding widgets or logging into Azure Video Indexer web applications. + +Also wus.videoindexer.ai would be redirected to www. More information is available in [Embed Azure Video Indexer widgets in your apps](video-indexer-embed-widgets.md). + +## April 2020 + +### New widget parameters capabilities + +The **Insights** widget includes new parameters: `language` and `control`. + +The **Player** widget has a new `locale` parameter. Both `locale` and `language` parameters control the player’s language. + +For more information, see the [widget types](video-indexer-embed-widgets.md#widget-types) section. + +### New player skin + +A new player skin launched with updated design. + +### Prepare for upcoming changes + +* Today, the following APIs return an account object: + + * [Create-Paid-Account](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Create-Paid-Account) + * [Get-Account](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Account) + * [Get-Accounts-Authorization](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Accounts-Authorization) + * [Get-Accounts-With-Token](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Accounts-With-Token) + + The Account object has a `Url` field pointing to the location of the [Azure Video Indexer website](https://www.videoindexer.ai/). +For paid accounts the `Url` field is currently pointing to an internal URL instead of the public website. 
+In the coming weeks we will change it and return the [Azure Video Indexer website](https://www.videoindexer.ai/) URL for all accounts (trial and paid). + + Do not use the internal URLs, you should be using the [Azure Video Indexer public APIs](https://api-portal.videoindexer.ai/). +* If you are embedding Azure Video Indexer URLs in your applications and the URLs are not pointing to the [Azure Video Indexer website](https://www.videoindexer.ai/) or the Azure Video Indexer API endpoint (`https://api.videoindexer.ai`) but rather to a regional endpoint (for example, `https://wus2.videoindexer.ai`), regenerate the URLs. + + You can do it by either: + + * Replacing the URL with a URL pointing to the Azure Video Indexer widget APIs (for example, the [insights widget](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Get-Video-Insights-Widget)) + * Using the Azure Video Indexer website to generate a new embedded URL: + + Press **Play** to get to your video's page -> click the **</> Embed** button -> copy the URL into your application: + + The regional URLs are not supported and will be blocked in the coming weeks. + +## January 2020 + +### Custom language support for additional languages + +Azure Video Indexer now supports custom language models for `ar-SY` , `en-UK`, and `en-AU` (API only). + +### Delete account timeframe action update + +Delete account action now deletes the account within 90 days instead of 48 hours. + +### New Azure Video Indexer GitHub repository + +A new Azure Video Indexer GitHub with different projects, getting started guides and code samples is now available: +https://github.com/Azure-Samples/media-services-video-indexer + +### Swagger update + +Azure Video Indexer unified **authentications** and **operations** into a single [Azure Video Indexer OpenAPI Specification (swagger)](https://api-portal.videoindexer.ai/api-details#api=Operations&operation). 
Developers can find the APIs in [Azure Video Indexer Developer Portal](https://api-portal.videoindexer.ai/). + +## December 2019 + +### Update transcript with the new API + +Update a specific section in the transcript using the [Update-Video-Index](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Update-Video-Index) API. + +### Fix account configuration from the Azure Video Indexer portal + +You can now update Media Services connection configuration in order to self-help with issues like: + +* incorrect Azure Media Services resource +* password changes +* Media Services resources were moved between subscriptions + +To fix the account configuration, in the Azure Video Indexer portal navigate to Settings > Account tab (as owner). + +### Configure the custom vision account + +Configure the custom vision account on paid accounts using the Azure Video Indexer portal (previously, this was only supported by API). To do that, sign in to the Azure Video Indexer portal, choose Model Customization > Animated characters > Configure. + +### Scenes, shots and keyframes – now in one insight pane + +Scenes, shots, and keyframes are now merged into one insight for easier consumption and navigation. When you select the desired scene you can see what shots and keyframes it consists of. + +### Notification about a long video name + +When a video name is longer than 80 characters, Azure Video Indexer shows a descriptive error on upload. + +### Streaming endpoint is disabled notification + +When streaming endpoint is disabled, Azure Video Indexer will show a descriptive error on the player page. 
+ +### Error handling improvement + +Status code 409 will now be returned from [Re-Index Video](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Re-Index-Video) and [Update Video Index](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Update-Video-Index) APIs in case a video is actively indexed, to prevent overriding the current re-index changes by accident. + +## November 2019 + +* Korean custom language models support + + Azure Video Indexer now supports custom language models in Korean (`ko-KR`) in both the API and portal. +* New languages supported for speech-to-text (STT) + + Azure Video Indexer APIs now support STT in Arabic Levantine (ar-SY), English UK dialect (en-GB), and English Australian dialect (en-AU). + + For video upload, we replaced zh-HANS to zh-CN, both are supported but zh-CN is recommended and more accurate. + +## October 2019 + +* Search for animated characters in the gallery + + When indexing animated characters, you can now search for them in the account’s video galley. For more information, see [Animated characters recognition](animated-characters-recognition.md). + +## September 2019 + +Multiple advancements announced at IBC 2019: + +* Animated character recognition (public preview) + + Ability to detect group ad recognize characters in animated content, via integration with custom vision. For more information, see [Animated character detection](animated-characters-recognition.md). +* Multi-language identification (public preview) + + Detect segments in multiple languages in the audio track and create a multilingual transcript based on them. Initial support: English, Spanish, German and French. For more information, see [Automatically identify and transcribe multi-language content](multi-language-identification-transcription.md). +* Named entity extraction for People and Location + + Extracts brands, locations, and people from speech and visual text via natural language processing (NLP). 
+* Editorial shot type classification + + Tagging of shots with editorial types such as close up, medium shot, two shot, indoor, outdoor etc. For more information, see [Editorial shot type detection](scenes-shots-keyframes.md#editorial-shot-type-detection). +* Topic inferencing enhancement - now covering level 2 + + The topic inferencing model now supports deeper granularity of the IPTC taxonomy. Read full details at [Azure Media Services new AI-powered innovation](https://azure.microsoft.com/blog/azure-media-services-new-ai-powered-innovation/). + +## August 2019 updates + +### Azure Video Indexer deployed in UK South + +You can now create an Azure Video Indexer paid account in the UK south region. + +### New Editorial Shot Type insights available + +New tags added to video shots provides editorial “shot types” to identify them with common editorial phrases used in the content creation workflow such as: extreme closeup, closeup, wide, medium, two shot, outdoor, indoor, left face and right face (Available in the JSON). + +### New People and Locations entities extraction available + +Azure Video Indexer identifies named locations and people via natural language processing (NLP) from the video’s OCR and transcription. Azure Video Indexer uses machine learning algorithm to recognize when specific locations (for example, the Eiffel Tower) or people (for example, John Doe) are being called out in a video. + +### Keyframes extraction in native resolution + +Keyframes extracted by Azure Video Indexer are available in the original resolution of the video. + +### GA for training custom face models from images + +Training faces from images moved from Preview mode to GA (available via API and in the portal). + +> [!NOTE] +> There is no pricing impact related to the "Preview to GA" transition. + +### Hide gallery toggle option + +User can choose to hide the gallery tab from the portal (similar to hiding the samples tab). 
+ +### Maximum URL size increased + +Support for URL query string of 4096 (instead of 2048) on indexing a video. + +### Support for multi-lingual projects + +Projects can now be created based on videos indexed in different languages (API only). + +## July 2019 + +### Editor as a widget + +The Azure Video Indexer AI-editor is now available as a widget to be embedded in customer applications. + +### Update custom language model from closed caption file from the portal + +Customers can provide VTT, SRT, and TTML file formats as input for language models in the customization page of the portal. + +## June 2019 + +### Azure Video Indexer deployed to Japan East + +You can now create an Azure Video Indexer paid account in the Japan East region. + +### Create and repair account API (Preview) + +Added a new API that enables you to [update the Azure Media Service connection endpoint or key](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Update-Paid-Account-Azure-Media-Services). + +### Improve error handling on upload + +A descriptive message is returned in case of misconfiguration of the underlying Azure Media Services account. + +### Player timeline Keyframes preview + +You can now see an image preview for each time on the player's timeline. + +### Editor semi-select + +You can now see a preview of all the insights that are selected as a result of choosing a specific insight timeframe in the editor. + +## May 2019 + +### Update custom language model from closed caption file + +[Create custom language model](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Create-Language-Model) and [Update custom language models](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Update-Language-Model) APIs now support VTT, SRT, and TTML file formats as input for language models. 
+ +When calling the [Update Video transcript API](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Update-Video-Transcript), the transcript is added automatically. The training model associated with the video is updated automatically as well. For information on how to customize and train your language models, see [Customize a Language model with Azure Video Indexer](customize-language-model-overview.md). + +### New download transcript formats – TXT and CSV + +In addition to the closed captioning format already supported (SRT, VTT, and TTML), Azure Video Indexer now supports downloading the transcript in TXT and CSV formats. + +## Next steps + +[Overview](video-indexer-overview.md) diff --git a/articles/azure-video-indexer/scenes-shots-keyframes.md b/articles/azure-video-indexer/scenes-shots-keyframes.md new file mode 100644 index 0000000000000..e25b68a622c45 --- /dev/null +++ b/articles/azure-video-indexer/scenes-shots-keyframes.md @@ -0,0 +1,121 @@ +--- +title: Azure Video Indexer (formerly Azure Video Analyzer for Media) scenes, shots, and keyframes +description: This topic gives an overview of the Azure Video Indexer (formerly Azure Video Analyzer for Media) scenes, shots, and keyframes. +ms.topic: how-to +ms.date: 07/05/2019 +ms.author: juliako +--- + +# Scenes, shots, and keyframes + +Azure Video Indexer (formerly Azure Video Analyzer for Media) supports segmenting videos into temporal units based on structural and semantic properties. This capability enables customers to easily browse, manage, and edit their video content based on varying granularities. For example, based on scenes, shots, and keyframes, described in this topic. + +![Scenes, shots, and keyframes](./media/scenes-shots-keyframes/scenes-shots-keyframes.png) + +## Scene detection + +Azure Video Indexer determines when a scene changes in video based on visual cues. 
A scene depicts a single event and it is composed of a series of consecutive shots, which are semantically related. A scene thumbnail is the first keyframe of its underlying shot. Azure Video Indexer segments a video into scenes based on color coherence across consecutive shots and retrieves the beginning and end time of each scene. Scene detection is considered a challenging task as it involves quantifying semantic aspects of videos. + +> [!NOTE] +> Applicable to videos that contain at least 3 scenes. + +## Shot detection + +Azure Video Indexer determines when a shot changes in the video based on visual cues, by tracking both abrupt and gradual transitions in the color scheme of adjacent frames. The shot's metadata includes a start and end time, as well as the list of keyframes included in that shot. The shots are consecutive frames taken from the same camera at the same time. + +## Keyframe detection + +Azure Video Indexer selects the frame(s) that best represent each shot. Keyframes are the representative frames selected from the entire video based on aesthetic properties (for example, contrast and stableness). Azure Video Indexer retrieves a list of keyframe IDs as part of the shot's metadata, based on which customers can extract the keyframe as a high resolution image. + +### Extracting Keyframes + +To extract high-resolution keyframes for your video, you must first upload and index the video. + +![Keyframes](./media/scenes-shots-keyframes/extracting-keyframes.png) + +#### With the Azure Video Indexer website + +To extract keyframes using the Azure Video Indexer website, upload and index your video. Once the indexing job is complete, click on the **Download** button and select **Artifacts (ZIP)**. This will download the artifacts folder to your computer. + +![Screenshot that shows the "Download" drop-down with "Artifacts" selected.](./media/scenes-shots-keyframes/extracting-keyframes2.png) + +Unzip and open the folder. 
+In the *_KeyframeThumbnail* folder, you will find all of the keyframes that were extracted from your video.
+ +The shot size and scale are determined based on the distance between the camera and the faces appearing in the frame. Using these properties, Azure Video Indexer detects the following shot types: + +* Wide: shows an entire person’s body. +* Medium: shows a person's upper-body and face. +* Close up: mainly shows a person’s face. +* Extreme close-up: shows a person’s face filling the screen. + +Shot types can also be determined by location of the subject characters with respect to the center of the frame. This property defines the following shot types in Azure Video Indexer: + +* Left face: a person appears in the left side of the frame. +* Center face: a person appears in the central region of the frame. +* Right face: a person appears in the right side of the frame. +* Outdoor: a person appears in an outdoor setting. +* Indoor: a person appears in an indoor setting. + +Additional characteristics: + +* Two shots: shows two persons’ faces of medium size. +* Multiple faces: more than two persons. + + +## Next steps + +[Examine the Azure Video Indexer output produced by the API](video-indexer-output-json-v2.md#scenes) diff --git a/articles/azure-video-indexer/toc.yml b/articles/azure-video-indexer/toc.yml new file mode 100644 index 0000000000000..2789ef6b9179e --- /dev/null +++ b/articles/azure-video-indexer/toc.yml @@ -0,0 +1,145 @@ +- name: Azure Video Indexer documentation + href: ./index.yml +- name: Overview + items: + - name: What is Azure Video Indexer? 
+ href: video-indexer-overview.md + - name: Language support + href: language-support.md +- name: Quickstarts + expanded: true + items: + - name: Get started + href: video-indexer-get-started.md + - name: Invite users + href: invite-users.md +- name: Tutorials + items: + - name: Create a new ARM account (Preview) + href: create-video-analyzer-for-media-account.md + - name: Create a new account + href: connect-to-azure.md + - name: Use Azure Video Indexer API + href: video-indexer-use-apis.md + - name: Logic Apps connector + href: logic-apps-connector-tutorial.md + - name: Deploy using ARM template + href: deploy-with-arm-template.md + - name: Index Video from OneDrive + href: odrv-download.md +- name: Samples + items: + - name: Azure Video Indexer samples + href: https://github.com/Azure-Samples/media-services-video-indexer +- name: Concepts + items: + - name: Overview + href: concepts-overview.md + - name: Compare Azure Video Indexer and Media Services presets + href: compare-video-indexer-with-media-services-presets.md + - name: Manage multiple tenants + href: manage-multiple-tenants.md + - name: Language identification model + href: language-identification-model.md + - name: Live stream analysis + href: live-stream-analysis.md + - name: Observed people in a video + href: observed-people-tracing.md + - name: Matched person + href: matched-person.md + - name: People's detected clothing + href: detected-clothing.md + - name: Audio effects detection + href: audio-effects-detection.md + - name: Customizing content models + items: + - name: Overview + href: customize-content-models-overview.md + - name: Animated characters + href: animated-characters-recognition.md + - name: Brands + href: customize-brands-model-overview.md + - name: Language + href: customize-language-model-overview.md + - name: Person + href: customize-person-model-overview.md +- name: How to guides + items: + - name: Connect an existing account to ARM (Preview) + href: 
connect-classic-account-to-arm.md + - name: Manage account connected to Azure + href: manage-account-connected-to-azure.md + - name: Upload and index videos + href: upload-index-videos.md + - name: Examine Azure Video Indexer output + href: video-indexer-output-json-v2.md + - name: Find exact moments within videos + displayName: search + href: video-indexer-search.md + - name: Detect scenes, shots, keyframes + href: scenes-shots-keyframes.md + - name: Identify and transcribe multi-language content + href: multi-language-identification-transcription.md + - name: View and edit insights + href: video-indexer-view-edit.md + - name: Use editor to create projects + href: use-editor-create-project.md + - name: Embed widgets into your application + href: video-indexer-embed-widgets.md + - name: Considerations when using Azure Video Indexer at scale + href: considerations-when-use-at-scale.md + - name: Network Security + href: network-security.md + - name: Disaster recovery + href: video-indexer-disaster-recovery.md + - name: Customize content models + items: + - name: Animated characters + href: animated-characters-recognition-how-to.md + - name: Person + items: + - name: using the website + href: customize-person-model-with-website.md + - name: using the API + href: customize-person-model-with-api.md + - name: Brands + items: + - name: using the website + href: customize-brands-model-with-website.md + - name: using the API + href: customize-brands-model-with-api.md + - name: Language + items: + - name: using the website + href: customize-language-model-with-website.md + - name: using the API + href: customize-language-model-with-api.md +- name: Reference + items: + - name: Azure Video Indexer API + href: https://api-portal.videoindexer.ai/ + - name: Azure Video Indexer ARM REST API + href: /rest/api/videoindexer/accounts?branch=videoindex +- name: Resources + items: + - name: Azure Roadmap + href: https://azure.microsoft.com/roadmap/?category=web-mobile + - name: Pricing 
+ href: https://azure.microsoft.com/pricing/details/azure/media-services/ + - name: Regional availability + href: https://azure.microsoft.com/global-infrastructure/services/ + - name: Regions + displayName: location + href: regions.md + - name: FAQ + href: faq.yml + - name: Compliance + href: https://gallery.technet.microsoft.com/Overview-of-Azure-c1be3942 + - name: Release notes + href: release-notes.md + - name: Stack Overflow + href: https://stackoverflow.com/search?q=video-indexer + - name: User voice + href: https://aka.ms/UserVoiceVI + - name: Blogs + href: https://azure.microsoft.com/blog/tag/video-indexer/ diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/upload-index-videos.md b/articles/azure-video-indexer/upload-index-videos.md similarity index 87% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/upload-index-videos.md rename to articles/azure-video-indexer/upload-index-videos.md index 7474dc5dd6898..f6dac6c89ed1b 100644 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/upload-index-videos.md +++ b/articles/azure-video-indexer/upload-index-videos.md @@ -1,7 +1,6 @@ --- -title: Upload and index videos with Azure Video Analyzer for Media (formerly Video Indexer) -description: Learn two methods for uploading and indexing videos by using Azure Video Analyzer for Media (formerly Video Indexer). -ms.service: azure-video-analyzer +title: Upload and index videos with Azure Video Indexer (formerly Azure Video Analyzer for Media) +description: Learn two methods for uploading and indexing videos by using Azure Video Indexer (formerly Azure Video Analyzer for Media). ms.topic: article ms.date: 11/15/2021 ms.custom: ignite-fall-2021 @@ -9,46 +8,46 @@ ms.custom: ignite-fall-2021 # Upload and index your videos -This article shows how to upload and index videos by using the Azure Video Analyzer for Media (formerly Video Indexer) website and the Upload Video API. 
+This article shows how to upload and index videos by using the Azure Video Indexer (formerly Azure Video Analyzer for Media) website and the Upload Video API. -When you're creating a Video Analyzer for Media account, you choose between: +When you're creating an Azure Video Indexer account, you choose between: -- A free trial account. Video Analyzer for Media provides up to 600 minutes of free indexing to website users and up to 2,400 minutes of free indexing to API users. -- A paid option where you're not limited by a quota. You create a Video Analyzer for Media account that's [connected to your Azure subscription and an Azure Media Services account](connect-to-azure.md). You pay for indexed minutes. +- A free trial account. Azure Video Indexer provides up to 600 minutes of free indexing to website users and up to 2,400 minutes of free indexing to API users. +- A paid option where you're not limited by a quota. You create an Azure Video Indexer account that's [connected to your Azure subscription and an Azure Media Services account](connect-to-azure.md). You pay for indexed minutes. -For more information about account types, see [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/). +For more information about account types, see [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/). When you're uploading videos by using the API, you have the following options: * Upload your video from a URL (preferred). * Send the video file as a byte array in the request body. -* Use existing an Azure Media Services asset by providing the [asset ID](/azure/media-services/latest/assets-concept). This option is supported in paid accounts only. +* Use existing an Azure Media Services asset by providing the [asset ID](/azure/azure/media-services/latest/assets-concept). This option is supported in paid accounts only. 
## Supported file formats -For a list of file formats that you can use with Video Analyzer for Media, see [Standard Encoder formats and codecs](/azure/media-services/latest/encode-media-encoder-standard-formats-reference). +For a list of file formats that you can use with Azure Video Indexer, see [Standard Encoder formats and codecs](/azure/azure/media-services/latest/encode-media-encoder-standard-formats-reference). ## Storage of video files -When you use Video Analyzer for Media, video files are stored in Azure Storage through Media Services. The limits are 30 GB in size and 4 hours in length. +When you use Azure Video Indexer, video files are stored in Azure Storage through Media Services. The limits are 30 GB in size and 4 hours in length. -You can always delete your video and audio files, along with any metadata and insights that Video Analyzer for Media has extracted from them. After you delete a file from Video Analyzer for Media, the file and its metadata and insights are permanently removed from Video Analyzer for Media. However, if you've implemented your own backup solution in Azure Storage, the file remains in Azure Storage. +You can always delete your video and audio files, along with any metadata and insights that Azure Video Indexer has extracted from them. After you delete a file from Azure Video Indexer, the file and its metadata and insights are permanently removed from Azure Video Indexer. However, if you've implemented your own backup solution in Azure Storage, the file remains in Azure Storage. -The persistence of a video is identical whether you upload by using the Video Analyzer for Media website or by using the Upload Video API. +The persistence of a video is identical whether you upload by using the Azure Video Indexer website or by using the Upload Video API. ## Upload and index a video by using the website -Sign in on the [Video Analyzer for Media](https://www.videoindexer.ai/) website, and then select **Upload**. 
+Sign in on the [Azure Video Indexer](https://www.videoindexer.ai/) website, and then select **Upload**. > [!div class="mx-imgBorder"] > :::image type="content" source="./media/video-indexer-get-started/video-indexer-upload.png" alt-text="Screenshot that shows the Upload button."::: -After your video is uploaded, Video Analyzer for Media starts indexing and analyzing the video. +After your video is uploaded, Azure Video Indexer starts indexing and analyzing the video. > [!div class="mx-imgBorder"] > :::image type="content" source="./media/video-indexer-get-started/progress.png" alt-text="Screenshot that shows the progress of an upload."::: -After Video Analyzer for Media is done analyzing, you get an email with a link to your video. The email also includes a short description of what was found in your video (for example: people, topics, optical character recognition). +After Azure Video Indexer is done analyzing, you get an email with a link to your video. The email also includes a short description of what was found in your video (for example: people, topics, optical character recognition). ## Upload and index a video by using the API @@ -56,11 +55,11 @@ You can use the [Upload Video](https://api-portal.videoindexer.ai/api-details#ap ### Configurations and parameters -This section describes some of the optional parameters and when to set them. For the most up-to-date info about parameters, see the [Video Analyzer for Media portal](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Upload-Video). +This section describes some of the optional parameters and when to set them. For the most up-to-date info about parameters, see the [Azure Video Indexer portal](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Upload-Video). #### externalID -Use this parameter to specify an ID that will be associated with the video. The ID can be applied to integration into an external video content management (VCM) system. 
The videos that are in the Video Analyzer for Media portal can be searched via the specified external ID. +Use this parameter to specify an ID that will be associated with the video. The ID can be applied to integration into an external video content management (VCM) system. The videos that are in the Azure Video Indexer portal can be searched via the specified external ID. #### callbackUrl @@ -68,7 +67,7 @@ Use this parameter to specify a callback URL. [!INCLUDE [callback url](./includes/callback-url.md)] -Video Analyzer for Media returns any existing parameters provided in the original URL. The URL must be encoded. +Azure Video Indexer returns any existing parameters provided in the original URL. The URL must be encoded. #### indexingPreset @@ -88,46 +87,46 @@ Use this parameter to define an AI bundle that you want to apply on your audio o > [!NOTE] > The preceding advanced presets include models that are in public preview. When these models reach general availability, there might be implications for the price. -Video Analyzer for Media covers up to two tracks of audio. If the file has more audio tracks, they're treated as one track. If you want to index the tracks separately, you need to extract the relevant audio file and index it as `AudioOnly`. +Azure Video Indexer covers up to two tracks of audio. If the file has more audio tracks, they're treated as one track. If you want to index the tracks separately, you need to extract the relevant audio file and index it as `AudioOnly`. -Price depends on the selected indexing option. For more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/media-services/). +Price depends on the selected indexing option. For more information, see [Media Services pricing](https://azure.microsoft.com/pricing/details/azure/media-services/). #### priority -Video Analyzer for Media indexes videos according to their priority. Use the `priority` parameter to specify the index priority. 
The following values are valid: `Low`, `Normal` (default), and `High`. +Azure Video Indexer indexes videos according to their priority. Use the `priority` parameter to specify the index priority. The following values are valid: `Low`, `Normal` (default), and `High`. This parameter is supported only for paid accounts. #### streamingPreset -After your video is uploaded, Video Analyzer for Media optionally encodes the video. It then proceeds to indexing and analyzing the video. When Video Analyzer for Media is done analyzing, you get a notification with the video ID. +After your video is uploaded, Azure Video Indexer optionally encodes the video. It then proceeds to indexing and analyzing the video. When Azure Video Indexer is done analyzing, you get a notification with the video ID. When you're using the [Upload Video](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Upload-Video) or [Re-Index Video](https://api-portal.videoindexer.ai/api-details#api=Operations&operation=Re-Index-Video) API, one of the optional parameters is `streamingPreset`. If you set `streamingPreset` to `Default`, `SingleBitrate`, or `AdaptiveBitrate`, the encoding process is triggered. After the indexing and encoding jobs are done, the video is published so you can also stream your video. The streaming endpoint from which you want to stream the video must be in the **Running** state. -For `SingleBitrate`, the standard encoder cost will apply for the output. If the video height is greater than or equal to 720, Video Analyzer for Media encodes it as 1280 x 720. Otherwise, it's encoded as 640 x 468. -The default setting is [content-aware encoding](/azure/media-services/latest/encode-content-aware-concept). +For `SingleBitrate`, the standard encoder cost will apply for the output. If the video height is greater than or equal to 720, Azure Video Indexer encodes it as 1280 x 720. Otherwise, it's encoded as 640 x 468. 
+The default setting is [content-aware encoding](/azure/media-services/latest/encode-content-aware-concept).
@@ -372,7 +371,7 @@ namespace VideoIndexerArm public static async Task Main(string[] args) { - // Build Azure Video Analyzer for Media resource provider client that has access token through Azure Resource Manager + // Build Azure Video Indexer resource provider client that has access token through Azure Resource Manager var videoIndexerResourceProviderClient = await VideoIndexerResourceProviderClient.BuildVideoIndexerResourceProviderClient(); // Get account details @@ -382,7 +381,7 @@ namespace VideoIndexerArm Console.WriteLine($"account id: {accountId}"); Console.WriteLine($"account location: {accountLocation}"); - // Get account-level access token for Azure Video Analyzer for Media + // Get account-level access token for Azure Video Indexer var accessTokenRequest = new AccessTokenRequest { PermissionType = AccessTokenPermission.Contributor, @@ -655,7 +654,7 @@ The upload operation might return the following status codes: - The byte array option times out after 30 minutes. - The URL provided in the `videoURL` parameter must be encoded. - Indexing Media Services assets has the same limitation as indexing from a URL. -- Video Analyzer for Media has a duration limit of 4 hours for a single file. +- Azure Video Indexer has a duration limit of 4 hours for a single file. - The URL must be accessible (for example, a public URL). If it's a private URL, the access token must be provided in the request. 
@@ -675,4 +674,4 @@ For information about a storage account that's behind a firewall, see the [FAQ]( ## Next steps -[Examine the Azure Video Analyzer for Media output produced by an API](video-indexer-output-json-v2.md) +[Examine the Azure Video Indexer output produced by an API](video-indexer-output-json-v2.md) diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/use-editor-create-project.md b/articles/azure-video-indexer/use-editor-create-project.md similarity index 79% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/use-editor-create-project.md rename to articles/azure-video-indexer/use-editor-create-project.md index b224bffa814e6..fbda5be3d4535 100644 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/use-editor-create-project.md +++ b/articles/azure-video-indexer/use-editor-create-project.md @@ -1,21 +1,19 @@ --- -title: Use the Azure Video Analyzer for Media (formerly Video Indexer) editor to create projects and add video clips -titleSuffix: Azure Video Analyzer for Media -description: This topic demonstrates how to use the Azure Video Analyzer for Media (formerly Video Indexer) editor to create projects and add video clips. +title: Use the Azure Video Indexer (formerly Azure Video Analyzer for Media) editor to create projects and add video clips +description: This topic demonstrates how to use the Azure Video Indexer (formerly Azure Video Analyzer for Media) editor to create projects and add video clips. services: azure-video-analyzer author: Juliako manager: femila ms.topic: article -ms.subservice: azure-video-analyzer-media ms.date: 11/28/2020 ms.author: juliako --- # Add video clips to your projects -The [Azure Video Analyzer for Media (formerly Video Indexer)](https://www.videoindexer.ai/) website enables you to use your video's deep insights to: find the right media content, locate the parts that you’re interested in, and use the results to create an entirely new project. 
+The [Azure Video Indexer (formerly Azure Video Analyzer for Media)](https://www.videoindexer.ai/) website enables you to use your video's deep insights to: find the right media content, locate the parts that you’re interested in, and use the results to create an entirely new project. -Once created, the project can be rendered and downloaded from Video Analyzer for Media and be used in your own editing applications or downstream workflows. +Once created, the project can be rendered and downloaded from Azure Video Indexer and be used in your own editing applications or downstream workflows. Some scenarios where you may find this feature useful are: @@ -27,7 +25,7 @@ This article shows how to create a project and add selected clips from the video ## Create new project and manage videos -1. Browse to the [Video Analyzer for Media](https://www.videoindexer.ai/) website and sign in. +1. Browse to the [Azure Video Indexer](https://www.videoindexer.ai/) website and sign in. 1. Select the **Projects** tab. If you have created projects before, you will see all of your other projects here. 1. Click **Create new project**. @@ -78,11 +76,11 @@ As you are selecting and ordering your clips, you can preview the video in the p ### Render and download the project > [!NOTE] -> For Video Analyzer for Media paid accounts, rendering your project has encoding costs. Video Analyzer for Media trial accounts are limited to 5 hours of rendering. +> For Azure Video Indexer paid accounts, rendering your project has encoding costs. Azure Video Indexer trial accounts are limited to 5 hours of rendering. -1. Once you are done, make sure that your project has been saved. You can now render this project. Click **Render**, a popup dialog comes up that tells you that Video Analyzer for Media will render a file and then the download link will be sent to your email. Select Proceed. +1. Once you are done, make sure that your project has been saved. You can now render this project. 
Click **Render**, a popup dialog comes up that tells you that Azure Video Indexer will render a file and then the download link will be sent to your email. Select Proceed. - :::image type="content" source="./media/video-indexer-view-edit/render-download.png" alt-text="Screenshot shows Video Analyzer for Media with the option to Render and download your project"::: + :::image type="content" source="./media/video-indexer-view-edit/render-download.png" alt-text="Screenshot shows Azure Video Indexer with the option to Render and download your project"::: You will also see a notification that the project is being rendered on top of the page. Once it is done being rendered, you will see a new notification that the project has been successfully rendered. Click the notification to download the project. It will download the project in mp4 format. 1. You can access saved projects from the **Projects** tab. @@ -93,12 +91,12 @@ As you are selecting and ordering your clips, you can preview the video in the p You can create a new project directly from a video in your account. -1. Go to the **Library** tab of the Video Analyzer for Media website. +1. Go to the **Library** tab of the Azure Video Indexer website. 1. Open the video that you want to use to create your project. On the insights and timeline page, select the **Video editor** button. This takes you to the same page that you used to create a new project. Unlike the new project, you see the timestamped insights segments of the video, that you had started editing previously. 
## See also -[Video Analyzer for Media overview](video-indexer-overview.md) +[Azure Video Indexer overview](video-indexer-overview.md) diff --git a/articles/azure-video-indexer/video-indexer-disaster-recovery.md b/articles/azure-video-indexer/video-indexer-disaster-recovery.md new file mode 100644 index 0000000000000..685880a3ef2d5 --- /dev/null +++ b/articles/azure-video-indexer/video-indexer-disaster-recovery.md @@ -0,0 +1,37 @@ +--- +title: Azure Video Indexer (formerly Azure Video Analyzer for Media) failover and disaster recovery +description: Learn how to fail over to a secondary Azure Video Indexer (formerly Azure Video Analyzer for Media) account if a regional datacenter failure or disaster occurs. +author: juliako +manager: femila +editor: '' +ms.workload: +ms.topic: article +ms.custom: +ms.date: 07/29/2019 +ms.author: juliako +--- +# Azure Video Indexer failover and disaster recovery + +Azure Video Indexer (formerly Azure Video Analyzer for Media) doesn't provide instant failover of the service if there's a regional datacenter outage or failure. This article explains how to configure your environment for a failover to ensure optimal availability for apps and minimized recovery time if a disaster occurs. + +We recommend that you configure business continuity disaster recovery (BCDR) across regional pairs to benefit from Azure's isolation and availability policies. For more information, see [Azure paired regions](../availability-zones/cross-region-replication-azure.md). + +## Prerequisites + +An Azure subscription. If you don't have an Azure subscription yet, sign up for [Azure free trial](https://azure.microsoft.com/free/). + +## Fail over to a secondary account + +To implement BCDR, you need to have two Azure Video Indexer accounts to handle redundancy. + +1. Create two Azure Video Indexer accounts connected to Azure (see [Create an Azure Video Indexer account](connect-to-azure.md)). 
Create one account for your primary region and the other to the paired Azure region. +1. If there's a failure in your primary region, switch to indexing using the secondary account. + +> [!TIP] +> You can automate BCDR by setting up activity log alerts for service health notifications as per [Create activity log alerts on service notifications](../service-health/alerts-activity-log-service-notifications-portal.md). + +For information about using multiple tenants, see [Manage multiple tenants](manage-multiple-tenants.md). To implement BCDR, choose one of these two options: [Azure Video Indexer account per tenant](./manage-multiple-tenants.md#azure-video-indexer-account-per-tenant) or [Azure subscription per tenant](./manage-multiple-tenants.md#azure-subscription-per-tenant). + +## Next steps + +[Manage an Azure Video Indexer account connected to Azure](manage-account-connected-to-azure.md). diff --git a/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-embed-widgets.md b/articles/azure-video-indexer/video-indexer-embed-widgets.md similarity index 84% rename from articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-embed-widgets.md rename to articles/azure-video-indexer/video-indexer-embed-widgets.md index e1d613760b35c..87d102133dca1 100644 --- a/articles/azure-video-analyzer/video-analyzer-for-media-docs/video-indexer-embed-widgets.md +++ b/articles/azure-video-indexer/video-indexer-embed-widgets.md @@ -1,6 +1,6 @@ --- -title: Embed Azure Video Analyzer for Media (formerly Video Indexer) widgets in your apps -description: Learn how to embed Azure Video Analyzer for Media (formerly Video Indexer) widgets in your apps. +title: Embed Azure Video Indexer (formerly Azure Video Analyzer for Media) widgets in your apps +description: Learn how to embed Azure Video Indexer (formerly Azure Video Analyzer for Media) widgets in your apps. 
ms.topic: how-to ms.date: 04/15/2022 ms.author: juliako @@ -9,7 +9,7 @@ ms.custom: devx-track-js # Embed Video Analyzer for Media widgets in your apps -This article shows how you can embed Azure Video Analyzer for Media (formerly Video Indexer) widgets in your apps. Video Analyzer for Media supports embedding three types of widgets into your apps: *Cognitive Insights*, *Player*, and *Editor*. +This article shows how you can embed Azure Video Indexer (formerly Azure Video Analyzer for Media) widgets in your apps. Azure Video Indexer supports embedding three types of widgets into your apps: *Cognitive Insights*, *Player*, and *Editor*. Starting with version 2, the widget base URL includes the region of the specified account. For example, an account in the West US region generates: `https://www.videoindexer.ai/embed/insights/.../?location=westus2`. @@ -21,7 +21,7 @@ A Cognitive Insights widget includes all visual insights that were extracted fro |Name|Definition|Description| |---|---|---| -|`widgets` | Strings separated by comma | Allows you to control the insights that you want to render.
                Example: `https://www.videoindexer.ai/embed/insights///?widgets=people,keywords` renders only people and keywords UI insights.
                Available options: people, animatedCharacters ,keywords, audioEffects, labels, sentiments, emotions, topics, keyframes, transcript, ocr, speakers, scenes, spokenLanguage, observedPeople and namedEntities.| +|`widgets` | Strings separated by comma | Allows you to control the insights that you want to render.
                Example: `https://www.videoindexer.ai/embed/insights///?widgets=people,keywords` renders only people and keywords UI insights.
                Available options: people, animatedCharacters, keywords, audioEffects, labels, sentiments, emotions, topics, keyframes, transcript, ocr, speakers, scenes, spokenLanguage, observedPeople and namedEntities.| |`controls`|Strings separated by comma|Allows you to control the controls that you want to render.
                Example: `https://www.videoindexer.ai/embed/insights///?controls=search,download` renders only search option and download button.
                Available options: search, download, presets, language.| |`language`|A short language code (language name)|Controls insights language.
                Example: `https://www.videoindexer.ai/embed/insights///?language=es-es`
                or `https://www.videoindexer.ai/embed/insights///?language=spanish`| |`locale` | A short language code | Controls the language of the UI. The default value is `en`.
                Example: `locale=de`.| @@ -67,7 +67,7 @@ The `location` parameter must be included in the embedded links, see [how to get To embed a video, use the portal as described below: -1. Sign in to the [Video Analyzer for Media](https://www.videoindexer.ai/) website. +1. Sign in to the [Azure Video Indexer](https://www.videoindexer.ai/) website. 1. Select the video that you want to work with and press **Play**. 1. Select the type of widget that you want (**Cognitive Insights**, **Player**, or **Editor**). 1. Click **</> Embed**. @@ -100,22 +100,22 @@ To provide editing insights capabilities in your embedded widget, you must pass The Cognitive Insights widget can interact with a video on your app. This section shows how to achieve this interaction. -![Cognitive Insights widget Video Analyzer for Media](./media/video-indexer-embed-widgets/video-indexer-widget03.png) +![Cognitive Insights widget](./media/video-indexer-embed-widgets/video-indexer-widget03.png) ### Flow overview When you edit the transcripts, the following flow occurs: 1. You edit the transcript in the timeline. -1. Video Analyzer for Media gets these updates and saves them in the [from transcript edits](customize-language-model-with-website.md#customize-language-models-by-correcting-transcripts) in the language model. +1. Azure Video Indexer gets these updates and saves them in the [from transcript edits](customize-language-model-with-website.md#customize-language-models-by-correcting-transcripts) in the language model. 1. The captions are updated: - * If you are using Video Analyzer for Media's player widget - it’s automatically updated. + * If you are using Azure Video Indexer's player widget - it’s automatically updated. * If you are using an external player - you get a new captions file user the **Get video captions** call. 
### Cross-origin communications -To get Video Analyzer for Media widgets to communicate with other components, the Video Analyzer for Media service: +To get Azure Video Indexer widgets to communicate with other components: - Uses the cross-origin communication HTML5 method `postMessage`. - Validates the message across VideoIndexer.ai origin. @@ -124,7 +124,7 @@ If you implement your own player code and integrate with Cognitive Insights widg ### Embed widgets in your app or blog (recommended) -This section shows how to achieve interaction between two Video Analyzer for Media widgets so that when a user selects the insight control on your app, the player jumps to the relevant moment. +This section shows how to achieve interaction between two Azure Video Indexer widgets so that when a user selects the insight control on your app, the player jumps to the relevant moment. 1. Copy the Player widget embed code. 2. Copy the Cognitive Insights embed code. @@ -133,14 +133,14 @@ This section shows how to achieve interaction between two Video Analyzer for Med Now when a user selects the insight control on your app, the player jumps to the relevant moment. -For more information, see the [Video Analyzer for Media - Embed both Widgets demo](https://codepen.io/videoindexer/pen/NzJeOb). +For more information, see the [Azure Video Indexer - Embed both Widgets demo](https://codepen.io/videoindexer/pen/NzJeOb). ### Embed the Cognitive Insights widget and use Azure Media Player to play the content This section shows how to achieve interaction between a Cognitive Insights widget and an Azure Media Player instance by using the [AMP plug-in](https://breakdown.blob.core.windows.net/public/amp-vb.plugin.js). -1. Add a Video Analyzer for Media plug-in for the AMP player:
                `` -2. Instantiate Azure Media Player with the Video Analyzer for Media plug-in. +1. Add an Azure Video Indexer plug-in for the AMP player:
                `` +2. Instantiate Azure Media Player with the Azure Video Indexer plug-in. ```javascript // Init the source. @@ -191,7 +191,7 @@ You can now communicate with Azure Media Player. For more information, see the [Azure Media Player + VI Insights demo](https://codepen.io/videoindexer/pen/rYONrO). -### Embed the Video Analyzer for Media Cognitive Insights widget and use a different video player +### Embed the Azure Video Indexer Cognitive Insights widget and use a different video player If you use a video player other than Azure Media Player, you must manually manipulate the video player to achieve the communication. @@ -244,7 +244,7 @@ For more information, see the [Azure Media Player + VI Insights demo](https://co ## Adding subtitles -If you embed Video Analyzer for Media insights with your own [Azure Media Player](https://aka.ms/azuremediaplayer), you can use the `GetVttUrl` method to get closed captions (subtitles). You can also call a JavaScript method from the Video Analyzer for Media AMP plug-in `getSubtitlesUrl` (as shown earlier). +If you embed Azure Video Indexer insights with your own [Azure Media Player](https://aka.ms/azuremediaplayer), you can use the `GetVttUrl` method to get closed captions (subtitles). You can also call a JavaScript method from the Azure Video Indexer AMP plug-in `getSubtitlesUrl` (as shown earlier). ## Customizing embeddable widgets @@ -268,13 +268,13 @@ Notice that this option is relevant only in cases when you need to open the insi ### Player widget -If you embed Video Analyzer for Media player, you can choose the size of the player by specifying the size of the iframe. +If you embed Azure Video Indexer player, you can choose the size of the player by specifying the size of the iframe. For example: `