diff --git a/.gitignore b/.gitignore index 791b4ebf6..957a66fa5 100644 --- a/.gitignore +++ b/.gitignore @@ -6,4 +6,5 @@ _themes*/ _repo.*/ .openpublishing.buildcore.ps1 -ie.log \ No newline at end of file +ie.log +report.json \ No newline at end of file diff --git a/azure-vote-nginx-ssl.yml b/azure-vote-nginx-ssl.yml deleted file mode 100644 index d03fd94b1..000000000 --- a/azure-vote-nginx-ssl.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -# INGRESS WITH SSL PROD -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: vote-ingress - namespace: default - annotations: - kubernetes.io/tls-acme: "true" - nginx.ingress.kubernetes.io/ssl-redirect: "true" - cert-manager.io/cluster-issuer: letsencrypt-prod -spec: - ingressClassName: nginx - tls: - - hosts: - - mydnslabel9730fc.westeurope.cloudapp.azure.com - secretName: azure-vote-nginx-secret - rules: - - host: mydnslabel9730fc.westeurope.cloudapp.azure.com - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: azure-vote-front - port: - number: 80 diff --git a/azure-vote-start.yml b/azure-vote-start.yml deleted file mode 100644 index fabe2db67..000000000 --- a/azure-vote-start.yml +++ /dev/null @@ -1,226 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: rabbitmq -spec: - replicas: 1 - selector: - matchLabels: - app: rabbitmq - template: - metadata: - labels: - app: rabbitmq - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: rabbitmq - image: mcr.microsoft.com/mirror/docker/library/rabbitmq:3.10-management-alpine - ports: - - containerPort: 5672 - name: rabbitmq-amqp - - containerPort: 15672 - name: rabbitmq-http - env: - - name: RABBITMQ_DEFAULT_USER - value: "username" - - name: RABBITMQ_DEFAULT_PASS - value: "password" - resources: - requests: - cpu: 10m - memory: 128Mi - limits: - cpu: 250m - memory: 256Mi - volumeMounts: - - name: rabbitmq-enabled-plugins - mountPath: /etc/rabbitmq/enabled_plugins - subPath: enabled_plugins - volumes: - - name: 
rabbitmq-enabled-plugins - configMap: - name: rabbitmq-enabled-plugins - items: - - key: rabbitmq_enabled_plugins - path: enabled_plugins ---- -apiVersion: v1 -data: - rabbitmq_enabled_plugins: | - [rabbitmq_management,rabbitmq_prometheus,rabbitmq_amqp1_0]. -kind: ConfigMap -metadata: - name: rabbitmq-enabled-plugins ---- -apiVersion: v1 -kind: Service -metadata: - name: rabbitmq -spec: - selector: - app: rabbitmq - ports: - - name: rabbitmq-amqp - port: 5672 - targetPort: 5672 - - name: rabbitmq-http - port: 15672 - targetPort: 15672 - type: ClusterIP ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: order-service -spec: - replicas: 1 - selector: - matchLabels: - app: order-service - template: - metadata: - labels: - app: order-service - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: order-service - image: ghcr.io/azure-samples/aks-store-demo/order-service:latest - ports: - - containerPort: 3000 - env: - - name: ORDER_QUEUE_HOSTNAME - value: "rabbitmq" - - name: ORDER_QUEUE_PORT - value: "5672" - - name: ORDER_QUEUE_USERNAME - value: "username" - - name: ORDER_QUEUE_PASSWORD - value: "password" - - name: ORDER_QUEUE_NAME - value: "orders" - - name: FASTIFY_ADDRESS - value: "0.0.0.0" - resources: - requests: - cpu: 1m - memory: 50Mi - limits: - cpu: 75m - memory: 128Mi - initContainers: - - name: wait-for-rabbitmq - image: busybox - command: ['sh', '-c', 'until nc -zv rabbitmq 5672; do echo waiting for rabbitmq; sleep 2; done;'] - resources: - requests: - cpu: 1m - memory: 50Mi - limits: - cpu: 75m - memory: 128Mi ---- -apiVersion: v1 -kind: Service -metadata: - name: order-service -spec: - type: ClusterIP - ports: - - name: http - port: 3000 - targetPort: 3000 - selector: - app: order-service ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: product-service -spec: - replicas: 1 - selector: - matchLabels: - app: product-service - template: - metadata: - labels: - app: product-service - spec: - nodeSelector: - 
"kubernetes.io/os": linux - containers: - - name: product-service - image: ghcr.io/azure-samples/aks-store-demo/product-service:latest - ports: - - containerPort: 3002 - resources: - requests: - cpu: 1m - memory: 1Mi - limits: - cpu: 1m - memory: 7Mi ---- -apiVersion: v1 -kind: Service -metadata: - name: product-service -spec: - type: ClusterIP - ports: - - name: http - port: 3002 - targetPort: 3002 - selector: - app: product-service ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: store-front -spec: - replicas: 1 - selector: - matchLabels: - app: store-front - template: - metadata: - labels: - app: store-front - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: store-front - image: ghcr.io/azure-samples/aks-store-demo/store-front:latest - ports: - - containerPort: 8080 - name: store-front - env: - - name: VUE_APP_ORDER_SERVICE_URL - value: "http://order-service:3000/" - - name: VUE_APP_PRODUCT_SERVICE_URL - value: "http://product-service:3002/" - resources: - requests: - cpu: 1m - memory: 200Mi - limits: - cpu: 1000m - memory: 512Mi ---- -apiVersion: v1 -kind: Service -metadata: - name: store-front -spec: - ports: - - port: 80 - targetPort: 8080 - selector: - app: store-front - type: LoadBalancer diff --git a/cluster-issuer-prod.yml b/cluster-issuer-prod.yml deleted file mode 100644 index e49a9a8c9..000000000 --- a/cluster-issuer-prod.yml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: letsencrypt-prod -spec: - acme: - # You must replace this email address with your own. - # Let's Encrypt will use this to contact you about expiring - # certificates, and issues related to your account. - email: namanparikh@microsoft.com - # ACME server URL for Let’s Encrypt’s prod environment. 
- # The staging environment will not issue trusted certificates but is - # used to ensure that the verification process is working properly - # before moving to production - server: https://acme-v02.api.letsencrypt.org/directory - # Secret resource used to store the account's private key. - privateKeySecretRef: - name: letsencrypt - # Enable the HTTP-01 challenge provider - # you prove ownership of a domain by ensuring that a particular - # file is present at the domain - solvers: - - http01: - ingress: - class: nginx - podTemplate: - spec: - nodeSelector: - "kubernetes.io/os": linux diff --git a/report.json b/report.json deleted file mode 100644 index 69001f6f5..000000000 --- a/report.json +++ /dev/null @@ -1,356 +0,0 @@ -{ - "name": "Quickstart: Deploy Inspektor Gadget in an Azure Kubernetes Service cluster", - "properties": { - "author": "josebl", - "description": "This tutorial shows how to deploy Inspektor Gadget in an AKS cluster", - "ms.author": "josebl", - "ms.custom": "innovation-engine", - "ms.date": "12/06/2023", - "ms.topic": "article", - "title": "Deploy Inspektor Gadget in an Azure Kubernetes Service cluster" - }, - "environmentVariables": { - "AKS_CLUSTER_NAME": "aks-cnpg-3cee3l", - "AKS_CLUSTER_VERSION": "1.29", - "AKS_MANAGED_IDENTITY_NAME": "mi-aks-cnpg-3cee3l", - "AKS_NODE_COUNT": "2", - "AKS_PRIMARY_CLUSTER_FED_CREDENTIAL_NAME": "pg-primary-fedcred1-cnpg-l1tsugyd", - "AKS_PRIMARY_CLUSTER_NAME": "aks-primary-cnpg-l1tsugyd", - "AKS_PRIMARY_CLUSTER_PG_DNSPREFIX": "a33a3d08c14", - "AKS_PRIMARY_MANAGED_RG_NAME": "rg-cnpg-primary-aksmanaged-l1tsugyd", - "AKS_UAMI_CLUSTER_IDENTITY_NAME": "mi-aks-cnpg-l1tsugyd", - "BARMAN_CONTAINER_NAME": "barman", - "CLUSTER_VERSION": "1.27", - "ENABLE_AZURE_PVC_UPDATES": "true", - "ERROR": "\u001b[31m", - "IP_ADDRESS": "52.233.203.69", - "KEYVAULT_NAME": "kv-cnpg-3cee3l", - "LOCAL_NAME": "cnpg", - "LOCATION": "eastus", - "MOTD_SHOWN": "update-motd", - "MY_AKS_CLUSTER_NAME": "myAKSClusterb60d78", - 
"MY_COMPUTER_VISION_NAME": "computervisiont6xygvc3", - "MY_CONTAINER_APP_ENV_NAME": "containerappenvt6xygvc3", - "MY_CONTAINER_APP_NAME": "containerappt6xygvc3", - "MY_DATABASE_NAME": "dbt6xygvc3", - "MY_DATABASE_PASSWORD": "dbpasst6xygvc3", - "MY_DATABASE_SERVER_NAME": "dbservert6xygvc3", - "MY_DATABASE_USERNAME": "dbusert6xygvc3", - "MY_DNS_LABEL": "mydnslabel3f8d9e", - "MY_RESOURCE_GROUP_NAME": "myResourceGroupb60d78", - "MY_STATIC_WEB_APP_NAME": "myStaticWebApp85f4f3", - "MY_STORAGE_ACCOUNT_NAME": "storaget6xygvc3", - "MY_USERNAME": "azureuser", - "MY_VM_IMAGE": "Canonical:0001-com-ubuntu-minimal-jammy:minimal-22_04-lts-gen2:latest", - "MY_VM_NAME": "myVMecb9fc", - "MyAction": "allow", - "MyAddressPrefix": "0.0.0.0/0", - "MyAddressPrefixes1": "10.0.0.0/8", - "MyAddressPrefixes2": "10.10.1.0/24", - "MyAddressPrefixes3": "10.20.1.0/24", - "MyAddressPrefixes4": "10.100.1.0/26", - "MyAddressPrefixes5": "10.30.1.0/24", - "MyAdminUsername": "d95734", - "MyApiserverVisibility": "Private", - "MyCollectionName1": "AROd95734", - "MyCollectionName2": "Dockerd95734", - "MyCustomData": "cloud_init_upgrade.txt", - "MyDearmor": "-o", - "MyDisablePrivateLinkServiceNetworkPolicies": "true", - "MyGenerateSshKeys": "export", - "MyImage": "Ubuntu2204", - "MyIngressVisibility": "Private", - "MyMasterSubnet": "-master", - "MyName": "NetworkWatcherAgentLinux2ef723", - "MyName1": "ubuntu-jumpd95734", - "MyName2": "aro-udrd95734", - "MyName3": "-masterd95734", - "MyName4": "-workerd95734", - "MyNextHopType": "VirtualAppliance", - "MyPriority1": "100", - "MyPriority2": "200", - "MyProtocols": "http=80", - "MyPublicIpAddress1": "jumphost-ip", - "MyPublicIpAddress2": "fw-ip", - "MyPublisher": "Microsoft.Azure.NetworkWatcher", - "MyPullSecret": "@pull-secret.txt", - "MyQuery1": "ipAddress", - "MyQuery2": "ipConfigurations[0].privateIPAddress", - "MyRemove": "routeTable", - "MyResourceGroup": "d95734", - "MyRouteTable": "aro-udr", - "MyRouteTableName": "aro-udrd95734", - 
"MyServiceEndpoints": "Microsoft.ContainerRegistry", - "MySku": "Standard", - "MySourceAddresses": "*", - "MyTargetFqdns1": "cert-api.access.redhat.com", - "MyTargetFqdns2": "*cloudflare.docker.com", - "MyVersion": "1.4", - "MyVmName": "myVM12ef723", - "MyVnetName": "d95734", - "MyWorkerSubnet": "-worker", - "NC": "\u001b(B\u001b[m", - "OUTPUT": "\u001b[32m", - "PG_NAMESPACE": "cnpg-database", - "PG_PRIMARY_CLUSTER_NAME": "pg-primary-cnpg-l1tsugyd", - "PG_PRIMARY_STORAGE_ACCOUNT_NAME": "hacnpgpsal1tsugyd", - "PG_STORAGE_BACKUP_CONTAINER_NAME": "backups", - "PG_SYSTEM_NAMESPACE": "cnpg-system", - "PRIMARY_CLUSTER_REGION": "westus3", - "RANDOM_ID": "b60d78", - "REGION": "eastus", - "RESOURCE_GROUP_NAME": "rg-cnpg-l1tsugyd", - "RGTAGS": "owner=cnpg", - "RG_NAME": "rg-cnpg-3cee3l", - "STORAGE_ACCOUNT_NAME": "storcnpg3cee3l", - "SUFFIX": "3cee3l", - "TAGS": "owner=user" - }, - "success": false, - "error": "failed to execute code block 0 on step 2.\nError: command exited with 'exit status 1' and the message 'WARNING: The behavior of this command has been altered by the following extension: aks-preview\nERROR: (SkuNotAvailable) Preflight validation check for resource(s) for container service myAKSClusterb60d78 in resource group MC_myResourceGroupb60d78_myAKSClusterb60d78_eastus failed. Message: The requested VM size for resource 'Following SKUs have failed for Capacity Restrictions: Standard_DS2_v2' is currently not available in location 'eastus'. Please try another size or deploy to a different location or different zone. See https://aka.ms/azureskunotavailable for details.. Details: \nCode: SkuNotAvailable\nMessage: Preflight validation check for resource(s) for container service myAKSClusterb60d78 in resource group MC_myResourceGroupb60d78_myAKSClusterb60d78_eastus failed. Message: The requested VM size for resource 'Following SKUs have failed for Capacity Restrictions: Standard_DS2_v2' is currently not available in location 'eastus'. 
Please try another size or deploy to a different location or different zone. See https://aka.ms/azureskunotavailable for details.. Details: \n'\nStdErr: WARNING: The behavior of this command has been altered by the following extension: aks-preview\nERROR: (SkuNotAvailable) Preflight validation check for resource(s) for container service myAKSClusterb60d78 in resource group MC_myResourceGroupb60d78_myAKSClusterb60d78_eastus failed. Message: The requested VM size for resource 'Following SKUs have failed for Capacity Restrictions: Standard_DS2_v2' is currently not available in location 'eastus'. Please try another size or deploy to a different location or different zone. See https://aka.ms/azureskunotavailable for details.. Details: \nCode: SkuNotAvailable\nMessage: Preflight validation check for resource(s) for container service myAKSClusterb60d78 in resource group MC_myResourceGroupb60d78_myAKSClusterb60d78_eastus failed. Message: The requested VM size for resource 'Following SKUs have failed for Capacity Restrictions: Standard_DS2_v2' is currently not available in location 'eastus'. Please try another size or deploy to a different location or different zone. See https://aka.ms/azureskunotavailable for details.. Details: \n", - "failedAtStep": -1, - "steps": [ - { - "codeBlock": { - "language": "bash", - "content": "if ! 
[ -x \"$(command -v kubectl)\" ]; then az aks install-cli; fi\n", - "header": "Connect to the cluster", - "description": "Install az aks CLI locally using the az aks install-cli command", - "resultBlock": { - "language": "", - "content": "", - "expectedSimilarityScore": 0, - "expectedRegexPattern": null - } - }, - "codeBlockNumber": 0, - "error": null, - "stdErr": "", - "stdOut": "", - "stepName": "Connect to the cluster", - "stepNumber": 3, - "success": false, - "similarityScore": 0 - }, - { - "codeBlock": { - "language": "bash", - "content": "IG_VERSION=$(curl -s https://api.github.com/repos/inspektor-gadget/inspektor-gadget/releases/latest | jq -r .tag_name)\nIG_ARCH=amd64\nmkdir -p $HOME/.local/bin\nexport PATH=$PATH:$HOME/.local/bin\ncurl -sL https://github.com/inspektor-gadget/inspektor-gadget/releases/download/${IG_VERSION}/kubectl-gadget-linux-${IG_ARCH}-${IG_VERSION}.tar.gz | tar -C $HOME/.local/bin -xzf - kubectl-gadget\n", - "header": "Installing the kubectl plugin: `gadget`", - "description": "[!NOTE]\nIf you want to install it using [`krew`](https://sigs.k8s.io/krew) or compile it from the source, please follow the official documentation: [installing kubectl gadget](https://github.com/inspektor-gadget/inspektor-gadget/blob/main/docs/install.md#installing-kubectl-gadget).", - "resultBlock": { - "language": "", - "content": "", - "expectedSimilarityScore": 0, - "expectedRegexPattern": null - } - }, - "codeBlockNumber": 0, - "error": null, - "stdErr": "", - "stdOut": "", - "stepName": "Installing the kubectl plugin: `gadget`", - "stepNumber": 4, - "success": false, - "similarityScore": 0 - }, - { - "codeBlock": { - "language": "bash", - "content": "kubectl gadget version\n", - "header": "Installing Inspektor Gadget in the cluster", - "description": "Now, let’s verify the installation by running the `version` command again:", - "resultBlock": { - "language": "text", - "content": "Client version: vX.Y.Z\nServer version: vX.Y.Z\n", - 
"expectedSimilarityScore": 0, - "expectedRegexPattern": "(?m)^Client version: v\\d+\\.\\d+\\.\\d+$\\n^Server version: v\\d+\\.\\d+\\.\\d+$" - } - }, - "codeBlockNumber": 1, - "error": null, - "stdErr": "", - "stdOut": "", - "stepName": "Installing Inspektor Gadget in the cluster", - "stepNumber": 5, - "success": false, - "similarityScore": 0 - }, - { - "codeBlock": { - "language": "bash", - "content": "kubectl gadget help\n", - "header": "Installing Inspektor Gadget in the cluster", - "description": "You can now start running the gadgets:", - "resultBlock": { - "language": "", - "content": "", - "expectedSimilarityScore": 0, - "expectedRegexPattern": null - } - }, - "codeBlockNumber": 2, - "error": null, - "stdErr": "", - "stdOut": "", - "stepName": "Installing Inspektor Gadget in the cluster", - "stepNumber": 5, - "success": false, - "similarityScore": 0 - }, - { - "codeBlock": { - "language": "bash", - "content": "az group create --name $MY_RESOURCE_GROUP_NAME --location $REGION\n", - "header": "Create a resource group", - "description": "A resource group is a container for related resources. All resources must be placed in a resource group. We will create one for this tutorial. 
The following command creates a resource group with the previously defined $MY_RESOURCE_GROUP_NAME and $REGION parameters.", - "resultBlock": { - "language": "JSON", - "content": "{\n \"id\": \"/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup210\",\n \"location\": \"eastus\",\n \"managedBy\": null,\n \"name\": \"testResourceGroup\",\n \"properties\": {\n \"provisioningState\": \"Succeeded\"\n },\n \"tags\": null,\n \"type\": \"Microsoft.Resources/resourceGroups\"\n}\n", - "expectedSimilarityScore": 0.3, - "expectedRegexPattern": null - } - }, - "codeBlockNumber": 0, - "error": null, - "stdErr": "", - "stdOut": "{\n \"id\": \"/subscriptions/325e7c34-99fb-4190-aa87-1df746c67705/resourceGroups/myResourceGroupb60d78\",\n \"location\": \"eastus\",\n \"managedBy\": null,\n \"name\": \"myResourceGroupb60d78\",\n \"properties\": {\n \"provisioningState\": \"Succeeded\"\n },\n \"tags\": null,\n \"type\": \"Microsoft.Resources/resourceGroups\"\n}\n", - "stepName": "Create a resource group", - "stepNumber": 1, - "success": true, - "similarityScore": 0.7850672214487863 - }, - { - "codeBlock": { - "language": "bash", - "content": "az aks create \\\n --resource-group $MY_RESOURCE_GROUP_NAME \\\n --name $MY_AKS_CLUSTER_NAME \\\n --location $REGION \\\n --no-ssh-key\n", - "header": "Create AKS Cluster", - "description": "This will take a few minutes.", - "resultBlock": { - "language": "", - "content": "", - "expectedSimilarityScore": 0, - "expectedRegexPattern": null - } - }, - "codeBlockNumber": 0, - "error": {}, - "stdErr": "WARNING: The behavior of this command has been altered by the following extension: aks-preview\nERROR: (SkuNotAvailable) Preflight validation check for resource(s) for container service myAKSClusterb60d78 in resource group MC_myResourceGroupb60d78_myAKSClusterb60d78_eastus failed. 
Message: The requested VM size for resource 'Following SKUs have failed for Capacity Restrictions: Standard_DS2_v2' is currently not available in location 'eastus'. Please try another size or deploy to a different location or different zone. See https://aka.ms/azureskunotavailable for details.. Details: \nCode: SkuNotAvailable\nMessage: Preflight validation check for resource(s) for container service myAKSClusterb60d78 in resource group MC_myResourceGroupb60d78_myAKSClusterb60d78_eastus failed. Message: The requested VM size for resource 'Following SKUs have failed for Capacity Restrictions: Standard_DS2_v2' is currently not available in location 'eastus'. Please try another size or deploy to a different location or different zone. See https://aka.ms/azureskunotavailable for details.. Details: \n", - "stdOut": "", - "stepName": "Create AKS Cluster", - "stepNumber": 2, - "success": false, - "similarityScore": 0 - }, - { - "codeBlock": { - "language": "bash", - "content": "az aks get-credentials --resource-group $MY_RESOURCE_GROUP_NAME --name $MY_AKS_CLUSTER_NAME --overwrite-existing\n", - "header": "Connect to the cluster", - "description": "[!WARNING]\nThis will overwrite any existing credentials with the same entry", - "resultBlock": { - "language": "", - "content": "", - "expectedSimilarityScore": 0, - "expectedRegexPattern": null - } - }, - "codeBlockNumber": 1, - "error": null, - "stdErr": "", - "stdOut": "", - "stepName": "Connect to the cluster", - "stepNumber": 3, - "success": false, - "similarityScore": 0 - }, - { - "codeBlock": { - "language": "bash", - "content": "kubectl get nodes\n", - "header": "Connect to the cluster", - "description": "Verify the connection to your cluster using the kubectl get command. 
This command returns a list of the cluster nodes.", - "resultBlock": { - "language": "", - "content": "", - "expectedSimilarityScore": 0, - "expectedRegexPattern": null - } - }, - "codeBlockNumber": 2, - "error": null, - "stdErr": "", - "stdOut": "", - "stepName": "Connect to the cluster", - "stepNumber": 3, - "success": false, - "similarityScore": 0 - }, - { - "codeBlock": { - "language": "bash", - "content": "kubectl gadget version\n", - "header": "Installing the kubectl plugin: `gadget`", - "description": "Now, let’s verify the installation by running the `version` command:", - "resultBlock": { - "language": "text", - "content": "Client version: vX.Y.Z\nServer version: not installed\n", - "expectedSimilarityScore": 0, - "expectedRegexPattern": "(?m)^Client version: v\\d+\\.\\d+\\.\\d+$\\n^Server version: not installed$" - } - }, - "codeBlockNumber": 1, - "error": null, - "stdErr": "", - "stdOut": "", - "stepName": "Installing the kubectl plugin: `gadget`", - "stepNumber": 4, - "success": false, - "similarityScore": 0 - }, - { - "codeBlock": { - "language": "bash", - "content": "kubectl gadget deploy\n", - "header": "Installing Inspektor Gadget in the cluster", - "description": "[!NOTE]\nSeveral options are available to customize the deployment: use a specific container image, deploy to specific nodes, and many others. 
To know all of them, please check the official documentation: [installing in the cluster](https://github.com/inspektor-gadget/inspektor-gadget/blob/main/docs/install.md#installing-in-the-cluster).", - "resultBlock": { - "language": "", - "content": "", - "expectedSimilarityScore": 0, - "expectedRegexPattern": null - } - }, - "codeBlockNumber": 0, - "error": null, - "stdErr": "", - "stdOut": "", - "stepName": "Installing Inspektor Gadget in the cluster", - "stepNumber": 5, - "success": false, - "similarityScore": 0 - }, - { - "codeBlock": { - "language": "bash", - "content": "export RANDOM_ID=\"$(openssl rand -hex 3)\"\nexport MY_RESOURCE_GROUP_NAME=\"myResourceGroup$RANDOM_ID\"\nexport REGION=\"eastus\"\nexport MY_AKS_CLUSTER_NAME=\"myAKSCluster$RANDOM_ID\"\n", - "header": "Define Environment Variables", - "description": "The First step in this tutorial is to define environment variables:", - "resultBlock": { - "language": "", - "content": "", - "expectedSimilarityScore": 0, - "expectedRegexPattern": null - } - }, - "codeBlockNumber": 0, - "error": null, - "stdErr": "", - "stdOut": "", - "stepName": "Define Environment Variables", - "stepNumber": 0, - "success": true, - "similarityScore": 1 - } - ] -} \ No newline at end of file diff --git a/scenarios/AIChatApp/ai-chat-app.md b/scenarios/AIChatApp/ai-chat-app.md new file mode 100644 index 000000000..c5fb4689b --- /dev/null +++ b/scenarios/AIChatApp/ai-chat-app.md @@ -0,0 +1,139 @@ +# Create an Azure OpenAI, LangChain, ChromaDB, and Chainlit Chat App in Container Apps + +This guide will walk you through the steps to create an Azure OpenAI, LangChain, ChromaDB, and Chainlit Chat App in Azure Container Apps. + +## Prerequisites + +- An Azure account with an active subscription. +- Azure CLI installed on your local machine. +- Docker installed on your local machine. + +## Step 1: Create an Azure OpenAI Service + +1. 
**Create a Resource Group**: + ```azurecli-interactive + az group create --name myResourceGroup --location eastus + ``` + +2. **Create an Azure OpenAI Service**: + ```azurecli-interactive + az cognitiveservices account create \ + --name myOpenAIService \ + --resource-group myResourceGroup \ + --kind OpenAI \ + --sku S0 \ + --location eastus \ + --yes + ``` + +## Step 2: Set Up LangChain and ChromaDB + +1. **Create a Dockerfile**: + Create a `Dockerfile` to set up LangChain and ChromaDB. + + ```dockerfile + # Use an official Python runtime as a parent image + FROM python:3.9-slim + + # Set the working directory in the container + WORKDIR /app + + # Copy the current directory contents into the container at /app + COPY . /app + + # Install any needed packages specified in requirements.txt + RUN pip install --no-cache-dir -r requirements.txt + + # Make port 80 available to the world outside this container + EXPOSE 80 + + # Define environment variable + ENV NAME World + + # Run app.py when the container launches + CMD ["python", "app.py"] + ``` + +2. **Create a `requirements.txt` file**: + List the dependencies for LangChain and ChromaDB. + + ```plaintext + langchain + chromadb + chainlit + azure-openai + ``` + +3. **Create an `app.py` file**: + Set up a basic Chainlit app using LangChain and ChromaDB. + + ```python + from langchain import LangChain + from chromadb import ChromaDB + from chainlit import Chainlit + + # Initialize LangChain + langchain = LangChain() + + # Initialize ChromaDB + chromadb = ChromaDB() + + # Initialize Chainlit + chainlit = Chainlit(langchain, chromadb) + + # Define a simple chat endpoint + @chainlit.route('/chat', methods=['POST']) + def chat(): + user_input = request.json.get('input') + response = chainlit.chat(user_input) + return jsonify({'response': response}) + + if __name__ == '__main__': + chainlit.run(host='0.0.0.0', port=80) + ``` + +## Step 3: Build and Push the Docker Image + +1. 
**Build the Docker Image**: + ```bash + docker build -t mychainlitapp . + ``` + +2. **Push the Docker Image to Azure Container Registry**: + ```azurecli-interactive + az acr create --resource-group myResourceGroup --name myContainerRegistry --sku Basic + az acr login --name myContainerRegistry + docker tag mychainlitapp mycontainerregistry.azurecr.io/mychainlitapp:v1 + docker push mycontainerregistry.azurecr.io/mychainlitapp:v1 + ``` + +## Step 4: Deploy to Azure Container Apps + +1. **Create a Container App Environment**: + ```azurecli-interactive + az containerapp env create --name myContainerAppEnv --resource-group myResourceGroup --location eastus + ``` + +2. **Deploy the Container App**: + ```azurecli-interactive + az containerapp create \ + --name myChainlitApp \ + --resource-group myResourceGroup \ + --environment myContainerAppEnv \ + --image mycontainerregistry.azurecr.io/mychainlitapp:v1 \ + --target-port 80 \ + --ingress 'external' \ + --cpu 0.5 --memory 1.0Gi + ``` + +## Step 5: Test the Deployment + +1. **Get the URL of the Container App**: + ```azurecli-interactive + az containerapp show --name myChainlitApp --resource-group myResourceGroup --query properties.configuration.ingress.fqdn + ``` + +2. **Test the Chat App**: + Open the URL in your browser and interact with your Chainlit chat app. + +By following these steps, you will have successfully created and deployed an Azure OpenAI, LangChain, ChromaDB, and Chainlit Chat App in Azure Container Apps. \ No newline at end of file diff --git a/scenarios/AKSKaito/aks-kaito.md b/scenarios/AKSKaito/aks-kaito.md new file mode 100644 index 000000000..4e579f29c --- /dev/null +++ b/scenarios/AKSKaito/aks-kaito.md @@ -0,0 +1,178 @@ +--- +title: Deploy an AI model on Azure Kubernetes Service (AKS) with the AI toolchain operator (preview) +description: Learn how to enable the AI toolchain operator add-on on Azure Kubernetes Service (AKS) to simplify OSS AI model management and deployment. 
+ms.topic: article +ms.custom: azure-kubernetes-service, devx-track-azurecli +ms.date: 02/28/2024 +author: schaffererin +ms.author: schaffererin + +--- + +## Deploy an AI model on Azure Kubernetes Service (AKS) with the AI toolchain operator (preview) + +The AI toolchain operator (KAITO) is a managed add-on for AKS that simplifies the experience of running OSS AI models on your AKS clusters. The AI toolchain operator automatically provisions the necessary GPU nodes and sets up the associated inference server as an endpoint server to your AI models. Using this add-on reduces your onboarding time and enables you to focus on AI model usage and development rather than infrastructure setup. + +This article shows you how to enable the AI toolchain operator add-on and deploy an AI model on AKS. + +[!INCLUDE [preview features callout](~/reusable-content/ce-skilling/azure/includes/aks/includes/preview/preview-callout.md)] + +## Before you begin + +* This article assumes a basic understanding of Kubernetes concepts. For more information, see [Kubernetes core concepts for AKS](./concepts-clusters-workloads.md). +* For ***all hosted model inference images*** and recommended infrastructure setup, see the [KAITO GitHub repository](https://github.com/Azure/kaito). +* The AI toolchain operator add-on currently supports KAITO version **v0.1.0**, please make a note of this in considering your choice of model from the KAITO model repository. + +## Prerequisites + +* If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) before you begin. + * If you have multiple Azure subscriptions, make sure you select the correct subscription in which the resources will be created and charged using the [az account set](https://learn.microsoft.com/en-us/cli/azure/account?view=azure-cli-latest#az-account-set) command. + + > [!NOTE] + > The subscription you use must have GPU VM quota. 
+ +* Azure CLI version 2.47.0 or later installed and configured. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI](/cli/azure/install-azure-cli). +* The Kubernetes command-line client, kubectl, installed and configured. For more information, see [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). +* [Install the Azure CLI AKS preview extension](#install-the-azure-cli-preview-extension). +* [Register the AI toolchain operator add-on feature flag](#register-the-ai-toolchain-operator-add-on-feature-flag). + +## Set up resource group + +Set up a resource group with a random ID. Create an Azure resource group using the [az group create](https://learn.microsoft.com/en-us/cli/azure/group?view=azure-cli-latest#az-group-create) command. + +```bash +export RANDOM_ID="$(openssl rand -hex 3)" +export AZURE_RESOURCE_GROUP="myKaitoResourceGroup$RANDOM_ID" +export REGION="centralus" +export CLUSTER_NAME="myClusterName$RANDOM_ID" +export SUBSCRIPTION_ID="0c8875c7-e423-4caa-827a-1f0350bd8dd3" + +az group create \ + --name $AZURE_RESOURCE_GROUP \ + --location $REGION \ +``` + +## Install the Azure CLI preview extension + +Install the Azure CLI preview extension using the [az extension add](https://learn.microsoft.com/en-us/cli/azure/extension?view=azure-cli-latest#az-extension-add) command. Then update the extension to make sure you have the latest version using the [az extension update](https://learn.microsoft.com/en-us/cli/azure/extension?view=azure-cli-latest#az-extension-update) command. + +```bash +az extension add --name aks-preview +az extension update --name aks-preview +``` + +## Register the AI toolchain operator add-on feature flag + +Register the AIToolchainOperatorPreview feature flag using the az feature register command. +It takes a few minutes for the registration to complete. 
+ +```bash +az feature register --namespace "Microsoft.ContainerService" --name "AIToolchainOperatorPreview" +``` + +## Verify the AI toolchain operator add-on registration + +Verify the registration using the [az feature show](https://learn.microsoft.com/en-us/cli/azure/feature?view=azure-cli-latest#az-feature-show) command. + +```bash +while true; do + status=$(az feature show --namespace "Microsoft.ContainerService" --name "AIToolchainOperatorPreview" --query "properties.state" -o tsv) + if [ "$status" == "Registered" ]; then + break + else + sleep 15 + fi +done +``` + +## Create an AKS cluster with the AI toolchain operator add-on enabled + +Create an AKS cluster with the AI toolchain operator add-on enabled using the [az aks create](https://learn.microsoft.com/en-us/cli/azure/aks?view=azure-cli-latest#az-aks-create) command with the `--enable-ai-toolchain-operator` and `--enable-oidc-issuer` flags. + +```bash +az aks create --location ${REGION} \ + --resource-group ${AZURE_RESOURCE_GROUP} \ + --name ${CLUSTER_NAME} \ + --enable-oidc-issuer \ + --node-os-upgrade-channel SecurityPatch \ + --auto-upgrade-channel stable \ + --enable-ai-toolchain-operator \ + --generate-ssh-keys \ + --k8s-support-plan KubernetesOfficial +``` + +## Connect to your cluster + +Configure `kubectl` to connect to your cluster using the [az aks get-credentials](https://learn.microsoft.com/en-us/cli/azure/aks?view=azure-cli-latest#az-aks-get-credentials) command. 
+ +```bash +az aks get-credentials --resource-group ${AZURE_RESOURCE_GROUP} --name ${CLUSTER_NAME} +``` + +## Create role assignment for the service principal + +```bash +export MC_RESOURCE_GROUP=$(az aks show --resource-group ${AZURE_RESOURCE_GROUP} \ + --name ${CLUSTER_NAME} \ + --query nodeResourceGroup \ + -o tsv) + +export KAITO_IDENTITY_NAME="ai-toolchain-operator-${CLUSTER_NAME}" + +export AKS_OIDC_ISSUER=$(az aks show --resource-group "${AZURE_RESOURCE_GROUP}" \ + --name "${CLUSTER_NAME}" \ + --query "oidcIssuerProfile.issuerUrl" \ + -o tsv) + +export PRINCIPAL_ID=$(az identity show --name "ai-toolchain-operator-${CLUSTER_NAME}" \ + --resource-group "${MC_RESOURCE_GROUP}" \ + --query 'principalId' \ + -o tsv) + +az role assignment create --role "Contributor" \ + --assignee "${PRINCIPAL_ID}" \ + --scope "/subscriptions/${SUBSCRIPTION_ID}/resourcegroups/${AZURE_RESOURCE_GROUP}" +``` + +## Establish a federated identity credential + +Create the federated identity credential between the managed identity, AKS OIDC issuer, and subject using the [az identity federated-credential create](https://learn.microsoft.com/en-us/cli/azure/identity/federated-credential?view=azure-cli-latest) command. + +```bash +az identity federated-credential create --name "kaito-federated-identity" \ + --identity-name "${KAITO_IDENTITY_NAME}" \ + -g "${MC_RESOURCE_GROUP}" \ + --issuer "${AKS_OIDC_ISSUER}" \ + --subject system:serviceaccount:"kube-system:kaito-gpu-provisioner" \ + --audience api://AzureADTokenExchange +``` + +## Verify that your deployment is running + +Restart the KAITO GPU provisioner deployment on your pods using the `kubectl rollout restart` command: + +```bash +kubectl rollout restart deployment/kaito-gpu-provisioner -n kube-system +``` + +## Deploy a default hosted AI model + +Deploy the Falcon 7B-instruct model from the KAITO model repository using the `kubectl apply` command. 
+ +```bash +kubectl apply -f https://raw.githubusercontent.com/Azure/kaito/main/examples/inference/kaito_workspace_falcon_7b-instruct.yaml +``` + +## Ask a question + +Verify deployment done: `kubectl get workspace workspace-falcon-7b-instruct -w`. +Store IP: `export SERVICE_IP=$(kubectl get svc workspace-falcon-7b-instruct -o jsonpath='{.spec.clusterIP}')`. +Ask question: `kubectl run -it --rm --restart=Never curl --image=curlimages/curl -- curl -X POST http://$SERVICE_IP/chat -H "accept: application/json" -H "Content-Type: application/json" -d "{\"prompt\":\"YOUR QUESTION HERE\"}"` + +```bash +echo "See last step for details on how to ask questions to the model." +``` + +## Next steps + +For more inference model options, see the [KAITO GitHub repository](https://github.com/Azure/kaito). \ No newline at end of file diff --git a/scenarios/ConfigurePythonContainer/configure-python-container.md b/scenarios/ConfigurePythonContainer/configure-python-container.md new file mode 100644 index 000000000..036bf9315 --- /dev/null +++ b/scenarios/ConfigurePythonContainer/configure-python-container.md @@ -0,0 +1,450 @@ +--- +title: Configure Linux Python apps +description: Learn how to configure the Python container in which web apps are run, using both the Azure portal and the Azure CLI. +ms.topic: quickstart +ms.date: 08/29/2024 +ms.reviewer: astay +ms.author: msangapu +author: msangapu-msft +ms.devlang: python +ms.custom: mvc, devx-track-python, devx-track-azurecli, mode-other, py-fresh-zinc, linux-related-content +adobe-target: true +--- + +# Configure a Linux Python app for Azure App Service + +This article describes how [Azure App Service](overview.md) runs Python apps, how you can migrate existing apps to Azure, and how you can customize the behavior of App Service when you need to. Python apps must be deployed with all the required [pip](https://pypi.org/project/pip/) modules. 
+ +The App Service deployment engine automatically activates a virtual environment and runs `pip install -r requirements.txt` for you when you deploy a [Git repository](deploy-local-git.md), or when you deploy a [zip package](deploy-zip.md) [with build automation enabled](deploy-zip.md#enable-build-automation-for-zip-deploy). + +This guide provides key concepts and instructions for Python developers who use a built-in Linux container in App Service. If you've never used Azure App Service, first follow the [Python quickstart](quickstart-python.md) and [Python with PostgreSQL tutorial](tutorial-python-postgresql-app.md). + +You can use either the [Azure portal](https://portal.azure.com) or the Azure CLI for configuration: + +- **Azure portal**, use the app's **Settings** > **Configuration** page as described in [Configure an App Service app in the Azure portal](configure-common.md). + +- **Azure CLI**: you have two options. + + - Run commands in the [Azure Cloud Shell](../cloud-shell/overview.md). + - Run commands locally by installing the latest version of the [Azure CLI](/cli/azure/install-azure-cli), then sign in to Azure using [az login](/cli/azure/reference-index#az-login). + +> [!NOTE] +> Linux is the only operating system option for running Python apps in App Service. Python on Windows is no longer supported. You can however build your own custom Windows container image and run that in App Service. For more information, see [use a custom Docker image](tutorial-custom-container.md?pivots=container-windows). + +## Configure Python version + +- **Azure portal**: use the **General settings** tab on the **Configuration** page as described in [Configure general settings](configure-common.md#configure-general-settings) for Linux containers. 
+ +- **Azure CLI**: + + - Show the current Python version with [az webapp config show](/cli/azure/webapp/config#az-webapp-config-show): + + ```azurecli + az webapp config show --resource-group --name --query linuxFxVersion + ``` + + Replace `` and `` with the names appropriate for your web app. + + - Set the Python version with [az webapp config set](/cli/azure/webapp/config#az-webapp-config-set) + + ```azurecli + az webapp config set --resource-group --name --linux-fx-version "PYTHON|3.11" + ``` + + - Show all Python versions that are supported in Azure App Service with [az webapp list-runtimes](/cli/azure/webapp#az-webapp-list-runtimes): + + ```azurecli + az webapp list-runtimes --os linux | grep PYTHON + ``` + +You can run an unsupported version of Python by building your own container image instead. For more information, see [use a custom Docker image](tutorial-custom-container.md?pivots=container-linux). + + + + +## Customize build automation + +App Service's build system, called Oryx, performs the following steps when you deploy your app, if the app setting `SCM_DO_BUILD_DURING_DEPLOYMENT` is set to `1`: + +1. Run a custom pre-build script, if that step is specified by the `PRE_BUILD_COMMAND` setting. (The script can itself run other Python and Node.js scripts, pip and npm commands, and Node-based tools like yarn, for example, `yarn install` and `yarn build`.) + +1. Run `pip install -r requirements.txt`. The *requirements.txt* file must be present in the project's root folder. Otherwise, the build process reports the error: "Could not find setup.py or requirements.txt; Not running pip install." + +1. If *manage.py* is found in the root of the repository (indicating a Django app), run *manage.py collectstatic*. However, if the `DISABLE_COLLECTSTATIC` setting is `true`, this step is skipped. + +1. Run custom post-build script, if that step is specified by the `POST_BUILD_COMMAND` setting. 
(Again, the script can run other Python and Node.js scripts, pip and npm commands, and Node-based tools.) + +By default, the `PRE_BUILD_COMMAND`, `POST_BUILD_COMMAND`, and `DISABLE_COLLECTSTATIC` settings are empty. + +- To disable running collectstatic when building Django apps, set the `DISABLE_COLLECTSTATIC` setting to `true`. + +- To run pre-build commands, set the `PRE_BUILD_COMMAND` setting to contain either a command, such as `echo Pre-build command`, or a path to a script file, relative to your project root, such as `scripts/prebuild.sh`. All commands must use relative paths to the project root folder. + +- To run post-build commands, set the `POST_BUILD_COMMAND` setting to contain either a command, such as `echo Post-build command`, or a path to a script file, relative to your project root, such as `scripts/postbuild.sh`. All commands must use relative paths to the project root folder. + +For other settings that customize build automation, see [Oryx configuration](https://github.com/microsoft/Oryx/blob/master/doc/configuration.md). + +To access the build and deployment logs, see [Access deployment logs](#access-deployment-logs). + +For more information on how App Service runs and builds Python apps in Linux, see [How Oryx detects and builds Python apps](https://github.com/microsoft/Oryx/blob/master/doc/runtimes/python.md). + +> [!NOTE] +> The `PRE_BUILD_SCRIPT_PATH` and `POST_BUILD_SCRIPT_PATH` settings are identical to `PRE_BUILD_COMMAND` and `POST_BUILD_COMMAND` and are supported for legacy purposes. +> +> A setting named `SCM_DO_BUILD_DURING_DEPLOYMENT`, if it contains `true` or `1`, triggers an Oryx build that happens during deployment. The setting is `true` when you deploy by using Git, the Azure CLI command `az webapp up`, and Visual Studio Code. + +> [!NOTE] +> Always use relative paths in all pre- and post-build scripts because the build container in which Oryx runs is different from the runtime container in which the app runs. 
Never rely on the exact placement of your app project folder within the container (for example, that it's placed under *site/wwwroot*). + +## Migrate existing applications to Azure + +Existing web applications can be redeployed to Azure as follows: + +1. **Source repository**: Maintain your source code in a suitable repository like GitHub, which enables you to set up continuous deployment later in this process. + - Your *requirements.txt* file must be at the root of your repository for App Service to automatically install the necessary packages. + +1. **Database**: If your app depends on a database, create the necessary resources on Azure as well. + +1. **App service resources**: Create a resource group, App Service plan, and App Service web app to host your application. You can do this easily by running the Azure CLI command [`az webapp up`](/cli/azure/webapp#az-webapp-up). Or, you can create and deploy resources as shown in [Tutorial: Deploy a Python (Django or Flask) web app with PostgreSQL](tutorial-python-postgresql-app.md). Replace the names of the resource group, App Service plan, and web app to be more suitable for your application. + +1. **Environment variables**: If your application requires any environment variables, create equivalent [App Service application settings](configure-common.md#configure-app-settings). These App Service settings appear to your code as environment variables, as described in [Access environment variables](#access-app-settings-as-environment-variables). + - Database connections, for example, are often managed through such settings, as shown in [Tutorial: Deploy a Django web app with PostgreSQL - verify connection settings](tutorial-python-postgresql-app.md#2-verify-connection-settings). + - See [Production settings for Django apps](#production-settings-for-django-apps) for specific settings for typical Django apps. + +1. 
**App startup**: Review the section [Container startup process](#container-startup-process) later in this article to understand how App Service attempts to run your app. App Service uses the Gunicorn web server by default, which must be able to find your app object or *wsgi.py* folder. If you need to, you can [Customize the startup command](#customize-startup-command). + +1. **Continuous deployment**: Set up continuous deployment from GitHub Actions, Bitbucket, or Azure Repos as described in the article [Continuous deployment to Azure App Service](deploy-continuous-deployment.md). Or, set up continuous deployment from Local Git as described in the article [Local Git deployment to Azure App Service](deploy-local-git.md). + +1. **Custom actions**: To perform actions within the App Service container that hosts your app, such as Django database migrations, you can [connect to the container through SSH](configure-linux-open-ssh-session.md). For an example of running Django database migrations, see [Tutorial: Deploy a Django web app with PostgreSQL - generate database schema](tutorial-python-postgresql-app.md#4-generate-database-schema). + - When using continuous deployment, you can perform those actions using post-build commands as described earlier under [Customize build automation](#customize-build-automation). + +With these steps completed, you should be able to commit changes to your source repository and have those updates automatically deployed to App Service. + +### Production settings for Django apps + +For a production environment like Azure App Service, Django apps should follow Django's [Deployment checklist](https://docs.djangoproject.com/en/4.1/howto/deployment/checklist/). + +The following table describes the production settings that are relevant to Azure. These settings are defined in the app's *setting.py* file. 
+ +| Django setting | Instructions for Azure | +| --- | --- | +| `SECRET_KEY` | Store the value in an App Service setting as described on [Access app settings as environment variables](#access-app-settings-as-environment-variables). You can alternatively [store the value as a secret in Azure Key Vault](/azure/key-vault/secrets/quick-create-python). | +| `DEBUG` | Create a `DEBUG` setting on App Service with the value 0 (false), then load the value as an environment variable. In your development environment, create a `DEBUG` environment variable with the value 1 (true). | +| `ALLOWED_HOSTS` | In production, Django requires that you include the app's URL in the `ALLOWED_HOSTS` array of *settings.py*. You can retrieve this URL at runtime with the code `os.environ['WEBSITE_HOSTNAME']`. App Service automatically sets the `WEBSITE_HOSTNAME` environment variable to the app's URL. | +| `DATABASES` | Define settings in App Service for the database connection and load them as environment variables to populate the [`DATABASES`](https://docs.djangoproject.com/en/4.1/ref/settings/#std:setting-DATABASES) dictionary. You can alternatively store the values (especially the username and password) as [Azure Key Vault secrets](/azure/key-vault/secrets/quick-create-python). | + +## Serve static files for Django apps + +If your Django web app includes static front-end files, first follow the instructions on [managing static files](https://docs.djangoproject.com/en/4.1/howto/static-files/) in the Django documentation. + +For App Service, you then make the following modifications: + +1. Consider using environment variables (for local development) and App Settings (when deploying to the cloud) to dynamically set the Django `STATIC_URL` and `STATIC_ROOT` variables. 
For example: + + ```python + STATIC_URL = os.environ.get("DJANGO_STATIC_URL", "/static/") + STATIC_ROOT = os.environ.get("DJANGO_STATIC_ROOT", "./static/") + ``` + + `DJANGO_STATIC_URL` and `DJANGO_STATIC_ROOT` can be changed as necessary for your local and cloud environments. For example, if the build process for your static files places them in a folder named `django-static`, then you can set `DJANGO_STATIC_URL` to `/django-static/` to avoid using the default. + +1. If you have a pre-build script that generates static files in a different folder, include that folder in the Django `STATICFILES_DIRS` variable so that Django's `collectstatic` process finds them. For example, if you run `yarn build` in your front-end folder, and yarn generates a `build/static` folder containing static files, then include that folder as follows: + + ```python + FRONTEND_DIR = "path-to-frontend-folder" + STATICFILES_DIRS = [os.path.join(FRONTEND_DIR, 'build', 'static')] + ``` + + Here, `FRONTEND_DIR` is used to build a path to where a build tool like yarn is run. You can again use an environment variable and App Setting as desired. + +1. Add `whitenoise` to your *requirements.txt* file. [WhiteNoise](http://whitenoise.evans.io/en/stable/) (whitenoise.evans.io) is a Python package that makes it simple for a production Django app to serve its own static files. WhiteNoise specifically serves those files that are found in the folder specified by the Django `STATIC_ROOT` variable. + +1. In your *settings.py* file, add the following line for WhiteNoise: + + ```python + STATICFILES_STORAGE = ('whitenoise.storage.CompressedManifestStaticFilesStorage') + ``` + +1. 
Also modify the `MIDDLEWARE` and `INSTALLED_APPS` lists to include WhiteNoise: + + ```python + MIDDLEWARE = [ + 'django.middleware.security.SecurityMiddleware', + # Add whitenoise middleware after the security middleware + 'whitenoise.middleware.WhiteNoiseMiddleware', + # Other values follow + ] + + INSTALLED_APPS = [ + "whitenoise.runserver_nostatic", + # Other values follow + ] + ``` + +## Serve static files for Flask apps + +If your Flask web app includes static front-end files, first follow the instructions on [managing static files](https://flask.palletsprojects.com/en/2.2.x/tutorial/static/) in the Flask documentation. For an example of serving static files in a Flask application, see the [sample Flask application](https://github.com/Azure-Samples/msdocs-python-flask-webapp-quickstart) on GitHub. + +To serve static files directly from a route on your application, you can use the [`send_from_directory`](https://flask.palletsprojects.com/en/2.2.x/api/#flask.send_from_directory) method: + +```python +from flask import send_from_directory + +@app.route('/reports/') +def send_report(path): + return send_from_directory('reports', path) +``` + +## Container characteristics + +When deployed to App Service, Python apps run within a Linux Docker container that's defined in the [App Service Python GitHub repository](https://github.com/Azure-App-Service/python). You can find the image configurations inside the version-specific directories. + +This container has the following characteristics: + +- Apps are run using the [Gunicorn WSGI HTTP Server](https://gunicorn.org/), using the extra arguments `--bind=0.0.0.0 --timeout 600`. + - You can provide configuration settings for Gunicorn by [customizing the startup command](#customize-startup-command). + + - To protect your web app from accidental or deliberate DDOS attacks, Gunicorn is run behind an Nginx reverse proxy as described in [Deploying Gunicorn](https://docs.gunicorn.org/en/latest/deploy.html). 
+ +- By default, the base container image includes only the Flask web framework, but the container supports other frameworks that are WSGI-compliant and compatible with Python 3.6+, such as Django. + +- To install other packages, such as Django, create a [*requirements.txt*](https://pip.pypa.io/en/stable/user_guide/#requirements-files) file in the root of your project that specifies your direct dependencies. App Service then installs those dependencies automatically when you deploy your project. + + The *requirements.txt* file *must* be in the project root for dependencies to be installed. Otherwise, the build process reports the error: "Could not find setup.py or requirements.txt; Not running pip install." If you encounter this error, check the location of your requirements file. + +- App Service automatically defines an environment variable named `WEBSITE_HOSTNAME` with the web app's URL, such as `msdocs-hello-world.azurewebsites.net`. It also defines `WEBSITE_SITE_NAME` with the name of your app, such as `msdocs-hello-world`. + +- npm and Node.js are installed in the container so you can run Node-based build tools, such as yarn. + +## Container startup process + +During startup, the App Service on Linux container runs the following steps: + +1. Use a [custom startup command](#customize-startup-command), if one is provided. +1. Check for the existence of a [Django app](#django-app), and launch Gunicorn for it if one is detected. +1. Check for the existence of a [Flask app](#flask-app), and launch Gunicorn for it if one is detected. +1. If no other app is found, start a default app that's built into the container. + +The following sections provide extra details for each option. 
+ +### Django app + +For Django apps, App Service looks for a file named `wsgi.py` within your app code, and then runs Gunicorn using the following command: + +```bash +# is the name of the folder that contains wsgi.py +gunicorn --bind=0.0.0.0 --timeout 600 .wsgi +``` + +If you want more specific control over the startup command, use a [custom startup command](#customize-startup-command), replace `` with the name of folder that contains *wsgi.py*, and add a `--chdir` argument if that module isn't in the project root. For example, if your *wsgi.py* is located under *knboard/backend/config* from your project root, use the arguments `--chdir knboard/backend config.wsgi`. + +To enable production logging, add the `--access-logfile` and `--error-logfile` parameters as shown in the examples for [custom startup commands](#example-startup-commands). + +### Flask app + +For Flask, App Service looks for a file named *application.py* or *app.py* and starts Gunicorn as follows: + +```bash +# If application.py +gunicorn --bind=0.0.0.0 --timeout 600 application:app + +# If app.py +gunicorn --bind=0.0.0.0 --timeout 600 app:app +``` + +If your main app module is contained in a different file, use a different name for the app object. If you want to provide other arguments to Gunicorn, use a [custom startup command](#customize-startup-command). + +### Default behavior + +If the App Service doesn't find a custom command, a Django app, or a Flask app, then it runs a default read-only app, located in the *opt/defaultsite* folder and shown in the following image. + +If you deployed code and still see the default app, see [Troubleshooting - App doesn't appear](#app-doesnt-appear). + +:::image type="content" source="media/configure-language-python/default-python-app.png" alt-text="Screenshot of the default App Service on Linux web page." 
link="#app-doesnt-appear"::: + +## Customize startup command + +You can control the container's startup behavior by providing either a custom startup command or multiple commands in a startup command file. A startup command file can use whatever name you choose, such as *startup.sh*, *startup.cmd*, *startup.txt*, and so on. + +All commands must use relative paths to the project root folder. + +To specify a startup command or command file: + +- **Azure portal**: select the app's **Configuration** page, then select **General settings**. In the **Startup Command** field, place either the full text of your startup command or the name of your startup command file. Then select **Save** to apply the changes. See [Configure general settings](configure-common.md#configure-general-settings) for Linux containers. + +- **Azure CLI**: use the [az webapp config set](/cli/azure/webapp/config#az-webapp-config-set) command with the `--startup-file` parameter to set the startup command or file: + + ```azurecli + az webapp config set --resource-group --name --startup-file "" + ``` + + Replace `` with either the full text of your startup command or the name of your startup command file. + +App Service ignores any errors that occur when processing a custom startup command or file, then continues its startup process by looking for Django and Flask apps. If you don't see the behavior you expect, check that your startup command or file is error-free, and that a startup command file is deployed to App Service along with your app code. You can also check the [diagnostic logs](#access-diagnostic-logs) for more information. Also check the app's **Diagnose and solve problems** page on the [Azure portal](https://portal.azure.com). 
+ +### Example startup commands + +- **Added Gunicorn arguments**: The following example adds the `--workers=4` argument to a Gunicorn command line for starting a Django app: + + ```bash + # is the relative path to the folder that contains the module + # that contains wsgi.py; is the name of the folder containing wsgi.py. + gunicorn --bind=0.0.0.0 --timeout 600 --workers=4 --chdir .wsgi + ``` + + For more information, see [Running Gunicorn](https://docs.gunicorn.org/en/stable/run.html). If you're using auto-scale rules to scale your web app up and down, you should also dynamically set the number of Gunicorn workers using the `NUM_CORES` environment variable in your startup command, for example: `--workers $((($NUM_CORES*2)+1))`. For more information on setting the recommended number of Gunicorn workers, see [the Gunicorn FAQ](https://docs.gunicorn.org/en/stable/design.html#how-many-workers). + +- **Enable production logging for Django**: Add the `--access-logfile '-'` and `--error-logfile '-'` arguments to the command line: + + ```bash + # '-' for the log files means stdout for --access-logfile and stderr for --error-logfile. + gunicorn --bind=0.0.0.0 --timeout 600 --workers=4 --chdir .wsgi --access-logfile '-' --error-logfile '-' + ``` + + These logs will appear in the [App Service log stream](#access-diagnostic-logs). + + For more information, see [Gunicorn logging](https://docs.gunicorn.org/en/stable/settings.html#logging). + +- **Custom Flask main module**: By default, App Service assumes that a Flask app's main module is *application.py* or *app.py*. If your main module uses a different name, then you must customize the startup command. 
For example, if you have a Flask app whose main module is *hello.py* and the Flask app object in that file is named `myapp`, then the command is as follows:
+
+    ```bash
+    gunicorn --bind=0.0.0.0 --timeout 600 hello:myapp
+    ```
+
+    If your main module is in a subfolder, such as `website`, specify that folder with the `--chdir` argument:
+
+    ```bash
+    gunicorn --bind=0.0.0.0 --timeout 600 --chdir website hello:myapp
+    ```
+
+- **Use a non-Gunicorn server**: To use a different web server, such as [aiohttp](https://aiohttp.readthedocs.io/en/stable/web_quickstart.html), use the appropriate command as the startup command or in the startup command file:
+
+    ```bash
+    python3.7 -m aiohttp.web -H localhost -P 8080 package.module:init_func
+    ```
+
+## Access app settings as environment variables
+
+App settings are values stored in the cloud specifically for your app, as described in [Configure app settings](configure-common.md#configure-app-settings). These settings are available to your app code as environment variables and accessed using the standard [os.environ](https://docs.python.org/3/library/os.html#os.environ) pattern.
+
+For example, if you've created an app setting called `DATABASE_SERVER`, the following code retrieves that setting's value:
+
+```python
+db_server = os.environ['DATABASE_SERVER']
+```
+
+## Detect HTTPS session
+
+In App Service, [TLS/SSL termination](https://wikipedia.org/wiki/TLS_termination_proxy) happens at the network load balancers, so all HTTPS requests reach your app as unencrypted HTTP requests. If your app logic needs to check if the user requests are encrypted or not, inspect the `X-Forwarded-Proto` header.
+
+```python
+if 'X-Forwarded-Proto' in request.headers and request.headers['X-Forwarded-Proto'] == 'https':
+    # Do something when HTTPS is used
+```
+
+Popular web frameworks let you access the `X-Forwarded-*` information in your standard app pattern. 
For example, in Django you can use the [SECURE_PROXY_SSL_HEADER](https://docs.djangoproject.com/en/4.1/ref/settings/#secure-proxy-ssl-header) to tell Django to use the `X-Forwarded-Proto` header. + +## Access diagnostic logs + +[!INCLUDE [Access diagnostic logs](../../includes/app-service-web-logs-access-linux-no-h.md)] + +To access logs through the Azure portal, select **Monitoring** > **Log stream** on the left side menu for your app. + +## Access deployment logs + +When you deploy your code, App Service performs the build process described earlier in the section [Customize build automation](#customize-build-automation). Because the build runs in its own container, build logs are stored separately from the app's diagnostic logs. + +Use the following steps to access the deployment logs: + +1. On the Azure portal for your web app, select **Deployment** > **Deployment Center** on the left menu. +1. On the **Logs** tab, select the **Commit ID** for the most recent commit. +1. On the **Log details** page that appears, select the **Show Logs** link that appears next to "Running oryx build...". + +Build issues such as incorrect dependencies in *requirements.txt* and errors in pre- or post-build scripts will appear in these logs. Errors also appear if your requirements file isn't named *requirements.txt* or doesn't appear in the root folder of your project. + +## Open SSH session in browser + +[!INCLUDE [Open SSH session in browser](../../includes/app-service-web-ssh-connect-builtin-no-h.md)] + +When you're successfully connected to the SSH session, you should see the message "SSH CONNECTION ESTABLISHED" at the bottom of the window. If you see errors such as "SSH_CONNECTION_CLOSED" or a message that the container is restarting, an error might be preventing the app container from starting. See [Troubleshooting](#other-issues) for steps to investigate possible issues. 
+ +## URL rewrites + +When deploying Python applications on Azure App Service for Linux, you might need to handle URL rewrites within your application. This is particularly useful for ensuring specific URL patterns are redirected to the correct endpoints without relying on external web server configurations. For Flask applications, [URL processors](https://flask.palletsprojects.com/patterns/urlprocessors/) and custom middleware can be used to achieve this. In Django applications, the robust [URL dispatcher](https://docs.djangoproject.com/en/5.0/topics/http/urls/) allows for efficient management of URL rewrites. + +## Troubleshooting + +In general, the first step in troubleshooting is to use App Service diagnostics: + +1. In the Azure portal for your web app, select **Diagnose and solve problems** from the left menu. +1. Select **Availability and Performance**. +1. Examine the information in the **Application Logs**, **Container Crash**, and **Container Issues** options, where the most common issues will appear. + +Next, examine both the [deployment logs](#access-deployment-logs) and the [app logs](#access-diagnostic-logs) for any error messages. These logs often identify specific issues that can prevent app deployment or app startup. For example, the build can fail if your *requirements.txt* file has the wrong filename or isn't present in your project root folder. + +The following sections provide guidance for specific issues. 
+ +- [App doesn't appear - default app shows](#app-doesnt-appear) +- [App doesn't appear - "service unavailable" message](#service-unavailable) +- [Could not find setup.py or requirements.txt](#could-not-find-setuppy-or-requirementstxt) +- [ModuleNotFoundError on startup](#modulenotfounderror-when-app-starts) +- [Database is locked](#database-is-locked) +- [Passwords don't appear in SSH session when typed](#other-issues) +- [Commands in the SSH session appear to be cut off](#other-issues) +- [Static assets don't appear in a Django app](#other-issues) +- [Fatal SSL Connection is Required](#other-issues) + +#### App doesn't appear + +- **You see the default app after deploying your own app code.** The [default app](#default-behavior) appears because you either haven't deployed your app code to App Service, or App Service failed to find your app code and ran the default app instead. + + - Restart the App Service, wait 15-20 seconds, and check the app again. + + - Use [SSH](#open-ssh-session-in-browser) to connect directly to the App Service container and verify that your files exist under *site/wwwroot*. If your files don't exist, use the following steps: + 1. Create an app setting named `SCM_DO_BUILD_DURING_DEPLOYMENT` with the value of 1, redeploy your code, wait a few minutes, then try to access the app again. For more information on creating app settings, see [Configure an App Service app in the Azure portal](configure-common.md). + 1. Review your deployment process, [check the deployment logs](#access-deployment-logs), correct any errors, and redeploy the app. + + - If your files exist, then App Service wasn't able to identify your specific startup file. Check that your app is structured as App Service expects for [Django](#django-app) or [Flask](#flask-app), or use a [custom startup command](#customize-startup-command). 
+ +- **You see the message "Service Unavailable" in the browser.** The browser has timed out waiting for a response from App Service, which indicates that App Service started the Gunicorn server, but the app itself didn't start. This condition could indicate that the Gunicorn arguments are incorrect, or that there's an error in the app code. + + - Refresh the browser, especially if you're using the lowest pricing tiers in your App Service plan. The app might take longer to start up when you use free tiers, for example, and becomes responsive after you refresh the browser. + + - Check that your app is structured as App Service expects for [Django](#django-app) or [Flask](#flask-app), or use a [custom startup command](#customize-startup-command). + + - Examine the [app log stream](#access-diagnostic-logs) for any error messages. The logs will show any errors in the app code. + +#### Could not find setup.py or requirements.txt + +- **The log stream shows "Could not find setup.py or requirements.txt; Not running pip install."**: The Oryx build process failed to find your *requirements.txt* file. + + - Connect to the web app's container via [SSH](#open-ssh-session-in-browser) and verify that *requirements.txt* is named correctly and exists directly under *site/wwwroot*. If it doesn't exist, make sure the file exists in your repository and is included in your deployment. If it exists in a separate folder, move it to the root. + +#### ModuleNotFoundError when app starts + +If you see an error like `ModuleNotFoundError: No module named 'example'`, then Python couldn't find one or more of your modules when the application started. This error most often occurs if you deploy your virtual environment with your code. Virtual environments aren't portable, so a virtual environment shouldn't be deployed with your application code. 
Instead, let Oryx create a virtual environment and install your packages on the web app by creating an app setting, `SCM_DO_BUILD_DURING_DEPLOYMENT`, and setting it to `1`. This setting will force Oryx to install your packages whenever you deploy to App Service. For more information, see [this article on virtual environment portability](https://azure.github.io/AppService/2020/12/11/cicd-for-python-apps.html). + +#### Database is locked + +When attempting to run database migrations with a Django app, you might see "sqlite3.OperationalError: database is locked." The error indicates that your application is using a SQLite database, for which Django is configured by default, rather than using a cloud database such as Azure Database for PostgreSQL. + +Check the `DATABASES` variable in the app's *settings.py* file to ensure that your app is using a cloud database instead of SQLite. + +If you're encountering this error with the sample in [Tutorial: Deploy a Django web app with PostgreSQL](tutorial-python-postgresql-app.md), check that you completed the steps in [Verify connection settings](tutorial-python-postgresql-app.md#2-verify-connection-settings). + +#### Other issues + +- **Passwords don't appear in the SSH session when typed**: For security reasons, the SSH session keeps your password hidden when you type. The characters are being recorded, however, so type your password as usual and select **Enter** when done. + +- **Commands in the SSH session appear to be cut off**: The editor might not be word-wrapping commands, but they should still run correctly. + +- **Static assets don't appear in a Django app**: Ensure that you've enabled the [WhiteNoise module](http://whitenoise.evans.io/en/stable/django.html). + +- **You see the message, "Fatal SSL Connection is Required"**: Check any usernames and passwords used to access resources (such as databases) from within the app. 
+ +## Related content + +- [Tutorial: Python app with PostgreSQL](tutorial-python-postgresql-app.md) +- [Tutorial: Deploy from private container repository](tutorial-custom-container.md?pivots=container-linux) +- [App Service on Linux FAQ](faq-app-service-linux.yml) +- [Environment variables and app settings reference](reference-app-settings.md) \ No newline at end of file diff --git a/scenarios/CreateAKSDeployment/create-aks-deployment.md b/scenarios/CreateAKSDeployment/create-aks-deployment.md deleted file mode 100644 index a3bcdf4bc..000000000 --- a/scenarios/CreateAKSDeployment/create-aks-deployment.md +++ /dev/null @@ -1,441 +0,0 @@ ---- -title: 'Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster using Azure CLI' -description: Learn how to quickly deploy a Kubernetes cluster and deploy an application in Azure Kubernetes Service (AKS) using Azure CLI. -ms.topic: quickstart -ms.date: 04/09/2024 -author: tamram -ms.author: tamram -ms.custom: H1Hack27Feb2017, mvc, devcenter, devx-track-azurecli, mode-api, innovation-engine, linux-related-content -#Customer intent: As a developer or cluster operator, I want to deploy an AKS cluster and deploy an application so I can see how to run applications using the managed Kubernetes service in Azure. ---- - -# Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster using Azure CLI - -Azure Kubernetes Service (AKS) is a managed Kubernetes service that lets you quickly deploy and manage clusters. In this quickstart, you learn how to: - -- Deploy an AKS cluster using the Azure CLI. -- Run a sample multi-container application with a group of microservices and web front ends simulating a retail scenario. - -> [!NOTE] -> To get started with quickly provisioning an AKS cluster, this article includes steps to deploy a cluster with default settings for evaluation purposes only. 
Before deploying a production-ready cluster, we recommend that you familiarize yourself with our [baseline reference architecture][baseline-reference-architecture] to consider how it aligns with your business requirements. - -## Before you begin - -This quickstart assumes a basic understanding of Kubernetes concepts. For more information, see [Kubernetes core concepts for Azure Kubernetes Service (AKS)][kubernetes-concepts]. - -- [!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment-no-header.md](~/reusable-content/azure-cli/azure-cli-prepare-your-environment-no-header.md)] - -- This article requires version 2.0.64 or later of the Azure CLI. If you're using Azure Cloud Shell, the latest version is already installed there. -- Make sure that the identity you're using to create your cluster has the appropriate minimum permissions. For more details on access and identity for AKS, see [Access and identity options for Azure Kubernetes Service (AKS)](../concepts-identity.md). -- If you have multiple Azure subscriptions, select the appropriate subscription ID in which the resources should be billed using the [az account set](/cli/azure/account#az-account-set) command. - -## Define environment variables - -Define the following environment variables for use throughout this quickstart: - -```azurecli-interactive -export RANDOM_ID="$(openssl rand -hex 3)" -export MY_RESOURCE_GROUP_NAME="myAKSResourceGroup$RANDOM_ID" -export REGION="westeurope" -export MY_AKS_CLUSTER_NAME="myAKSCluster$RANDOM_ID" -export MY_DNS_LABEL="mydnslabel$RANDOM_ID" -``` - -## Create a resource group - -An [Azure resource group][azure-resource-group] is a logical group in which Azure resources are deployed and managed. When you create a resource group, you're prompted to specify a location. 
This location is the storage location of your resource group metadata and where your resources run in Azure if you don't specify another region during resource creation. - -Create a resource group using the [`az group create`][az-group-create] command. - -```azurecli-interactive -az group create --name $MY_RESOURCE_GROUP_NAME --location $REGION -``` - -Results: - -```JSON -{ - "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAKSResourceGroupxxxxxx", - "location": "eastus", - "managedBy": null, - "name": "testResourceGroup", - "properties": { - "provisioningState": "Succeeded" - }, - "tags": null, - "type": "Microsoft.Resources/resourceGroups" -} -``` - -## Create an AKS cluster - -Create an AKS cluster using the [`az aks create`][az-aks-create] command. The following example creates a cluster with one node and enables a system-assigned managed identity. - -```azurecli-interactive -az aks create --resource-group $MY_RESOURCE_GROUP_NAME --name $MY_AKS_CLUSTER_NAME --enable-managed-identity --node-count 1 --generate-ssh-keys -``` - -> [!NOTE] -> When you create a new cluster, AKS automatically creates a second resource group to store the AKS resources. For more information, see [Why are two resource groups created with AKS?](../faq.md#why-are-two-resource-groups-created-with-aks) - -## Connect to the cluster - -To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. To install `kubectl` locally, use the [`az aks install-cli`][az-aks-install-cli] command. - -1. Configure `kubectl` to connect to your Kubernetes cluster using the [az aks get-credentials][az-aks-get-credentials] command. This command downloads credentials and configures the Kubernetes CLI to use them. - - ```azurecli-interactive - az aks get-credentials --resource-group $MY_RESOURCE_GROUP_NAME --name $MY_AKS_CLUSTER_NAME - ``` - -1. 
Verify the connection to your cluster using the [kubectl get][kubectl-get] command. This command returns a list of the cluster nodes. - - ```azurecli-interactive - kubectl get nodes - ``` - -## Deploy the application - -To deploy the application, you use a manifest file to create all the objects required to run the [AKS Store application](https://github.com/Azure-Samples/aks-store-demo). A [Kubernetes manifest file][kubernetes-deployment] defines a cluster's desired state, such as which container images to run. The manifest includes the following Kubernetes deployments and services: - -:::image type="content" source="media/quick-kubernetes-deploy-portal/aks-store-architecture.png" alt-text="Screenshot of Azure Store sample architecture." lightbox="media/quick-kubernetes-deploy-portal/aks-store-architecture.png"::: - -- **Store front**: Web application for customers to view products and place orders. -- **Product service**: Shows product information. -- **Order service**: Places orders. -- **Rabbit MQ**: Message queue for an order queue. - -> [!NOTE] -> We don't recommend running stateful containers, such as Rabbit MQ, without persistent storage for production. These are used here for simplicity, but we recommend using managed services, such as Azure CosmosDB or Azure Service Bus. - -1. 
Create a file named `aks-store-quickstart.yaml` and copy in the following manifest: - - ```yaml - apiVersion: apps/v1 - kind: Deployment - metadata: - name: rabbitmq - spec: - replicas: 1 - selector: - matchLabels: - app: rabbitmq - template: - metadata: - labels: - app: rabbitmq - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: rabbitmq - image: mcr.microsoft.com/mirror/docker/library/rabbitmq:3.10-management-alpine - ports: - - containerPort: 5672 - name: rabbitmq-amqp - - containerPort: 15672 - name: rabbitmq-http - env: - - name: RABBITMQ_DEFAULT_USER - value: "username" - - name: RABBITMQ_DEFAULT_PASS - value: "password" - resources: - requests: - cpu: 10m - memory: 128Mi - limits: - cpu: 250m - memory: 256Mi - volumeMounts: - - name: rabbitmq-enabled-plugins - mountPath: /etc/rabbitmq/enabled_plugins - subPath: enabled_plugins - volumes: - - name: rabbitmq-enabled-plugins - configMap: - name: rabbitmq-enabled-plugins - items: - - key: rabbitmq_enabled_plugins - path: enabled_plugins - --- - apiVersion: v1 - data: - rabbitmq_enabled_plugins: | - [rabbitmq_management,rabbitmq_prometheus,rabbitmq_amqp1_0]. 
- kind: ConfigMap - metadata: - name: rabbitmq-enabled-plugins - --- - apiVersion: v1 - kind: Service - metadata: - name: rabbitmq - spec: - selector: - app: rabbitmq - ports: - - name: rabbitmq-amqp - port: 5672 - targetPort: 5672 - - name: rabbitmq-http - port: 15672 - targetPort: 15672 - type: ClusterIP - --- - apiVersion: apps/v1 - kind: Deployment - metadata: - name: order-service - spec: - replicas: 1 - selector: - matchLabels: - app: order-service - template: - metadata: - labels: - app: order-service - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: order-service - image: ghcr.io/azure-samples/aks-store-demo/order-service:latest - ports: - - containerPort: 3000 - env: - - name: ORDER_QUEUE_HOSTNAME - value: "rabbitmq" - - name: ORDER_QUEUE_PORT - value: "5672" - - name: ORDER_QUEUE_USERNAME - value: "username" - - name: ORDER_QUEUE_PASSWORD - value: "password" - - name: ORDER_QUEUE_NAME - value: "orders" - - name: FASTIFY_ADDRESS - value: "0.0.0.0" - resources: - requests: - cpu: 1m - memory: 50Mi - limits: - cpu: 75m - memory: 128Mi - initContainers: - - name: wait-for-rabbitmq - image: busybox - command: ['sh', '-c', 'until nc -zv rabbitmq 5672; do echo waiting for rabbitmq; sleep 2; done;'] - resources: - requests: - cpu: 1m - memory: 50Mi - limits: - cpu: 75m - memory: 128Mi - --- - apiVersion: v1 - kind: Service - metadata: - name: order-service - spec: - type: ClusterIP - ports: - - name: http - port: 3000 - targetPort: 3000 - selector: - app: order-service - --- - apiVersion: apps/v1 - kind: Deployment - metadata: - name: product-service - spec: - replicas: 1 - selector: - matchLabels: - app: product-service - template: - metadata: - labels: - app: product-service - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: product-service - image: ghcr.io/azure-samples/aks-store-demo/product-service:latest - ports: - - containerPort: 3002 - resources: - requests: - cpu: 1m - memory: 1Mi - limits: - cpu: 1m - 
memory: 7Mi - --- - apiVersion: v1 - kind: Service - metadata: - name: product-service - spec: - type: ClusterIP - ports: - - name: http - port: 3002 - targetPort: 3002 - selector: - app: product-service - --- - apiVersion: apps/v1 - kind: Deployment - metadata: - name: store-front - spec: - replicas: 1 - selector: - matchLabels: - app: store-front - template: - metadata: - labels: - app: store-front - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: store-front - image: ghcr.io/azure-samples/aks-store-demo/store-front:latest - ports: - - containerPort: 8080 - name: store-front - env: - - name: VUE_APP_ORDER_SERVICE_URL - value: "http://order-service:3000/" - - name: VUE_APP_PRODUCT_SERVICE_URL - value: "http://product-service:3002/" - resources: - requests: - cpu: 1m - memory: 200Mi - limits: - cpu: 1000m - memory: 512Mi - --- - apiVersion: v1 - kind: Service - metadata: - name: store-front - spec: - ports: - - port: 80 - targetPort: 8080 - selector: - app: store-front - type: LoadBalancer - ``` - - For a breakdown of YAML manifest files, see [Deployments and YAML manifests](../concepts-clusters-workloads.md#deployments-and-yaml-manifests). - - If you create and save the YAML file locally, then you can upload the manifest file to your default directory in CloudShell by selecting the **Upload/Download files** button and selecting the file from your local file system. - -1. Deploy the application using the [`kubectl apply`][kubectl-apply] command and specify the name of your YAML manifest. - - ```azurecli-interactive - kubectl apply -f aks-store-quickstart.yaml - ``` - -## Test the application - -You can validate that the application is running by visiting the public IP address or the application URL. 
- -Get the application URL using the following commands: - -```azurecli-interactive -runtime="5 minutes" -endtime=$(date -ud "$runtime" +%s) -while [[ $(date -u +%s) -le $endtime ]] -do - STATUS=$(kubectl get pods -l app=store-front -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') - echo $STATUS - if [ "$STATUS" == 'True' ] - then - export IP_ADDRESS=$(kubectl get service store-front --output 'jsonpath={..status.loadBalancer.ingress[0].ip}') - echo "Service IP Address: $IP_ADDRESS" - break - else - sleep 10 - fi -done -``` - -```azurecli-interactive -curl $IP_ADDRESS -``` - -Results: - -```HTML - - - - - - - - store-front - - - - - -
- - -``` - -```output -echo "You can now visit your web server at $IP_ADDRESS" -``` - -:::image type="content" source="media/quick-kubernetes-deploy-cli/aks-store-application.png" alt-text="Screenshot of AKS Store sample application." lightbox="media/quick-kubernetes-deploy-cli/aks-store-application.png"::: - -## Delete the cluster - -If you don't plan on going through the [AKS tutorial][aks-tutorial], clean up unnecessary resources to avoid Azure charges. You can remove the resource group, container service, and all related resources using the [`az group delete`][az-group-delete] command. - -> [!NOTE] -> The AKS cluster was created with a system-assigned managed identity, which is the default identity option used in this quickstart. The platform manages this identity so you don't need to manually remove it. - -## Next steps - -In this quickstart, you deployed a Kubernetes cluster and then deployed a simple multi-container application to it. This sample application is for demo purposes only and doesn't represent all the best practices for Kubernetes applications. For guidance on creating full solutions with AKS for production, see [AKS solution guidance][aks-solution-guidance]. - -To learn more about AKS and walk through a complete code-to-deployment example, continue to the Kubernetes cluster tutorial. 
- -> [!div class="nextstepaction"] -> [AKS tutorial][aks-tutorial] - - -[kubectl]: https://kubernetes.io/docs/reference/kubectl/ -[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply -[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get - - -[kubernetes-concepts]: ../concepts-clusters-workloads.md -[aks-tutorial]: ../tutorial-kubernetes-prepare-app.md -[azure-resource-group]: ../../azure-resource-manager/management/overview.md -[az-aks-create]: /cli/azure/aks#az-aks-create -[az-aks-get-credentials]: /cli/azure/aks#az-aks-get-credentials -[az-aks-install-cli]: /cli/azure/aks#az-aks-install-cli -[az-group-create]: /cli/azure/group#az-group-create -[az-group-delete]: /cli/azure/group#az-group-delete -[kubernetes-deployment]: ../concepts-clusters-workloads.md#deployments-and-yaml-manifests -[aks-solution-guidance]: /azure/architecture/reference-architectures/containers/aks-start-here?toc=/azure/aks/toc.json&bc=/azure/aks/breadcrumb/toc.json -[baseline-reference-architecture]: /azure/architecture/reference-architectures/containers/aks/baseline-aks?toc=/azure/aks/toc.json&bc=/azure/aks/breadcrumb/toc.json diff --git a/scenarios/CreateAKSWebApp/README.md b/scenarios/CreateAKSWebApp/create-aks-webapp.md similarity index 99% rename from scenarios/CreateAKSWebApp/README.md rename to scenarios/CreateAKSWebApp/create-aks-webapp.md index 7111f546a..2a03db4d9 100644 --- a/scenarios/CreateAKSWebApp/README.md +++ b/scenarios/CreateAKSWebApp/create-aks-webapp.md @@ -14,31 +14,14 @@ ms.custom: innovation-engine Welcome to this tutorial where we will take you step by step in creating an Azure Kubernetes Web Application that is secured via https. This tutorial assumes you are logged into Azure CLI already and have selected a subscription to use with the CLI. It also assumes that you have Helm installed ([Instructions can be found here](https://helm.sh/docs/intro/install/)). 
-## Define Environment Variables +## Create a resource group -The first step in this tutorial is to define environment variables. +A resource group is a container for related resources. All resources must be placed in a resource group. We will create one for this tutorial. The following command creates a resource group with the previously defined $MY_RESOURCE_GROUP_NAME and $REGION parameters. ```bash export RANDOM_ID="$(openssl rand -hex 3)" -export NETWORK_PREFIX="$(($RANDOM % 254 + 1))" -export SSL_EMAIL_ADDRESS="$(az account show --query user.name --output tsv)" export MY_RESOURCE_GROUP_NAME="myAKSResourceGroup$RANDOM_ID" export REGION="westeurope" -export MY_AKS_CLUSTER_NAME="myAKSCluster$RANDOM_ID" -export MY_PUBLIC_IP_NAME="myPublicIP$RANDOM_ID" -export MY_DNS_LABEL="mydnslabel$RANDOM_ID" -export MY_VNET_NAME="myVNet$RANDOM_ID" -export MY_VNET_PREFIX="10.$NETWORK_PREFIX.0.0/16" -export MY_SN_NAME="mySN$RANDOM_ID" -export MY_SN_PREFIX="10.$NETWORK_PREFIX.0.0/22" -export FQDN="${MY_DNS_LABEL}.${REGION}.cloudapp.azure.com" -``` - -## Create a resource group - -A resource group is a container for related resources. All resources must be placed in a resource group. We will create one for this tutorial. The following command creates a resource group with the previously defined $MY_RESOURCE_GROUP_NAME and $REGION parameters. - -```bash az group create --name $MY_RESOURCE_GROUP_NAME --location $REGION ``` @@ -65,6 +48,11 @@ Results: A virtual network is the fundamental building block for private networks in Azure. Azure Virtual Network enables Azure resources like VMs to securely communicate with each other and the internet. 
```bash +export NETWORK_PREFIX="$(($RANDOM % 254 + 1))" +export MY_VNET_NAME="myVNet$RANDOM_ID" +export MY_VNET_PREFIX="10.$NETWORK_PREFIX.0.0/16" +export MY_SN_NAME="mySN$RANDOM_ID" +export MY_SN_PREFIX="10.$NETWORK_PREFIX.0.0/22" az network vnet create \ --resource-group $MY_RESOURCE_GROUP_NAME \ --location $REGION \ @@ -129,6 +117,7 @@ This will take a few minutes. ```bash export MY_SN_ID=$(az network vnet subnet list --resource-group $MY_RESOURCE_GROUP_NAME --vnet-name $MY_VNET_NAME --query "[0].id" --output tsv) +export MY_AKS_CLUSTER_NAME="myAKSCluster$RANDOM_ID" az aks create \ --resource-group $MY_RESOURCE_GROUP_NAME \ --name $MY_AKS_CLUSTER_NAME \ @@ -178,6 +167,8 @@ To manage a Kubernetes cluster, use the Kubernetes command-line client, kubectl. ## Install NGINX Ingress Controller ```bash +export MY_PUBLIC_IP_NAME="myPublicIP$RANDOM_ID" +export MY_DNS_LABEL="mydnslabel$RANDOM_ID" export MY_STATIC_IP=$(az network public-ip create --resource-group MC_${MY_RESOURCE_GROUP_NAME}_${MY_AKS_CLUSTER_NAME}_${REGION} --location ${REGION} --name ${MY_PUBLIC_IP_NAME} --dns-name ${MY_DNS_LABEL} --sku Standard --allocation-method static --version IPv4 --zone 1 2 3 --query publicIp.ipAddress -o tsv) helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx helm repo update @@ -466,6 +457,7 @@ done ``` ```bash +export FQDN="${MY_DNS_LABEL}.${REGION}.cloudapp.azure.com" curl "http://$FQDN" ``` @@ -563,6 +555,7 @@ Cert-manager provides Helm charts as a first-class method of installation on Kub The issuer we are using can be found in the `cluster-issuer-prod.yml file` ```bash + export SSL_EMAIL_ADDRESS="$(az account show --query user.name --output tsv)" cat < cluster-issuer-prod.yml apiVersion: cert-manager.io/v1 kind: ClusterIssuer diff --git a/scenarios/CreateAOAIDeployment/create-aoai-deployment.md b/scenarios/CreateAOAIDeployment/create-aoai-deployment.md new file mode 100644 index 000000000..a57922fd5 --- /dev/null +++ 
b/scenarios/CreateAOAIDeployment/create-aoai-deployment.md @@ -0,0 +1,357 @@ +--- +title: 'Create and manage Azure OpenAI Service deployments with the Azure CLI' +titleSuffix: Azure OpenAI +description: Learn how to use the Azure CLI to create an Azure OpenAI resource and manage deployments with the Azure OpenAI Service. +#services: cognitive-services +manager: nitinme +ms.author: colinmixon +ms.service: azure-ai-openai +ms.custom: devx-track-azurecli, linux-related-content,innovation-engine +ms.topic: include +ms.date: 07/11/2024 +--- + +## Prerequisites + +- An Azure subscription. Create one for free. +- Access granted to Azure OpenAI in the desired Azure subscription. +- Access permissions to [create Azure OpenAI resources and to deploy models](../how-to/role-based-access-control.md). +- The Azure CLI. For more information, see [How to install the Azure CLI](/cli/azure/install-azure-cli). + +> [!NOTE] +> Currently, you must submit an application to access Azure OpenAI Service. To apply for access, complete [this form](https://aka.ms/oai/access). If you need assistance, open an issue on this repository to contact Microsoft. + +## Sign in to the Azure CLI + +[Sign in](/cli/azure/authenticate-azure-cli) to the Azure CLI or select **Open Cloudshell** in the following steps. + +## Create an Azure resource group + +To create an Azure OpenAI resource, you need an Azure resource group. When you create a new resource through the Azure CLI, you can also create a new resource group or instruct Azure to use an existing group. The following example shows how to create a new resource group named _$MY_RESOURCE_GROUP_NAME_ with the [az group create](/cli/azure/group?view=azure-cli-latest&preserve-view=true#az-group-create) command. The resource group is created in the East US region as defined by the environment variable _$REGION_. 
+ +```bash +export RANDOM_ID="$(openssl rand -hex 3)" +export MY_RESOURCE_GROUP_NAME="myAOAIResourceGroup$RANDOM_ID" +export REGION="eastus" +export TAGS="owner=user" + +az group create --name $MY_RESOURCE_GROUP_NAME --location $REGION --tags $TAGS +``` + +Results: + +```JSON +{ + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAOAIResourceGroupxxxxxx", + "location": "eastus", + "managedBy": null, + "name": "myAOAIResourceGroupxxxxxx", + "properties": { + "provisioningState": "Succeeded" + }, + "tags": { + "owner": "user" + }, + "type": "Microsoft.Resources/resourceGroups" +} +``` + +## Create a resource + +Use the [az cognitiveservices account create](/cli/azure/cognitiveservices/account?view=azure-cli-latest&preserve-view=true#az-cognitiveservices-account-create) command to create an Azure OpenAI resource in the resource group. In the following example, you create a resource named _$MY_OPENAI_RESOURCE_NAME_ in the _$MY_RESOURCE_GROUP_NAME_ resource group. When you try the example, update the environment variables to use your desired values for the resource group and resource name. 
+ +```bash +export MY_OPENAI_RESOURCE_NAME="myOAIResource$RANDOM_ID" +az cognitiveservices account create \ +--name $MY_OPENAI_RESOURCE_NAME \ +--resource-group $MY_RESOURCE_GROUP_NAME \ +--location $REGION \ +--kind OpenAI \ +--sku s0 +``` +Results: + +```JSON +{ + "etag": "\"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\"", + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAOAIResourceGroupxxxxxx/providers/Microsoft.CognitiveServices/accounts/myOAIResourcexxxxxx", + "identity": null, + "kind": "OpenAI", + "location": "eastus", + "name": "myOAIResourcexxxxxx", + "properties": { + "abusePenalty": null, + "allowedFqdnList": null, + "apiProperties": null, + "callRateLimit": { + "count": null, + "renewalPeriod": null, + "rules": [ + { + "count": 30.0, + "dynamicThrottlingEnabled": null, + "key": "openai.dalle.post", + "matchPatterns": [ + { + "method": "POST", + "path": "dalle/*" + }, + { + "method": "POST", + "path": "openai/images/*" + } + ], + "minCount": null, + "renewalPeriod": 1.0 + }, + { + "count": 30.0, + "dynamicThrottlingEnabled": null, + "key": "openai.dalle.other", + "matchPatterns": [ + { + "method": "*", + "path": "dalle/*" + }, + { + "method": "*", + "path": "openai/operations/images/*" + } + ], + "minCount": null, + "renewalPeriod": 1.0 + }, + { + "count": 30.0, + "dynamicThrottlingEnabled": null, + "key": "openai", + "matchPatterns": [ + { + "method": "*", + "path": "openai/*" + } + ], + "minCount": null, + "renewalPeriod": 1.0 + }, + { + "count": 30.0, + "dynamicThrottlingEnabled": null, + "key": "default", + "matchPatterns": [ + { + "method": "*", + "path": "*" + } + ], + "minCount": null, + "renewalPeriod": 1.0 + } + ] + }, + "capabilities": [ + { + "name": "VirtualNetworks", + "value": null + }, + { + "name": "CustomerManagedKey", + "value": null + }, + { + "name": "MaxFineTuneCount", + "value": "100" + }, + { + "name": "MaxRunningFineTuneCount", + "value": "1" + }, + { + "name": "MaxUserFileCount", + "value": "50" + }, + { + 
"name": "MaxTrainingFileSize", + "value": "512000000" + }, + { + "name": "MaxUserFileImportDurationInHours", + "value": "1" + }, + { + "name": "MaxFineTuneJobDurationInHours", + "value": "720" + }, + { + "name": "TrustedServices", + "value": "Microsoft.CognitiveServices,Microsoft.MachineLearningServices,Microsoft.Search" + } + ], + "commitmentPlanAssociations": null, + "customSubDomainName": null, + "dateCreated": "xxxx-xx-xxxxx:xx:xx.xxxxxxxx", + "deletionDate": null, + "disableLocalAuth": null, + "dynamicThrottlingEnabled": null, + "encryption": null, + "endpoint": "https://eastus.api.cognitive.microsoft.com/", + "endpoints": { + "OpenAI Dall-E API": "https://eastus.api.cognitive.microsoft.com/", + "OpenAI Language Model Instance API": "https://eastus.api.cognitive.microsoft.com/", + "OpenAI Model Scaleset API": "https://eastus.api.cognitive.microsoft.com/", + "OpenAI Whisper API": "https://eastus.api.cognitive.microsoft.com/" + }, + "internalId": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", + "isMigrated": false, + "locations": null, + "migrationToken": null, + "networkAcls": null, + "privateEndpointConnections": [], + "provisioningState": "Succeeded", + "publicNetworkAccess": "Enabled", + "quotaLimit": null, + "restore": null, + "restrictOutboundNetworkAccess": null, + "scheduledPurgeDate": null, + "skuChangeInfo": null, + "userOwnedStorage": null + }, + "resourceGroup": "myAOAIResourceGroupxxxxxx", + "sku": { + "capacity": null, + "family": null, + "name": "S0", + "size": null, + "tier": null + }, + "systemData": { + "createdAt": "xxxx-xx-xxxxx:xx:xx.xxxxxxxx", + "createdBy": "yyyyyyyyyyyyyyyyyyyyyyyy", + "createdByType": "User", + "lastModifiedAt": "xxxx-xx-xxxxx:xx:xx.xxxxxx+xx:xx", + "lastModifiedBy": "yyyyyyyyyyyyyyyyyyyyyyyy", + "lastModifiedByType": "User" + }, + "tags": null, + "type": "Microsoft.CognitiveServices/accounts" +} +``` + +## Retrieve information about the resource + +After you create the resource, you can use different commands to find useful 
information about your Azure OpenAI Service instance. The following examples demonstrate how to retrieve the REST API endpoint base URL and the access keys for the new resource. + +### Get the endpoint URL + +Use the [az cognitiveservices account show](/cli/azure/cognitiveservices/account?view=azure-cli-latest&preserve-view=true#az-cognitiveservices-account-show) command to retrieve the REST API endpoint base URL for the resource. In this example, we direct the command output through the [jq](https://jqlang.github.io/jq/) JSON processor to locate the `.properties.endpoint` value. + +When you try the example, update the environment variables to use your values for the resource group _$MY_RESOURCE_GROUP_NAME_ and resource _$MY_OPENAI_RESOURCE_NAME_. + +```bash +az cognitiveservices account show \ +--name $MY_OPENAI_RESOURCE_NAME \ +--resource-group $MY_RESOURCE_GROUP_NAME \ +| jq -r .properties.endpoint +``` + +### Get the primary API key + +To retrieve the access keys for the resource, use the [az cognitiveservices account keys list](/cli/azure/cognitiveservices/account?view=azure-cli-latest&preserve-view=true#az-cognitiveservices-account-keys-list) command. In this example, we direct the command output through the [jq](https://jqlang.github.io/jq/) JSON processor to locate the `.key1` value. + +When you try the example, update the environment variables to use your values for the resource group and resource. + +```bash +az cognitiveservices account keys list \ +--name $MY_OPENAI_RESOURCE_NAME \ +--resource-group $MY_RESOURCE_GROUP_NAME \ +| jq -r .key1 +``` + +## Deploy a model + +To deploy a model, use the [az cognitiveservices account deployment create](/cli/azure/cognitiveservices/account/deployment?view=azure-cli-latest&preserve-view=true#az-cognitiveservices-account-deployment-create) command. In the following example, you deploy an instance of the `text-embedding-ada-002` model and give it the name _$MY_MODEL_NAME_. 
When you try the example, update the variables to use your values for the resource group and resource. You don't need to change the `model-version`, `model-format`, `sku-capacity`, or `sku-name` values.
+
+```bash
+export MY_MODEL_NAME="myModel$RANDOM_ID"
+az cognitiveservices account deployment create \
+--name $MY_OPENAI_RESOURCE_NAME \
+--resource-group $MY_RESOURCE_GROUP_NAME \
+--deployment-name $MY_MODEL_NAME \
+--model-name text-embedding-ada-002 \
+--model-version "2" \
+--model-format OpenAI \
+--sku-capacity "1" \
+--sku-name "Standard"
+```
+
+`--sku-name` accepts the following deployment types: `Standard`, `GlobalStandard`, and `ProvisionedManaged`. Learn more about [deployment type options](../how-to/deployment-types.md).
+
+
+> [!IMPORTANT]
+> When you access the model via the API, you need to refer to the deployment name rather than the underlying model name in API calls, which is one of the [key differences](../how-to/switching-endpoints.yml) between OpenAI and Azure OpenAI. OpenAI only requires the model name. Azure OpenAI always requires the deployment name, even when using the model parameter. In our docs, we often have examples where deployment names are represented as identical to model names to help indicate which model works with a particular API endpoint. Ultimately your deployment names can follow whatever naming convention is best for your use case.
+ +Results: + +```JSON +{ + "etag": "\"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\"", + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAOAIResourceGroupxxxxxx/providers/Microsoft.CognitiveServices/accounts/myOAIResourcexxxxxx/deployments/myModelxxxxxx", + "name": "myModelxxxxxx", + "properties": { + "callRateLimit": null, + "capabilities": { + "embeddings": "true", + "embeddingsMaxInputs": "1" + }, + "model": { + "callRateLimit": null, + "format": "OpenAI", + "name": "text-embedding-ada-002", + "source": null, + "version": "1" + }, + "provisioningState": "Succeeded", + "raiPolicyName": null, + "rateLimits": [ + { + "count": 1.0, + "dynamicThrottlingEnabled": null, + "key": "request", + "matchPatterns": null, + "minCount": null, + "renewalPeriod": 10.0 + }, + { + "count": 1000.0, + "dynamicThrottlingEnabled": null, + "key": "token", + "matchPatterns": null, + "minCount": null, + "renewalPeriod": 60.0 + } + ], + "scaleSettings": null, + "versionUpgradeOption": "OnceNewDefaultVersionAvailable" + }, + "resourceGroup": "myAOAIResourceGroupxxxxxx", + "sku": { + "capacity": 1, + "family": null, + "name": "Standard", + "size": null, + "tier": null + }, + "systemData": { + "createdAt": "xxxx-xx-xxxxx:xx:xx.xxxxxx+xx:xx", + "createdBy": "yyyyyyyyyyyyyyyyyyyyyyyy", + "createdByType": "User", + "lastModifiedAt": "xxxx-xx-xxxxx:xx:xx.xxxxxx+xx:xx", + "lastModifiedBy": "yyyyyyyyyyyyyyyyyyyyyyyy", + "lastModifiedByType": "User" + }, + "type": "Microsoft.CognitiveServices/accounts/deployments" +} +``` +## Delete a model from your resource + +You can delete any model deployed from your resource with the [az cognitiveservices account deployment delete](/cli/azure/cognitiveservices/account/deployment?view=azure-cli-latest&preserve-view=true#az-cognitiveservices-account-deployment-delete) command. 
\ No newline at end of file diff --git a/scenarios/CreateContainerAppDeploymentFromSource/create-container-app-deployment-from-source.md b/scenarios/CreateContainerAppDeploymentFromSource/create-container-app-deployment-from-source.md index aec7a8882..6ebeb700b 100644 --- a/scenarios/CreateContainerAppDeploymentFromSource/create-container-app-deployment-from-source.md +++ b/scenarios/CreateContainerAppDeploymentFromSource/create-container-app-deployment-from-source.md @@ -19,24 +19,6 @@ In this guide, we'll be walking through deploying the necessary resources for a Note: If you've never created a Computer Vision resource before, you will not be able to create one using the Azure CLI. You must create your first Computer Vision resource from the Azure portal to review and acknowledge the Responsible AI terms and conditions. You can do so here: [Create a Computer Vision Resource](https://portal.azure.com/#create/Microsoft.CognitiveServicesComputerVision). After that, you can create subsequent resources using any deployment tool (SDK, CLI, or ARM template, etc) under the same Azure subscription. -## Define Environment Variables - -The first step in this tutorial is to define environment variables. **Replace the values on the right with your own unique values.** These values will be used throughout the tutorial to create resources and configure the application. Use lowercase and no special characters for the storage account name. 
- -```bash -export SUFFIX=$(cat /dev/urandom | LC_ALL=C tr -dc 'a-z0-9' | fold -w 8 | head -n 1) -export MY_RESOURCE_GROUP_NAME=rg$SUFFIX -export REGION=westus -export MY_STORAGE_ACCOUNT_NAME=storage$SUFFIX -export MY_DATABASE_SERVER_NAME=dbserver$SUFFIX -export MY_DATABASE_NAME=db$SUFFIX -export MY_DATABASE_USERNAME=dbuser$SUFFIX -export MY_DATABASE_PASSWORD=dbpass$SUFFIX -export MY_COMPUTER_VISION_NAME=computervision$SUFFIX -export MY_CONTAINER_APP_NAME=containerapp$SUFFIX -export MY_CONTAINER_APP_ENV_NAME=containerappenv$SUFFIX -``` - ## Clone the sample repository First, we're going to clone this repository onto our local machines. This will provide the starter code required to provide the functionality for the simple application outlined above. We can clone with a simple git command. @@ -56,6 +38,9 @@ In order to run commands against Azure using [the CLI ](https://learn.microsoft. A resource group is a container for related resources. All resources must be placed in a resource group. We will create one for this tutorial. The following command creates a resource group with the previously defined $MY_RESOURCE_GROUP_NAME and $REGION parameters. ```bash +export SUFFIX=$(cat /dev/urandom | LC_ALL=C tr -dc 'a-z0-9' | fold -w 8 | head -n 1) +export MY_RESOURCE_GROUP_NAME=rg$SUFFIX +export REGION=westus az group create --name $MY_RESOURCE_GROUP_NAME --location $REGION ``` @@ -81,6 +66,7 @@ Results: To create a storage account in this resource group we need to run a simple command. To this command, we are passing the name of the storage account, the resource group to deploy it in, the physical region to deploy it in, and the SKU of the storage account. All values are configured using environment variables. 
```bash +export MY_STORAGE_ACCOUNT_NAME=storage$SUFFIX az storage account create --name $MY_STORAGE_ACCOUNT_NAME --resource-group $MY_RESOURCE_GROUP_NAME --location $REGION --sku Standard_LRS ``` @@ -219,6 +205,10 @@ We will be creating an Azure Database for PostgreSQL flexible server for the app - The datatabase credentials: username and password ```bash +export MY_DATABASE_SERVER_NAME=dbserver$SUFFIX +export MY_DATABASE_NAME=db$SUFFIX +export MY_DATABASE_USERNAME=dbuser$SUFFIX +export MY_DATABASE_PASSWORD=dbpass$SUFFIX az postgres flexible-server create \ --name $MY_DATABASE_SERVER_NAME \ --database-name $MY_DATABASE_NAME \ @@ -266,6 +256,8 @@ We will be creating a Computer Vision resource to be able to identify cats or do - The SKU as `S1`, or the most cost-effective paid performance tier. ```bash +export MY_COMPUTER_VISION_NAME=computervision$SUFFIX + az cognitiveservices account create \ --name $MY_COMPUTER_VISION_NAME \ --resource-group $MY_RESOURCE_GROUP_NAME \ @@ -437,6 +429,9 @@ This command will create an Azure Container Registry resource to host our Docker - The path to the source code ```bash +export MY_CONTAINER_APP_NAME=containerapp$SUFFIX +export MY_CONTAINER_APP_ENV_NAME=containerappenv$SUFFIX + az containerapp up \ --name $MY_CONTAINER_APP_NAME \ --resource-group $MY_RESOURCE_GROUP_NAME \ diff --git a/scenarios/CreateRHELVMAndSSH/create-rhel-vm-ssh.md b/scenarios/CreateRHELVMAndSSH/create-rhel-vm-ssh.md index 8efb38189..ac65901a3 100644 --- a/scenarios/CreateRHELVMAndSSH/create-rhel-vm-ssh.md +++ b/scenarios/CreateRHELVMAndSSH/create-rhel-vm-ssh.md @@ -28,19 +28,6 @@ To open the Cloud Shell, just select **Try it** from the upper right corner of a If you prefer to install and use the CLI locally, this quickstart requires Azure CLI version 2.0.30 or later. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI]( /cli/azure/install-azure-cli). 
-## Define environment variables - -The first step is to define the environment variables. Environment variables are commonly used in Linux to centralize configuration data to improve consistency and maintainability of the system. Create the following environment variables to specify the names of resources that you create later in this tutorial: - -```bash -export RANDOM_ID="$(openssl rand -hex 3)" -export MY_RESOURCE_GROUP_NAME="myVMResourceGroup$RANDOM_ID" -export REGION="westeurope" -export MY_VM_NAME="myVM$RANDOM_ID" -export MY_USERNAME=azureuser -export MY_VM_IMAGE="RedHat:RHEL:8-LVM:latest" -``` - ## Log in to Azure using the CLI In order to run commands in Azure using the CLI, you need to log in first. Log in using the `az login` command. @@ -50,6 +37,9 @@ In order to run commands in Azure using the CLI, you need to log in first. Log i A resource group is a container for related resources. All resources must be placed in a resource group. The [az group create](/cli/azure/group) command creates a resource group with the previously defined $MY_RESOURCE_GROUP_NAME and $REGION parameters. ```bash +export RANDOM_ID="$(openssl rand -hex 3)" +export MY_RESOURCE_GROUP_NAME="myVMResourceGroup$RANDOM_ID" +export REGION="westeurope" az group create --name $MY_RESOURCE_GROUP_NAME --location $REGION ``` @@ -79,6 +69,9 @@ The following example creates a VM and adds a user account. The `--generate-ssh- All other values are configured using environment variables. 
```bash +export MY_VM_NAME="myVM$RANDOM_ID" +export MY_USERNAME=azureuser +export MY_VM_IMAGE="RedHat:RHEL:8-LVM:latest" az vm create \ --resource-group $MY_RESOURCE_GROUP_NAME \ --name $MY_VM_NAME \ diff --git a/scenarios/CreateSpeechService/create-speech-service.md b/scenarios/CreateSpeechService/create-speech-service.md new file mode 100644 index 000000000..b321be24b --- /dev/null +++ b/scenarios/CreateSpeechService/create-speech-service.md @@ -0,0 +1,185 @@ +--- +title: "Quickstart: The Speech CLI - Speech service" +titleSuffix: Azure AI services +description: In this Azure AI Speech CLI quickstart, you interact with speech to text, text to speech, and speech translation without having to write code. +author: eric-urban +manager: nitinme +ms.service: azure-ai-speech +ms.topic: quickstart +ms.date: 1/22/2024 +ms.author: eur +ms.custom: mode-api +--- + +# Quickstart: Get started with the Azure AI Speech CLI + +In this article, you learn how to use the Azure AI Speech CLI (also called SPX) to access Speech services such as speech to text, text to speech, and speech translation, without having to write any code. The Speech CLI is production ready, and you can use it to automate simple workflows in the Speech service by using `.bat` or shell scripts. + +This article assumes that you have working knowledge of the Command Prompt window, terminal, or PowerShell. + +> [!NOTE] +> In PowerShell, the [stop-parsing token](/powershell/module/microsoft.powershell.core/about/about_special_characters#stop-parsing-token---) (`--%`) should follow `spx`. For example, run `spx --% config @region` to view the current region config value. + +## Download and install + +[!INCLUDE [spx-setup](includes/spx-setup.md)] + +## Create a resource configuration + +# [Terminal](#tab/terminal) + +To get started, you need a Speech resource key and region identifier (for example, `eastus`, `westus`). Create a Speech resource on the [Azure portal](https://portal.azure.com). 
For more information, see [Create an Azure AI services resource](../../ai-services/multi-service-resource.md?pivots=azportal). + +To configure your resource key and region identifier, run the following commands: + +```console +spx config @key --set SPEECH-KEY +spx config @region --set SPEECH-REGION +``` + +The key and region are stored for future Speech CLI commands. To view the current configuration, run the following commands: + +```console +spx config @key +spx config @region +``` + +As needed, include the `clear` option to remove either stored value: + +```console +spx config @key --clear +spx config @region --clear +``` + +# [PowerShell](#tab/powershell) + +To get started, you need a Speech resource key and region identifier (for example, `eastus`, `westus`). Create a Speech resource on the [Azure portal](https://portal.azure.com/#create/Microsoft.CognitiveServicesSpeechServices). + +To configure your Speech resource key and region identifier, run the following commands in PowerShell: + +```powershell +spx --% config @key --set SPEECH-KEY +spx --% config @region --set SPEECH-REGION +``` + +The key and region are stored for future SPX commands. To view the current configuration, run the following commands: + +```powershell +spx --% config @key +spx --% config @region +``` + +As needed, include the `clear` option to remove either stored value: + +```powershell +spx --% config @key --clear +spx --% config @region --clear +``` + +*** + +## Basic usage + +> [!IMPORTANT] +> When you use the Speech CLI in a container, include the `--host` option. You must also specify `--key none` to ensure that the CLI doesn't try to use a Speech key for authentication. For example, run `spx recognize --key none --host wss://localhost:5000/ --file myaudio.wav` to recognize speech from an audio file in a [speech to text container](speech-container-stt.md). + +This section shows a few basic SPX commands that are often useful for first-time testing and experimentation. 
Run the following command to view the in-tool help: + +```console +spx +``` + +You can search help topics by keyword. For example, to see a list of Speech CLI usage examples, run the following command: + +```console +spx help find --topics "examples" +``` + +To see options for the `recognize` command, run the following command: + +```console +spx help recognize +``` + +More help commands are listed in the console output. You can enter these commands to get detailed help about subcommands. + +## Speech to text (speech recognition) + +> [!NOTE] +> You can't use your computer's microphone when you run the Speech CLI within a Docker container. However, you can read from and save audio files in your local mounted directory. + +To convert speech to text (speech recognition) by using your system's default microphone, run the following command: + +```console +spx recognize --microphone +``` + +After you run the command, SPX begins listening for audio on the current active input device. It stops listening when you select **Enter**. The spoken audio is then recognized and converted to text in the console output. + +With the Speech CLI, you can also recognize speech from an audio file. Run the following command: + +```console +spx recognize --file /path/to/file.wav +``` + +> [!TIP] +> If you get stuck or want to learn more about the Speech CLI recognition options, you can run ```spx help recognize```. + +## Text to speech (speech synthesis) + +The following command takes text as input and then outputs the synthesized speech to the current active output device (for example, your computer speakers). + +```console +spx synthesize --text "Testing synthesis using the Speech CLI" --speakers +``` + +You can also save the synthesized output to a file. In this example, let's create a file named *my-sample.wav* in the directory where you're running the command. + +```console +spx synthesize --text "Enjoy using the Speech CLI." 
--audio output my-sample.wav
+```
+
+These examples presume that you're testing in English. However, Speech service supports speech synthesis in many languages. You can pull down a full list of voices either by running the following command or by visiting the [language support page](./language-support.md?tabs=tts).
+
+```console
+spx synthesize --voices
+```
+
+Here's a command for using one of the voices you discovered.
+
+```console
+spx synthesize --text "Bienvenue chez moi." --voice fr-FR-AlainNeural --speakers
+```
+
+> [!TIP]
+> If you get stuck or want to learn more about the Speech CLI synthesis options, you can run ```spx help synthesize```.
+
+## Speech to text translation
+
+With the Speech CLI, you can also do speech to text translation. Run the following command to capture audio from your default microphone and output the translation as text. Keep in mind that you need to supply the `source` and `target` language with the `translate` command.
+
+```console
+spx translate --microphone --source en-US --target ru-RU
+```
+
+When you're translating into multiple languages, separate the language codes with a semicolon (`;`).
+
+```console
+spx translate --microphone --source en-US --target 'ru-RU;fr-FR;es-ES'
+```
+
+If you want to save the output of your translation, use the `--output` flag. In this example, you also read from a file.
+
+```console
+spx translate --file /some/file/path/input.wav --source en-US --target ru-RU --output file /some/file/path/russian_translation.txt
+```
+
+> [!TIP]
+> If you get stuck or want to learn more about the Speech CLI translation options, you can run ```spx help translate```.
+ + +## Next steps + +* [Install GStreamer to use the Speech CLI with MP3 and other formats](./how-to-use-codec-compressed-audio-input-streams.md) +* [Configuration options for the Speech CLI](./spx-data-store-configuration.md) +* [Batch operations with the Speech CLI](./spx-batch-operations.md) \ No newline at end of file diff --git a/scenarios/DeployApacheAirflowOnAKS/deploy-apache-airflow-on-aks.md b/scenarios/DeployApacheAirflowOnAKS/deploy-apache-airflow-on-aks.md new file mode 100644 index 000000000..03fc11780 --- /dev/null +++ b/scenarios/DeployApacheAirflowOnAKS/deploy-apache-airflow-on-aks.md @@ -0,0 +1,302 @@ +--- +title: 'Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster and Apache Airflow using Azure CLI' +description: Learn how to quickly deploy a Kubernetes cluster and deploy Apache Airflow in Azure Kubernetes Service (AKS) using Azure CLI. +ms.topic: quickstart +ms.date: 04/09/2024 +author: tamram +ms.author: tamram +ms.custom: H1Hack27Feb2017, mvc, devcenter, devx-track-azurecli, mode-api, innovation-engine, linux-related-content +#Customer intent: As a developer or cluster operator, I want to deploy an AKS cluster and deploy Apache Airflow, so I can see how to run applications using the managed Kubernetes service in Azure. +--- + +# Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster and Apache Airflow using Azure CLI + +[![Deploy to Azure](https://aka.ms/deploytoazurebutton)](https://go.microsoft.com/fwlink/?linkid=2262758) + +Azure Kubernetes Service (AKS) is a managed Kubernetes service that lets you quickly deploy and manage clusters. In this quickstart, you learn how to: + +- Deploy an AKS cluster using the Azure CLI. +- Deploy Apache Airflow to your AKS cluster. + +> [!NOTE] +> To get started with quickly provisioning an AKS cluster, this article includes steps to deploy a cluster with default settings for evaluation purposes only. 
Before deploying a production-ready cluster, we recommend that you familiarize yourself with our [baseline reference architecture][baseline-reference-architecture] to consider how it aligns with your business requirements. + +## Before you begin + +This quickstart assumes a basic understanding of Kubernetes concepts. For more information, see [Kubernetes core concepts for Azure Kubernetes Service (AKS)][kubernetes-concepts]. + +- [!INCLUDE [quickstarts-free-trial-note](~/reusable-content/ce-skilling/azure/includes/quickstarts-free-trial-note.md)] + +[!INCLUDE [azure-cli-prepare-your-environment-no-header.md](~/reusable-content/azure-cli/azure-cli-prepare-your-environment-no-header.md)] + +- This article requires version 2.0.64 or later of the Azure CLI. If you're using Azure Cloud Shell, the latest version is already installed there. +- Make sure that the identity you're using to create your cluster has the appropriate minimum permissions. For more details on access and identity for AKS, see [Access and identity options for Azure Kubernetes Service (AKS)](../concepts-identity.md). +- If you have multiple Azure subscriptions, select the appropriate subscription ID in which the resources should be billed using the [az account set](/cli/azure/account#az-account-set) command. For more information, see [How to manage Azure subscriptions – Azure CLI](/cli/azure/manage-azure-subscriptions-azure-cli?tabs=bash#change-the-active-subscription). + +## Create a resource group + +An [Azure resource group][azure-resource-group] is a logical group in which Azure resources are deployed and managed. When you create a resource group, you're prompted to specify a location. This location is the storage location of your resource group metadata and where your resources run in Azure if you don't specify another region during resource creation. + +Create a resource group using the [`az group create`][az-group-create] command. 
+ +```azurecli-interactive +export RANDOM_ID="$(openssl rand -hex 3)" +export MY_RESOURCE_GROUP_NAME="myAKSResourceGroup$RANDOM_ID" +export REGION="eastus2" +az group create --name $MY_RESOURCE_GROUP_NAME --location $REGION +``` + +Results: + +```JSON +{ + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAKSResourceGroupxxxxxx", + "location": "eastus", + "managedBy": null, + "name": "testResourceGroup", + "properties": { + "provisioningState": "Succeeded" + }, + "tags": null, + "type": "Microsoft.Resources/resourceGroups" +} +``` + +## Create an AKS cluster + +Create an AKS cluster using the [`az aks create`][az-aks-create] command. The following example creates a cluster with one node and enables a system-assigned managed identity. + +```azurecli-interactive +export MY_AKS_CLUSTER_NAME="myAKSCluster$RANDOM_ID" +az aks create \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --name $MY_AKS_CLUSTER_NAME \ + --node-count 1 \ + --generate-ssh-keys +``` + +Results: + +```json +{ + "aadProfile": null, + "addonProfiles": { + "httpApplicationRouting": { + "config": null, + "enabled": false + } + }, + "agentPoolProfiles": [ + { + "availabilityZones": null, + "count": 1, + "enableAutoScaling": false, + "enableEncryptionAtHost": false, + "enableFIPS": false, + "enableNodePublicIP": false, + "maxCount": null, + "maxPods": 110, + "minCount": null, + "mode": "System", + "name": "nodepool1", + "nodeImageVersion": "AKSUbuntu-xxxx.x.x.x", + "nodeLabels": null, + "nodeTaints": null, + "orchestratorVersion": "x.x.x", + "osDiskSizeGb": 128, + "osDiskType": "Managed", + "osSku": "Ubuntu", + "osType": "Linux", + "provisioningState": "Succeeded", + "scaleSetEvictionPolicy": null, + "scaleSetPriority": "Regular", + "spotMaxPrice": null, + "tags": null, + "type": "VirtualMachineScaleSets", + "upgradeSettings": { + "maxSurge": null + }, + "vmSize": "Standard_DS2_v2", + "vnetSubnetID": null + } + ], + "apiServerAccessProfile": null, + "autoScalerProfile": null, + 
"autoUpgradeProfile": null, + "azurePortalFQDN": "myAKSClusterxxxxxxxx-xxxxxxxx.hcp.eastus.azmk8s.io", + "azurePortalURL": "https://myAKSClusterxxxxxxxx-xxxxxxxx.hcp.eastus.azmk8s.io", + "dnsPrefix": "myAKSClusterxxxxxxxx", + "enablePodSecurityPolicy": null, + "enableRBAC": true, + "extendedLocation": null, + "fqdn": "myAKSClusterxxxxxxxx-xxxxxxxx.hcp.eastus.azmk8s.io", + "fqdnSubdomain": null, + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.ContainerService/managedClusters/myAKSClusterxxxxxxxx", + "identity": null, + "identityProfile": null, + "kubernetesVersion": "x.x.x", + "linuxProfile": { + "adminUsername": "azureuser", + "ssh": { + "publicKeys": [ + { + "keyData": "ssh-rsa xxxxxxxx...xxxxxx" + } + ] + } + }, + "location": "eastus", + "maxAgentPools": 10, + "name": "myAKSClusterxxxxxxxx", + "networkProfile": { + "dnsServiceIP": "10.0.0.10", + "dockerBridgeCidr": "172.17.0.1/16", + "loadBalancerProfile": { + "allocatedOutboundPorts": null, + "effectiveOutboundIPs": [ + { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/MC_myResourceGroup_myAKSClusterxxxxxxxx_eastus/providers/Microsoft.Network/publicIPAddresses/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "resourceGroup": "MC_myResourceGroup_myAKSClusterxxxxxxxx_eastus" + } + ], + "idleTimeoutInMinutes": null, + "managedOutboundIPs": { + "count": 1 + }, + "outboundIPPrefixes": null, + "outboundIPs": null + }, + "loadBalancerSku": "Standard", + "networkMode": null, + "networkPlugin": "kubenet", + "networkPolicy": null, + "outboundType": "loadBalancer", + "podCidr": null, + "serviceCidr": "10.0.0.0/16" + }, + "nodeResourceGroup": "MC_myResourceGroup_myAKSClusterxxxxxxxx_eastus", + "powerState": { + "code": "Running" + }, + "privateFQDN": null, + "provisioningState": "Succeeded", + "resourceGroup": "myResourceGroup", + "servicePrincipalProfile": { + "clientId": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "secret": null + }, + 
"sku": { + "name": "Basic", + "tier": "Free" + }, + "tags": null, + "type": "Microsoft.ContainerService/ManagedClusters", + "windowsProfile": null +} +``` + +> [!NOTE] +> When you create a new cluster, AKS automatically creates a second resource group to store the AKS resources. For more information, see [Why are two resource groups created with AKS?](../faq.md#why-are-two-resource-groups-created-with-aks) + +## Connect to the cluster + +To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. To install `kubectl` locally, use the [`az aks install-cli`][az-aks-install-cli] command. + +1. Configure `kubectl` to connect to your Kubernetes cluster using the [az aks get-credentials][az-aks-get-credentials] command. This command downloads credentials and configures the Kubernetes CLI to use them. + + ```azurecli-interactive + az aks get-credentials --resource-group $MY_RESOURCE_GROUP_NAME --name $MY_AKS_CLUSTER_NAME + ``` + +1. Verify the connection to your cluster using the [kubectl get][kubectl-get] command. This command returns a list of the cluster nodes. + + ```azurecli-interactive + kubectl get nodes + ``` + +## Deploy Apache Airflow + +To deploy Apache Airflow to your AKS cluster, follow these steps: + +1. **Add the Apache Airflow Helm repository**: Add the official Apache Airflow Helm repository. + + ```azurecli-interactive + helm repo add apache-airflow https://airflow.apache.org + helm repo update + ``` + +2. **Install Apache Airflow**: Create a namespace for Airflow and install Airflow using Helm. + + ```azurecli-interactive + kubectl create namespace airflow + helm install airflow apache-airflow/airflow --namespace airflow + ``` + +By following these steps, you will have Apache Airflow running on your AKS cluster. + +## Test the Apache Airflow Deployment + +You can validate that Apache Airflow is running by checking the status of the Airflow pods. 
+ +### **Check the Status of Airflow Pods**: + ```bash + #!/bin/bash + + NAMESPACE="airflow" + POD_STATUS=$(kubectl get pods --namespace $NAMESPACE -o jsonpath='{.items[*].status.phase}') + + if [[ $POD_STATUS == *"Running"* ]]; then + echo "All Airflow pods are running." + else + echo "Some Airflow pods are not running." + exit 1 + fi + ``` + +### Example Output + +After running the script, you should see output similar to this: + +```OUTPUT +All Airflow pods are running. +``` + +If any of the pods are not running, the script will output: + +```OUTPUT +Some Airflow pods are not running. +``` + +## Delete the cluster + +If you don't plan on going through the [AKS tutorial][aks-tutorial], clean up unnecessary resources to avoid Azure charges. You can remove the resource group, container service, and all related resources using the [`az group delete`][az-group-delete] command. + +> [!NOTE] +> The AKS cluster was created with a system-assigned managed identity, which is the default identity option used in this quickstart. The platform manages this identity so you don't need to manually remove it. + +## Next steps + +In this quickstart, you deployed a Kubernetes cluster and then deployed a simple multi-container application to it. This sample application is for demo purposes only and doesn't represent all the best practices for Kubernetes applications. For guidance on creating full solutions with AKS for production, see [AKS solution guidance][aks-solution-guidance]. + +To learn more about AKS and walk through a complete code-to-deployment example, continue to the Kubernetes cluster tutorial. 
+ +> [!div class="nextstepaction"] +> [AKS tutorial][aks-tutorial] + + +[kubectl]: https://kubernetes.io/docs/reference/kubectl/ +[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply +[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get + + +[kubernetes-concepts]: ../concepts-clusters-workloads.md +[aks-tutorial]: ../tutorial-kubernetes-prepare-app.md +[azure-resource-group]: /azure/azure-resource-manager/management/overview +[az-aks-create]: /cli/azure/aks#az-aks-create +[az-aks-get-credentials]: /cli/azure/aks#az-aks-get-credentials +[az-aks-install-cli]: /cli/azure/aks#az-aks-install-cli +[az-group-create]: /cli/azure/group#az-group-create +[az-group-delete]: /cli/azure/group#az-group-delete +[kubernetes-deployment]: ../concepts-clusters-workloads.md#deployments-and-yaml-manifests +[aks-solution-guidance]: /azure/architecture/reference-architectures/containers/aks-start-here?toc=/azure/aks/toc.json&bc=/azure/aks/breadcrumb/toc.json +[baseline-reference-architecture]: /azure/architecture/reference-architectures/containers/aks/baseline-aks?toc=/azure/aks/toc.json&bc=/azure/aks/breadcrumb/toc.json \ No newline at end of file diff --git a/scenarios/DeployIGonAKS/README.md b/scenarios/DeployIGonAKS/deploy-ig-on-aks.md similarity index 98% rename from scenarios/DeployIGonAKS/README.md rename to scenarios/DeployIGonAKS/deploy-ig-on-aks.md index 7798e7ed1..e224bb56a 100644 --- a/scenarios/DeployIGonAKS/README.md +++ b/scenarios/DeployIGonAKS/deploy-ig-on-aks.md @@ -14,22 +14,14 @@ ms.custom: innovation-engine Welcome to this tutorial where we will take you step by step in deploying [Inspektor Gadget](https://www.inspektor-gadget.io/) in an Azure Kubernetes Service (AKS) cluster with the kubectl plugin: `gadget`. This tutorial assumes you are logged into Azure CLI already and have selected a subscription to use with the CLI. 
-## Define Environment Variables +## Create a resource group -The First step in this tutorial is to define environment variables: +A resource group is a container for related resources. All resources must be placed in a resource group. We will create one for this tutorial. The following command creates a resource group with the previously defined $MY_RESOURCE_GROUP_NAME and $REGION parameters. ```bash export RANDOM_ID="$(openssl rand -hex 3)" export MY_RESOURCE_GROUP_NAME="myResourceGroup$RANDOM_ID" export REGION="eastus" -export MY_AKS_CLUSTER_NAME="myAKSCluster$RANDOM_ID" -``` - -## Create a resource group - -A resource group is a container for related resources. All resources must be placed in a resource group. We will create one for this tutorial. The following command creates a resource group with the previously defined $MY_RESOURCE_GROUP_NAME and $REGION parameters. - -```bash az group create --name $MY_RESOURCE_GROUP_NAME --location $REGION ``` @@ -57,6 +49,7 @@ Create an AKS cluster using the az aks create command. This will take a few minutes. ```bash +export MY_AKS_CLUSTER_NAME="myAKSCluster$RANDOM_ID" az aks create \ --resource-group $MY_RESOURCE_GROUP_NAME \ --name $MY_AKS_CLUSTER_NAME \ diff --git a/scenarios/DeployPremiumSSDV2/deploy-premium-ssd-v2.md b/scenarios/DeployPremiumSSDV2/deploy-premium-ssd-v2.md new file mode 100644 index 000000000..378d84370 --- /dev/null +++ b/scenarios/DeployPremiumSSDV2/deploy-premium-ssd-v2.md @@ -0,0 +1,265 @@ +--- +title: Deploy a Premium SSD v2 managed disk +description: Learn how to deploy a Premium SSD v2 and about its regional availability. 
+author: roygara +ms.author: rogarana +ms.date: 08/12/2024 +ms.topic: how-to +ms.service: azure-disk-storage +ms.custom: references_regions, devx-track-azurecli, devx-track-azurepowershell +--- + +# Deploy a Premium SSD v2 + +Azure Premium SSD v2 is designed for IO-intense enterprise workloads that require sub-millisecond disk latencies and high IOPS and throughput at a low cost. Premium SSD v2 is suited for a broad range of workloads such as SQL server, Oracle, MariaDB, SAP, Cassandra, Mongo DB, big data/analytics, gaming, on virtual machines or stateful containers. For conceptual information on Premium SSD v2, see [Premium SSD v2](disks-types.md#premium-ssd-v2). + +Premium SSD v2 support a 4k physical sector size by default, but can be configured to use a 512E sector size as well. While most applications are compatible with 4k sector sizes, some require 512 byte sector sizes. Oracle Database, for example, requires release 12.2 or later in order to support 4k native disks. + +## Limitations + +[!INCLUDE [disks-prem-v2-limitations](./includes/disks-prem-v2-limitations.md)] + +### Regional availability + +[!INCLUDE [disks-premv2-regions](./includes/disks-premv2-regions.md)] + +## Prerequisites + +- Install either the latest [Azure CLI](/cli/azure/install-azure-cli) or the latest [Azure PowerShell module](/powershell/azure/install-azure-powershell). + +## Determine region availability programmatically + +Since not every region and zone supports Premium SSD v2, you can use the Azure CLI or PowerShell to determine region and zone supportability. 
+ +# [Azure CLI](#tab/azure-cli) + +To determine the regions and zones that support Premium SSD v2, replace `yourSubscriptionId` with your subscription, and then run the [az vm list-skus](/cli/azure/vm#az-vm-list-skus) command: + +```azurecli +az login + +subscriptionId="" + +az account set --subscription $subscriptionId + +az vm list-skus --resource-type disks --query "[?name=='PremiumV2_LRS'].{Region:locationInfo[0].location, Zones:locationInfo[0].zones}" +``` + +# [PowerShell](#tab/azure-powershell) + +To determine the regions and zones that support Premium SSD v2, replace `yourSubscriptionId` with your subscription, and then run the [Get-AzComputeResourceSku](/powershell/module/az.compute/get-azcomputeresourcesku) command: + +```powershell +Connect-AzAccount + +$subscriptionId="yourSubscriptionId" + +Set-AzContext -Subscription $subscriptionId + +Get-AzComputeResourceSku | where {$_.ResourceType -eq 'disks' -and $_.Name -eq 'PremiumV2_LRS'} +``` + +# [Azure portal](#tab/portal) + +To programmatically determine the regions and zones you can deploy to, use either the Azure CLI or the Azure PowerShell module. + +--- + +## Create a resource group + +An [Azure resource group][azure-resource-group] is a logical group in which Azure resources are deployed and managed. When you create a resource group, you're prompted to specify a location. This location is the storage location of your resource group metadata and where your resources run in Azure if you don't specify another region during resource creation. + +Create a resource group using the [`az group create`][az-group-create] command. 
+ +```azurecli-interactive +export RANDOM_ID="$(openssl rand -hex 3)" +export MY_RESOURCE_GROUP_NAME="myResourceGroup$RANDOM_ID" +export REGION="eastus2" +az group create --name $MY_RESOURCE_GROUP_NAME --location $REGION +``` + +Results: + +```JSON +{ + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAKSResourceGroupxxxxxx", + "location": "eastus", + "managedBy": null, + "name": "testResourceGroup", + "properties": { + "provisioningState": "Succeeded" + }, + "tags": null, + "type": "Microsoft.Resources/resourceGroups" +} +``` + +Now that you know the region and zone to deploy to, follow the deployment steps in this article to create a Premium SSD v2 disk and attach it to a VM. + +## Use a Premium SSD v2 + +Create a Premium SSD v2 disk in an availability zone by using the [az disk create](/cli/azure/disk#az-disk-create) command. + +The following script creates a Premium SSD v2 with a 4k sector size, to deploy one with a 512 sector size, update the `$LOGICAL_SECTOR_SIZE` parameter. Replace the values of all the variables with your own, then run the following script: + +```azurecli-interactive +## Create a Premium SSD v2 disk +export MY_DISK_NAME="myDisk$RANDOM_ID" +##Replace 4096 with 512 to deploy a disk with 512 sector size +export LOGICAL_SECTOR_SIZE=4096 +az disk create -n $MY_DISK_NAME -g $MY_RESOURCE_GROUP_NAME \ +--size-gb 100 \ +--disk-iops-read-write 5000 \ +--disk-mbps-read-write 150 \ +--location $REGION \ +--sku PremiumV2_LRS \ +--zone "1" \ +--logical-sector-size $LOGICAL_SECTOR_SIZE +``` + +## Create the VM + +Then create a VM in the same region and availability zone that supports Premium Storage and attach the disk to it by using the [az vm create](/cli/azure/vm#az-vm-create) command. 
+ +```azurecli-interactive +export MY_VM_NAME="myVM$RANDOM_ID" +export MY_VM_IMAGE="Win2016Datacenter" +export MY_VM_SIZE="Standard_D4s_v3" +export AZURE_USERNAME=azureuser +export AZURE_PASSWORD=$(openssl rand -base64 16 | tr -dc 'a-zA-Z0-9@#%^&*()-_=+[]{}|;:,.<>?') +az vm create -n $MY_VM_NAME -g $MY_RESOURCE_GROUP_NAME \ +--image $MY_VM_IMAGE \ +--authentication-type password --admin-password $AZURE_PASSWORD --admin-username $AZURE_USERNAME \ +--size $MY_VM_SIZE \ +--location $REGION \ +--zone "1" \ +--attach-data-disks $MY_DISK_NAME +``` + +# [PowerShell](#tab/azure-powershell) + +Create a Premium SSD v2 disk in an availability zone by using the [New-AzDiskConfig](/powershell/module/az.compute/new-azdiskconfig) to define the configuration of your disk and the [New-AzDisk](/powershell/module/az.compute/new-azdisk) command to create your disk. Next, create a VM in the same region and availability zone that supports Premium Storage by using the [az vm create](/cli/azure/vm#az-vm-create). Finally, attach the disk to it by using the [Get-AzVM](/powershell/module/az.compute/get-azvm) command to identify variables for the virtual machine, the [Get-AzDisk](/powershell/module/az.compute/get-azdisk) command to identify variables for the disk, the [Add-AzVMDataDisk](/powershell/module/az.compute/add-azvmdatadisk) command to add the disk, and the [Update-AzVM](/powershell/module/az.compute/update-azvm) command to attach the new disk to the virtual machine. + +The following script creates a Premium SSD v2 with a 4k sector size, to deploy one with a 512 sector size, update the `$LOGICAL_SECTOR_SIZE` parameter. 
Replace the values of all the variables with your own, then run the following script: + +```powershell +# Initialize variables +$MY_RESOURCE_GROUP_NAME = "yourResourceGroupName" +$REGION = "eastus" +$zone = "yourZoneNumber" +$MY_DISK_NAME = "yourMY_DISK_NAME" +$diskSizeInGiB = 100 +$diskIOPS = 5000 +$diskThroughputInMBPS = 150 +#To use a 512 sector size, replace 4096 with 512 +$LOGICAL_SECTOR_SIZE=4096 +$lun = 1 +$MY_VM_NAME = "yourMY_VM_NAME" +$MY_VM_IMAGE = "Win2016Datacenter" +$MY_VM_SIZE = "Standard_D4s_v3" +$vmAdminUser = "yourAdminUserName" +$vmAdminPassword = ConvertTo-SecureString "yourAdminUserPassword" -AsPlainText -Force +$credential = New-Object System.Management.Automation.PSCredential ($vmAdminUser, $vmAdminPassword); + +# Create a Premium SSD v2 +$diskconfig = New-AzDiskConfig ` +-Location $REGION ` +-Zone $zone ` +-DiskSizeGB $diskSizeInGiB ` +-DiskIOPSReadWrite $diskIOPS ` +-DiskMBpsReadWrite $diskThroughputInMBPS ` +-AccountType PremiumV2_LRS ` +-LogicalSectorSize $LOGICAL_SECTOR_SIZE ` +-CreateOption Empty + +New-AzDisk ` +-ResourceGroupName $MY_RESOURCE_GROUP_NAME ` +-DiskName $MY_DISK_NAME ` +-Disk $diskconfig + +# Create the VM +New-AzVm ` + -ResourceGroupName $MY_RESOURCE_GROUP_NAME ` + -Name $MY_VM_NAME ` + -Location $REGION ` + -Zone $zone ` + -Image $MY_VM_IMAGE ` + -Size $MY_VM_SIZE ` + -Credential $credential + +# Attach the disk to the VM +$vm = Get-AzVM -ResourceGroupName $MY_RESOURCE_GROUP_NAME -Name $MY_VM_NAME +$disk = Get-AzDisk -ResourceGroupName $MY_RESOURCE_GROUP_NAME -Name $MY_DISK_NAME +$vm = Add-AzVMDataDisk -VM $vm -Name $MY_DISK_NAME -CreateOption Attach -ManagedDiskId $disk.Id -Lun $lun +Update-AzVM -VM $vm -ResourceGroupName $MY_RESOURCE_GROUP_NAME +``` + +# [Azure portal](#tab/portal) + +1. Sign in to the [Azure portal](https://portal.azure.com/). +1. Navigate to **Virtual machines** and follow the normal VM creation process. +1. 
On the **Basics** page, select a [supported region](#regional-availability) and set **Availability options** to **Availability zone**. +1. Select one of the zones. +1. Fill in the rest of the values on the page as you like. + + :::image type="content" source="media/disks-deploy-premium-v2/premv2-portal-deploy.png" alt-text="Screenshot of the basics page, region and availability options and zones highlighted." lightbox="media/disks-deploy-premium-v2/premv2-portal-deploy.png"::: + +1. Proceed to the **Disks** page. +1. Under **Data disks** select **Create and attach a new disk**. + + :::image type="content" source="media/disks-deploy-premium-v2/premv2-create-data-disk.png" alt-text="Screenshot highlighting create and attach a new disk on the disk page." lightbox="media/disks-deploy-premium-v2/premv2-create-data-disk.png"::: + +1. Select the **Disk SKU** and select **Premium SSD v2**. + + :::image type="content" source="media/disks-deploy-premium-v2/premv2-select.png" alt-text="Screenshot selecting Premium SSD v2 SKU." lightbox="media/disks-deploy-premium-v2/premv2-select.png"::: + +1. Select whether you'd like to deploy a 4k or 512 logical sector size. + + :::image type="content" source="media/disks-deploy-premium-v2/premv2-sector-size.png" alt-text="Screenshot of deployment logical sector size deployment options." lightbox="media/disks-deploy-premium-v2/premv2-sector-size.png"::: + +1. Proceed through the rest of the VM deployment, making any choices that you desire. + +You've now deployed a VM with a premium SSD v2. + +--- + +## Adjust disk performance + +You can adjust the performance of a Premium SSD v2 disk four times within a 24 hour period. Creating a disk counts as one of these times, so for the first 24 hours after creating a premium SSD v2 disk you can only adjust its performance up to three times. + +For conceptual information on adjusting disk performance, see [Premium SSD v2 performance](disks-types.md#premium-ssd-v2-performance). 
+ +# [Azure CLI](#tab/azure-cli) + +Use the [az disk update](/cli/azure/disk#az-disk-update) command to change the performance configuration of your Premium SSD v2 disk. For example, you can use the `disk-iops-read-write` parameter to adjust the max IOPS limit, and the `disk-mbps-read-write` parameter to adjust the max throughput limit of your Premium SSD v2 disk. + +The following command adjusts the performance of your disk. Update the values in the command, and then run it: + +```azurecli +az disk update --subscription $subscription --resource-group $rgname --name $MY_DISK_NAME --disk-iops-read-write=5000 --disk-mbps-read-write=200 +``` + +# [PowerShell](#tab/azure-powershell) + +Use the [New-AzDiskUpdateConfig](/powershell/module/az.compute/new-azdiskupdateconfig) command to define your new performance configuration values for your Premium SSD v2 disks, and then use the [Update-AzDisk](/powershell/module/az.compute/update-azdisk) command to apply your configuration changes to your disk. For example, you can use the `DiskIOPSReadWrite` parameter to adjust the max IOPS limit, and the `DiskMBpsReadWrite` parameter to adjust the max throughput limit of your Premium SSD v2 disk. + +The following command adjusts the performance of your disk. Update the values in the command, and then run it: + +```azurepowershell +$diskupdateconfig = New-AzDiskUpdateConfig -DiskIOPSReadWrite 5000 -DiskMBpsReadWrite 200 +Update-AzDisk -ResourceGroupName $resourceGroup -DiskName $MY_DISK_NAME -DiskUpdate $diskupdateconfig +``` + +# [Azure portal](#tab/portal) + +1. Navigate to the disk you'd like to modify in the [Azure portal](https://portal.azure.com/). +1. Select **Size + Performance**. +1. Set the values for **Disk IOPS** or **Disk throughput (MB/s)** or both, to meet your needs, then select **Save**. 
+ +--- + +## Next steps + +Add a data disk by using either the [Azure portal](linux/attach-disk-portal.yml), [Azure CLI](linux/add-disk.md), or [PowerShell](windows/attach-disk-ps.md). + +Provide feedback on [Premium SSD v2](https://aka.ms/premium-ssd-v2-survey). \ No newline at end of file diff --git a/scenarios/GPUNodePoolAKS/gpu-node-pool-aks.md b/scenarios/GPUNodePoolAKS/gpu-node-pool-aks.md new file mode 100644 index 000000000..516aa2783 --- /dev/null +++ b/scenarios/GPUNodePoolAKS/gpu-node-pool-aks.md @@ -0,0 +1,516 @@ +--- +title: Create a multi-instance GPU node pool in Azure Kubernetes Service (AKS) +description: Learn how to create a multi-instance GPU node pool in Azure Kubernetes Service (AKS). +ms.topic: article +ms.date: 08/30/2023 +ms.author: juda +ms.subservice: aks-nodes +--- + +# Create a multi-instance GPU node pool in Azure Kubernetes Service (AKS) + +Nvidia's A100 GPU can be divided in up to seven independent instances. Each instance has its own memory and Stream Multiprocessor (SM). For more information on the Nvidia A100, see [Nvidia A100 GPU][Nvidia A100 GPU]. + +This article walks you through how to create a multi-instance GPU node pool in an Azure Kubernetes Service (AKS) cluster. + +## Prerequisites and limitations + +* An Azure account with an active subscription. If you don't have one, you can [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). +* Azure CLI version 2.2.0 or later installed and configured. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI][install-azure-cli]. +* The Kubernetes command-line client, [kubectl](https://kubernetes.io/docs/reference/kubectl/), installed and configured. If you use Azure Cloud Shell, `kubectl` is already installed. If you want to install it locally, you can use the [`az aks install-cli`][az-aks-install-cli] command. +* Helm v3 installed and configured. 
For more information, see [Installing Helm](https://helm.sh/docs/intro/install/). +* You can't use Cluster Autoscaler with multi-instance node pools. + +## GPU instance profiles + +GPU instance profiles define how GPUs are partitioned. The following table shows the available GPU instance profile for the `Standard_ND96asr_v4`: + +| Profile name | Fraction of SM |Fraction of memory | Number of instances created | +|--|--|--|--| +| MIG 1g.5gb | 1/7 | 1/8 | 7 | +| MIG 2g.10gb | 2/7 | 2/8 | 3 | +| MIG 3g.20gb | 3/7 | 4/8 | 2 | +| MIG 4g.20gb | 4/7 | 4/8 | 1 | +| MIG 7g.40gb | 7/7 | 8/8 | 1 | + +As an example, the GPU instance profile of `MIG 1g.5gb` indicates that each GPU instance has 1g SM(Computing resource) and 5gb memory. In this case, the GPU is partitioned into seven instances. + +The available GPU instance profiles available for this instance size include `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g`, and `MIG7g`. + +> [!IMPORTANT] +> You can't change the applied GPU instance profile after node pool creation. + +## Create an AKS cluster + +1. Create an Azure resource group using the [`az group create`][az-group-create] command. + + ```azurecli-interactive + export RANDOM_ID="$(openssl rand -hex 3)" + export MY_RESOURCE_GROUP_NAME="myAKSResourceGroup$RANDOM_ID" + export REGION="eastus2" + export MY_AKS_CLUSTER_NAME="myAKSCluster$RANDOM_ID" + az group create --name $MY_RESOURCE_GROUP_NAME --location $REGION + ``` + + Results: + + + ```JSON + { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAKSResourceGroupxxxxxx", + "location": "eastus", + "managedBy": null, + "name": "testResourceGroup", + "properties": { + "provisioningState": "Succeeded" + }, + "tags": null, + "type": "Microsoft.Resources/resourceGroups" + } + ``` + +2. Create an AKS cluster using the [`az aks create`][az-aks-create] command. 
+ + ```azurecli-interactive + az aks create \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --name $MY_AKS_CLUSTER_NAME\ + --node-count 1 \ + --generate-ssh-keys + ``` + + Results: + + + ```JSON + { + "aadProfile": null, + "addonProfiles": { + "httpApplicationRouting": null, + "kubeDashboard": null, + "omsagent": { + "config": { + "logAnalyticsWorkspaceResourceID": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/xxxxxx/providers/Microsoft.OperationalInsights/workspaces/xxxxxx" + }, + "enabled": false + } + }, + "agentPoolProfiles": [ + { + "availabilityZones": null, + "count": 1, + "enableAutoScaling": false, + "enableEncryptionAtHost": false, + "enableFips": false, + "enableNodePublicIP": false, + "gpuInstanceProfile": null, + "kubeletConfig": null, + "kubeletDiskType": "OS", + "linuxOSConfig": null, + "maxCount": null, + "maxPods": 110, + "minCount": null, + "mode": "System", + "name": "nodepool1", + "nodeImageVersion": "AKSUbuntu-xxxx.x.x.x", + "nodeLabels": null, + "nodePublicIPPrefixID": null, + "nodeTaints": null, + "orchestratorVersion": "x.x.x", + "osDiskSizeGB": 128, + "osDiskType": "Managed", + "osSKU": "Ubuntu", + "osType": "Linux", + "podSubnetID": null, + "powerState": { + "code": "Running" + }, + "provisioningState": "Succeeded", + "proximityPlacementGroupID": null, + "scaleSetEvictionPolicy": null, + "scaleSetPriority": "Regular", + "spotMaxPrice": null, + "tags": null, + "type": "VirtualMachineScaleSets", + "upgradeSettings": { + "maxSurge": null + }, + "vmSize": "Standard_DS2_v2", + "vnetSubnetID": null + } + ], + "apiServerAccessProfile": null, + "autoScalerProfile": null, + "autoUpgradeProfile": null, + "azurePortalFQDN": null, + "azurePortalURL": "https://portal.azure.com/#resource/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/xxxxxx/providers/Microsoft.ContainerService/managedClusters/xxxxxx", + "creationData": null, + "currentKubernetesVersion": "x.x.x", + "diskEncryptionSetID": null, + "dnsPrefix": 
"xxxxxx", + "enablePodSecurityPolicy": null, + "enableRBAC": true, + "extendedLocation": null, + "fqdn": "xxxxxx-xxxxxx-xxxxxx.hcp.xxxxxx.azmk8s.io", + "fqdnSubdomain": null, + "httpProxyConfig": null, + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/xxxxxx/providers/Microsoft.ContainerService/managedClusters/xxxxxx", + "identity": { + "principalId": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "tenantId": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "type": "SystemAssigned", + "userAssignedIdentities": null + }, + "identityProfile": null, + "ingressProfile": null, + "keyVaultSecretsProvider": null, + "kubernetesVersion": "x.x.x", + "location": "xxxxxx", + "maxAgentPools": 10, + "monitoringAddonProfile": null, + "name": "xxxxxx", + "networkProfile": { + "dnsServiceIP": "10.0.0.10", + "dockerBridgeCidr": "172.17.0.1/16", + "loadBalancerProfile": { + "allocatedOutboundPorts": null, + "effectiveOutboundIPs": [ + { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/xxxxxx/providers/Microsoft.Network/publicIPAddresses/xxxxxx", + "resourceGroup": "xxxxxx" + } + ], + "enableMultipleStandardLoadBalancers": null, + "idleTimeoutInMinutes": null, + "managedOutboundIPs": { + "count": 1 + }, + "outboundIPPrefixes": null, + "outboundIPs": null, + "outboundPortsAllocated": null + }, + "loadBalancerSku": "Standard", + "networkMode": null, + "networkPlugin": "kubenet", + "networkPolicy": null, + "outboundType": "loadBalancer", + "podCidr": null, + "serviceCidr": "10.0.0.0/16" + }, + "nodeResourceGroup": "MC_xxxxxx_xxxxxx_xxxxxx", + "oidcIssuerProfile": null, + "podIdentityProfile": null, + "powerState": { + "code": "Running" + }, + "privateFQDN": null, + "privateLinkResources": null, + "provisioningState": "Succeeded", + "publicNetworkAccess": "Enabled", + "resourceGroup": "xxxxxx", + "securityProfile": null, + "servicePrincipalProfile": { + "clientId": "msi" + }, + "sku": { + "name": "Basic", + "tier": "Free" + }, + "storageProfile": { 
+ "blobCsiDriver": { + "enabled": true + }, + "diskCsiDriver": { + "enabled": true + }, + "fileCsiDriver": { + "enabled": true + }, + "snapshotController": { + "enabled": true + } + }, + "tags": null, + "type": "Microsoft.ContainerService/ManagedClusters", + "windowsProfile": null + } + ``` + +## Create a multi-instance GPU node pool + +You can use either the Azure CLI or an HTTP request to the ARM API to create the node pool. + +### [Azure CLI](#tab/azure-cli) + +* Create a multi-instance GPU node pool using the [`az aks nodepool add`][az-aks-nodepool-add] command and specify the GPU instance profile. + + ```azurecli-interactive + export MY_NODE_POOL_NAME="mignode" + az aks nodepool add \ + --name $MY_NODE_POOL_NAME \ + --resource-group $MY_RESOURCE_GROUP_NAME \ + --cluster-name $MY_AKS_CLUSTER_NAME \ + --node-vm-size Standard_NC24ads_A100_v4 \ + --gpu-instance-profile MIG1g + ``` + + Results: + + + ```JSON + { + "agentPoolProfile": { + "count": 1, + "enableAutoScaling": false, + "enableEncryptionAtHost": false, + "enableFips": false, + "enableNodePublicIp": false, + "gpuInstanceProfile": "MIG1g", + "kubeletConfig": null, + "linuxOsConfig": null, + "maxCount": null, + "maxPods": 110, + "minCount": null, + "mode": "User", + "name": "mignode", + "nodeImageVersion": "AKSUbuntu-xxxx.x.x.x", + "nodeLabels": {}, + "nodePublicIpPrefixId": null, + "nodeTaints": [], + "orchestratorVersion": "x.x.x", + "osDiskSizeGb": 128, + "osDiskType": "Managed", + "osSku": "Ubuntu", + "osType": "Linux", + "podSubnetId": null, + "provisioningState": "Succeeded", + "proximityPlacementGroupId": null, + "scaleSetEvictionPolicy": null, + "scaleSetPriority": "Regular", + "spotMaxPrice": null, + "tags": null, + "type": "VirtualMachineScaleSets", + "upgradeSettings": { + "maxSurge": "1" + }, + "vmSize": "Standard_NC96ads_A100_v4", + "vnetSubnetId": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/xxxxxx/providers/Microsoft.Network/virtualNetworks/xxxxxx/subnets/xxxxxx" + }, + 
"creationData": null, + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/xxxxxx/providers/Microsoft.ContainerService/managedClusters/xxxxxx/agentPools/mignode", + "name": "mignode", + "provisioningState": "Succeeded", + "resourceGroup": "xxxxxx", + "type": "Microsoft.ContainerService/managedClusters/agentPools" + } + ``` + +### [HTTP request](#tab/http-request) + +* Create a multi-instance GPU node pool by placing the GPU instance profile in the request body. + + ```http + { + "properties": { + "count": 1, + "vmSize": "Standard_ND96asr_v4", + "type": "VirtualMachineScaleSets", + "gpuInstanceProfile": "MIG1g" + } + } + ``` + +--- + +## Determine multi-instance GPU (MIG) strategy + +Before you install the Nvidia plugins, you need to specify which multi-instance GPU (MIG) strategy to use for GPU partitioning: *Single strategy* or *Mixed strategy*. The two strategies don't affect how you execute CPU workloads, but how GPU resources are displayed. + +* **Single strategy**: The single strategy treats every GPU instance as a GPU. If you use this strategy, the GPU resources are displayed as `nvidia.com/gpu: 1`. +* **Mixed strategy**: The mixed strategy exposes the GPU instances and the GPU instance profile. If you use this strategy, the GPU resource are displayed as `nvidia.com/mig1g.5gb: 1`. + +## Install the NVIDIA device plugin and GPU feature discovery + +1. Set your MIG strategy as an environment variable. You can use either single or mixed strategy. + + ```azurecli-interactive + # Single strategy + export MIG_STRATEGY=single + + # Mixed strategy + export MIG_STRATEGY=mixed + ``` + +2. Add the Nvidia device plugin and GPU feature discovery helm repos using the `helm repo add` and `helm repo update` commands. + + ```azurecli-interactive + helm repo add nvdp https://nvidia.github.io/k8s-device-plugin + helm repo add nvgfd https://nvidia.github.io/gpu-feature-discovery + helm repo update + ``` + +3. 
Install the Nvidia device plugin using the `helm install` command. + + ```azurecli-interactive + helm install \ + --version=0.14.0 \ + --generate-name \ + --set migStrategy=${MIG_STRATEGY} \ + nvdp/nvidia-device-plugin + ``` + +4. Install the GPU feature discovery using the `helm install` command. + + ```azurecli-interactive + helm install \ + --version=0.2.0 \ + --generate-name \ + --set migStrategy=${MIG_STRATEGY} \ + nvgfd/gpu-feature-discovery + ``` + +## Confirm multi-instance GPU capability + +1. Configure `kubectl` to connect to your AKS cluster using the [`az aks get-credentials`][az-aks-get-credentials] command. + + ```azurecli-interactive + az aks get-credentials --resource-group $MY_RESOURCE_GROUP_NAME --name $MY_AKS_CLUSTER_NAME + ``` + +2. Verify the connection to your cluster using the `kubectl get` command to return a list of cluster nodes. + + ```azurecli-interactive + kubectl get nodes -o wide + ``` + +3. Confirm the node has multi-instance GPU capability using the `kubectl describe node` command. The following example command describes the node named *mignode*, which uses MIG1g as the GPU instance profile. + + ```azurecli-interactive + kubectl describe node mignode + ``` + + Your output should resemble the following example output: + + ```output + # Single strategy output + Allocatable: + nvidia.com/gpu: 56 + + # Mixed strategy output + Allocatable: + nvidia.com/mig-1g.5gb: 56 + ``` + +## Schedule work + +The following examples are based on cuda base image version 12.1.1 for Ubuntu22.04, tagged as `12.1.1-base-ubuntu22.04`. + +### Single strategy + +1. Create a file named `single-strategy-example.yaml` and copy in the following manifest. 
+ + ```bash + cat <<EOF > single-strategy-example.yaml + apiVersion: v1 + kind: Pod + metadata: + name: nvidia-single + spec: + containers: + - name: nvidia-single + image: nvidia/cuda:12.1.1-base-ubuntu22.04 + command: ["/bin/sh"] + args: ["-c","sleep 1000"] + resources: + limits: + "nvidia.com/gpu": 1 + EOF + ``` + +2. Deploy the application using the `kubectl apply` command and specify the name of your YAML manifest. + + ```azurecli-interactive + kubectl apply -f single-strategy-example.yaml + ``` + +3. Verify the allocated GPU devices using the `kubectl exec` command. This command returns a list of the cluster nodes. + + ```azurecli-interactive + kubectl exec nvidia-single -- nvidia-smi -L + ``` + + The following example resembles output showing successfully created deployments and services: + + ```output + GPU 0: NVIDIA A100 40GB PCIe (UUID: GPU-48aeb943-9458-4282-da24-e5f49e0db44b) + MIG 1g.5gb Device 0: (UUID: MIG-fb42055e-9e53-5764-9278-438605a3014c) + MIG 1g.5gb Device 1: (UUID: MIG-3d4db13e-c42d-5555-98f4-8b50389791bc) + MIG 1g.5gb Device 2: (UUID: MIG-de819d17-9382-56a2-b9ca-aec36c88014f) + MIG 1g.5gb Device 3: (UUID: MIG-50ab4b32-92db-5567-bf6d-fac646fe29f2) + MIG 1g.5gb Device 4: (UUID: MIG-7b6b1b6e-5101-58a4-b5f5-21563789e62e) + MIG 1g.5gb Device 5: (UUID: MIG-14549027-dd49-5cc0-bca4-55e67011bd85) + MIG 1g.5gb Device 6: (UUID: MIG-37e055e8-8890-567f-a646-ebf9fde3ce7a) + ``` + +### Mixed strategy + +1. Create a file named `mixed-strategy-example.yaml` and copy in the following manifest. + + ```bash + cat <<EOF > mixed-strategy-example.yaml + apiVersion: v1 + kind: Pod + metadata: + name: nvidia-mixed + spec: + containers: + - name: nvidia-mixed + image: nvidia/cuda:12.1.1-base-ubuntu22.04 + command: ["/bin/sh"] + args: ["-c","sleep 100"] + resources: + limits: + "nvidia.com/mig-1g.5gb": 1 + EOF + ``` + +2. Deploy the application using the `kubectl apply` command and specify the name of your YAML manifest. 
+ + ```azurecli-interactive + kubectl apply -f mixed-strategy-example.yaml + ``` + +3. Verify the allocated GPU devices using the `kubectl exec` command. This command returns a list of the cluster nodes. + + ```azurecli-interactive + kubectl exec nvidia-mixed -- nvidia-smi -L + ``` + + The following example resembles output showing successfully created deployments and services: + + ```output + GPU 0: NVIDIA A100 40GB PCIe (UUID: GPU-48aeb943-9458-4282-da24-e5f49e0db44b) + MIG 1g.5gb Device 0: (UUID: MIG-fb42055e-9e53-5764-9278-438605a3014c) + ``` + +> [!IMPORTANT] +> The `latest` tag for CUDA images has been deprecated on Docker Hub. Please refer to [NVIDIA's repository](https://hub.docker.com/r/nvidia/cuda/tags) for the latest images and corresponding tags. + +## Troubleshooting + +If you don't see multi-instance GPU capability after creating the node pool, confirm the API version isn't older than *2021-08-01*. + +## Next steps + +For more information on AKS node pools, see [Manage node pools for a cluster in AKS](./manage-node-pools.md). 
+ + +[az-group-create]: /cli/azure/group#az_group_create +[az-aks-create]: /cli/azure/aks#az_aks_create +[az-aks-nodepool-add]: /cli/azure/aks/nodepool#az_aks_nodepool_add +[install-azure-cli]: /cli/azure/install-azure-cli +[az-aks-install-cli]: /cli/azure/aks#az_aks_install_cli +[az-aks-get-credentials]: /cli/azure/aks#az_aks_get_credentials + + +[Nvidia A100 GPU]:https://www.nvidia.com/en-us/data-center/a100/ \ No newline at end of file diff --git a/scenarios/ObtainPerformanceMetricsLinuxSustem/README.md b/scenarios/ObtainPerformanceMetricsLinuxSustem/obtain-performance-metrics-linux-system.md similarity index 100% rename from scenarios/ObtainPerformanceMetricsLinuxSustem/README.md rename to scenarios/ObtainPerformanceMetricsLinuxSustem/obtain-performance-metrics-linux-system.md diff --git a/scenarios/PostgresRAGLLM/chat.py b/scenarios/PostgresRAGLLM/chat.py new file mode 100644 index 000000000..f46e87a29 --- /dev/null +++ b/scenarios/PostgresRAGLLM/chat.py @@ -0,0 +1,92 @@ +import argparse +from textwrap import dedent + +from langchain_text_splitters import RecursiveCharacterTextSplitter +from openai import AzureOpenAI + +from db import VectorDatabase + +parser = argparse.ArgumentParser() +parser.add_argument('--api-key', dest='api_key', type=str) +parser.add_argument('--endpoint', dest='endpoint', type=str) +parser.add_argument('--pguser', dest='pguser', type=str) +parser.add_argument('--phhost', dest='phhost', type=str) +parser.add_argument('--pgpassword', dest='pgpassword', type=str) +parser.add_argument('--pgdatabase', dest='pgdatabase', type=str) +parser.add_argument('--populate', dest='populate', action="store_true") +args = parser.parse_args() + + +class ChatBot: + def __init__(self): + self.db = VectorDatabase(pguser=args.pguser, pghost=args.phhost, pgpassword=args.pgpassword, pgdatabase=args.pgdatabase) + self.api = AzureOpenAI( + azure_endpoint=args.endpoint, + api_key=args.api_key, + api_version="2024-06-01", + ) + self.text_splitter = 
RecursiveCharacterTextSplitter( + chunk_size=512, + chunk_overlap=20, + length_function=len, + is_separator_regex=False, + ) + + def load_file(self, text_file: str): + with open(text_file, encoding="UTF-8") as f: + data = f.read() + chunks = self.text_splitter.create_documents([data]) + for i, chunk in enumerate(chunks): + text = chunk.page_content + embedding = self.__create_embedding(text) + self.db.save_embedding(i, text, embedding) + + def __create_embedding(self, text: str): + return self.api.embeddings.create(model="text-embedding-ada-002", input=text).data[0].embedding + + def get_answer(self, question: str): + question_embedding = self.__create_embedding(question) + context = self.db.search_documents(question_embedding) + + # fmt: off + system_promt = dedent(f"""\ + You are a friendly and helpful AI assistant. I am going to ask you a question about Zytonium. + Use the following piece of context to answer the question. + If the context is empty, try your best to answer without it. + Never mention the context. + Try to keep your answers concise unless asked to provide details. + + Context: {context} + + That is the end of the context. 
+ """) + # fmt: on + + response = self.api.chat.completions.create( + model="gpt-4", + messages=[ + {"role": "system", "content": system_promt}, + {"role": "user", "content": question}, + ], + ) + return response.choices[0].message.content + + +def main(): + chat_bot = ChatBot() + + if args.populate: + print("Loading embedding data into database...") + chat_bot.load_file("knowledge.txt") + print("Done loading data.") + return + + while True: + q = input("Ask a question (q to exit): ") + if q == "q": + break + print(chat_bot.get_answer(q)) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/scenarios/PostgresRAGLLM/db.py b/scenarios/PostgresRAGLLM/db.py new file mode 100644 index 000000000..1165cb540 --- /dev/null +++ b/scenarios/PostgresRAGLLM/db.py @@ -0,0 +1,25 @@ +import os + +from psycopg2 import connect + + +class VectorDatabase: + def __init__(self, pguser, pghost, pgpassword, pgdatabase): + self.conn = connect(user=pguser, password=pgpassword, host=pghost, port=5432, dbname=pgdatabase) + + def __exit__(self, exc_type, exc_value, traceback): + self.conn.close() + + def save_embedding(self, _id: int, data: str, embedding: list[float]): + with self.conn.cursor() as cursor: + cursor.execute("INSERT INTO embeddings (id, data, embedding) VALUES (%s, %s, %s)", (_id, data, embedding)) + self.conn.commit() + + def search_documents(self, question_embedding): + cursor = self.conn.cursor() + cursor.execute( + "SELECT data FROM embeddings v ORDER BY v.embedding <#> (%s)::vector LIMIT 1", + (question_embedding,), + ) + results = cursor.fetchall() + return list(map(lambda x: x[0], results)) \ No newline at end of file diff --git a/scenarios/PostgresRAGLLM/knowledge.txt b/scenarios/PostgresRAGLLM/knowledge.txt new file mode 100644 index 000000000..4b5536f5d --- /dev/null +++ b/scenarios/PostgresRAGLLM/knowledge.txt @@ -0,0 +1,17 @@ +Ladies and Gentlemen, + +Thank you for gathering here today. 
It is an honor to address such a distinguished audience. Today, I want to share with you some remarkable achievements and fascinating discoveries that have recently come to light. + +Firstly, our scientists have successfully developed a new type of energy source derived from the rare mineral, Zytonium, found only in the depths of the Pacific Ocean. This mineral has the potential to revolutionize our energy sector, providing a clean and virtually limitless supply of power. Early tests have shown that just one gram of Zytonium can power a city the size of Tokyo for an entire year. + +In the field of medicine, researchers have made groundbreaking progress in the fight against aging. A new treatment, known as Rejuvenex, has been shown to reverse the effects of aging at the cellular level. Clinical trials have demonstrated that individuals treated with Rejuvenex not only look younger but also experience improved cognitive function and physical vitality. This could potentially extend the average human lifespan to 150 years. + +Our advancements are not limited to Earth. The Mars Colonization Project has reached a significant milestone. The first human settlement on Mars, named New Horizon, is now fully operational. The colony is equipped with state-of-the-art facilities, including hydroponic farms, advanced life support systems, and a research center dedicated to studying the Martian environment. This marks a new era in space exploration and human expansion beyond our home planet. + +In the realm of technology, the development of Quantum AI has surpassed all expectations. This new form of artificial intelligence can process information at speeds previously thought impossible. Quantum AI has already solved complex problems in fields such as climate modeling, financial forecasting, and even predicting natural disasters with unprecedented accuracy. + +Lastly, I am thrilled to announce the discovery of a new species of intelligent life in the depths of the Amazon rainforest. 
These beings, known as Sylvans, possess unique abilities to communicate with plants and animals. Their society is based on principles of harmony and sustainability, offering us valuable insights into living in balance with nature. + +These incredible achievements are just the beginning. As we continue to push the boundaries of science and technology, we are paving the way for a brighter, more prosperous future for all of humanity. + +Thank you. \ No newline at end of file diff --git a/scenarios/PostgresRAGLLM/postgres-rag-llm.md new file mode 100644 index 000000000..9f4399137 --- /dev/null +++ b/scenarios/PostgresRAGLLM/postgres-rag-llm.md @@ -0,0 +1,149 @@ +--- +title: 'Quickstart: Deploy a Postgres vector database' +description: Set up a Postgres vector database and openai resources to run a RAG-LLM model. +ms.topic: quickstart +ms.date: 09/06/2024 +author: aamini7 +ms.author: ariaamini +ms.custom: innovation-engine, linux-related-content +--- + +## Introduction + +In this doc, we go over how to host the infrastructure required to run a basic LLM model with RAG capabilities on Azure. +We first set up a Postgres database capable of storing vector embeddings for documents/knowledge files that we want to use to +augment our queries. We then create an Azure OpenAI deployment capable of generating embeddings and answering questions using the latest 'gpt-4-turbo' model. +We then use a python script to fill our postgres database with embeddings from a sample "knowledge.txt" file containing information about an imaginary +resource called 'Zytonium'. Once the database is filled with those embeddings, we use the same python script to answer any +questions we have about 'Zytonium'. The script will search the database for relevant information for our query using an embeddings search and +then augment our query with that relevant information before being sent to our LLM to answer. 
+ +## Set up resource group + +Set up a resource group with a random ID. + +```bash +export RANDOM_ID="$(openssl rand -hex 3)" +export RG_NAME="myPostgresResourceGroup$RANDOM_ID" +export REGION="centralus" + +az group create \ + --name $RG_NAME \ + --location $REGION +``` + +## Create OpenAI resources + +Create the openai resource + +```bash +export OPEN_AI_SERVICE_NAME="openai-service-$RANDOM_ID" +export EMBEDDING_MODEL="text-embedding-ada-002" +export CHAT_MODEL="gpt-4-turbo-2024-04-09" + +az cognitiveservices account create \ + --name $OPEN_AI_SERVICE_NAME \ + --resource-group $RG_NAME \ + --location westus \ + --kind OpenAI \ + --sku S0 +``` + +## Create OpenAI deployments + +```bash +export EMBEDDING_MODEL="text-embedding-ada-002" +export CHAT_MODEL="gpt-4" + +az cognitiveservices account deployment create \ + --name $OPEN_AI_SERVICE_NAME \ + --resource-group $RG_NAME \ + --deployment-name $EMBEDDING_MODEL \ + --model-name $EMBEDDING_MODEL \ + --model-version "2" \ + --model-format OpenAI \ + --sku-capacity "1" \ + --sku-name "Standard" + +az cognitiveservices account deployment create \ + --name $OPEN_AI_SERVICE_NAME \ + --resource-group $RG_NAME \ + --deployment-name $CHAT_MODEL \ + --model-name $CHAT_MODEL \ + --model-version "turbo-2024-04-09" \ + --model-format OpenAI \ + --sku-capacity "1" \ + --sku-name "Standard" +``` + +## Create Database + +Create an Azure postgres database. 
+ +```bash +export POSTGRES_SERVER_NAME="mydb$RANDOM_ID" +export PGHOST="${POSTGRES_SERVER_NAME}.postgres.database.azure.com" +export PGUSER="dbadmin$RANDOM_ID" +export PGPORT=5432 +export PGDATABASE="azure-ai-demo" +export PGPASSWORD="$(openssl rand -base64 32)" + +az postgres flexible-server create \ + --admin-password $PGPASSWORD \ + --admin-user $PGUSER \ + --location $REGION \ + --name $POSTGRES_SERVER_NAME \ + --database-name $PGDATABASE \ + --resource-group $RG_NAME \ + --sku-name Standard_B2s \ + --storage-auto-grow Disabled \ + --storage-size 32 \ + --tier Burstable \ + --version 16 \ + --yes -o JSON \ + --public-access 0.0.0.0 +``` + +## Enable postgres vector extension + +Set up the vector extension for postgres to allow storing vectors/embeddings. + +```bash +az postgres flexible-server parameter set \ + --resource-group $RG_NAME \ + --server-name $POSTGRES_SERVER_NAME \ + --name azure.extensions --value vector + +psql -c "CREATE EXTENSION IF NOT EXISTS vector;" + +psql \ + -c "CREATE TABLE embeddings(id int PRIMARY KEY, data text, embedding vector(1536));" \ + -c "CREATE INDEX ON embeddings USING hnsw (embedding vector_ip_ops);" +``` + +## Populate with data from knowledge file + +The chat bot uses a local file called "knowledge.txt" as the sample document to generate embeddings for +and to store those embeddings in the newly created postgres database. Then any questions you ask will +be augmented with context from the "knowledge.txt" after searching the document for the most relevant +pieces of context using the embeddings. The "knowledge.txt" is about a fictional material called Zytonium. +You can view the full knowledge.txt and the code for the chatbot by looking in the "scenarios/PostgresRagLlmDemo" directory. 
+ +```bash +export ENDPOINT=$(az cognitiveservices account show --name $OPEN_AI_SERVICE_NAME --resource-group $RG_NAME | jq -r .properties.endpoint) +export API_KEY=$(az cognitiveservices account keys list --name $OPEN_AI_SERVICE_NAME --resource-group $RG_NAME | jq -r .key1) + +cd ~/scenarios/PostgresRAGLLM +pip install -r requirements.txt +python chat.py --populate --api-key $API_KEY --endpoint $ENDPOINT --pguser $PGUSER --pghost $PGHOST --pgpassword $PGPASSWORD --pgdatabase $PGDATABASE +``` + +## Run Chat bot + +This final step prints out the command you can copy/paste into the terminal to run the chatbot. `cd ~/scenarios/PostgresRAGLLM && python chat.py --api-key $API_KEY --endpoint $ENDPOINT --pguser $PGUSER --pghost $PGHOST --pgpassword $PGPASSWORD --pgdatabase $PGDATABASE` + +```bash +echo " +To run the chatbot, see the last step for more info. +" +``` \ No newline at end of file diff --git a/scenarios/PostgresRAGLLM/requirements.txt b/scenarios/PostgresRAGLLM/requirements.txt new file mode 100644 index 000000000..619e2f5e7 --- /dev/null +++ b/scenarios/PostgresRAGLLM/requirements.txt @@ -0,0 +1,4 @@ +azure-identity==1.17.1 +openai==1.42.0 +psycopg2==2.9.9 +langchain-text-splitters==0.2.2 \ No newline at end of file diff --git a/scenarios/azure-aks-docs/articles/aks/create-postgresql-ha.md b/scenarios/azure-aks-docs/articles/aks/create-postgresql-ha.md index 7bd545aef..16fe227ee 100644 --- a/scenarios/azure-aks-docs/articles/aks/create-postgresql-ha.md +++ b/scenarios/azure-aks-docs/articles/aks/create-postgresql-ha.md @@ -20,31 +20,6 @@ In this article, you create the infrastructure needed to deploy a highly availab * [Set environment variables](#set-environment-variables) for use throughout this guide. * [Install the required extensions](#install-required-extensions). 
-## Set environment variables - -Set the following environment variables for use throughout this guide: - -```bash -export SUFFIX=$(cat /dev/urandom | LC_ALL=C tr -dc 'a-z0-9' | fold -w 8 | head -n 1) -export LOCAL_NAME="cnpg" -export TAGS="owner=user" -export RESOURCE_GROUP_NAME="rg-${LOCAL_NAME}-${SUFFIX}" -export PRIMARY_CLUSTER_REGION="westus3" -export AKS_PRIMARY_CLUSTER_NAME="aks-primary-${LOCAL_NAME}-${SUFFIX}" -export AKS_PRIMARY_MANAGED_RG_NAME="rg-${LOCAL_NAME}-primary-aksmanaged-${SUFFIX}" -export AKS_PRIMARY_CLUSTER_FED_CREDENTIAL_NAME="pg-primary-fedcred1-${LOCAL_NAME}-${SUFFIX}" -export AKS_PRIMARY_CLUSTER_PG_DNSPREFIX=$(echo $(echo "a$(openssl rand -hex 5 | cut -c1-11)")) -export AKS_UAMI_CLUSTER_IDENTITY_NAME="mi-aks-${LOCAL_NAME}-${SUFFIX}" -export AKS_CLUSTER_VERSION="1.29" -export PG_NAMESPACE="cnpg-database" -export PG_SYSTEM_NAMESPACE="cnpg-system" -export PG_PRIMARY_CLUSTER_NAME="pg-primary-${LOCAL_NAME}-${SUFFIX}" -export PG_PRIMARY_STORAGE_ACCOUNT_NAME="hacnpgpsa${SUFFIX}" -export PG_STORAGE_BACKUP_CONTAINER_NAME="backups" -export ENABLE_AZURE_PVC_UPDATES="true" -export MY_PUBLIC_CLIENT_IP=$(dig +short myip.opendns.com @resolver3.opendns.com) -``` - ## Install required extensions The `aks-preview`, `k8s-extension` and `amg` extensions provide more functionality for managing Kubernetes clusters and querying Azure resources. Install these extensions using the following [`az extension add`][az-extension-add] commands: @@ -78,6 +53,10 @@ kubectl krew install cnpg Create a resource group to hold the resources you create in this guide using the [`az group create`][az-group-create] command. ```bash +export TAGS="owner=user" +export LOCAL_NAME="cnpg" +export RESOURCE_GROUP_NAME="rg-${LOCAL_NAME}-${SUFFIX}" +export PRIMARY_CLUSTER_REGION="westus3" az group create \ --name $RESOURCE_GROUP_NAME \ --location $PRIMARY_CLUSTER_REGION \ @@ -93,6 +72,8 @@ In this section, you create a user-assigned managed identity (UAMI) to allow the 1. 
Create a user-assigned managed identity using the [`az identity create`][az-identity-create] command. ```bash + export SUFFIX=$(cat /dev/urandom | LC_ALL=C tr -dc 'a-z0-9' | fold -w 8 | head -n 1) + export AKS_UAMI_CLUSTER_IDENTITY_NAME="mi-aks-${LOCAL_NAME}-${SUFFIX}" AKS_UAMI_WI_IDENTITY=$(az identity create \ --name $AKS_UAMI_CLUSTER_IDENTITY_NAME \ --resource-group $RESOURCE_GROUP_NAME \ @@ -124,6 +105,8 @@ The CNPG operator automatically generates a service account called *postgres* th 1. Create an object storage account to store PostgreSQL backups in the primary region using the [`az storage account create`][az-storage-account-create] command. ```bash + export PG_PRIMARY_STORAGE_ACCOUNT_NAME="hacnpgpsa${SUFFIX}" + az storage account create \ --name $PG_PRIMARY_STORAGE_ACCOUNT_NAME \ --resource-group $RESOURCE_GROUP_NAME \ @@ -137,6 +120,8 @@ The CNPG operator automatically generates a service account called *postgres* th 1. Create the storage container to store the Write Ahead Logs (WAL) and regular PostgreSQL on-demand and scheduled backups using the [`az storage container create`][az-storage-container-create] command. ```bash + export PG_STORAGE_BACKUP_CONTAINER_NAME="backups" + az storage container create \ --name $PG_STORAGE_BACKUP_CONTAINER_NAME \ --account-name $PG_PRIMARY_STORAGE_ACCOUNT_NAME \ @@ -268,7 +253,11 @@ You also add a user node pool to the AKS cluster to host the PostgreSQL cluster. 
export SYSTEM_NODE_POOL_VMSKU="standard_d2s_v3" export USER_NODE_POOL_NAME="postgres" export USER_NODE_POOL_VMSKU="standard_d4s_v3" - + export AKS_PRIMARY_CLUSTER_NAME="aks-primary-${LOCAL_NAME}-${SUFFIX}" + export AKS_PRIMARY_MANAGED_RG_NAME="rg-${LOCAL_NAME}-primary-aksmanaged-${SUFFIX}" + export AKS_CLUSTER_VERSION="1.29" + export MY_PUBLIC_CLIENT_IP=$(dig +short myip.opendns.com @resolver3.opendns.com) + az aks create \ --name $AKS_PRIMARY_CLUSTER_NAME \ --tags $TAGS \ @@ -333,6 +322,9 @@ In this section, you get the AKS cluster credentials, which serve as the keys th 2. Create the namespace for the CNPG controller manager services, the PostgreSQL cluster, and its related services by using the [`kubectl create namespace`][kubectl-create-namespace] command. ```bash + export PG_NAMESPACE="cnpg-database" + export PG_SYSTEM_NAMESPACE="cnpg-system" + kubectl create namespace $PG_NAMESPACE --context $AKS_PRIMARY_CLUSTER_NAME kubectl create namespace $PG_SYSTEM_NAMESPACE --context $AKS_PRIMARY_CLUSTER_NAME ``` @@ -440,7 +432,6 @@ To validate deployment of the PostgreSQL cluster and use client PostgreSQL tooli $AKS_PRIMARY_CLUSTER_NODERG_NAME \ --query id \ --output tsv) - echo $AKS_PRIMARY_CLUSTER_NODERG_NAME_SCOPE ``` @@ -550,4 +541,4 @@ In this section, you install the CNPG operator in the AKS cluster using Helm or [kubectl-apply]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_apply/ [deploy-postgresql]: ./deploy-postgresql-ha.md [install-krew]: https://krew.sigs.k8s.io/ -[cnpg-plugin]: https://cloudnative-pg.io/documentation/current/kubectl-plugin/#using-krew +[cnpg-plugin]: https://cloudnative-pg.io/documentation/current/kubectl-plugin/#using-krew \ No newline at end of file diff --git a/scenarios/azure-aks-docs/articles/aks/deploy-postgresql-ha.md b/scenarios/azure-aks-docs/articles/aks/deploy-postgresql-ha.md index 3d10f4277..9f2eb91bd 100644 --- a/scenarios/azure-aks-docs/articles/aks/deploy-postgresql-ha.md +++ 
b/scenarios/azure-aks-docs/articles/aks/deploy-postgresql-ha.md @@ -21,7 +21,7 @@ In this article, you deploy a highly available PostgreSQL database on AKS. 1. Generate a secret to validate the PostgreSQL deployment by interactive login for a bootstrap app user using the [`kubectl create secret`][kubectl-create-secret] command. ```bash - PG_DATABASE_APPUSER_SECRET=$(echo -n | openssl rand -base64 16) + export PG_DATABASE_APPUSER_SECRET=$(echo -n | openssl rand -base64 16) kubectl create secret generic db-user-pass \ --from-literal=username=app \ @@ -41,6 +41,7 @@ In this article, you deploy a highly available PostgreSQL database on AKS. * Deploy a ConfigMap to set environment variables for the PostgreSQL cluster using the following [`kubectl apply`][kubectl-apply] command: ```bash + export ENABLE_AZURE_PVC_UPDATES="true" cat < [!NOTE] -> When you create a new cluster, AKS automatically creates a second resource group to store the AKS resources. For more information, see [Why are two resource groups created with AKS?](../faq.md#why-are-two-resource-groups-created-with-aks) +> When you create a new cluster, AKS automatically creates a second resource group to store the AKS resources. 
For more information, see [Why are two resource groups created with AKS?](../faq.yml) ## Connect to the cluster @@ -444,4 +444,4 @@ To learn more about AKS and walk through a complete code-to-deployment example, [az-group-delete]: /cli/azure/group#az-group-delete [kubernetes-deployment]: ../concepts-clusters-workloads.md#deployments-and-yaml-manifests [aks-solution-guidance]: /azure/architecture/reference-architectures/containers/aks-start-here?toc=/azure/aks/toc.json&bc=/azure/aks/breadcrumb/toc.json -[baseline-reference-architecture]: /azure/architecture/reference-architectures/containers/aks/baseline-aks?toc=/azure/aks/toc.json&bc=/azure/aks/breadcrumb/toc.json +[baseline-reference-architecture]: /azure/architecture/reference-architectures/containers/aks/baseline-aks?toc=/azure/aks/toc.json&bc=/azure/aks/breadcrumb/toc.json \ No newline at end of file diff --git a/scenarios/azure-aks-docs/articles/aks/postgresql-ha-overview.md b/scenarios/azure-aks-docs/articles/aks/postgresql-ha-overview.md index 7c7849259..455d6024e 100644 --- a/scenarios/azure-aks-docs/articles/aks/postgresql-ha-overview.md +++ b/scenarios/azure-aks-docs/articles/aks/postgresql-ha-overview.md @@ -53,7 +53,7 @@ This diagram illustrates a PostgreSQL cluster setup with one primary replica and Backups are stored on [Azure Blob Storage](/azure/storage/blobs/), providing another way to restore the database in the event of an issue with streaming replication from the primary replica. -:::image source="./media/postgresql-ha-overview/architecture-diagram.png" alt-text="Diagram of CNPG architecture." lightbox="./media/postgresql-ha-overview/architecture-diagram.png"::: +:::image source="./media/postgresql-ha-overview/postgres-architecture-diagram.png" alt-text="Diagram of CNPG architecture." 
lightbox="./media/postgresql-ha-overview/postgres-architecture-diagram.png"::: > [!NOTE] > For applications that require data separation at the database level, you can add more databases with postInitSQL commands and similar. It is not currently possible with the CNPG operator to add more databases in a declarative way. @@ -89,4 +89,4 @@ Backups are stored on [Azure Blob Storage](/azure/storage/blobs/), providing ano [install-vscode]: https://code.visualstudio.com/Download [install-krew]: https://krew.sigs.k8s.io/ [cnpg-plugin]: https://cloudnative-pg.io/documentation/current/kubectl-plugin/#using-krew -[create-infrastructure]: ./create-postgresql-ha.md +[create-infrastructure]: ./create-postgresql-ha.md \ No newline at end of file diff --git a/scenarios/azure-docs/articles/virtual-machine-scale-sets/.openpublishing.redirection.virtual-machine-scale-sets.json b/scenarios/azure-compute-docs/articles/virtual-machine-scale-sets/.openpublishing.redirection.virtual-machine-scale-sets.json similarity index 100% rename from scenarios/azure-docs/articles/virtual-machine-scale-sets/.openpublishing.redirection.virtual-machine-scale-sets.json rename to scenarios/azure-compute-docs/articles/virtual-machine-scale-sets/.openpublishing.redirection.virtual-machine-scale-sets.json diff --git a/scenarios/azure-docs/articles/virtual-machine-scale-sets/TOC.yml b/scenarios/azure-compute-docs/articles/virtual-machine-scale-sets/TOC.yml similarity index 100% rename from scenarios/azure-docs/articles/virtual-machine-scale-sets/TOC.yml rename to scenarios/azure-compute-docs/articles/virtual-machine-scale-sets/TOC.yml diff --git a/scenarios/azure-docs/articles/virtual-machine-scale-sets/breadcrumb/toc.yml b/scenarios/azure-compute-docs/articles/virtual-machine-scale-sets/breadcrumb/toc.yml similarity index 100% rename from scenarios/azure-docs/articles/virtual-machine-scale-sets/breadcrumb/toc.yml rename to scenarios/azure-compute-docs/articles/virtual-machine-scale-sets/breadcrumb/toc.yml 
diff --git a/scenarios/azure-docs/articles/virtual-machine-scale-sets/flexible-virtual-machine-scale-sets-cli.md b/scenarios/azure-compute-docs/articles/virtual-machine-scale-sets/flexible-virtual-machine-scale-sets-cli.md similarity index 99% rename from scenarios/azure-docs/articles/virtual-machine-scale-sets/flexible-virtual-machine-scale-sets-cli.md rename to scenarios/azure-compute-docs/articles/virtual-machine-scale-sets/flexible-virtual-machine-scale-sets-cli.md index 1437a6d08..093f67a4c 100644 --- a/scenarios/azure-docs/articles/virtual-machine-scale-sets/flexible-virtual-machine-scale-sets-cli.md +++ b/scenarios/azure-compute-docs/articles/virtual-machine-scale-sets/flexible-virtual-machine-scale-sets-cli.md @@ -25,33 +25,14 @@ The Azure Cloud Shell is a free interactive shell that you can use to run the st To open the Cloud Shell, select **Open Cloud Shell** from the upper right corner of a code block. You can also launch Cloud Shell in a separate browser tab by going to [https://shell.azure.com/cli](https://shell.azure.com/cli). Select **Copy** to copy the blocks of code, paste it into the Cloud Shell, and press enter to run it. -## Define environment variables +## Create a resource group -Define environment variables as follows. +A resource group is a logical container into which Azure resources are deployed and managed. All resources must be placed in a resource group. The following command creates a resource group with the previously defined $MY_RESOURCE_GROUP_NAME and $REGION parameters. 
```bash export RANDOM_ID="$(openssl rand -hex 3)" export MY_RESOURCE_GROUP_NAME="myVMSSResourceGroup$RANDOM_ID" export REGION=EastUS -export MY_VMSS_NAME="myVMSS$RANDOM_ID" -export MY_USERNAME=azureuser -export MY_VM_IMAGE="Ubuntu2204" -export MY_VNET_NAME="myVNet$RANDOM_ID" -export NETWORK_PREFIX="$(($RANDOM % 254 + 1))" -export MY_VNET_PREFIX="10.$NETWORK_PREFIX.0.0/16" -export MY_VM_SN_NAME="myVMSN$RANDOM_ID" -export MY_VM_SN_PREFIX="10.$NETWORK_PREFIX.0.0/24" -export MY_APPGW_SN_NAME="myAPPGWSN$RANDOM_ID" -export MY_APPGW_SN_PREFIX="10.$NETWORK_PREFIX.1.0/24" -export MY_APPGW_NAME="myAPPGW$RANDOM_ID" -export MY_APPGW_PUBLIC_IP_NAME="myAPPGWPublicIP$RANDOM_ID" -``` - -## Create a resource group - -A resource group is a logical container into which Azure resources are deployed and managed. All resources must be placed in a resource group. The following command creates a resource group with the previously defined $MY_RESOURCE_GROUP_NAME and $REGION parameters. - -```bash az group create --name $MY_RESOURCE_GROUP_NAME --location $REGION -o JSON ``` @@ -78,6 +59,11 @@ Now you'll create network resources. In this step you're going to create a virtu #### Create virtual network and subnet ```bash +export MY_VNET_NAME="myVNet$RANDOM_ID" +export NETWORK_PREFIX="$(($RANDOM % 254 + 1))" +export MY_VNET_PREFIX="10.$NETWORK_PREFIX.0.0/16" +export MY_VM_SN_NAME="myVMSN$RANDOM_ID" +export MY_VM_SN_PREFIX="10.$NETWORK_PREFIX.0.0/24" az network vnet create --name $MY_VNET_NAME --resource-group $MY_RESOURCE_GROUP_NAME --location $REGION --address-prefix $MY_VNET_PREFIX --subnet-name $MY_VM_SN_NAME --subnet-prefix $MY_VM_SN_PREFIX -o JSON ``` @@ -124,6 +110,10 @@ Results: Azure Application Gateway requires a dedicated subnet within your virtual network. The following command creates a subnet named $MY_APPGW_SN_NAME with a specified address prefix named $MY_APPGW_SN_PREFIX in your virtual network $MY_VNET_NAME. 
```bash +export MY_APPGW_SN_NAME="myAPPGWSN$RANDOM_ID" +export MY_APPGW_SN_PREFIX="10.$NETWORK_PREFIX.1.0/24" +export MY_APPGW_NAME="myAPPGW$RANDOM_ID" +export MY_APPGW_PUBLIC_IP_NAME="myAPPGWPublicIP$RANDOM_ID" az network vnet subnet create --name $MY_APPGW_SN_NAME --resource-group $MY_RESOURCE_GROUP_NAME --vnet-name $MY_VNET_NAME --address-prefix $MY_APPGW_SN_PREFIX -o JSON ``` @@ -388,6 +378,9 @@ https://techcommunity.microsoft.com/t5/azure-compute-blog/breaking-change-for-vm Now create a Virtual Machine Scale Set with [az vmss create](/cli/azure/vmss). The following example creates a zone redundant scale set with an instance count of *2* with public IP in subnet $MY_VM_SN_NAME within your resource group $MY_RESOURCE_GROUP_NAME, integrates the Application Gateway, and generates SSH keys. Make sure to save the SSH keys if you need to log into your VMs via ssh. ```bash +export MY_VMSS_NAME="myVMSS$RANDOM_ID" +export MY_USERNAME=azureuser +export MY_VM_IMAGE="Ubuntu2204" az vmss create --name $MY_VMSS_NAME --resource-group $MY_RESOURCE_GROUP_NAME --image $MY_VM_IMAGE --admin-username $MY_USERNAME --generate-ssh-keys --public-ip-per-vm --orchestration-mode Uniform --instance-count 2 --zones 1 2 3 --vnet-name $MY_VNET_NAME --subnet $MY_VM_SN_NAME --vm-sku Standard_DS2_v2 --upgrade-policy-mode Automatic --app-gateway $MY_APPGW_NAME --backend-pool-name appGatewayBackendPool -o JSON ``` diff --git a/scenarios/azure-docs/articles/virtual-machine-scale-sets/index.yml b/scenarios/azure-compute-docs/articles/virtual-machine-scale-sets/index.yml similarity index 100% rename from scenarios/azure-docs/articles/virtual-machine-scale-sets/index.yml rename to scenarios/azure-compute-docs/articles/virtual-machine-scale-sets/index.yml diff --git a/scenarios/azure-docs/articles/virtual-machine-scale-sets/virtual-machine-scale-sets-faq.yml b/scenarios/azure-compute-docs/articles/virtual-machine-scale-sets/virtual-machine-scale-sets-faq.yml similarity index 100% rename from 
scenarios/azure-docs/articles/virtual-machine-scale-sets/virtual-machine-scale-sets-faq.yml rename to scenarios/azure-compute-docs/articles/virtual-machine-scale-sets/virtual-machine-scale-sets-faq.yml diff --git a/scenarios/azure-docs/articles/virtual-machines/linux/quick-create-cli.md b/scenarios/azure-compute-docs/articles/virtual-machines/linux/quick-create-cli.md similarity index 100% rename from scenarios/azure-docs/articles/virtual-machines/linux/quick-create-cli.md rename to scenarios/azure-compute-docs/articles/virtual-machines/linux/quick-create-cli.md diff --git a/scenarios/azure-docs/articles/virtual-machines/linux/tutorial-lemp-stack.md b/scenarios/azure-compute-docs/articles/virtual-machines/linux/tutorial-lemp-stack.md similarity index 98% rename from scenarios/azure-docs/articles/virtual-machines/linux/tutorial-lemp-stack.md rename to scenarios/azure-compute-docs/articles/virtual-machines/linux/tutorial-lemp-stack.md index 765de9f87..665bcec9f 100644 --- a/scenarios/azure-docs/articles/virtual-machines/linux/tutorial-lemp-stack.md +++ b/scenarios/azure-compute-docs/articles/virtual-machines/linux/tutorial-lemp-stack.md @@ -30,42 +30,10 @@ This article walks you through how to deploy an NGINX web server, Azure MySQL Fl > * Install WordPress This setup is for quick tests or proof of concept. For more on the LEMP stack, including recommendations for a production environment, see the [Ubuntu documentation](https://help.ubuntu.com/community/ApacheMySQLPHP). -This tutorial uses the CLI within the [Azure Cloud Shell](../../cloud-shell/overview.md), which is constantly updated to the latest version. To open the Cloud Shell, select **Try it** from the top of any code block. +This tutorial uses the CLI within the [Azure Cloud Shell](/azure/cloud-shell/overview), which is constantly updated to the latest version. To open the Cloud Shell, select **Try it** from the top of any code block. 
If you choose to install and use the CLI locally, this tutorial requires that you're running the Azure CLI version 2.0.30 or later. Find the version by running the `az --version` command. If you need to install or upgrade, see [Install Azure CLI]( /cli/azure/install-azure-cli). -## Variable declaration - -First we need to define a few variables that help with the configuration of the LEMP workload. - -```bash -export NETWORK_PREFIX="$(($RANDOM % 254 + 1))" -export RANDOM_ID="$(openssl rand -hex 3)" -export MY_RESOURCE_GROUP_NAME="myLEMPResourceGroup$RANDOM_ID" -export REGION="westeurope" -export MY_VM_NAME="myVM$RANDOM_ID" -export MY_VM_USERNAME="azureadmin" -export MY_VM_SIZE='Standard_DS2_v2' -export MY_VM_IMAGE='Canonical:0001-com-ubuntu-minimal-jammy:minimal-22_04-lts-gen2:latest' -export MY_PUBLIC_IP_NAME="myPublicIP$RANDOM_ID" -export MY_DNS_LABEL="mydnslabel$RANDOM_ID" -export MY_NSG_NAME="myNSG$RANDOM_ID" -export MY_NSG_SSH_RULE="Allow-Access$RANDOM_ID" -export MY_VM_NIC_NAME="myVMNic$RANDOM_ID" -export MY_VNET_NAME="myVNet$RANDOM_ID" -export MY_VNET_PREFIX="10.$NETWORK_PREFIX.0.0/22" -export MY_SN_NAME="mySN$RANDOM_ID" -export MY_SN_PREFIX="10.$NETWORK_PREFIX.0.0/24" -export MY_MYSQL_DB_NAME="mydb$RANDOM_ID" -export MY_MYSQL_ADMIN_USERNAME="dbadmin$RANDOM_ID" -export MY_MYSQL_ADMIN_PW="$(openssl rand -base64 32)" -export MY_MYSQL_SN_NAME="myMySQLSN$RANDOM_ID" -export MY_WP_ADMIN_PW="$(openssl rand -base64 32)" -export MY_WP_ADMIN_USER="wpcliadmin" -export MY_AZURE_USER=$(az account show --query user.name --output tsv) -export FQDN="${MY_DNS_LABEL}.${REGION}.cloudapp.azure.com" -``` - -[az-identity-create]: /cli/azure/identity#az-identity-create -[az-grafana-create]: /cli/azure/grafana#az-grafana-create -[postgresql-ha-deployment-overview]: ./postgresql-ha-overview.md -[az-extension-add]: /cli/azure/extension#az_extension_add -[az-group-create]: /cli/azure/group#az_group_create -[az-storage-account-create]: 
/cli/azure/storage/account#az_storage_account_create -[az-storage-container-create]: /cli/azure/storage/container#az_storage_container_create -[inherit-from-azuread]: https://cloudnative-pg.io/documentation/1.23/appendixes/object_stores/#azure-blob-storage -[az-storage-account-show]: /cli/azure/storage/account#az_storage_account_show -[az-role-assignment-create]: /cli/azure/role/assignment#az_role_assignment_create -[az-monitor-account-create]: /cli/azure/monitor/account#az_monitor_account_create -[az-monitor-log-analytics-workspace-create]: /cli/azure/monitor/log-analytics/workspace#az_monitor_log_analytics_workspace_create -[azure-managed-grafana-pricing]: https://azure.microsoft.com/pricing/details/managed-grafana/ -[az-aks-create]: /cli/azure/aks#az_aks_create -[az-aks-node-pool-add]: /cli/azure/aks/nodepool#az_aks_nodepool_add -[az-aks-get-credentials]: /cli/azure/aks#az_aks_get_credentials -[kubectl-create-namespace]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_create/kubectl_create_namespace/ -[az-aks-enable-addons]: /cli/azure/aks#az_aks_enable_addons -[kubectl-get]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/ -[az-aks-show]: /cli/azure/aks#az_aks_show -[az-network-public-ip-create]: /cli/azure/network/public-ip#az_network_public_ip_create -[az-network-public-ip-show]: /cli/azure/network/public-ip#az_network_public_ip_show -[az-group-show]: /cli/azure/group#az_group_show -[helm-repo-add]: https://helm.sh/docs/helm/helm_repo_add/ -[helm-upgrade]: https://helm.sh/docs/helm/helm_upgrade/ -[kubectl-apply]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_apply/ -[deploy-postgresql]: ./deploy-postgresql-ha.md -[install-krew]: https://krew.sigs.k8s.io/ -[cnpg-plugin]: https://cloudnative-pg.io/documentation/current/kubectl-plugin/#using-krew diff --git a/scenarios/azure-docs/articles/aks/deploy-postgresql-ha.md b/scenarios/azure-docs/articles/aks/deploy-postgresql-ha.md deleted file mode 100644 index 
cc185894a..000000000 --- a/scenarios/azure-docs/articles/aks/deploy-postgresql-ha.md +++ /dev/null @@ -1,994 +0,0 @@ ---- -title: 'Deploy a highly available PostgreSQL database on AKS with Azure CLI' -description: In this article, you deploy a highly available PostgreSQL database on AKS using the CloudNativePG operator. -ms.topic: how-to -ms.date: 06/07/2024 -author: kenkilty -ms.author: kkilty -ms.custom: innovation-engine, aks-related-content ---- - -# Deploy a highly available PostgreSQL database on AKS - -In this article, you deploy a highly available PostgreSQL database on AKS. - -* If you haven't already created the required infrastructure for this deployment, follow the steps in [Create infrastructure for deploying a highly available PostgreSQL database on AKS][create-infrastructure] to get set up, and then you can return to this article. - -## Create secret for bootstrap app user - -1. Generate a secret to validate the PostgreSQL deployment by interactive login for a bootstrap app user using the [`kubectl create secret`][kubectl-create-secret] command. - - ```bash - PG_DATABASE_APPUSER_SECRET=$(echo -n | openssl rand -base64 16) - - kubectl create secret generic db-user-pass \ - --from-literal=username=app \ - --from-literal=password="${PG_DATABASE_APPUSER_SECRET}" \ - --namespace $PG_NAMESPACE \ - --context $AKS_PRIMARY_CLUSTER_NAME - ``` - -1. Validate that the secret was successfully created using the [`kubectl get`][kubectl-get] command. 
- - ```bash - kubectl get secret db-user-pass --namespace $PG_NAMESPACE --context $AKS_PRIMARY_CLUSTER_NAME - ``` - -## Set environment variables for the PostgreSQL cluster - -* Deploy a ConfigMap to set environment variables for the PostgreSQL cluster using the following [`kubectl apply`][kubectl-apply] command: - - ```bash - cat < 5432/TCP 3h57m - pg-primary-cnpg-sryti1qf-ro ClusterIP 10.0.237.19 5432/TCP 3h57m - pg-primary-cnpg-sryti1qf-rw ClusterIP 10.0.244.125 5432/TCP 3h57m - ``` - - > [!NOTE] - > There are three services: `namespace/cluster-name-ro` mapped to port 5433, `namespace/cluster-name-rw`, and `namespace/cluster-name-r` mapped to port 5433. It’s important to avoid using the same port as the read/write node of the PostgreSQL database cluster. If you want applications to access only the read-only replica of the PostgreSQL database cluster, direct them to port 5433. The final service is typically used for data backups but can also function as a read-only node. - -1. Get the service details using the [`kubectl get`][kubectl-get] command. - - ```bash - export PG_PRIMARY_CLUSTER_RW_SERVICE=$(kubectl get services \ - --namespace $PG_NAMESPACE \ - --context $AKS_PRIMARY_CLUSTER_NAME \ - -l "cnpg.io/cluster" \ - --output json | jq -r '.items[] | select(.metadata.name | endswith("-rw")) | .metadata.name') - - echo $PG_PRIMARY_CLUSTER_RW_SERVICE - - export PG_PRIMARY_CLUSTER_RO_SERVICE=$(kubectl get services \ - --namespace $PG_NAMESPACE \ - --context $AKS_PRIMARY_CLUSTER_NAME \ - -l "cnpg.io/cluster" \ - --output json | jq -r '.items[] | select(.metadata.name | endswith("-ro")) | .metadata.name') - - echo $PG_PRIMARY_CLUSTER_RO_SERVICE - ``` - -1. Configure the load balancer service with the following YAML files using the [`kubectl apply`][kubectl-apply] command. - - ```bash - cat < [!NOTE] -> You need the value of the app user password for PostgreSQL basic auth that was generated earlier and stored in the `$PG_DATABASE_APPUSER_SECRET` environment variable. 
- -* Validate the public PostgreSQL endpoints using the following `psql` commands: - - ```bash - echo "Public endpoint for PostgreSQL cluster: $AKS_PRIMARY_CLUSTER_ALB_DNSNAME" - - # Query the primary, pg_is_in_recovery = false - - psql -h $AKS_PRIMARY_CLUSTER_ALB_DNSNAME \ - -p 5432 -U app -d appdb -W -c "SELECT pg_is_in_recovery();" - ``` - - Example output - - ```output - pg_is_in_recovery - ------------------- - f - (1 row) - ``` - - ```bash - echo "Query a replica, pg_is_in_recovery = true" - - psql -h $AKS_PRIMARY_CLUSTER_ALB_DNSNAME \ - -p 5433 -U app -d appdb -W -c "SELECT pg_is_in_recovery();" - ``` - - Example output - - ```output - # Example output - - pg_is_in_recovery - ------------------- - t - (1 row) - ``` - - When successfully connected to the primary read-write endpoint, the PostgreSQL function returns `f` for *false*, indicating that the current connection is writable. - - When connected to a replica, the function returns `t` for *true*, indicating the database is in recovery and read-only. - -## Simulate an unplanned failover - -In this section, you trigger a sudden failure by deleting the pod running the primary, which simulates a sudden crash or loss of network connectivity to the node hosting the PostgreSQL primary. - -1. Check the status of the running pod instances using the following command: - - ```bash - kubectl cnpg status $PG_PRIMARY_CLUSTER_NAME --namespace $PG_NAMESPACE - ``` - - Example output - - ```output - Name Current LSN Rep role Status Node - --------------------------- ----------- -------- ------- ----------- - pg-primary-cnpg-sryti1qf-1 0/9000060 Primary OK aks-postgres-32388626-vmss000000 - pg-primary-cnpg-sryti1qf-2 0/9000060 Standby (sync) OK aks-postgres-32388626-vmss000001 - pg-primary-cnpg-sryti1qf-3 0/9000060 Standby (sync) OK aks-postgres-32388626-vmss000002 - ``` - -1. Delete the primary pod using the [`kubectl delete`][kubectl-delete] command. 
- - ```bash - PRIMARY_POD=$(kubectl get pod \ - --namespace $PG_NAMESPACE \ - --no-headers \ - -o custom-columns=":metadata.name" \ - -l role=primary) - - kubectl delete pod $PRIMARY_POD --grace-period=1 --namespace $PG_NAMESPACE - ``` - -1. Validate that the `pg-primary-cnpg-sryti1qf-2` pod instance is now the primary using the following command: - - ```bash - kubectl cnpg status $PG_PRIMARY_CLUSTER_NAME --namespace $PG_NAMESPACE - ``` - - Example output - - ```output - pg-primary-cnpg-sryti1qf-2 0/9000060 Primary OK aks-postgres-32388626-vmss000001 - pg-primary-cnpg-sryti1qf-1 0/9000060 Standby (sync) OK aks-postgres-32388626-vmss000000 - pg-primary-cnpg-sryti1qf-3 0/9000060 Standby (sync) OK aks-postgres-32388626-vmss000002 - ``` - -1. Reset the `pg-primary-cnpg-sryti1qf-1` pod instance as the primary using the following command: - - ```bash - kubectl cnpg promote $PG_PRIMARY_CLUSTER_NAME 1 --namespace $PG_NAMESPACE - ``` - -1. Validate that the pod instances have returned to their original state before the unplanned failover test using the following command: - - ```bash - kubectl cnpg status $PG_PRIMARY_CLUSTER_NAME --namespace $PG_NAMESPACE - ``` - - Example output - - ```output - Name Current LSN Rep role Status Node - --------------------------- ----------- -------- ------- ----------- - pg-primary-cnpg-sryti1qf-1 0/9000060 Primary OK aks-postgres-32388626-vmss000000 - pg-primary-cnpg-sryti1qf-2 0/9000060 Standby (sync) OK aks-postgres-32388626-vmss000001 - pg-primary-cnpg-sryti1qf-3 0/9000060 Standby (sync) OK aks-postgres-32388626-vmss000002 - ``` - -## Clean up resources - -* Once you're finished reviewing your deployment, delete all the resources you created in this guide using the [`az group delete`][az-group-delete] command. - - ```bash - az group delete --resource-group $RESOURCE_GROUP_NAME --no-wait --yes - ``` - -## Next steps - -In this how-to guide, you learned how to: - -* Use Azure CLI to create a multi-zone AKS cluster. 
-* Deploy a highly available PostgreSQL cluster and database using the CNPG operator. -* Set up monitoring for PostgreSQL using Prometheus and Grafana. -* Deploy a sample dataset to the PostgreSQL database. -* Perform PostgreSQL and AKS cluster upgrades. -* Simulate a cluster interruption and PostgreSQL replica failover. -* Perform a backup and restore of the PostgreSQL database. - -To learn more about how you can leverage AKS for your workloads, see [What is Azure Kubernetes Service (AKS)?][what-is-aks] - -## Contributors - -*This article is maintained by Microsoft. It was originally written by the following contributors*: - -* Ken Kilty | Principal TPM -* Russell de Pina | Principal TPM -* Adrian Joian | Senior Customer Engineer -* Jenny Hayes | Senior Content Developer -* Carol Smith | Senior Content Developer -* Erin Schaffer | Content Developer 2 -* Adam Sharif | Customer Engineer 2 - - -[helm-upgrade]: https://helm.sh/docs/helm/helm_upgrade/ -[create-infrastructure]: ./create-postgresql-ha.md -[kubectl-create-secret]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_create/kubectl_create_secret/ -[kubectl-get]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/ -[kubectl-apply]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_apply/ -[helm-repo-add]: https://helm.sh/docs/helm/helm_repo_add/ -[az-aks-show]: /cli/azure/aks#az_aks_show -[az-identity-federated-credential-create]: /cli/azure/identity/federated-credential#az_identity_federated_credential_create -[cluster-crd]: https://cloudnative-pg.io/documentation/1.23/cloudnative-pg.v1/#postgresql-cnpg-io-v1-ClusterSpec -[kubectl-describe]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_describe/ -[az-storage-blob-list]: /cli/azure/storage/blob/#az_storage_blob_list -[az-identity-federated-credential-delete]: /cli/azure/identity/federated-credential#az_identity_federated_credential_delete -[kubectl-delete]: 
https://kubernetes.io/docs/reference/kubectl/generated/kubectl_delete/ -[az-group-delete]: /cli/azure/group#az_group_delete -[what-is-aks]: ./what-is-aks.md diff --git a/scenarios/azure-docs/articles/aks/learn/aks-store-quickstart.yaml b/scenarios/azure-docs/articles/aks/learn/aks-store-quickstart.yaml deleted file mode 100644 index f9cf09662..000000000 --- a/scenarios/azure-docs/articles/aks/learn/aks-store-quickstart.yaml +++ /dev/null @@ -1,226 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: rabbitmq -spec: - replicas: 1 - selector: - matchLabels: - app: rabbitmq - template: - metadata: - labels: - app: rabbitmq - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: rabbitmq - image: mcr.microsoft.com/mirror/docker/library/rabbitmq:3.10-management-alpine - ports: - - containerPort: 5672 - name: rabbitmq-amqp - - containerPort: 15672 - name: rabbitmq-http - env: - - name: RABBITMQ_DEFAULT_USER - value: "username" - - name: RABBITMQ_DEFAULT_PASS - value: "password" - resources: - requests: - cpu: 10m - memory: 128Mi - limits: - cpu: 250m - memory: 256Mi - volumeMounts: - - name: rabbitmq-enabled-plugins - mountPath: /etc/rabbitmq/enabled_plugins - subPath: enabled_plugins - volumes: - - name: rabbitmq-enabled-plugins - configMap: - name: rabbitmq-enabled-plugins - items: - - key: rabbitmq_enabled_plugins - path: enabled_plugins ---- -apiVersion: v1 -data: - rabbitmq_enabled_plugins: | - [rabbitmq_management,rabbitmq_prometheus,rabbitmq_amqp1_0]. 
-kind: ConfigMap -metadata: - name: rabbitmq-enabled-plugins ---- -apiVersion: v1 -kind: Service -metadata: - name: rabbitmq -spec: - selector: - app: rabbitmq - ports: - - name: rabbitmq-amqp - port: 5672 - targetPort: 5672 - - name: rabbitmq-http - port: 15672 - targetPort: 15672 - type: ClusterIP ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: order-service -spec: - replicas: 1 - selector: - matchLabels: - app: order-service - template: - metadata: - labels: - app: order-service - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: order-service - image: ghcr.io/azure-samples/aks-store-demo/order-service:latest - ports: - - containerPort: 3000 - env: - - name: ORDER_QUEUE_HOSTNAME - value: "rabbitmq" - - name: ORDER_QUEUE_PORT - value: "5672" - - name: ORDER_QUEUE_USERNAME - value: "username" - - name: ORDER_QUEUE_PASSWORD - value: "password" - - name: ORDER_QUEUE_NAME - value: "orders" - - name: FASTIFY_ADDRESS - value: "0.0.0.0" - resources: - requests: - cpu: 1m - memory: 50Mi - limits: - cpu: 75m - memory: 128Mi - initContainers: - - name: wait-for-rabbitmq - image: busybox - command: ['sh', '-c', 'until nc -zv rabbitmq 5672; do echo waiting for rabbitmq; sleep 2; done;'] - resources: - requests: - cpu: 1m - memory: 50Mi - limits: - cpu: 75m - memory: 128Mi ---- -apiVersion: v1 -kind: Service -metadata: - name: order-service -spec: - type: ClusterIP - ports: - - name: http - port: 3000 - targetPort: 3000 - selector: - app: order-service ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: product-service -spec: - replicas: 1 - selector: - matchLabels: - app: product-service - template: - metadata: - labels: - app: product-service - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: product-service - image: ghcr.io/azure-samples/aks-store-demo/product-service:latest - ports: - - containerPort: 3002 - resources: - requests: - cpu: 1m - memory: 1Mi - limits: - cpu: 1m - memory: 7Mi ---- 
-apiVersion: v1 -kind: Service -metadata: - name: product-service -spec: - type: ClusterIP - ports: - - name: http - port: 3002 - targetPort: 3002 - selector: - app: product-service ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: store-front -spec: - replicas: 1 - selector: - matchLabels: - app: store-front - template: - metadata: - labels: - app: store-front - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: store-front - image: ghcr.io/azure-samples/aks-store-demo/store-front:latest - ports: - - containerPort: 8080 - name: store-front - env: - - name: VUE_APP_ORDER_SERVICE_URL - value: "http://order-service:3000/" - - name: VUE_APP_PRODUCT_SERVICE_URL - value: "http://product-service:3002/" - resources: - requests: - cpu: 1m - memory: 200Mi - limits: - cpu: 1000m - memory: 512Mi ---- -apiVersion: v1 -kind: Service -metadata: - name: store-front -spec: - ports: - - port: 80 - targetPort: 8080 - selector: - app: store-front - type: LoadBalancer \ No newline at end of file diff --git a/scenarios/azure-docs/articles/aks/learn/quick-kubernetes-deploy-cli.md b/scenarios/azure-docs/articles/aks/learn/quick-kubernetes-deploy-cli.md deleted file mode 100644 index d6448e30f..000000000 --- a/scenarios/azure-docs/articles/aks/learn/quick-kubernetes-deploy-cli.md +++ /dev/null @@ -1,447 +0,0 @@ ---- -title: 'Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster using Azure CLI' -description: Learn how to quickly deploy a Kubernetes cluster and deploy an application in Azure Kubernetes Service (AKS) using Azure CLI. -ms.topic: quickstart -ms.date: 04/09/2024 -author: tamram -ms.author: tamram -ms.custom: H1Hack27Feb2017, mvc, devcenter, devx-track-azurecli, mode-api, innovation-engine, linux-related-content -#Customer intent: As a developer or cluster operator, I want to deploy an AKS cluster and deploy an application so I can see how to run applications using the managed Kubernetes service in Azure. 
---- - -# Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster using Azure CLI - -[![Deploy to Azure](https://aka.ms/deploytoazurebutton)](https://go.microsoft.com/fwlink/?linkid=2286152) - -Azure Kubernetes Service (AKS) is a managed Kubernetes service that lets you quickly deploy and manage clusters. In this quickstart, you learn how to: - -- Deploy an AKS cluster using the Azure CLI. -- Run a sample multi-container application with a group of microservices and web front ends simulating a retail scenario. - -> [!NOTE] -> To get started with quickly provisioning an AKS cluster, this article includes steps to deploy a cluster with default settings for evaluation purposes only. Before deploying a production-ready cluster, we recommend that you familiarize yourself with our [baseline reference architecture][baseline-reference-architecture] to consider how it aligns with your business requirements. - -## Before you begin - -This quickstart assumes a basic understanding of Kubernetes concepts. For more information, see [Kubernetes core concepts for Azure Kubernetes Service (AKS)][kubernetes-concepts]. - -- [!INCLUDE [quickstarts-free-trial-note](~/reusable-content/ce-skilling/azure/includes/quickstarts-free-trial-note.md)] - -[!INCLUDE [azure-cli-prepare-your-environment-no-header.md](~/reusable-content/azure-cli/azure-cli-prepare-your-environment-no-header.md)] - -- This article requires version 2.0.64 or later of the Azure CLI. If you're using Azure Cloud Shell, the latest version is already installed there. -- Make sure that the identity you're using to create your cluster has the appropriate minimum permissions. For more details on access and identity for AKS, see [Access and identity options for Azure Kubernetes Service (AKS)](../concepts-identity.md). -- If you have multiple Azure subscriptions, select the appropriate subscription ID in which the resources should be billed using the [az account set](/cli/azure/account#az-account-set) command. 
For more information, see [How to manage Azure subscriptions – Azure CLI](/cli/azure/manage-azure-subscriptions-azure-cli?tabs=bash#change-the-active-subscription). - -## Define environment variables - -Define the following environment variables for use throughout this quickstart: - -```azurecli-interactive -export RANDOM_ID="$(openssl rand -hex 3)" -export MY_RESOURCE_GROUP_NAME="myAKSResourceGroup$RANDOM_ID" -export REGION="westeurope" -export MY_AKS_CLUSTER_NAME="myAKSCluster$RANDOM_ID" -export MY_DNS_LABEL="mydnslabel$RANDOM_ID" -``` - -## Create a resource group - -An [Azure resource group][azure-resource-group] is a logical group in which Azure resources are deployed and managed. When you create a resource group, you're prompted to specify a location. This location is the storage location of your resource group metadata and where your resources run in Azure if you don't specify another region during resource creation. - -Create a resource group using the [`az group create`][az-group-create] command. - -```azurecli-interactive -az group create --name $MY_RESOURCE_GROUP_NAME --location $REGION -``` - -Results: - -```JSON -{ - "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAKSResourceGroupxxxxxx", - "location": "eastus", - "managedBy": null, - "name": "testResourceGroup", - "properties": { - "provisioningState": "Succeeded" - }, - "tags": null, - "type": "Microsoft.Resources/resourceGroups" -} -``` - -## Create an AKS cluster - -Create an AKS cluster using the [`az aks create`][az-aks-create] command. The following example creates a cluster with one node and enables a system-assigned managed identity. - -```azurecli-interactive -az aks create \ - --resource-group $MY_RESOURCE_GROUP_NAME \ - --name $MY_AKS_CLUSTER_NAME \ - --node-count 1 \ - --generate-ssh-keys -``` - -> [!NOTE] -> When you create a new cluster, AKS automatically creates a second resource group to store the AKS resources. 
For more information, see [Why are two resource groups created with AKS?](../faq.md#why-are-two-resource-groups-created-with-aks) - -## Connect to the cluster - -To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. To install `kubectl` locally, use the [`az aks install-cli`][az-aks-install-cli] command. - -1. Configure `kubectl` to connect to your Kubernetes cluster using the [az aks get-credentials][az-aks-get-credentials] command. This command downloads credentials and configures the Kubernetes CLI to use them. - - ```azurecli-interactive - az aks get-credentials --resource-group $MY_RESOURCE_GROUP_NAME --name $MY_AKS_CLUSTER_NAME - ``` - -1. Verify the connection to your cluster using the [kubectl get][kubectl-get] command. This command returns a list of the cluster nodes. - - ```azurecli-interactive - kubectl get nodes - ``` - -## Deploy the application - -To deploy the application, you use a manifest file to create all the objects required to run the [AKS Store application](https://github.com/Azure-Samples/aks-store-demo). A [Kubernetes manifest file][kubernetes-deployment] defines a cluster's desired state, such as which container images to run. The manifest includes the following Kubernetes deployments and services: - -:::image type="content" source="media/quick-kubernetes-deploy-portal/aks-store-architecture.png" alt-text="Screenshot of Azure Store sample architecture." lightbox="media/quick-kubernetes-deploy-portal/aks-store-architecture.png"::: - -- **Store front**: Web application for customers to view products and place orders. -- **Product service**: Shows product information. -- **Order service**: Places orders. -- **Rabbit MQ**: Message queue for an order queue. - -> [!NOTE] -> We don't recommend running stateful containers, such as Rabbit MQ, without persistent storage for production. 
These are used here for simplicity, but we recommend using managed services, such as Azure CosmosDB or Azure Service Bus. - -1. Create a file named `aks-store-quickstart.yaml` and copy in the following manifest: - - ```yaml - apiVersion: apps/v1 - kind: Deployment - metadata: - name: rabbitmq - spec: - replicas: 1 - selector: - matchLabels: - app: rabbitmq - template: - metadata: - labels: - app: rabbitmq - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: rabbitmq - image: mcr.microsoft.com/mirror/docker/library/rabbitmq:3.10-management-alpine - ports: - - containerPort: 5672 - name: rabbitmq-amqp - - containerPort: 15672 - name: rabbitmq-http - env: - - name: RABBITMQ_DEFAULT_USER - value: "username" - - name: RABBITMQ_DEFAULT_PASS - value: "password" - resources: - requests: - cpu: 10m - memory: 128Mi - limits: - cpu: 250m - memory: 256Mi - volumeMounts: - - name: rabbitmq-enabled-plugins - mountPath: /etc/rabbitmq/enabled_plugins - subPath: enabled_plugins - volumes: - - name: rabbitmq-enabled-plugins - configMap: - name: rabbitmq-enabled-plugins - items: - - key: rabbitmq_enabled_plugins - path: enabled_plugins - --- - apiVersion: v1 - data: - rabbitmq_enabled_plugins: | - [rabbitmq_management,rabbitmq_prometheus,rabbitmq_amqp1_0]. 
- kind: ConfigMap - metadata: - name: rabbitmq-enabled-plugins - --- - apiVersion: v1 - kind: Service - metadata: - name: rabbitmq - spec: - selector: - app: rabbitmq - ports: - - name: rabbitmq-amqp - port: 5672 - targetPort: 5672 - - name: rabbitmq-http - port: 15672 - targetPort: 15672 - type: ClusterIP - --- - apiVersion: apps/v1 - kind: Deployment - metadata: - name: order-service - spec: - replicas: 1 - selector: - matchLabels: - app: order-service - template: - metadata: - labels: - app: order-service - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: order-service - image: ghcr.io/azure-samples/aks-store-demo/order-service:latest - ports: - - containerPort: 3000 - env: - - name: ORDER_QUEUE_HOSTNAME - value: "rabbitmq" - - name: ORDER_QUEUE_PORT - value: "5672" - - name: ORDER_QUEUE_USERNAME - value: "username" - - name: ORDER_QUEUE_PASSWORD - value: "password" - - name: ORDER_QUEUE_NAME - value: "orders" - - name: FASTIFY_ADDRESS - value: "0.0.0.0" - resources: - requests: - cpu: 1m - memory: 50Mi - limits: - cpu: 75m - memory: 128Mi - initContainers: - - name: wait-for-rabbitmq - image: busybox - command: ['sh', '-c', 'until nc -zv rabbitmq 5672; do echo waiting for rabbitmq; sleep 2; done;'] - resources: - requests: - cpu: 1m - memory: 50Mi - limits: - cpu: 75m - memory: 128Mi - --- - apiVersion: v1 - kind: Service - metadata: - name: order-service - spec: - type: ClusterIP - ports: - - name: http - port: 3000 - targetPort: 3000 - selector: - app: order-service - --- - apiVersion: apps/v1 - kind: Deployment - metadata: - name: product-service - spec: - replicas: 1 - selector: - matchLabels: - app: product-service - template: - metadata: - labels: - app: product-service - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: product-service - image: ghcr.io/azure-samples/aks-store-demo/product-service:latest - ports: - - containerPort: 3002 - resources: - requests: - cpu: 1m - memory: 1Mi - limits: - cpu: 1m - 
memory: 7Mi - --- - apiVersion: v1 - kind: Service - metadata: - name: product-service - spec: - type: ClusterIP - ports: - - name: http - port: 3002 - targetPort: 3002 - selector: - app: product-service - --- - apiVersion: apps/v1 - kind: Deployment - metadata: - name: store-front - spec: - replicas: 1 - selector: - matchLabels: - app: store-front - template: - metadata: - labels: - app: store-front - spec: - nodeSelector: - "kubernetes.io/os": linux - containers: - - name: store-front - image: ghcr.io/azure-samples/aks-store-demo/store-front:latest - ports: - - containerPort: 8080 - name: store-front - env: - - name: VUE_APP_ORDER_SERVICE_URL - value: "http://order-service:3000/" - - name: VUE_APP_PRODUCT_SERVICE_URL - value: "http://product-service:3002/" - resources: - requests: - cpu: 1m - memory: 200Mi - limits: - cpu: 1000m - memory: 512Mi - --- - apiVersion: v1 - kind: Service - metadata: - name: store-front - spec: - ports: - - port: 80 - targetPort: 8080 - selector: - app: store-front - type: LoadBalancer - ``` - - For a breakdown of YAML manifest files, see [Deployments and YAML manifests](../concepts-clusters-workloads.md#deployments-and-yaml-manifests). - - If you create and save the YAML file locally, then you can upload the manifest file to your default directory in CloudShell by selecting the **Upload/Download files** button and selecting the file from your local file system. - -1. Deploy the application using the [`kubectl apply`][kubectl-apply] command and specify the name of your YAML manifest. - - ```azurecli-interactive - kubectl apply -f aks-store-quickstart.yaml - ``` - -## Test the application - -You can validate that the application is running by visiting the public IP address or the application URL. 
- -Get the application URL using the following commands: - -```azurecli-interactive -runtime="5 minutes" -endtime=$(date -ud "$runtime" +%s) -while [[ $(date -u +%s) -le $endtime ]] -do - STATUS=$(kubectl get pods -l app=store-front -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') - echo $STATUS - if [ "$STATUS" == 'True' ] - then - export IP_ADDRESS=$(kubectl get service store-front --output 'jsonpath={..status.loadBalancer.ingress[0].ip}') - echo "Service IP Address: $IP_ADDRESS" - break - else - sleep 10 - fi -done -``` - -```azurecli-interactive -curl $IP_ADDRESS -``` - -Results: - -```HTML - - - - - - - - store-front - - - - - -
- - -``` - -```OUTPUT -echo "You can now visit your web server at $IP_ADDRESS" -``` - -:::image type="content" source="media/quick-kubernetes-deploy-cli/aks-store-application.png" alt-text="Screenshot of AKS Store sample application." lightbox="media/quick-kubernetes-deploy-cli/aks-store-application.png"::: - -## Delete the cluster - -If you don't plan on going through the [AKS tutorial][aks-tutorial], clean up unnecessary resources to avoid Azure charges. You can remove the resource group, container service, and all related resources using the [`az group delete`][az-group-delete] command. - -> [!NOTE] -> The AKS cluster was created with a system-assigned managed identity, which is the default identity option used in this quickstart. The platform manages this identity so you don't need to manually remove it. - -## Next steps - -In this quickstart, you deployed a Kubernetes cluster and then deployed a simple multi-container application to it. This sample application is for demo purposes only and doesn't represent all the best practices for Kubernetes applications. For guidance on creating full solutions with AKS for production, see [AKS solution guidance][aks-solution-guidance]. - -To learn more about AKS and walk through a complete code-to-deployment example, continue to the Kubernetes cluster tutorial. 
- -> [!div class="nextstepaction"] -> [AKS tutorial][aks-tutorial] - - -[kubectl]: https://kubernetes.io/docs/reference/kubectl/ -[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply -[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get - - -[kubernetes-concepts]: ../concepts-clusters-workloads.md -[aks-tutorial]: ../tutorial-kubernetes-prepare-app.md -[azure-resource-group]: ../../azure-resource-manager/management/overview.md -[az-aks-create]: /cli/azure/aks#az-aks-create -[az-aks-get-credentials]: /cli/azure/aks#az-aks-get-credentials -[az-aks-install-cli]: /cli/azure/aks#az-aks-install-cli -[az-group-create]: /cli/azure/group#az-group-create -[az-group-delete]: /cli/azure/group#az-group-delete -[kubernetes-deployment]: ../concepts-clusters-workloads.md#deployments-and-yaml-manifests -[aks-solution-guidance]: /azure/architecture/reference-architectures/containers/aks-start-here?toc=/azure/aks/toc.json&bc=/azure/aks/breadcrumb/toc.json -[baseline-reference-architecture]: /azure/architecture/reference-architectures/containers/aks/baseline-aks?toc=/azure/aks/toc.json&bc=/azure/aks/breadcrumb/toc.json diff --git a/scenarios/azure-docs/articles/aks/postgresql-ha-overview.md b/scenarios/azure-docs/articles/aks/postgresql-ha-overview.md deleted file mode 100644 index a8319765f..000000000 --- a/scenarios/azure-docs/articles/aks/postgresql-ha-overview.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: 'Overview of deploying a highly available PostgreSQL database on AKS with Azure CLI' -description: Learn how to deploy a highly available PostgreSQL database on AKS using the CloudNativePG operator with Azure CLI. 
-ms.topic: overview -ms.date: 06/07/2024 -author: kenkilty -ms.author: kkilty -ms.custom: innovation-engine, aks-related-content -#Customer intent: As a developer or cluster operator, I want to deploy a highly available PostgreSQL database on AKS so I can see how to run a stateful database workload using the managed Kubernetes service in Azure. ---- -# Deploy a highly available PostgreSQL database on AKS with Azure CLI - -In this guide, you deploy a highly available PostgreSQL cluster that spans multiple Azure availability zones on AKS with Azure CLI. - -This article walks through the prerequisites for setting up a PostgreSQL cluster on [Azure Kubernetes Service (AKS)][what-is-aks] and provides an overview of the full deployment process and architecture. - -## Prerequisites - -* This guide assumes a basic understanding of [core Kubernetes concepts][core-kubernetes-concepts] and [PostgreSQL][postgresql]. -* You need the **Owner** or **User Access Administrator** and the **Contributor** [Azure built-in roles][azure-roles] on a subscription in your Azure account. - -[!INCLUDE [azure-cli-prepare-your-environment-no-header.md](~/reusable-content/azure-cli/azure-cli-prepare-your-environment-no-header.md)] - -* You also need the following resources installed: - - * [Azure CLI](/cli/azure/install-azure-cli) version 2.56 or later. - * [Azure Kubernetes Service (AKS) preview extension][aks-preview]. - * [jq][jq], version 1.5 or later. - * [kubectl][install-kubectl] version 1.21.0 or later. - * [Helm][install-helm] version 3.0.0 or later. - * [openssl][install-openssl] version 3.3.0 or later. - * [Visual Studio Code][install-vscode] or equivalent. - * [Krew][install-krew] version 0.4.4 or later. - * [kubectl CloudNativePG (CNPG) Plugin][cnpg-plugin]. - -## Deployment process - -In this guide, you learn how to: - -* Use Azure CLI to create a multi-zone AKS cluster. -* Deploy a highly available PostgreSQL cluster and database using the [CNPG operator][cnpg-plugin]. 
-* Set up monitoring for PostgreSQL using Prometheus and Grafana. -* Deploy a sample dataset to a PostgreSQL database. -* Perform PostgreSQL and AKS cluster upgrades. -* Simulate a cluster interruption and PostgreSQL replica failover. -* Perform backup and restore of a PostgreSQL database. - -## Deployment architecture - -This diagram illustrates a PostgreSQL cluster setup with one primary replica and two read replicas managed by the [CloudNativePG (CNPG)](https://cloudnative-pg.io/) operator. The architecture provides a highly available PostgreSQL running on an AKS cluster that can withstand a zone outage by failing over across replicas. - -Backups are stored on [Azure Blob Storage](/azure/storage/blobs/), providing another way to restore the database in the event of an issue with streaming replication from the primary replica. - -:::image source="./media/postgresql-ha-overview/architecture-diagram.png" alt-text="Diagram of CNPG architecture." lightbox="./media/postgresql-ha-overview/architecture-diagram.png"::: - -> [!NOTE] -> The CNPG operator supports only *one database per cluster*. Plan accordingly for applications that require data separation at the database level. - -## Next steps - -> [!div class="nextstepaction"] -> [Create the infrastructure to deploy a highly available PostgreSQL database on AKS using the CNPG operator][create-infrastructure] - -## Contributors - -*This article is maintained by Microsoft. 
It was originally written by the following contributors*: - -* Ken Kilty | Principal TPM -* Russell de Pina | Principal TPM -* Adrian Joian | Senior Customer Engineer -* Jenny Hayes | Senior Content Developer -* Carol Smith | Senior Content Developer -* Erin Schaffer | Content Developer 2 -* Adam Sharif | Customer Engineer 2 - - -[what-is-aks]: ./what-is-aks.md -[postgresql]: https://www.postgresql.org/ -[core-kubernetes-concepts]: ./concepts-clusters-workloads.md -[azure-roles]: ../role-based-access-control/built-in-roles.md -[aks-preview]: ./draft.md#install-the-aks-preview-azure-cli-extension -[jq]: https://jqlang.github.io/jq/ -[install-kubectl]: https://kubernetes.io/docs/tasks/tools/install-kubectl/ -[install-helm]: https://helm.sh/docs/intro/install/ -[install-openssl]: https://www.openssl.org/ -[install-vscode]: https://code.visualstudio.com/Download -[install-krew]: https://krew.sigs.k8s.io/ -[cnpg-plugin]: https://cloudnative-pg.io/documentation/current/kubectl-plugin/#using-krew -[create-infrastructure]: ./create-postgresql-ha.md diff --git a/scenarios/azure-docs/articles/static-web-apps/get-started-cli.md b/scenarios/azure-docs/articles/static-web-apps/get-started-cli.md index f21f4b98a..5cff0ead4 100644 --- a/scenarios/azure-docs/articles/static-web-apps/get-started-cli.md +++ b/scenarios/azure-docs/articles/static-web-apps/get-started-cli.md @@ -27,17 +27,6 @@ In this quickstart, you deploy a web application to Azure Static Web apps using - [Azure CLI](/cli/azure/install-azure-cli) installed (version 2.29.0 or higher). - [A Git setup](https://www.git-scm.com/downloads). -## Define environment variables - -The first step in this quickstart is to define environment variables. 
- -```bash -export RANDOM_ID="$(openssl rand -hex 3)" -export MY_RESOURCE_GROUP_NAME="myStaticWebAppResourceGroup$RANDOM_ID" -export REGION=EastUS2 -export MY_STATIC_WEB_APP_NAME="myStaticWebApp$RANDOM_ID" -``` - ## Create a repository (optional) (Optional) This article uses a GitHub template repository as another way to make it easy for you to get started. The template features a starter app to deploy to Azure Static Web Apps. @@ -57,6 +46,9 @@ Deploy the app as a static web app from the Azure CLI. 1. Create a resource group. ```bash +export RANDOM_ID="$(openssl rand -hex 3)" +export MY_RESOURCE_GROUP_NAME="myStaticWebAppResourceGroup$RANDOM_ID" +export REGION=EastUS2 az group create \ --name $MY_RESOURCE_GROUP_NAME \ --location $REGION @@ -81,6 +73,7 @@ Results: 2. Deploy a new static web app from your repository. ```bash +export MY_STATIC_WEB_APP_NAME="myStaticWebApp$RANDOM_ID" az staticwebapp create \ --name $MY_STATIC_WEB_APP_NAME \ --resource-group $MY_RESOURCE_GROUP_NAME \ diff --git a/scenarios/azure-docs/articles/virtual-machines/linux/attach-disk-portal.yml b/scenarios/azure-docs/articles/virtual-machines/linux/attach-disk-portal.yml deleted file mode 100644 index babdb3954..000000000 --- a/scenarios/azure-docs/articles/virtual-machines/linux/attach-disk-portal.yml +++ /dev/null @@ -1,259 +0,0 @@ -### YamlMime:HowTo - -metadata: - title: Attach a data disk to a Linux VM - description: Use the portal to attach new or existing data disk to a Linux VM. - author: roygara - ms.author: rogarana - ms.date: 03/19/2024 - ms.service: azure-disk-storage - ms.topic: how-to - ms.collection: linux - ms.custom: - - linux-related-content - - ge-structured-content-pilot - -title: | - Use the portal to attach a data disk to a Linux VM -introduction: | - **Applies to:** :heavy_check_mark: Linux VMs :heavy_check_mark: Flexible scale sets - - This article shows you how to attach both new and existing disks to a Linux virtual machine through the Azure portal. 
You can also [attach a data disk to a Windows VM in the Azure portal](../windows/attach-managed-disk-portal.yml). - -prerequisites: - summary: | - Before you attach disks to your VM, review these tips: - dependencies: - - The size of the virtual machine controls how many data disks you can attach. For details, see [Sizes for virtual machines](../sizes.md). - -procedureSection: - - title: | - Find the virtual machine - summary: | - Follow these steps: - steps: - - | - Go to the [Azure portal](https://portal.azure.com/) to find the VM. Search for and select **Virtual machines**. - - | - Select the VM you'd like to attach the disk to from the list. - - | - In the **Virtual machines** page, under **Settings**, select **Disks**. - - - title: | - Attach a new disk - summary: | - Follow these steps: - steps: - - | - On the **Disks** pane, under **Data disks**, select **Create and attach a new disk**. - - | - Enter a name for your managed disk. Review the default settings, and update the **Storage type**, **Size (GiB)**, **Encryption** and **Host caching** as necessary. - - :::image type="content" source="./media/attach-disk-portal/create-new-md.png" alt-text="Screenshot of review disk settings." lightbox="./media/attach-disk-portal/create-new-md.png"::: - - - | - When you're done, select **Save** at the top of the page to create the managed disk and update the VM configuration. - - - title: | - Attach an existing disk - summary: | - Follow these steps: - steps: - - | - On the **Disks** pane, under **Data disks**, select **Attach existing disks**. - - | - Select the drop-down menu for **Disk name** and select a disk from the list of available managed disks. - - | - Select **Save** to attach the existing managed disk and update the VM configuration: - - - title: | - Connect to the Linux VM to mount the new disk - summary: | - To partition, format, and mount your new disk so your Linux VM can use it, SSH into your VM. 
For more information, see [How to use SSH with Linux on Azure](mac-create-ssh-keys.md). The following example connects to a VM with the public IP address of *10.123.123.25* with the username *azureuser*: - code: | - ```bash - ssh azureuser@10.123.123.25 - ``` - - - title: | - Find the disk - summary: | - Once connected to your VM, you need to find the disk. In this example, we're using `lsblk` to list the disks. - code: | - ```bash - lsblk -o NAME,HCTL,SIZE,MOUNTPOINT | grep -i "sd" - ``` - - The output is similar to the following example: - - ```output - sda 0:0:0:0 30G - ├─sda1 29.9G / - ├─sda14 4M - └─sda15 106M /boot/efi - sdb 1:0:1:0 14G - └─sdb1 14G /mnt - sdc 3:0:0:0 4G - ``` - - In this example, the disk that was added was `sdc`. It's a LUN 0 and is 4GB. - - For a more complex example, here's what multiple data disks look like in the portal: - - :::image type="content" source="./media/attach-disk-portal/find-disk.png" alt-text="Screenshot of multiple disks shown in the portal."::: - - In the image, you can see that there are 3 data disks: 4 GB on LUN 0, 16GB at LUN 1, and 32G at LUN 2. - - Here's what that might look like using `lsblk`: - - ```output - sda 0:0:0:0 30G - ├─sda1 29.9G / - ├─sda14 4M - └─sda15 106M /boot/efi - sdb 1:0:1:0 14G - └─sdb1 14G /mnt - sdc 3:0:0:0 4G - sdd 3:0:0:1 16G - sde 3:0:0:2 32G - ``` - - From the output of `lsblk` you can see that the 4GB disk at LUN 0 is `sdc`, the 16GB disk at LUN 1 is `sdd`, and the 32G disk at LUN 2 is `sde`. - - ### Prepare a new empty disk - - > [!IMPORTANT] - > If you are using an existing disk that contains data, skip to [mounting the disk](#mount-the-disk). - > The following instructions will delete data on the disk. - - If you're attaching a new disk, you need to partition the disk. - - The `parted` utility can be used to partition and to format a data disk. - - Use the latest version `parted` that is available for your distro. 
- - If the disk size is 2 tebibytes (TiB) or larger, you must use GPT partitioning. If disk size is under 2 TiB, then you can use either MBR or GPT partitioning. - - - The following example uses `parted` on `/dev/sdc`, which is where the first data disk will typically be on most VMs. Replace `sdc` with the correct option for your disk. We're also formatting it using the [XFS](https://xfs.wiki.kernel.org/) filesystem. - - ```bash - sudo parted /dev/sdc --script mklabel gpt mkpart xfspart xfs 0% 100% - sudo mkfs.xfs /dev/sdc1 - sudo partprobe /dev/sdc1 - ``` - - Use the [`partprobe`](https://linux.die.net/man/8/partprobe) utility to make sure the kernel is aware of the new partition and filesystem. Failure to use `partprobe` can cause the blkid or lslbk commands to not return the UUID for the new filesystem immediately. - - ### Mount the disk - - Create a directory to mount the file system using `mkdir`. The following example creates a directory at `/datadrive`: - - ```bash - sudo mkdir /datadrive - ``` - - Use `mount` to then mount the filesystem. The following example mounts the */dev/sdc1* partition to the `/datadrive` mount point: - - ```bash - sudo mount /dev/sdc1 /datadrive - ``` - To ensure that the drive is remounted automatically after a reboot, it must be added to the */etc/fstab* file. It's also highly recommended that the UUID (Universally Unique Identifier) is used in */etc/fstab* to refer to the drive rather than just the device name (such as, */dev/sdc1*). If the OS detects a disk error during boot, using the UUID avoids the incorrect disk being mounted to a given location. Remaining data disks would then be assigned those same device IDs. 
To find the UUID of the new drive, use the `blkid` utility: - - ```bash - sudo blkid - ``` - - The output looks similar to the following example: - - ```output - /dev/sda1: LABEL="cloudimg-rootfs" UUID="11111111-1b1b-1c1c-1d1d-1e1e1e1e1e1e" TYPE="ext4" PARTUUID="1a1b1c1d-11aa-1234-1a1a1a1a1a1a" - /dev/sda15: LABEL="UEFI" UUID="BCD7-96A6" TYPE="vfat" PARTUUID="1e1g1cg1h-11aa-1234-1u1u1a1a1u1u" - /dev/sdb1: UUID="22222222-2b2b-2c2c-2d2d-2e2e2e2e2e2e" TYPE="ext4" TYPE="ext4" PARTUUID="1a2b3c4d-01" - /dev/sda14: PARTUUID="2e2g2cg2h-11aa-1234-1u1u1a1a1u1u" - /dev/sdc1: UUID="33333333-3b3b-3c3c-3d3d-3e3e3e3e3e3e" TYPE="xfs" PARTLABEL="xfspart" PARTUUID="c1c2c3c4-1234-cdef-asdf3456ghjk" - ``` - - > [!NOTE] - > Improperly editing the **/etc/fstab** file could result in an unbootable system. If unsure, refer to the distribution's documentation for information on how to properly edit this file. You should create a backup of the **/etc/fstab** file is created before editing. - - Next, open the **/etc/fstab** file in a text editor. Add a line to the end of the file, using the UUID value for the `/dev/sdc1` device that was created in the previous steps, and the mountpoint of `/datadrive`. Using the example from this article, the new line would look like the following: - - ```config - UUID=33333333-3b3b-3c3c-3d3d-3e3e3e3e3e3e /datadrive xfs defaults,nofail 1 2 - ``` - - When you're done editing the file, save and close the editor. - - > [!NOTE] - > Later removing a data disk without editing fstab could cause the VM to fail to boot. Most distributions provide either the *nofail* and/or *nobootwait* fstab options. These options allow a system to boot even if the disk fails to mount at boot time. Consult your distribution's documentation for more information on these parameters. - > - > The *nofail* option ensures that the VM starts even if the filesystem is corrupt or the disk does not exist at boot time. 
Without this option, you may encounter behavior as described in [Cannot SSH to Linux VM due to FSTAB errors](/archive/blogs/linuxonazure/cannot-ssh-to-linux-vm-after-adding-data-disk-to-etcfstab-and-rebooting) - - - - title: | - Verify the disk - summary: | - You can now use `lsblk` again to see the disk and the mountpoint. - - ```bash - lsblk -o NAME,HCTL,SIZE,MOUNTPOINT | grep -i "sd" - ``` - - The output will look something like this: - - ```output - sda 0:0:0:0 30G - ├─sda1 29.9G / - ├─sda14 4M - └─sda15 106M /boot/efi - sdb 1:0:1:0 14G - └─sdb1 14G /mnt - sdc 3:0:0:0 4G - └─sdc1 4G /datadrive - ``` - - You can see that `sdc` is now mounted at `/datadrive`. - - ### TRIM/UNMAP support for Linux in Azure - - Some Linux kernels support TRIM/UNMAP operations to discard unused blocks on the disk. This feature is primarily useful to inform Azure that deleted pages are no longer valid and can be discarded. This feature can save money on disks that are billed based on the amount of consumed storage, such as unmanaged standard disks and disk snapshots. - - There are two ways to enable TRIM support in your Linux VM. As usual, consult your distribution for the recommended approach: - steps: - - | - Use the `discard` mount option in */etc/fstab*, for example: - - ```config - UUID=33333333-3b3b-3c3c-3d3d-3e3e3e3e3e3e /datadrive xfs defaults,discard 1 2 - ``` - - | - In some cases, the `discard` option may have performance implications. 
Alternatively, you can run the `fstrim` command manually from the command line, or add it to your crontab to run regularly: - - **Ubuntu** - - ```bash - sudo apt-get install util-linux - sudo fstrim /datadrive - ``` - - **RHEL** - - ```bash - sudo yum install util-linux - sudo fstrim /datadrive - ``` - - **SUSE** - - ```bash - sudo zypper install util-linux - sudo fstrim /datadrive - ``` - -relatedContent: - - text: Troubleshoot Linux VM device name changes - url: /troubleshoot/azure/virtual-machines/troubleshoot-device-names-problems - - text: Attach a data disk using the Azure CLI - url: add-disk.md -#For more information, and to help troubleshoot disk issues, see [Troubleshoot Linux VM device name changes](/troubleshoot/azure/virtual-machines/troubleshoot-device-names-problems). - -#You can also [attach a data disk](add-disk.md) using the Azure CLI. diff --git a/scenarios/azure-docs/articles/virtual-machines/linux/disk-encryption-faq.yml b/scenarios/azure-docs/articles/virtual-machines/linux/disk-encryption-faq.yml deleted file mode 100644 index f77fa18bd..000000000 --- a/scenarios/azure-docs/articles/virtual-machines/linux/disk-encryption-faq.yml +++ /dev/null @@ -1,200 +0,0 @@ -### YamlMime:FAQ -metadata: - title: FAQ - Azure Disk Encryption for Linux VMs - description: This article provides answers to frequently asked questions about Microsoft Azure Disk Encryption for Linux IaaS VMs. - author: msmbaldwin - ms.service: azure-virtual-machines - ms.collection: linux - ms.subservice: security - ms.topic: faq - ms.author: mbaldwin - ms.date: 08/06/2024 -title: Azure Disk Encryption for Linux virtual machines FAQ -summary: | - This article provides answers to frequently asked questions (FAQ) about Azure Disk Encryption for Linux virtual machines (VMs). For more information about this service, see [Azure Disk Encryption overview](disk-encryption-overview.md). 
- - -sections: - - name: Ignored - questions: - - question: | - What is Azure Disk Encryption for Linux virtual machines? - answer: | - Azure Disk Encryption for Linux virtual machines uses the dm-crypt feature of Linux to provide full disk encryption of the OS disk* and data disks. Additionally, it provides encryption of the temporary disk when using the [EncryptFormatAll feature](disk-encryption-linux.md#use-encryptformatall-feature-for-data-disks-on-linux-vms). The content flows encrypted from the VM to the Storage backend with a customer-managed key. - - See [Supported virtual machines and operating systems](disk-encryption-overview.md#supported-vms-and-operating-systems). - - - question: | - Where is Azure Disk Encryption in general availability (GA)? - answer: | - Azure Disk Encryption for Linux virtual machines is in general availability in all Azure public regions. - - - question: | - What user experiences are available with Azure Disk Encryption? - answer: | - Azure Disk Encryption GA supports Azure Resource Manager templates, Azure PowerShell, and Azure CLI. The different user experiences give you flexibility. You have three different options for enabling disk encryption for your virtual machines. For more information on the user experience and step-by-step guidance available in Azure Disk Encryption, see [Azure Disk Encryption scenarios for Linux](disk-encryption-linux.md). - - - question: | - How much does Azure Disk Encryption cost? - answer: | - There's no charge for encrypting VM disks with Azure Disk Encryption, but there are charges associated with the use of Azure Key Vault. For more information on Azure Key Vault costs, see the [Key Vault pricing](https://azure.microsoft.com/pricing/details/key-vault/) page. - - - question: | - How can I start using Azure Disk Encryption? - answer: | - To get started, read the [Azure Disk Encryption overview](disk-encryption-overview.md). 
- - - question: | - What VM sizes and operating systems support Azure Disk Encryption? - answer: | - The [Azure Disk Encryption overview](disk-encryption-overview.md) article lists the [VM sizes](disk-encryption-overview.md#supported-vms) and [VM operating systems](disk-encryption-overview.md#supported-operating-systems) that support Azure Disk Encryption. - - - question: | - Can I encrypt both boot and data volumes with Azure Disk Encryption? - answer: | - Yes, you can encrypt both boot and data volumes, or you can encrypt the data volume without having to encrypt the OS volume first. - - After you've encrypted the OS volume, disabling encryption on the OS volume isn't supported. For Linux virtual machines in a scale set, only the data volume can be encrypted. - - - question: | - Can I encrypt an unmounted volume with Azure Disk Encryption? - answer: | - No, Azure Disk Encryption only encrypts mounted volumes. - - - question: | - What is Storage server-side encryption? - answer: | - Storage server-side encryption encrypts Azure managed disks in Azure Storage. Managed disks are encrypted by default with Server-side encryption with a platform-managed key (as of June 10, 2017). You can manage encryption of managed disks with your own keys by specifying a customer-managed key. For more information see: [Server-side encryption of Azure managed disks](../disk-encryption.md). - - - question: | - How is Azure Disk Encryption different from other disk encryption solutions and when should I use each solution? - answer: | - See [Overview of managed disk encryption options](../disk-encryption-overview.md). - - - question: | - How do I rotate secrets or encryption keys? - answer: | - To rotate secrets, just call the same command you used originally to enable disk encryption, specifying a different Key Vault. To rotate the key encryption key, call the same command you used originally to enable disk encryption, specifying the new key encryption. 
- - >[!WARNING] - > - If you previously used [Azure Disk Encryption with Microsoft Entra app](disk-encryption-linux-aad.md) by specifying Microsoft Entra credentials to encrypt this VM, you must continue to use this option to encrypt your VM. You can't use Azure Disk Encryption on this encrypted VM as this isn't a supported scenario, meaning switching away from Microsoft Entra application for this encrypted VM isn't supported yet. - - - question: | - How do I add or remove a key encryption key if I didn't originally use one? - answer: | - To add a key encryption key, call the enable command again passing the key encryption key parameter. To remove a key encryption key, call the enable command again without the key encryption key parameter. - - - question: | - Does Azure Disk Encryption allow you to bring your own key (BYOK)? - answer: | - Yes, you can supply your own key encryption keys. These keys are safeguarded in Azure Key Vault, which is the key store for Azure Disk Encryption. For more information on the key encryption keys support scenarios, see [Creating and configuring a key vault for Azure Disk Encryption](disk-encryption-key-vault.md). - - - question: | - Can I use an Azure-created key encryption key? - answer: | - Yes, you can use Azure Key Vault to generate a key encryption key for Azure disk encryption use. These keys are safeguarded in Azure Key Vault, which is the key store for Azure Disk Encryption. For more information on the key encryption key, see [Creating and configuring a key vault for Azure Disk Encryption](disk-encryption-key-vault.md). - - - question: | - Can I use an on-premises key management service or HSM to safeguard the encryption keys? - answer: | - You can't use the on-premises key management service or HSM to safeguard the encryption keys with Azure Disk Encryption. You can only use the Azure Key Vault service to safeguard the encryption keys. 
For more information on the key encryption key support scenarios, see [Creating and configuring a key vault for Azure Disk Encryption](disk-encryption-key-vault.md). - - - question: | - What are the prerequisites to configure Azure Disk Encryption? - answer: | - There are prerequisites for Azure Disk Encryption. See the [Creating and configuring a key vault for Azure Disk Encryption](disk-encryption-key-vault.md) article to create a new key vault, or set up an existing key vault for disk encryption access to enable encryption, and safeguard secrets and keys. For more information on the key encryption key support scenarios, see [Creating and configuring a key vault for Azure Disk Encryption](disk-encryption-key-vault.md). - - - question: | - What are the prerequisites to configure Azure Disk Encryption with a Microsoft Entra app (previous release)? - answer: | - There are prerequisites for Azure Disk Encryption. See the [Azure Disk Encryption with Microsoft Entra ID](disk-encryption-linux-aad.md) content to create an Microsoft Entra application, create a new key vault, or set up an existing key vault for disk encryption access to enable encryption, and safeguard secrets and keys. For more information on the key encryption key support scenarios, see [Creating and configuring a key vault for Azure Disk Encryption with Microsoft Entra ID](disk-encryption-key-vault-aad.md). - - - question: | - Is Azure Disk Encryption using a Microsoft Entra app (previous release) still supported? - answer: | - Yes. Disk encryption using a Microsoft Entra app is still supported. However, when encrypting new virtual machines it's recommended that you use the new method rather than encrypting with a Microsoft Entra app. - - - question: | - Can I migrate virtual machines that were encrypted with a Microsoft Entra app to encryption without a Microsoft Entra app? 
- answer: Currently, there isn't a direct migration path for machines that were encrypted with a Microsoft Entra app to encryption without a Microsoft Entra app. Additionally, there isn't a direct path from encryption without a Microsoft Entra app to encryption with an AD app. - - - question: | - What version of Azure PowerShell does Azure Disk Encryption support? - answer: | - Use the latest version of the Azure PowerShell SDK to configure Azure Disk Encryption. Download the latest version of [Azure PowerShell](https://github.com/Azure/azure-powershell/releases). Azure Disk Encryption is *not* supported by Azure SDK version 1.1.0. - - > [!NOTE] - > The Linux Azure disk encryption preview extension "Microsoft.OSTCExtension.AzureDiskEncryptionForLinux" is deprecated. This extension was published for Azure disk encryption preview release. You should not use the preview version of the extension in your testing or production deployment. - - > For deployment scenarios like Azure Resource Manager (ARM), where you have a need to deploy Azure disk encryption extension for Linux VM to enable encryption on your Linux IaaS VM, you must use the Azure disk encryption production supported extension "Microsoft.Azure.Security.AzureDiskEncryptionForLinux". - - - question: | - Can I apply Azure Disk Encryption on my custom Linux image? - answer: | - You can't apply Azure Disk Encryption on your custom Linux image. Only the gallery Linux images for the supported distributions called out previously are supported. Custom Linux images aren't currently supported. - - - question: | - Can I apply updates to a Linux Red Hat VM that uses the yum update? - answer: | - Yes, you can perform a yum update on a Red Hat Linux VM. For more information, see [Azure Disk Encryption on an isolated network](disk-encryption-isolated-network.md). - - - question: | - What is the recommended Azure disk encryption workflow for Linux? 
- answer: | - The following workflow is recommended to have the best results on Linux: - * Start from the unmodified stock gallery image corresponding to the needed OS distro and version - * Back up any mounted drives you want encrypted. This back up allows for recovery if there's a failure, for example if the VM is rebooted before encryption has completed. - * Encrypt (can take several hours or even days depending on VM characteristics and size of any attached data disks) - * Customize, and add software to the image as needed. - - If this workflow isn't possible, relying on [Storage Service Encryption (SSE)](../../storage/common/storage-service-encryption.md) at the platform storage account layer may be an alternative to full disk encryption using dm-crypt. - - - question: | - What is the disk "Bek Volume" or "/mnt/azure_bek_disk"? - answer: | - The "Bek volume" is a local data volume that securely stores the encryption keys for Encrypted Azure virtual machines. - > [!NOTE] - > Do not delete or edit any contents in this disk. Do not unmount the disk since the encryption key presence is needed for any encryption operations on the IaaS VM. - - - - question: | - What encryption method does Azure Disk Encryption use? - answer: | - Azure Disk Encryption uses the decrypt default of aes-xts-plain64 with a 256-bit volume master key. - - - question: | - If I use EncryptFormatAll and specify all volume types, will it erase the data on the data drives that we already encrypted? - answer: | - No, data won't be erased from data drives that are already encrypted using Azure Disk Encryption. Similar to how EncryptFormatAll didn't re-encrypt the OS drive, it won't re-encrypt the already encrypted data drive. For more information, see the [EncryptFormatAll criteria](disk-encryption-linux.md#use-encryptformatall-feature-for-data-disks-on-linux-vms). - - - question: | - Is XFS filesystem supported? - answer: | - Encryption of XFS OS disks is supported. 
- - Encryption of XFS data disks is supported only when the EncryptFormatAll parameter is used. This option reformats the volume, erasing any data previously there. For more information, see the [EncryptFormatAll criteria](disk-encryption-linux.md#use-encryptformatall-feature-for-data-disks-on-linux-vms). - - - question: | - Is resizing the OS partition supported? - answer: | - Resize of an Azure Disk Encryption encrypted OS disk isn't supported. - - - question: | - Can I backup and restore an encrypted VM? - answer: | - Azure Backup provides a mechanism to backup and restore encrypted VM's within the same subscription and region. For instructions, please see [Back up and restore encrypted virtual machines with Azure Backup](../../backup/backup-azure-vms-encryption.md). Restoring an encrypted VM to a different region is not currently supported. - - - question: | - Where can I go to ask questions or provide feedback? - answer: | - You can ask questions or provide feedback on the [Microsoft Q&A question page for Azure Disk Encryption](/answers/topics/azure-disk-encryption.html). - -additionalContent: | - - ## Next steps - - In this document, you learned more about the most frequent questions related to Azure Disk Encryption. 
For more information about this service, see the following articles: - - - [Azure Disk Encryption Overview](disk-encryption-overview.md) - - [Apply disk encryption in Azure Security Center](../../security-center/asset-inventory.md) - - [Azure data encryption at rest](../../security/fundamentals/encryption-atrest.md) diff --git a/scenarios/azure-docs/articles/virtual-machines/linux/faq.yml b/scenarios/azure-docs/articles/virtual-machines/linux/faq.yml deleted file mode 100644 index 5700bcc9c..000000000 --- a/scenarios/azure-docs/articles/virtual-machines/linux/faq.yml +++ /dev/null @@ -1,141 +0,0 @@ -### YamlMime:FAQ -metadata: - title: Frequently asked questions for Linux VMs in Azure - description: Provides answers to some of the common questions about Linux virtual machines created with the Resource Manager model. - author: ju-shim - ms.service: azure-virtual-machines - ms.collection: linux - ms.topic: faq - ms.date: 03/06/2024 - ms.author: jushiman -title: Frequently asked question about Linux Virtual Machines -summary: | - This article addresses some common questions about Linux virtual machines created in Azure using the Resource Manager deployment model. For the Windows version of this topic, see [Frequently asked question about Windows Virtual Machines](../windows/faq.yml) - - -sections: - - name: Ignored - questions: - - question: | - What can I run on an Azure VM? - answer: | - All subscribers can run server software on an Azure virtual machine. For more information, see [Linux on Azure-Endorsed Distributions](endorsed-distros.md) - - - question: | - How much storage can I use with a virtual machine? - answer: | - Each data disk can be up to 32,767 GiB. The number of data disks you can use depends on the size of the virtual machine. For details, see [Sizes for Virtual Machines](../sizes.md). - - Azure Managed Disks are the recommended disk storage offerings for use with Azure Virtual Machines for persistent storage of data. 
You can use multiple Managed Disks with each Virtual Machine. Managed Disks offer two types of durable storage options: Premium and Standard Managed Disks. For pricing information, see [Managed Disks Pricing](https://azure.microsoft.com/pricing/details/managed-disks). - - Azure storage accounts can also provide storage for the operating system disk and any data disks. Each disk is a .vhd file stored as a page blob. For pricing details, see [Storage Pricing Details](https://azure.microsoft.com/pricing/details/storage/). - - - question: | - How can I access my virtual machine? - answer: | - Establish a remote connection to sign on to the virtual machine, using Secure Shell (SSH). See the instructions on how to connect [from Windows](ssh-from-windows.md) or - [from Linux and Mac](mac-create-ssh-keys.md). By default, SSH allows a maximum of 10 concurrent connections. You can increase this number by editing the configuration file. - - If you’re having problems, check out [Troubleshoot Secure Shell (SSH) connections](/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). - - - question: | - Can I use the temporary disk (/dev/sdb1) to store data? - answer: | - Don't use the temporary disk (/dev/sdb1) to store data. It is only there for temporary storage. You risk losing data that can’t be recovered. - - - question: | - Can I copy or clone an existing Azure VM? - answer: | - Yes. For instructions, see [How to create a copy of a Linux virtual machine in the Resource Manager deployment model](/previous-versions/azure/virtual-machines/linux/copy-vm). - - - question: | - Why am I not seeing Canada Central and Canada East regions through Azure Resource Manager? - answer: | - The two new regions of Canada Central and Canada East are not automatically registered for virtual machine creation for existing Azure subscriptions. 
This registration is done automatically when a virtual machine is deployed through the Azure portal to any other region using Azure Resource Manager. After a virtual machine is deployed to any other Azure region, the new regions should be available for subsequent virtual machines. - - - question: | - Can I add a NIC to my VM after it's created? - answer: | - Yes, this is now possible. The VM first needs to be stopped deallocated. Then you can add or remove a NIC (unless it's the last NIC on the VM). - - - question: | - Are there any computer name requirements? - answer: | - Yes. The computer name can be a maximum of 64 characters in length. See [Naming conventions rules and restrictions](/azure/architecture/best-practices/resource-naming) for more information around naming your resources. - - - question: | - Are there any resource group name requirements? - answer: | - Yes. The resource group name can be a maximum of 90 characters in length. See [Naming conventions rules and restrictions](/azure/architecture/best-practices/resource-naming) for more information about resource groups. - - - question: | - What are the username requirements when creating a VM? - answer: | - Usernames should be 1 - 32 characters in length. - - The following usernames are not allowed: - - - `1` - - `123` - - `a` - - `actuser` - - `adm` - - `admin` - - `admin1` - - `admin2` - - `administrator` - - `aspnet` - - `backup` - - `console` - - `david` - - `guest` - - `john` - - `owner` - - `root` - - `server` - - `sql` - - `support_388945a0` - - `support` - - `sys` - - `test` - - `test1` - - `test2` - - `test3` - - `user` - - `user1` - - `user2` - - `user3` - - `user4` - - `user5` - - `video` - - - - question: | - What are the password requirements when creating a VM? 
- answer: | - There are varying password length requirements, depending on the tool you are using: - - Azure portal - between 12 - 72 characters - - Azure PowerShell - between 8 - 123 characters - - Azure CLI - between 12 - 123 characters - - Azure Resource Manager (ARM) templates - 12 - 72 characters and control characters are not allowed - - - Passwords must also meet 3 out of the following 4 complexity requirements: - - * Have lower characters - * Have upper characters - * Have a digit - * Have a special character (Regex match [\W_]) - - The following passwords are not allowed: - - * abc@123 - * P@$$w0rd - * P@ssw0rd - * P@ssword123 - * Pa$$word - * pass@word1 - * Password! - * Password1 - * Password22 - * iloveyou! diff --git a/scenarios/metadata.json b/scenarios/metadata.json index 6461a8a03..3d9ddc915 100644 --- a/scenarios/metadata.json +++ b/scenarios/metadata.json @@ -1,11 +1,11 @@ [ { "status": "active", - "key": "azure-docs/articles/aks/learn/quick-kubernetes-deploy-cli.md", + "key": "azure-aks-docs/articles/aks/learn/quick-kubernetes-deploy-cli.md", "title": "Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster using Azure CLI", "description": "Learn how to quickly deploy a Kubernetes cluster and deploy an application in Azure Kubernetes Service (AKS) using Azure CLI", "stackDetails": "", - "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/azure-docs/articles/aks/learn/quick-kubernetes-deploy-cli.md", + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/azure-aks-docs/articles/aks/learn/quick-kubernetes-deploy-cli.md", "documentationUrl": "https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-deploy-cli", "nextSteps": [ { @@ -49,12 +49,25 @@ "Microsoft.ContainerService/managedClusters/GetAccessProfiles/action", "Microsoft.Network/publicIPAddresses/list/action" ], - "region": "eastus" + "configurableParams": [ + { + "inputType": "textInput", + 
"commandKey": "MY_RESOURCE_GROUP_NAME", + "title": "Resource Group Name", + "defaultValue": "" + }, + { + "inputType": "textInput", + "commandKey": "MY_AKS_CLUSTER_NAME", + "title": "AKS Cluster Name", + "defaultValue": "" + } + ] } }, { "status": "active", - "key": "azure-docs/articles/mysql/flexible-server/tutorial-deploy-wordpress-on-aks.md", + "key": "azure-databases-docs/articles/mysql/flexible-server/tutorial-deploy-wordpress-on-aks.md", "title": "Tutorial: Deploy WordPress on AKS cluster by using Azure CLI", "description": "Learn how to quickly build and deploy WordPress on AKS with Azure Database for MySQL - Flexible Server.", "stackDetails": [ @@ -67,7 +80,7 @@ "Azure Private DNS Zone for Azure MySQL Flexible Server", "Use port 22 for SSH and ports 80, 443 for web traffic" ], - "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/azure-docs/articles/mysql/flexible-server/tutorial-deploy-wordpress-on-aks.md", + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/azure-databases-docs/articles/mysql/flexible-server/tutorial-deploy-wordpress-on-aks.md", "documentationUrl": "https://learn.microsoft.com/en-us/azure/mysql/flexible-server/tutorial-deploy-wordpress-on-aks", "nextSteps": [ { @@ -124,8 +137,7 @@ "Microsoft.Authorization/roleAssignments/read", "Microsoft.Authorization/roleDefinitions/read", "Microsoft.Authorization/roleDefinitions/write" - ], - "region": "eastus" + ] } }, { @@ -143,16 +155,15 @@ } ], "configurations": { - "region": "EastUS2" } }, { "status": "active", - "key": "azure-docs/articles/virtual-machine-scale-sets/flexible-virtual-machine-scale-sets-cli.md", + "key": "azure-compute-docs/articles/virtual-machine-scale-sets/flexible-virtual-machine-scale-sets-cli.md", "title": "Create virtual machines in a Flexible scale set using Azure CLI", "description": "Learn how to create a Virtual Machine Scale Set in Flexible orchestration mode using Azure CLI.", 
"stackDetails": "", - "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/azure-docs/articles/virtual-machine-scale-sets/flexible-virtual-machine-scale-sets-cli.md", + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/azure-compute-docs/articles/virtual-machine-scale-sets/flexible-virtual-machine-scale-sets-cli.md", "documentationUrl": "https://learn.microsoft.com/en-us/azure/virtual-machine-scale-sets/flexible-virtual-machine-scale-sets-cli", "nextSteps": [ { @@ -169,12 +180,11 @@ } ], "configurations": { - "region": "EastUS" } }, { "status": "active", - "key": "azure-docs/articles/virtual-machines/linux/quick-create-cli.md", + "key": "azure-compute-docs/articles/virtual-machines/linux/quick-create-cli.md", "title": "Quickstart: Use the Azure CLI to create a Linux Virtual Machine", "description": "In this quickstart, you learn how to use the Azure CLI to create a Linux virtual machine", "stackDetails": [ @@ -183,7 +193,7 @@ "Network interface with public IP and network security group", "Port 22 will be opened" ], - "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/azure-docs/articles/virtual-machines/linux/quick-create-cli.md", + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/azure-compute-docs/articles/virtual-machines/linux/quick-create-cli.md", "documentationUrl": "https://learn.microsoft.com/en-us/azure/virtual-machines/linux/quick-create-cli", "nextSteps": [ { @@ -215,29 +225,42 @@ "Microsoft.Compute/virtualMachines/read", "Microsoft.Network/publicIPAddresses/read", "Microsoft.Compute/virtualMachines/instanceView/read" - ], - "region": "EastUS" + ] } }, { "status": "active", - "key": "azure-docs/articles/virtual-machines/linux/tutorial-lemp-stack.md", + "key": "azure-compute-docs/articles/virtual-machines/linux/tutorial-lemp-stack.md", "title": "Tutorial - Deploy a LEMP stack using 
WordPress on a VM", "description": "In this tutorial, you learn how to install the LEMP stack, and WordPress, on a Linux virtual machine in Azure.", "stackDetails": "", - "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/azure-docs/articles/virtual-machines/linux/tutorial-lemp-stack.md", + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/azure-compute-docs/articles/virtual-machines/linux/tutorial-lemp-stack.md", "documentationUrl": "https://learn.microsoft.com/en-us/azure/virtual-machines/linux/tutorial-lemp-stack", + "nextSteps": [ + { + "title": "Learn about virtual machines", + "url": "https://learn.microsoft.com/en-us/azure/virtual-machines/" + }, + { + "title": "Create and manage Linux VMs with the Azure CLI", + "url": "https://learn.microsoft.com/en-us/azure/virtual-machines/linux/tutorial-manage-vm" + }, + { + "title": "Secure your Linux VM", + "url": "https://learn.microsoft.com/en-us/azure/virtual-machines/linux/tutorial-secure-vm" + } + + ], "configurations": { - "region": "westeurope" } }, { "status": "active", - "key": "DeployIGonAKS/README.md", + "key": "DeployIGonAKS/deploy-ig-on-aks.md", "title": "Deploy Inspektor Gadget in an Azure Kubernetes Service cluster", "description": "This tutorial shows how to deploy Inspektor Gadget in an AKS cluster", "stackDetails": "", - "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/DeployIGonAKS/README.md", + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/DeployIGonAKS/deploy-ig-on-aks.md", "documentationUrl": "", "nextSteps": [ { @@ -254,16 +277,15 @@ } ], "configurations": { - "region": "eastus" } }, { "status": "active", - "key": "CreateAKSWebApp/README.md", + "key": "CreateAKSWebApp/create-aks-webapp.md", "title": "Deploy a Scalable & Secure Azure Kubernetes Service cluster using the Azure CLI", "description": "This tutorial where we will 
take you step by step in creating an Azure Kubernetes Web Application that is secured via https.", "stackDetails": "", - "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/CreateAKSWebApp/README.md", + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/CreateAKSWebApp/create-aks-webapp.md", "documentationUrl": "", "nextSteps": [ { @@ -284,35 +306,6 @@ } ], "configurations": { - "region": "eastus" - } - }, - { - "status": "inactive", - "key": "BlobVisionOnAKS/blob-vision-aks.md" - }, - { - "status": "inactive", - "key": "CreateAKSDeployment/create-aks-deployment.md", - "title": "Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster using Azure CLI", - "description": "Learn how to quickly deploy a Kubernetes cluster and deploy an application in Azure Kubernetes Service (AKS) using Azure CLI.", - "stackDetails": "", - "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/CreateAKSDeployment/create-aks-deployment.md", - "documentationUrl": "", - "configurations": { - "region": "westeurope" - } - }, - { - "status": "inactive", - "key": "CreateContainerAppDeploymentFromSource/create-container-app-deployment-from-source.md", - "title": "Create a Container App leveraging Blob Store, SQL, and Computer Vision", - "description": "This tutorial shows how to create a Container App leveraging Blob Store, SQL, and Computer Vision", - "stackDetails": "", - "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/CreateContainerAppDeploymentFromSource/create-container-app-deployment-from-source.md", - "documentationUrl": "", - "configurations": { - "region": "westus" } }, { @@ -323,56 +316,25 @@ "stackDetails": "", "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/CreateRHELVMAndSSH/create-rhel-vm-ssh.md", "documentationUrl": "", + "nextSteps": [ + { + "title": "Learn about 
virtual machines", + "url": "https://learn.microsoft.com/en-us/azure/virtual-machines/" + }, + { + "title": "Create an Ubuntu Virtual Machine", + "url": "https://learn.microsoft.com/en-us/azure/virtual-machines/linux/quick-create-cli" + }, + { + "title": "Create custom VM images", + "url": "https://learn.microsoft.com/en-us/azure/virtual-machines/linux/tutorial-custom-images" + }, + { + "title": "Load Balance VMs", + "url": "https://learn.microsoft.com/en-us/azure/load-balancer/quickstart-load-balancer-standard-public-cli" + } + ], "configurations": { - "region": "westeurope" - } - }, - { - "status": "inactive", - "key": "DeployHAPGonARO/deploy-ha-pg-aro.md", - "title": "Create a Highly Available PostgreSQL Cluster on Azure Red Hat OpenShift", - "description": "This tutorial shows how to create a Highly Available PostgreSQL cluster on Azure Red Hat OpenShift (ARO) using the CloudNativePG operator", - "stackDetails": "", - "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/DeployHAPGonARO/deploy-ha-pg-aro.md", - "documentationUrl": "", - "configurations": { - "region": "" - } - }, - { - "status": "inactive", - "key": "azure-docs/articles/aks/create-postgresql-ha.md", - "title": "Create infrastructure for deploying a highly available PostgreSQL database on AKS", - "description": "Create the infrastructure needed to deploy a highly available PostgreSQL database on AKS using the CloudNativePG operator", - "stackDetails": "", - "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/azure-docs/articles/aks/create-postgresql-ha.md", - "documentationUrl": "", - "configurations": { - "region": "westus3" - } - }, - { - "status": "inactive", - "key": "azure-docs/articles/aks/deploy-postgresql-ha.md", - "title": "Deploy a highly available PostgreSQL database on AKS with Azure CLI", - "description": "In this article, you deploy a highly available PostgreSQL database on AKS using the CloudNativePG 
operator.", - "stackDetails": "", - "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/azure-docs/articles/aks/deploy-postgresql-ha.md", - "documentationUrl": "", - "configurations": { - "region": "" - } - }, - { - "status": "inactive", - "key": "azure-docs/articles/aks/postgresql-ha-overview.md", - "title": "Overview of deploying a highly available PostgreSQL database on AKS with Azure CLI", - "description": "Learn how to deploy a highly available PostgreSQL database on AKS using the CloudNativePG operator with Azure CLI.", - "stackDetails": "", - "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/azure-docs/articles/aks/postgresql-ha-overview.md", - "documentationUrl": "", - "configurations": { - "region": "" } }, { @@ -402,24 +364,40 @@ } ], "configurations": { - "permissions": [], - "region": "australiaeast" + "permissions": [] + } + }, + { + "status": "active", + "key": "WorkloadIdentityAKS/workload-identity-aks.md", + "title": "Deploy and configure an AKS cluster with workload identity", + "description": "In this Azure Kubernetes Service (AKS) article, you deploy an Azure Kubernetes Service cluster and configure it with a Microsoft Entra Workload ID.", + "stackDetails": [], + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/WorkloadIdentityAKS/workload-identity-aks.md", + "documentationUrl": "", + "nextSteps": [ + { + "title": "Kubectl Describe Command Reference", + "url": "https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#describe" + } + ], + "configurations": { + "permissions": [] } }, { - "status": "inactive", - "key": "ObtainPerformanceMetricsLinuxSustem/README.md", + "status": "active", + "key": "ObtainPerformanceMetricsLinuxSustem/obtain-performance-metrics-linux-system.md", "title": "Obtaining Performance metrics from a Linux system", "description": "Learn how to obtainer Performance metrics from a Linux 
system.", "stackDetails": "", - "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/ObtainPerformanceMetricsLinuxSustem/README.md", + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/ObtainPerformanceMetricsLinuxSustem/obtain-performance-metrics-linux-system.md", "documentationUrl": "", "configurations": { - "region": "" } }, { - "status": "inactive", + "status": "active", "key": "azure-aks-docs/articles/aks/create-postgresql-ha.md", "title": "Create infrastructure for deploying a highly available PostgreSQL database on AKS", "description": "Create the infrastructure needed to deploy a highly available PostgreSQL database on AKS using the CloudNativePG operator.", @@ -427,11 +405,10 @@ "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/azure-aks-docs/articles/aks/create-postgresql-ha.md", "documentationUrl": "", "configurations": { - "region": "westus3" } }, { - "status": "inactive", + "status": "active", "key": "azure-aks-docs/articles/aks/deploy-postgresql-ha.md", "title": "Deploy a highly available PostgreSQL database on AKS with Azure CLI", "description": "In this article, you deploy a highly available PostgreSQL database on AKS using the CloudNativePG operator.", @@ -439,11 +416,10 @@ "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/azure-aks-docs/articles/aks/deploy-postgresql-ha.md", "documentationUrl": "", "configurations": { - "region": "" } }, { - "status": "inactive", + "status": "active", "key": "azure-aks-docs/articles/aks/postgresql-ha-overview.md", "title": "Overview of deploying a highly available PostgreSQL database on AKS with Azure CLI", "description": "Learn how to deploy a highly available PostgreSQL database on AKS using the CloudNativePG operator.", @@ -451,19 +427,154 @@ "sourceUrl": 
"https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/azure-aks-docs/articles/aks/postgresql-ha-overview.md", "documentationUrl": "", "configurations": { - "region": "" } }, { - "status": "inactive", - "key": "azure-aks-docs/articles/aks/learn/quick-kubernetes-deploy-cli.md", - "title": "Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster using Azure CLI", - "description": "Learn how to quickly deploy a Kubernetes cluster and deploy an application in Azure Kubernetes Service (AKS) using Azure CLI.", + "status": "active", + "key": "CreateContainerAppDeploymentFromSource/create-container-app-deployment-from-source.md", + "title": "Create a Container App leveraging Blob Store, SQL, and Computer Vision", + "description": "This tutorial shows how to create a Container App leveraging Blob Store, SQL, and Computer Vision", "stackDetails": "", - "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/azure-aks-docs/articles/aks/learn/quick-kubernetes-deploy-cli.md", + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/CreateContainerAppDeploymentFromSource/create-container-app-deployment-from-source.md", + "documentationUrl": "", + "configurations": { + } + }, + { + "status": "active", + "key": "BlobVisionOnAKS/blob-vision-aks.md" + }, + { + "status": "active", + "key": "DeployHAPGonARO/deploy-ha-pg-aro.md", + "title": "Create a Highly Available PostgreSQL Cluster on Azure Red Hat OpenShift", + "description": "This tutorial shows how to create a Highly Available PostgreSQL cluster on Azure Red Hat OpenShift (ARO) using the CloudNativePG operator", + "stackDetails": "", + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/DeployHAPGonARO/deploy-ha-pg-aro.md", + "documentationUrl": "", + "configurations": { + } + }, + { + "status": "active", + "key": "AIChatApp/ai-chat-app.md", + "title": "Create an Azure OpenAI, 
LangChain, ChromaDB, and Chainlit Chat App in Container Apps", + "description": "", + "stackDetails": "", + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/AIChatApp/ai-chat-app.md", "documentationUrl": "", + "nextSteps": [], + "configurations": { + "permissions": [] + } + }, + { + "status": "active", + "key": "ConfigurePythonContainer/configure-python-container.md", + "title": "Configure Linux Python apps", + "description": "Learn how to configure the Python container in which web apps are run, using both the Azure portal and the Azure CLI.", + "stackDetails": "", + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/ConfigurePythonContainer/configure-python-container.md", + "documentationUrl": "https://learn.microsoft.com/en-us/azure/app-service/configure-language-python", + "nextSteps": [], + "configurations": { + "permissions": [] + } + }, + { + "status": "active", + "key": "CreateSpeechService/create-speech-service.md", + "title": "Quickstart: The Speech CLI - Speech service", + "description": "In this Azure AI Speech CLI quickstart, you interact with speech to text, text to speech, and speech translation without having to write code.", + "stackDetails": "", + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/CreateSpeechService/create-speech-service.md", + "documentationUrl": "https://learn.microsoft.com/en-us/azure/ai-services/speech-service/spx-basics?tabs=windowsinstall%2Cterminal", + "nextSteps": [], + "configurations": { + "permissions": [] + } + }, + { + "status": "active", + "key": "DeployApacheAirflowOnAKS/deploy-apache-airflow-on-aks.md", + "title": "Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster and Apache Airflow using Azure CLI", + "description": "Learn how to quickly deploy a Kubernetes cluster and deploy Apache Airflow in Azure Kubernetes Service (AKS) using Azure CLI.", + "stackDetails": "", + 
"sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/DeployApacheAirflowOnAKS/deploy-apache-airflow-on-aks.md", + "documentationUrl": "", + "nextSteps": [], + "configurations": { + "permissions": [] + } + }, + { + "status": "active", + "key": "DeployPremiumSSDV2/deploy-premium-ssd-v2.md", + "title": "Deploy a Premium SSD v2 managed disk", + "description": "Learn how to deploy a Premium SSD v2 and about its regional availability.", + "stackDetails": "", + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/DeployPremiumSSDV2/deploy-premium-ssd-v2.md", + "documentationUrl": "https://learn.microsoft.com/en-us/azure/virtual-machines/disks-deploy-premium-v2?tabs=azure-cli", + "nextSteps": [], + "configurations": { + "permissions": [] + } + }, + { + "status": "active", + "key": "GPUNodePoolAKS/gpu-node-pool-aks.md", + "title": "Create a multi-instance GPU node pool in Azure Kubernetes Service (AKS)", + "description": "Learn how to create a multi-instance GPU node pool in Azure Kubernetes Service (AKS).", + "stackDetails": "", + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/GPUNodePoolAKS/gpu-node-pool-aks.md", + "documentationUrl": "https://learn.microsoft.com/en-us/azure/aks/gpu-multi-instance?tabs=azure-cli", + "nextSteps": [], + "configurations": { + "permissions": [] + } + }, + { + "status": "active", + "key": "PostgresRAGLLM/postgres-rag-llm.md", + "title": "Quickstart: Deploy a Postgres vector database", + "description": "Setup a Postgres vector database and openai resources to run a RAG-LLM model.", + "stackDetails": "", + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/PostgresRAGLLM/postgres-rag-llm.md", + "documentationUrl": "", + "nextSteps": [], + "configurations": { + "permissions": [] + } + }, + { + "status": "active", + "key": "CreateAOAIDeployment/create-aoai-deployment.md", + "title": 
"Create and manage Azure OpenAI Service deployments with the Azure CLI", + "description": "Learn how to use the Azure CLI to create an Azure OpenAI resource and manage deployments with the Azure OpenAI Service.", + "stackDetails": "", + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/CreateAOAIDeployment/create-aoai-deployment.md", + "documentationUrl": "", + "nextSteps": [], + "configurations": { + "permissions": [] + } + }, + { + "status": "active", + "key": "AKSKaito/aks-kaito.md", + "title": "Deploy an AI model on Azure Kubernetes Service (AKS) with the AI toolchain operator (preview)", + "description": "Learn how to enable the AI toolchain operator add-on on Azure Kubernetes Service (AKS) to simplify OSS AI model management and deployment", + "stackDetails": "", + "sourceUrl": "https://raw.githubusercontent.com/MicrosoftDocs/executable-docs/main/scenarios/AKSKaito/aks-kaito.md", + "documentationUrl": "", + "nextSteps": [ + { + "title": "Check out the KAITO GitHub repository", + "url": "https://github.com/Azure/kaito" + } + ], "configurations": { - "region": "westeurope" + "permissions": [] } } ] \ No newline at end of file