terraform azurerm - cannot destroy public ip

New to terraform, so I'm hoping this is an easy issue. I'm creating some resources in Azure and deploying a simple Flask application to AKS. Creation works fine with terraform apply: I can see that Azure is provisioned correctly and I can hit the Flask app.

When I try to run terraform destroy I get the error: "StatusCode=400 ... In order to delete the public IP, disassociate/detach the Public IP address from the resource."

main.tf:

variable "subscription_id" {}
variable "client_id" {}
variable "client_secret" {}
variable "tenant_id" {}

provider "azurerm" {
    version         = "=1.28.0"
    tenant_id       = "${var.tenant_id}"
    subscription_id = "${var.subscription_id}"
}

resource "azurerm_resource_group" "aks" {
    name        = "${var.name_prefix}"
    location    = "${var.location}"
}

resource "azurerm_kubernetes_cluster" "k8s" {
    name                    = "${var.name_prefix}-aks"
    kubernetes_version      = "${var.kubernetes_version}"
    location                = "${azurerm_resource_group.aks.location}"
    resource_group_name     = "${azurerm_resource_group.aks.name}"
    dns_prefix              = "AKS-${var.dns_prefix}"

    agent_pool_profile {
        name                = "${var.node_pool_name}"
        count               = "${var.node_pool_size}"
        vm_size             = "${var.node_pool_vmsize}"
        os_type             = "${var.node_pool_os}"
        os_disk_size_gb     = 30
    }

    service_principal {
        client_id           = "${var.client_id}"
        client_secret       = "${var.client_secret}"
    }

    tags = {
        environment = "${var.env_tag}"
    }
}

provider "helm" {
  install_tiller = true

  kubernetes {
    host                   = "${azurerm_kubernetes_cluster.k8s.kube_config.0.host}"
    client_certificate     = "${base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.client_certificate)}"
    client_key             = "${base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.client_key)}"
    cluster_ca_certificate = "${base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.cluster_ca_certificate)}"
  }
}

# Create Static Public IP Address to be used by Nginx Ingress
resource "azurerm_public_ip" "nginx_ingress" {
  name                         = "nginx-ingress-public-ip"
  location                     = "${azurerm_kubernetes_cluster.k8s.location}"
  resource_group_name          = "${azurerm_kubernetes_cluster.k8s.node_resource_group}"
  allocation_method            = "Static"
  domain_name_label            = "${var.name_prefix}"
}

# Add Kubernetes Stable Helm charts repo
data "helm_repository" "stable" {
  name = "stable"
  url  = "https://kubernetes-charts.storage.googleapis.com"
}

# Install Nginx Ingress using Helm Chart
resource "helm_release" "nginx_ingress" {
  name       = "nginx-ingress"
  repository = "${data.helm_repository.stable.metadata.0.name}"
  chart      = "nginx-ingress"

  set {
    name  = "rbac.create"
    value = "false"
  }

  set {
    name  = "controller.service.externalTrafficPolicy"
    value = "Local"
  }

  set {
    name  = "controller.service.loadBalancerIP"
    value = "${azurerm_public_ip.nginx_ingress.ip_address}"
  }
}

I'm also deploying my Kubernetes resources in this file, k8s.tf:

provider "kubernetes" {
    host                    = "${azurerm_kubernetes_cluster.k8s.kube_config.0.host}"
    username                = "${azurerm_kubernetes_cluster.k8s.kube_config.0.username}"
    password                = "${azurerm_kubernetes_cluster.k8s.kube_config.0.password}"
    client_certificate      = "${base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.client_certificate)}"
    client_key              = "${base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.client_key)}"
    cluster_ca_certificate  = "${base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.cluster_ca_certificate)}"
}

resource "kubernetes_deployment" "flask-api-deployment" {
    metadata {
        name = "flask-api-deployment"
    }

    spec {
        replicas = 2

        selector {
            match_labels {
                component = "api"
            }
        }

        template {
            metadata {
                labels = {
                    component = "api"
                }
            }

            spec {
                container {
                    image = "xxx.azurecr.io/sampleflask:0.1.0"
                    name = "flask-api"
                    port {
                        container_port = 5000
                    }
                }
            }
        }
    }
}

resource "kubernetes_service" "api-cluster-ip-service" {
    metadata {
        name = "flask-api-cluster-ip-service"
    }

    spec {
        selector {
            component = "api"
        }

        port {
            port = 5000
            target_port = 5000
        }
    }
}

resource "kubernetes_ingress" "flask-ingress-service" {
    metadata {
        name = "flask-ingress-service"
    }

    spec {
        backend {
            service_name = "flask-api-cluster-ip-service"
            service_port = 5000
        }
    }
}

For your issue, this is a problem with the order of the resources. When you create the nginx ingress with the public IP, the public IP is created first. But when you try to delete the public IP, it is still in use by the nginx ingress, so the deletion fails with that error.
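
To confirm what is still holding the address before destroying, one option (assuming the Azure CLI is available; the IP name matches the config above, and the resource group placeholder stands for the MC_* node resource group that AKS creates) is to look at the IP's ipConfiguration:

# Shows the IP configuration (NIC or load balancer frontend) the public IP is attached to;
# an empty result means the IP is already detached and can be deleted
az network public-ip show \
  --name nginx-ingress-public-ip \
  --resource-group <aks-node-resource-group> \
  --query ipConfiguration.id \
  --output tsv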

The solution is to detach the public IP from the resource that uses it, and then destroy it with Terraform. You can take a look at the explanation in the issue.
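
A minimal sketch of that ordering against the config above: destroying the Helm release first removes the chart's LoadBalancer Service, which makes Azure detach the IP from the AKS load balancer, after which the rest can be destroyed.

# Destroy the ingress controller first so its LoadBalancer Service is deleted
# and nginx-ingress-public-ip is detached from the AKS load balancer
terraform destroy -target=helm_release.nginx_ingress

# With the IP detached, the remaining resources (including
# azurerm_public_ip.nginx_ingress) can be destroyed as usual
terraform destroy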

The user @4c74356b41 is right, but to give more information, assume a config like this:

resource "azurerm_kubernetes_cluster" "k8s" {
  name                = "aks-e2x-nuffield-uat"
  resource_group_name = azurerm_resource_group.core_rg.name
  location            = azurerm_resource_group.core_rg.location
  dns_prefix          = "aks-e2x-nuffield-uat-dns"
  kubernetes_version  = var.k8s_version
  # NOTE currently only a single node pool, default, is configured
  private_cluster_enabled = true
...
  network_profile {
    network_plugin     = "kubenet"
    load_balancer_sku  = "standard"
    service_cidr       = var.k8s_service_subnet
    pod_cidr           = var.k8s_pod_subnet
    docker_bridge_cidr = "172.17.0.1/16"
    dns_service_ip     = "40.0.8.10" # within the service subnet
  }
}

Where load_balancer_sku is set to standard, the cluster exposes its effective outbound IPs as full Azure resource IDs; splitting such an ID on "/" and taking the last segment gives the public IP resource name, which a data source can then look up so the address can be used elsewhere, like this:

data "azurerm_public_ip" "k8s_load_balancer_ip" {
  name = reverse(split("/", tolist(azurerm_kubernetes_cluster.k8s.network_profile.0.load_balancer_profile.0.effective_outbound_ips)[0]))[0]
  resource_group_name = azurerm_kubernetes_cluster.k8s.node_resource_group
}

output "ingress_public_ip" {
  # value = azurerm_public_ip.ingress.ip_address
  value = data.azurerm_public_ip.k8s_load_balancer_ip.ip_address
}
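
After an apply, the resolved address can then be read back from the state, for example:

terraform output ingress_public_ip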
