Getting Hashicorp Provider Error while creating AKS cluster using Terraform

I am getting an error while creating an AKS cluster using Terraform.

Error:

Error: Failed to query available provider packages
Could not retrieve the list of available versions for provider hashicorp/file: provider registry registry.terraform.io does not have a provider named
registry.terraform.io/hashicorp/file

All modules should specify their required_providers so that external consumers will get the correct providers when using a module. To see which modules are currently depending on hashicorp/file, run the following command:
    terraform providers

Above is the error I am facing. I have written the Terraform code shown below.

provider.tf:
============

provider "azurerm" {
  features {}
}

terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "2.39.0"
    }
  }
}


terraform.tfvars:
=================

resource_group_name = "a0474899701"
location            = "CentralUS"
cluster_name        = "aks01"
kubernetes_version  = "1.24.4"
system_node_count   = 2
user_node_count     = 1
spot_node_count     = 2
acr_name            = "devops_acr_tf"
aks_network_plugin  = "kubenet"
client_id           = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
client_secret       = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"


main.tf:
========
# Create a Resource Group
resource "azurerm_resource_group" "aks-rg" {
  name     = var.resource_group_name
  location = var.location
}

# Create an ACR instance
resource "azurerm_container_registry" "acr" {
  name                = var.acr_name
  resource_group_name = azurerm_resource_group.aks-rg.name
  location            = var.location
  sku                 = "Standard"
  admin_enabled       = false
}

# Create a role assignment to allow AKS to access ACR
resource "azurerm_role_assignment" "role_acrpull" {
  scope                            = azurerm_container_registry.acr.id
  role_definition_name             = "AcrPull"
  # principal_id                     = azurerm_kubernetes_cluster.aks.kubelet_identity.0.object_id
  principal_id                     = azurerm_kubernetes_cluster.aks.kubelet_identity.0.client_id
  skip_service_principal_aad_check = true
}

# Create a Kubernetes secret to hold the ACR credentials
# It holds the ACR credentials in a Docker config JSON format
resource "kubernetes_secret" "acr_credentials" {
  metadata {
    name = "acr-credentials"
  }

  data = {
    ".dockerconfigjson" = azurerm_container_registry.acr.docker_config_json
  }
}    

# Private Key Creation
resource "tls_private_key" "aks_ssh_key" {
  algorithm = "RSA"
}

resource "tls_public_key" "aks_ssh_key" {
  private_key_pem = tls_private_key.aks_ssh_key.private_key_pem
}

resource "file" "private_key" {
  content  = tls_private_key.aks_ssh_key.private_key_pem
  filename = "aks_private_key.pem"
}

# virtual network (aks_vnet) is created in the same resource group
resource "azurerm_virtual_network" "aks_vnet" {
  name                = "${var.resource_group_name}-vnet01"
  # address_space       = ["10.0.0.0/16"]
  address_space       = ["10.172.144.0/26"]
  location            = azurerm_resource_group.aks_rg.location
  resource_group_name = azurerm_resource_group.aks_rg.name
}

# subnet (aks_subnet) is created within the virtual network
resource "azurerm_subnet" "aks_subnet" {
  name                 = "${var.resource_group_name}-vnet01-subnet01"
  resource_group_name  = azurerm_resource_group.aks_rg.name
  virtual_network_name = azurerm_virtual_network.aks_vnet.name
  # address_prefix       = "10.0.1.0/24"
  address_prefix       = "10.172.144.0/27"
}


resource "azurerm_network_security_group" "azure-sg" {
  name                = "${var.resource_group_name}-nsg01"
  location            = azurerm_resource_group.aks_rg.location
  resource_group_name = azurerm_resource_group.aks_rg.name

  security_rule {
    name                       = "allow-ssh"
    priority                   = 100
    direction                  = "Inbound"
    access                     = "Allow"
    protocol                   = "Tcp"
    source_port_range          = "*"
    destination_port_range     = "22"
    source_address_prefix      = "*"
    destination_address_prefix = "*"
  }
}

resource "azurerm_kubernetes_cluster" "aks" {
  name                = var.cluster_name
  kubernetes_version  = var.kubernetes_version
  location            = var.location
  resource_group_name = azurerm_resource_group.aks-rg.name
  security_group_name = azurerm_network_security_group.azure-sg.name
  dns_prefix          = var.cluster_name
  
  default_node_pool {
    name                = "system"
    node_count          = var.system_node_count
    vm_size             = "Standard_E4as_v4"
    os_disk_size_gb     = 20
    os_disk_type        = "Ephemeral"
    vnet_subnet_id      = azurerm_subnet.aks_subnet.id
    os_type             = "Linux"
    node_image_version = "AKSUbuntu-1804gen2containerd-2023.01.10"
    enable_node_public_ip = false
    enable_auto_scaling = false
  }
  additional_node_pools {
    name                = "user"
    node_count          = var.user_node_count
    vm_size             = "Standard_E8as_v4"
    os_disk_size_gb     = 20
    os_disk_type        = "Ephemeral"
    vnet_subnet_id      = azurerm_subnet.aks_subnet.id
    type                = "User"
      # os_type             = "RedHat"
    os_type             = "Linux"
    node_image_version = "AKSUbuntu-1804gen2containerd-2023.01.10"
    enable_node_public_ip = false
    enable_auto_scaling = false
  }
  additional_node_pools {
    name                = "spot"
    node_count          = var.spot_node_count
    vm_size             = "Standard_D2s_v3"
    os_disk_size_gb     = 20
    os_disk_type        = "Ephemeral"
    vnet_subnet_id      = azurerm_subnet.aks_subnet.id
    type                = "User"
      # os_type             = "RedHat"
    os_type             = "Linux"
    node_image_version = "AKSUbuntu-1804gen2containerd-2023.01.10"
    max_price           = 0.5
    enable_node_public_ip = false
    enable_auto_scaling = false
    eviction_policy      = "Spot"
    taints               = ["kubernetes.azure.com/scalesetpriority=spot:NoSchedule"]
    labels = {
      "kubernetes.azure.com/scalesetpriority" = "spot"
    }
  }

  kubernetes_cluster_config {
    max_pods_per_node = "110"
  }
  
  identity {
    type = "SystemAssigned"
  }
  
  linux_profile {
    admin_username = "azureuser"

    ssh_key {
      key_data = tls_public_key.aks_ssh_key.public_key_openssh
    }
  }
  
  network_profile {
    pod_cidr = "172.32.0.0/19"
    service_cidr = "172.32.0.0/19"
    load_balancer_sku = "Standard"
    network_plugin    = var.aks_network_plugin 
    dns_service_ip = "172.32.0.10"
    docker_bridge_cidr = "172.34.0.1/16"
  }
  
  service_principal {
    client_id     = var.client_id
    client_secret = var.client_secret
  }
  
  tags = {
    Environment = "Development"
  }
}

# ACR can be attached to the AKS cluster using the "azurerm_kubernetes_cluster_container_registry_config" resource type
resource "azurerm_kubernetes_cluster_container_registry_config" "acr_config" {
  cluster_name        = azurerm_kubernetes_cluster.aks.name
  registry_id         = azurerm_container_registry.acr.id
  namespace           = "aks"
  default_action      = "Allow"
}

Above is the code for which I am facing the error shown earlier. Even after changing my provider.tf I am still facing the same issue. Can anyone please tell me how to solve this error?

Thanks

I tried to reproduce the same in my environment to create an AKS cluster using Terraform:

Kindly use the below Terraform code to create an AKS cluster.
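First, about the error itself: Terraform derives the provider for each resource from the prefix of its type name, so the resource "file" block in main.tf makes Terraform look for a provider named hashicorp/file, which does not exist in the registry. This is also why changing provider.tf alone does not help; the lookup is triggered by the resource type in main.tf. The resource that writes content to a local file is local_file from the hashicorp/local provider, and tls_private_key comes from hashicorp/tls (tls_public_key is a data source, not a resource, and can be dropped entirely because tls_private_key already exports public_key_openssh). Below is a minimal sketch of the corrected blocks, assuming the goal is only to generate the SSH key pair and save the private key locally:

terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "2.39.0"
    }
    # Declare the providers backing tls_private_key and local_file;
    # undeclared providers also default to hashicorp/<prefix>, which is
    # exactly how the invalid "file" resource produced hashicorp/file.
    tls = {
      source = "hashicorp/tls"
    }
    local = {
      source = "hashicorp/local"
    }
  }
}

# SSH key pair for the AKS linux_profile
resource "tls_private_key" "aks_ssh_key" {
  algorithm = "RSA"
  rsa_bits  = 4096
}

# Replaces the invalid resource "file" block: local_file is the
# resource type that writes content to a file on disk
resource "local_file" "private_key" {
  content  = tls_private_key.aks_ssh_key.private_key_pem
  filename = "aks_private_key.pem"
}

Note that several other blocks in the question are not valid for the azurerm provider either (additional_node_pools, kubernetes_cluster_config, and security_group_name are not arguments of azurerm_kubernetes_cluster, and azurerm_kubernetes_cluster_container_registry_config is not a resource type); extra node pools are created with separate azurerm_kubernetes_cluster_node_pool resources. The kubernetes_secret resource would additionally require the kubernetes provider to be declared and configured.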

Terraform Code:

provider "azurerm" {
  features {}
}
resource "azurerm_resource_group" "venkatesh" {
  name     = "venkat-resources"
  location = "West Europe"
}

resource "azurerm_container_registry" "venkatreg" {
  name                = "Testcontainerregistery"
  resource_group_name = azurerm_resource_group.venkatesh.name
  location            = azurerm_resource_group.venkatesh.location
  sku                 = "Premium"
}

resource "azurerm_kubernetes_cluster" "venkatcluster" {
  name                = "example-aks1"
  location            = azurerm_resource_group.venkatesh.location
  resource_group_name = azurerm_resource_group.venkatesh.name
  dns_prefix          = "exampleaks1"

  default_node_pool {
    name       = "default"
    node_count = 1
    vm_size    = "Standard_D2_v2"
  }

  identity {
    type = "SystemAssigned"
  }

  tags = {
    Environment = "Production"
  }
}

resource "azurerm_role_assignment" "example" {
  principal_id                     = azurerm_kubernetes_cluster.venkatcluster.kubelet_identity[0].object_id
  role_definition_name             = "AcrPull"
  scope                            = azurerm_container_registry.venkatreg.id
  skip_service_principal_aad_check = true
}
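One difference from the question's code worth calling out: the role assignment above passes kubelet_identity[0].object_id as principal_id, while the question passes kubelet_identity.0.client_id. azurerm_role_assignment expects the object ID of the principal, so granting AcrPull with the client ID will not work.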

Terraform apply:

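With the configuration saved, the usual workflow initializes the providers and applies the plan:

terraform init
terraform plan
terraform apply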

Once the code is applied, the resources are created successfully.

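As a quick verification once the apply finishes (assuming the Azure CLI and kubectl are installed locally), pull the cluster credentials and list the nodes; the resource group and cluster names below are the ones used in the code above:

az aks get-credentials --resource-group venkat-resources --name example-aks1
kubectl get nodes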

Reference: Create a Kubernetes cluster with Azure Kubernetes Service using Terraform.
