简体   繁体   English

terraform - 在您的集群中未找到容器实例

[英]terraform - No Container Instances were found in your cluster

I deploy ecs using terraform.我使用 terraform 部署 ecs。

When I run terraform apply everything is okay but when I browse to ecs service on events tab I have this error:当我运行terraform apply一切正常,但是当我在事件选项卡上浏览到 ecs 服务时出现此错误:

service nginx-ecs-service was unable to place a task because no container instance met all of its requirements. Reason: No Container Instances were found in your cluster.

How do I fix that?我该如何解决? What is missing in my terraform file?我的 terraform 文件中缺少什么?

# Shared naming values referenced throughout this stack (local.name,
# local.environment, local.ec2_resources_name).
locals {
  name        = "myapp"
  environment = "prod"

  # This is the convention we use to know what belongs to each other
  ec2_resources_name = "${local.name}-${local.environment}"
}

# TLS certificate for the load balancer, uploaded to IAM from local PEM
# files. Terraform 0.12+ syntax (already used elsewhere in this file):
# function calls no longer need "${...}" wrappers.
resource "aws_iam_server_certificate" "lb_cert" {
  name              = "lb_cert"
  certificate_body  = file("./www.example.com/cert.pem")
  private_key       = file("./www.example.com/privkey.pem")
  certificate_chain = file("./www.example.com/chain.pem")
}


# Security group for the bastion host: SSH in, everything out.
resource "aws_security_group" "bastion-sg" {
  name   = "bastion-security-group"
  vpc_id = module.vpc.vpc_id

  # NOTE(review): SSH is open to the whole internet; consider restricting
  # cidr_blocks to known admin/VPN ranges.
  ingress {
    protocol    = "tcp"
    from_port   = 22
    to_port     = 22
    cidr_blocks = ["0.0.0.0/0"]
  }

  # Allow all outbound traffic. protocol is a string ("-1" = all protocols),
  # matching the style of the other security group in this file.
  egress {
    protocol    = "-1"
    from_port   = 0
    to_port     = 0
    cidr_blocks = ["0.0.0.0/0"]
  }
}

# Public bastion host for reaching instances in the private subnets.
# The explicit depends_on was removed: referencing
# aws_security_group.bastion-sg.id below already creates that dependency.
resource "aws_instance" "bastion" {
  ami                         = "ami-0d5d9d301c853a04a"
  key_name                    = "myapp"
  instance_type               = "t2.micro"
  vpc_security_group_ids      = [aws_security_group.bastion-sg.id]
  associate_public_ip_address = true

  # Place the bastion in the first public subnet created by the VPC module.
  subnet_id = element(module.vpc.public_subnets, 0)

  tags = {
    Name = "bastion"
  }
}

# VPC Definition
# VPC Definition
# Three-AZ VPC in us-east-2 with paired public/private subnets. A single
# shared NAT gateway (single_nat_gateway = true) keeps costs down at the
# price of one AZ being a single point of failure for private egress.
module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 2.0"

  name = "my-vpc"
  cidr = "10.1.0.0/16"

  azs = ["us-east-2a", "us-east-2b", "us-east-2c"]

  private_subnets = ["10.1.1.0/24", "10.1.2.0/24", "10.1.3.0/24"]
  public_subnets  = ["10.1.101.0/24", "10.1.102.0/24", "10.1.103.0/24"]

  single_nat_gateway   = true
  enable_nat_gateway   = true
  enable_vpn_gateway   = false
  enable_dns_hostnames = true

  public_subnet_tags = {
    Name = "public"
  }

  private_subnet_tags = {
    Name = "private"
  }

  public_route_table_tags = {
    Name = "public-RT"
  }

  private_route_table_tags = {
    Name = "private-RT"
  }

  tags = {
    Environment = local.environment
    Name        = local.name
  }
}

# ------------
# ------------
# ECS cluster that the EC2 container instances join. The cluster name is
# written into /etc/ecs/ecs.config by the launch configuration's user_data,
# which is how instances register here instead of the "default" cluster.
resource "aws_ecs_cluster" "public-ecs-cluster" {
  name = "myapp-${local.environment}"

  lifecycle {
    create_before_destroy = true
  }
}

# Security group shared by the ALB and the ECS container instances.
resource "aws_security_group" "ecs-vpc-secgroup" {
  name        = "ecs-vpc-secgroup"
  description = "ecs-vpc-secgroup"
  vpc_id      = module.vpc.vpc_id

  # NOTE(review): every TCP port is open to the internet. Bridge-mode tasks
  # with hostPort 0 do need a wide host-port range reachable by the ALB, but
  # consider splitting this into an ALB group (80/443 from anywhere) and an
  # instance group that only accepts the ephemeral range from the ALB group.
  ingress {
    from_port   = 0
    to_port     = 65535
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # Allow all outbound traffic ("-1" = all protocols).
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "ecs-security-group"
  }
}

# Internet-facing application load balancer spanning the public subnets.
resource "aws_lb" "nginx-ecs-alb" {
  name               = "nginx-ecs-alb"
  internal           = false
  load_balancer_type = "application"
  subnets            = module.vpc.public_subnets
  security_groups    = [aws_security_group.ecs-vpc-secgroup.id]
}

# Target group the ECS service registers its nginx tasks into.
# (aws_alb_target_group is kept rather than the newer aws_lb_target_group
# alias so the resource address and existing state are unchanged.)
resource "aws_alb_target_group" "nginx-ecs-tg" {
  name     = "nginx-ecs-tg"
  port     = 80
  protocol = "HTTP"
  vpc_id   = module.vpc.vpc_id

  health_check {
    healthy_threshold   = 3
    unhealthy_threshold = 10
    timeout             = 5
    interval            = 10
    path                = "/"
  }

  # Resource-reference form; the 0.11 quoted-string form is deprecated.
  depends_on = [aws_lb.nginx-ecs-alb]
}

# HTTP listener: forward all traffic on port 80 to the nginx target group.
resource "aws_alb_listener" "alb_listener" {
  load_balancer_arn = aws_lb.nginx-ecs-alb.arn
  port              = 80
  protocol          = "HTTP"

  default_action {
    target_group_arn = aws_alb_target_group.nginx-ecs-tg.arn
    type             = "forward"
  }
}

# Task definition for a single nginx container. hostPort = 0 requests a
# dynamic host port from ECS, which is what makes bridge networking work
# with the ALB target group.
resource "aws_ecs_task_definition" "nginx-image" {
  family       = "nginx-server"
  network_mode = "bridge"

  # jsonencode (Terraform 0.12+, already used elsewhere in this file)
  # validates the structure at plan time instead of shipping a raw heredoc
  # that only fails when ECS rejects it.
  container_definitions = jsonencode([
    {
      name      = "nginx-web"
      image     = "nginx:latest"
      essential = true
      portMappings = [
        {
          containerPort = 80
          hostPort      = 0
          protocol      = "tcp"
        }
      ]
      memory = 128
      cpu    = 10
    }
  ])
}

# Looks up the latest registered revision of the task-definition family so
# the service can pin to whichever revision is newest (see the max() in the
# aws_ecs_service below).
data "aws_ecs_task_definition" "nginx-image" {
  depends_on      = [aws_ecs_task_definition.nginx-image]
  task_definition = aws_ecs_task_definition.nginx-image.family
}


# Launch configuration for the ECS container instances.
#
# Two requirements for instances to appear in the cluster ("No Container
# Instances were found" otherwise):
#   1. image_id must be an ECS-optimized AMI (it ships the ECS agent) —
#      NOTE(review): confirm ami-0d5d9d301c853a04a is the ECS-optimized
#      image for this region.
#   2. The "ecsInstanceRole" instance profile must carry the
#      AmazonEC2ContainerServiceforEC2Role policy.
resource "aws_launch_configuration" "ecs-launch-configuration" {
  name                 = "ecs-launch-configuration"
  image_id             = "ami-0d5d9d301c853a04a"
  instance_type        = "t2.micro"
  iam_instance_profile = "ecsInstanceRole"

  root_block_device {
    # gp2 instead of "standard" (magnetic): with a magnetic root volume the
    # instances came up unhealthy and the ASG kept recycling them before the
    # ECS agent could register with the cluster.
    volume_type           = "gp2"
    volume_size           = 35
    delete_on_termination = true
  }

  security_groups             = [aws_security_group.ecs-vpc-secgroup.id]
  associate_public_ip_address = true
  key_name                    = "myapp"

  # Point the ECS agent at our cluster; without this it joins "default" and
  # the service never sees an instance.
  user_data = <<-EOF
    #!/bin/bash
    echo ECS_CLUSTER=${aws_ecs_cluster.public-ecs-cluster.name} >> /etc/ecs/ecs.config
  EOF
}

# Auto-scaling group that keeps exactly one ECS container instance running.
resource "aws_autoscaling_group" "ecs-autoscaling-group" {
  name             = "ecs-autoscaling-group"
  max_size         = 1
  min_size         = 1
  desired_capacity = 1

  # Launch into the public subnets so instances can reach the ECS endpoint
  # without a NAT hop (they get public IPs via the launch configuration).
  vpc_zone_identifier = module.vpc.public_subnets

  launch_configuration = aws_launch_configuration.ecs-launch-configuration.name
  health_check_type    = "EC2"
  default_cooldown     = 300

  # Create the replacement ASG before destroying the old one so the cluster
  # never drops to zero instances on launch-configuration changes.
  lifecycle {
    create_before_destroy = true
  }

  tag {
    key                 = "Name"
    value               = "wizardet972_ecs-instance"
    propagate_at_launch = true
  }

  tag {
    key                 = "Owner"
    value               = "Wizardnet972"
    propagate_at_launch = true
  }
}


# Target-tracking policy: scale the ASG to hold average CPU near 70%.
resource "aws_autoscaling_policy" "ecs-scale" {
  name                      = "ecs-scale-policy"
  policy_type               = "TargetTrackingScaling"
  autoscaling_group_name    = aws_autoscaling_group.ecs-autoscaling-group.name
  estimated_instance_warmup = 60

  target_tracking_configuration {
    predefined_metric_specification {
      predefined_metric_type = "ASGAverageCPUUtilization"
    }

    target_value = 70
  }
}

# ECS service running the nginx task on EC2 behind the ALB.
resource "aws_ecs_service" "nginx-ecs-service" {
  name    = "nginx-ecs-service"
  cluster = aws_ecs_cluster.public-ecs-cluster.id

  # Pin to the newest revision of the family: either the one this apply
  # created or the latest already registered in ECS (via the data source).
  # The original compared the resource's revision with itself, which made
  # the max() a no-op.
  task_definition = "${aws_ecs_task_definition.nginx-image.family}:${max(aws_ecs_task_definition.nginx-image.revision, data.aws_ecs_task_definition.nginx-image.revision)}"

  launch_type   = "EC2"
  desired_count = 1

  load_balancer {
    target_group_arn = aws_alb_target_group.nginx-ecs-tg.arn
    container_name   = "nginx-web"
    container_port   = 80
  }
}

Update:更新:

I tried to create the terraform stack you shared with me, I was able to reproduce the issue.我尝试创建您与我共享的 terraform 堆栈,我能够重现该问题。

The issue was, The ec2 instance was unhealthy and the autoscaling group was continuously terminating the instance and launch a new one.问题是,ec2 实例运行状况不佳,自动缩放组不断终止实例并启动新实例。

The solution was to remove the following configuration. I think the `volume_type = "standard"` (magnetic EBS) setting was causing the trouble.解决方案是删除以下配置。我认为 volume_type 设置为 standard 造成了麻烦。

# The block the answer identifies as the culprit: with a magnetic
# ("standard") root volume the instances stayed unhealthy and the ASG kept
# replacing them. Removing this block (or using volume_type = "gp2") fixed it.
root_block_device {
  volume_type = "standard"
  volume_size = 100
  delete_on_termination = true
}

See if you have done the basic steps to prepare the ec2 instance.查看您是否已完成准备 ec2 实例的基本步骤。 You should use an ecs-optimized ami to create the instance and then attach the AmazonEC2ContainerServiceforEC2Role permission to IAM role.您应该使用ecs-optimized ami 创建实例,然后将AmazonEC2ContainerServiceforEC2Role权限附加到 IAM 角色。

Reference:参考:

暂无
暂无

声明:本站的技术帖子网页,遵循CC BY-SA 4.0协议,如果您需要转载,请注明本站网址或者原文地址。任何问题请咨询:yoyou2525@163.com.

相关问题 在集群中未找到容器实例 - No Container Instances were found in your cluster AWS EC2 ECS 在您的集群中未找到任何容器实例 - AWS EC2 ECS No Container Instances were found in your cluster 运行任务时出现 AWS ECS 错误:在您的集群中未找到任何容器实例 - AWS ECS Error when running task: No Container Instances were found in your cluster AWS ECS-EC2 错误:在您的集群中找不到容器实例 - AWS ECS-EC2 ERROR: No Container Instances were found in your cluster Terraform 列出ECS集群容器实例 - Terraform list ECS cluster container instances 部署到 Lightsail 实例失败,因为找不到您的部署组的实例 - The deployment to Lightsail instances failed because no instances were found for your deployment group 用于 codedeploy 的蓝绿色部署失败(部署失败,因为在您的绿色队列中找不到任何实例。(错误代码:NO_INSTANCES)) - Blue green deploy for codedeploy fails ( The deployment failed because no instances were found in your green fleet. (Error code: NO_INSTANCES)) 如何使用Terraform更新AWS ECS集群实例? - How to update AWS ECS cluster instances with Terraform? 使用 AWS CodeDeploy 时,为什么会出现部署失败,因为在您的绿色队列中未找到任何实例? - Why do I get The deployment failed because no instances were found in your green fleet when using AWS CodeDeploy? 在Terraform设置中替换ECS容器实例 - Replace ECS container instances in terraform setup
 
粤ICP备18138465号  © 2020-2024 STACKOOM.COM