
Kubernetes replace deployment fails

I have created a NodeJS script for deploying review apps to Kubernetes for my GitLab repository. To do this, I'm using the Kubernetes NodeJS client.

For the sake of completeness, I have included truncated definitions of the Kubernetes resources.

const k8s = require('@kubernetes/client-node');

const logger = require('../logger');

const {
  CI_COMMIT_REF_NAME,
  CI_ENVIRONMENT_SLUG,
  CI_ENVIRONMENT_URL,
  CI_REGISTRY_IMAGE,
  KUBE_NAMESPACE,
} = process.env;

const { hostname } = new URL(CI_ENVIRONMENT_URL);

const mysqlDeployment = {
  apiVersion: 'apps/v1',
  kind: 'Deployment',
  metadata: {
    name: `${CI_ENVIRONMENT_SLUG}-mysql`,
    labels: {
      app: CI_ENVIRONMENT_SLUG,
      tier: 'mysql',
    },
  },
  spec: {
    replicas: 1,
    selector: {
      matchLabels: {
        app: CI_ENVIRONMENT_SLUG,
        tier: 'mysql',
      },
    },
    template: {
      metadata: {
        labels: {
          app: CI_ENVIRONMENT_SLUG,
          tier: 'mysql',
        },
      },
      spec: {
        containers: [
          {
            image: 'mysql:8',
            name: 'mysql',
            ports: [{ containerPort: 3306 }],
          },
        ],
      },
    },
  },
};

const mysqlService = {
  apiVersion: 'v1',
  kind: 'Service',
  metadata: {
    name: `${CI_ENVIRONMENT_SLUG}-mysql`,
    labels: {
      app: CI_ENVIRONMENT_SLUG,
      tier: 'mysql',
    },
  },
  spec: {
    ports: [{ port: 3306 }],
    selector: {
      app: CI_ENVIRONMENT_SLUG,
      tier: 'mysql',
    },
    clusterIP: 'None',
  },
};

const appDeployment = {
  apiVersion: 'apps/v1',
  kind: 'Deployment',
  metadata: {
    name: `${CI_ENVIRONMENT_SLUG}-frontend`,
    labels: {
      app: CI_ENVIRONMENT_SLUG,
      tier: 'frontend',
    },
  },
  spec: {
    replicas: 1,
    selector: {
      matchLabels: {
        app: CI_ENVIRONMENT_SLUG,
        tier: 'frontend',
      },
    },
    template: {
      metadata: {
        labels: {
          app: CI_ENVIRONMENT_SLUG,
          tier: 'frontend',
        },
      },
      spec: {
        containers: [
          {
            image: `${CI_REGISTRY_IMAGE}:${CI_COMMIT_REF_NAME}`,
            imagePullPolicy: 'Always',
            name: 'app',
            ports: [{ containerPort: 9999 }],
          },
        ],
        imagePullSecrets: [{ name: 'registry.gitlab.com' }],
      },
    },
  },
};

const appService = {
  apiVersion: 'v1',
  kind: 'Service',
  metadata: {
    name: `${CI_ENVIRONMENT_SLUG}-frontend`,
    labels: {
      app: CI_ENVIRONMENT_SLUG,
      tier: 'frontend',
    },
  },
  spec: {
    ports: [{ port: 9999 }],
    selector: {
      app: CI_ENVIRONMENT_SLUG,
      tier: 'frontend',
    },
    clusterIP: 'None',
  },
};

const ingress = {
  apiVersion: 'extensions/v1beta1',
  kind: 'Ingress',
  metadata: {
    name: `${CI_ENVIRONMENT_SLUG}-ingress`,
    labels: {
      app: CI_ENVIRONMENT_SLUG,
    },
    annotations: {
      'certmanager.k8s.io/cluster-issuer': 'letsencrypt-prod',
      'kubernetes.io/ingress.class': 'nginx',
      'nginx.ingress.kubernetes.io/proxy-body-size': '50m',
    },
  },
  spec: {
    tls: [
      {
        hosts: [hostname],
        secretName: `${CI_ENVIRONMENT_SLUG}-prod`,
      },
    ],
    rules: [
      {
        host: hostname,
        http: {
          paths: [
            {
              path: '/',
              backend: {
                serviceName: `${CI_ENVIRONMENT_SLUG}-frontend`,
                servicePort: 9999,
              },
            },
          ],
        },
      },
    ],
  },
};

I use the following functions to deploy these resources to Kubernetes.

async function noConflict(resource, create, replace) {
  const { kind } = resource;
  const { name } = resource.metadata;
  try {
    logger.info(`Creating ${kind.toLowerCase()}: ${name}`);
    await create(KUBE_NAMESPACE, resource);
    logger.info(`Created ${kind.toLowerCase()}: ${name}`);
  } catch (err) {
    if (err.response.statusCode !== 409) {
      throw err;
    }
    logger.warn(`${kind} ${name} already exists… Replacing instead.`);
    await replace(name, KUBE_NAMESPACE, resource);
    logger.info(`Replaced ${kind.toLowerCase()}: ${name}`);
  }
}

async function deploy() {
  const kc = new k8s.KubeConfig();
  kc.loadFromDefault();
  const apps = kc.makeApiClient(k8s.Apps_v1Api);
  const beta = kc.makeApiClient(k8s.Extensions_v1beta1Api);
  const core = kc.makeApiClient(k8s.Core_v1Api);

  await noConflict(
    mysqlDeployment,
    apps.createNamespacedDeployment.bind(apps),
    apps.replaceNamespacedDeployment.bind(apps),
  );
  await noConflict(
    mysqlService,
    core.createNamespacedService.bind(core),
    core.replaceNamespacedService.bind(core),
  );
  await noConflict(
    appDeployment,
    apps.createNamespacedDeployment.bind(apps),
    apps.replaceNamespacedDeployment.bind(apps),
  );
  await noConflict(
    appService,
    core.createNamespacedService.bind(core),
    core.replaceNamespacedService.bind(core),
  );
  await noConflict(
    ingress,
    beta.createNamespacedIngress.bind(beta),
    beta.replaceNamespacedIngress.bind(beta),
  );
}
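
For reference, the script presumably finishes by invoking deploy and failing the CI job when anything rejects. A minimal sketch follows; the actual entry point is not shown in the question, so the error-handling shape here is an assumption.

// A sketch of a possible entry point (not part of the original script):
// run the deployment and fail the CI job if any Kubernetes API call rejects.
deploy().catch((err) => {
  // Client errors usually carry the API response body; fall back to the error itself.
  logger.error(err.response ? err.response.body : err);
  process.exit(1);
});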

The initial deployment goes fine, but the replacement of the mysql service fails with the following HTTP response body.

{ kind: 'Status',
  apiVersion: 'v1',
  metadata: {},
  status: 'Failure',
  message:
   'Service "review-fix-kubern-8a4yh2-mysql" is invalid: metadata.resourceVersion: Invalid value: "": must be specified for an update',
  reason: 'Invalid',
  details:
   { name: 'review-fix-kubern-8a4yh2-mysql',
     kind: 'Service',
     causes: [Array] },
  code: 422 }

I have tried modifying noConflict to fetch the current version of the resource and use its resourceVersion in the replacement.

async function noConflict(resource, create, get, replace) {
  const { kind, metadata } = resource;
  const { name } = resource.metadata;
  try {
    logger.info(`Creating ${kind.toLowerCase()}: ${name}`);
    await create(KUBE_NAMESPACE, resource);
    logger.info(`Created ${kind.toLowerCase()}: ${name}`);
  } catch (err) {
    if (err.response.statusCode !== 409) {
      throw err;
    }
    logger.warn(`${kind} ${name} already exists… Replacing instead.`);
    const {
      body: {
        metadata: { resourceVersion },
      },
    } = await get(name, KUBE_NAMESPACE);
    const body = {
      ...resource,
      metadata: {
        ...metadata,
        resourceVersion,
      },
    };
    await replace(name, KUBE_NAMESPACE, body);
    logger.info(`Replaced ${kind.toLowerCase()}: ${name}`);
  }
}

However, this gives me another error.

{ kind: 'Status',
  apiVersion: 'v1',
  metadata: {},
  status: 'Failure',
  message:
   'Service "review-prevent-ku-md2ghh-frontend" is invalid: spec.clusterIP: Invalid value: "": field is immutable',
  reason: 'Invalid',
  details:
   { name: 'review-prevent-ku-md2ghh-frontend',
     kind: 'Service',
     causes: [Array] },
  code: 422 }

What should I do to replace the running resources?

Whether or not the database stays up is a minor detail.

Update

To address the comment by LouisBaumann:

I have changed my code to the following, where read is the respective read call for each resource.

async function noConflict(resource, create, read, replace) {
  const { kind } = resource;
  const { name } = resource.metadata;
  try {
    logger.info(`Creating ${kind.toLowerCase()}: ${name}`);
    await create(KUBE_NAMESPACE, resource);
    logger.info(`Created ${kind.toLowerCase()}: ${name}`);
  } catch (err) {
    if (err.response.statusCode !== 409) {
      throw err;
    }
    logger.warn(`${kind} ${name} already exists… Replacing instead.`);
    const { body: existing } = await read(name, KUBE_NAMESPACE);
    await replace(name, KUBE_NAMESPACE, merge(existing, resource));
    logger.info(`Replaced ${kind.toLowerCase()}: ${name}`);
  }
}

The above doesn't crash, but it doesn't update the review environment either.

Update

To address the answer by Crou:

I have swapped the replace calls for patch calls, so the noConflict function becomes:

async function noConflict(resource, create, patch) {
  const { kind } = resource;
  const { name } = resource.metadata;
  try {
    logger.info(`Creating ${kind.toLowerCase()}: ${name}`);
    await create(KUBE_NAMESPACE, resource);
    logger.info(`Created ${kind.toLowerCase()}: ${name}`);
  } catch (err) {
    if (err.response.statusCode !== 409) {
      throw err;
    }
    logger.warn(`${kind} ${name} already exists… Patching instead.`);
    await patch(name, KUBE_NAMESPACE, resource);
    logger.info(`Patched ${kind.toLowerCase()}: ${name}`);
  }
}

I also changed the noConflict calls to pass the patch versions instead of the replace functions.

await noConflict(
  mysqlDeployment,
  apps.createNamespacedDeployment.bind(apps),
  apps.patchNamespacedDeployment.bind(apps),
);
// etc

This resulted in the following error:

{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "415: Unsupported Media Type",
  "reason": "UnsupportedMediaType",
  "details": {},
  "code": 415
}

From what I understand, you are using replace incorrectly.

Replace a resource by filename or stdin.

JSON and YAML formats are accepted. If replacing an existing resource, the complete resource spec must be provided. This can be obtained by

$ kubectl get TYPE NAME -o yaml

If you do a replace without first getting the YAML from Kubernetes, you are missing resourceVersion. This is why you get the error:

Service "review-fix-kubern-8a4yh2-mysql" is invalid: metadata.resourceVersion: Invalid value: "": must be specified for an update

You should use patch or apply if you are replacing just parts of the Deployment.
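
A minimal sketch of what such a patch call might look like with an explicit merge-patch content type, which also avoids the 415 Unsupported Media Type error shown above. The options object with custom headers is the last parameter of the patch methods; the number of optional arguments before it depends on the @kubernetes/client-node version, so treat the call below as an illustration rather than an exact signature.

// A sketch (not from the original answer): patch the frontend deployment,
// sending a JSON merge patch instead of the client's default content type.
const patchOptions = {
  headers: { 'Content-Type': 'application/merge-patch+json' },
};

await apps.patchNamespacedDeployment(
  `${CI_ENVIRONMENT_SLUG}-frontend`,
  KUBE_NAMESPACE,
  appDeployment,
  undefined, // pretty
  undefined, // dryRun
  undefined, // fieldManager
  undefined, // force
  patchOptions,
);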

I've been through the same issue as you today. What I wanted to mimic using the node client was the kubectl apply -f command; luckily I found a TypeScript snippet here. Hope it helps out!

So I ended up using this in JavaScript:

const kubernetesConfig = new k8s.KubeConfig()
kubernetesConfig.loadFromFile("kube-config.yaml")
this.k8sClient = k8s.KubernetesObjectApi.makeApiClient(kubernetesConfig)

and created an apply function:

async apply(spec) {
  try {
    // try to get the resource, if it does not exist an error will be thrown and we will end up in the catch
    // block.
    await this.k8sClient.read(spec)
    // we got the resource, so it exists, so patch it
    await this.k8sClient.patch(spec)
  } catch (e) {
    // we did not get the resource, so it does not exist, so create it
    await this.k8sClient.create(spec)
  }
},
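
Usage could then look something like the following sketch, assuming apply is reachable as a plain function (in the snippet above it is written as an object method) and that each manifest from the question carries its target namespace in metadata, which is where KubernetesObjectApi looks it up.

// A sketch (assumptions noted above): apply every manifest from the question in order.
const resources = [mysqlDeployment, mysqlService, appDeployment, appService, ingress];

for (const spec of resources) {
  // KubernetesObjectApi routes the request based on the namespace in the manifest itself.
  spec.metadata.namespace = KUBE_NAMESPACE;
  await apply(spec);
}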
